Diffstat (limited to 'target/arm/solidrun-imx6/patches/3.14.43/solidrun.patch')
-rw-r--r--  target/arm/solidrun-imx6/patches/3.14.43/solidrun.patch | 324878
1 file changed, 324878 insertions(+), 0 deletions(-)
diff --git a/target/arm/solidrun-imx6/patches/3.14.43/solidrun.patch b/target/arm/solidrun-imx6/patches/3.14.43/solidrun.patch
new file mode 100644
index 000000000..871a870e1
--- /dev/null
+++ b/target/arm/solidrun-imx6/patches/3.14.43/solidrun.patch
@@ -0,0 +1,324878 @@
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/clcd-panels.dtsi linux-3.14.40/arch/arm/boot/dts/clcd-panels.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/clcd-panels.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/clcd-panels.dtsi 2015-05-01 14:57:57.319427001 -0500
+@@ -0,0 +1,52 @@
++/*
++ * ARM Ltd. Versatile Express
++ *
++ */
++
++/ {
++ panels {
++ panel@0 {
++ compatible = "panel";
++ mode = "VGA";
++ refresh = <60>;
++ xres = <640>;
++ yres = <480>;
++ pixclock = <39721>;
++ left_margin = <40>;
++ right_margin = <24>;
++ upper_margin = <32>;
++ lower_margin = <11>;
++ hsync_len = <96>;
++ vsync_len = <2>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++
++ panel@1 {
++ compatible = "panel";
++ mode = "XVGA";
++ refresh = <60>;
++ xres = <1024>;
++ yres = <768>;
++ pixclock = <15748>;
++ left_margin = <152>;
++ right_margin = <48>;
++ upper_margin = <23>;
++ lower_margin = <3>;
++ hsync_len = <104>;
++ vsync_len = <4>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++ };
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/efm32gg-dk3750.dts linux-3.14.40/arch/arm/boot/dts/efm32gg-dk3750.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/efm32gg-dk3750.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/efm32gg-dk3750.dts 2015-05-01 14:57:57.335427001 -0500
+@@ -26,7 +26,7 @@
+ };
+
+ i2c@4000a000 {
+- location = <3>;
++ efm32,location = <3>;
+ status = "ok";
+
+ temp@48 {
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx23.dtsi linux-3.14.40/arch/arm/boot/dts/imx23.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx23.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx23.dtsi 2015-05-01 14:57:57.351427001 -0500
+@@ -363,7 +363,8 @@
+ compatible = "fsl,imx23-lcdif";
+ reg = <0x80030000 2000>;
+ interrupts = <46 45>;
+- clocks = <&clks 38>;
++ clocks = <&clks 38>, <&clks 38>;
++ clock-names = "pix", "axi";
+ status = "disabled";
+ };
+
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx25.dtsi linux-3.14.40/arch/arm/boot/dts/imx25.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx25.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx25.dtsi 2015-05-01 14:57:57.359427001 -0500
+@@ -13,6 +13,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -56,6 +57,7 @@
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx25-karo-tx25.dts linux-3.14.40/arch/arm/boot/dts/imx25-karo-tx25.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx25-karo-tx25.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx25-karo-tx25.dts 2015-05-01 14:57:57.363427001 -0500
+@@ -16,6 +16,10 @@
+ model = "Ka-Ro TX25";
+ compatible = "karo,imx25-tx25", "fsl,imx25";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ memory {
+ reg = <0x80000000 0x02000000 0x90000000 0x02000000>;
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx27-apf27.dts linux-3.14.40/arch/arm/boot/dts/imx27-apf27.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx27-apf27.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx27-apf27.dts 2015-05-01 14:57:57.379427001 -0500
+@@ -29,6 +29,7 @@
+
+ osc26m {
+ compatible = "fsl,imx-osc26m", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx27.dtsi linux-3.14.40/arch/arm/boot/dts/imx27.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx27.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx27.dtsi 2015-05-01 14:57:57.379427001 -0500
+@@ -13,6 +13,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -46,6 +47,7 @@
+
+ osc26m {
+ compatible = "fsl,imx-osc26m", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts linux-3.14.40/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts 2015-05-01 14:57:57.411427001 -0500
+@@ -15,6 +15,10 @@
+ model = "Phytec pca100 rapid development kit";
+ compatible = "phytec,imx27-pca100-rdk", "phytec,imx27-pca100", "fsl,imx27";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ display: display {
+ model = "Primeview-PD050VL1";
+ native-mode = <&timing0>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx28.dtsi linux-3.14.40/arch/arm/boot/dts/imx28.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx28.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx28.dtsi 2015-05-01 14:57:57.411427001 -0500
+@@ -840,7 +840,8 @@
+ compatible = "fsl,imx28-lcdif";
+ reg = <0x80030000 0x2000>;
+ interrupts = <38>;
+- clocks = <&clks 55>;
++ clocks = <&clks 55>, <&clks 55>;
++ clock-names = "pix", "axi";
+ dmas = <&dma_apbh 13>;
+ dma-names = "rx";
+ status = "disabled";
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx51-babbage.dts linux-3.14.40/arch/arm/boot/dts/imx51-babbage.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx51-babbage.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx51-babbage.dts 2015-05-01 14:57:57.415427001 -0500
+@@ -17,6 +17,10 @@
+ model = "Freescale i.MX51 Babbage Board";
+ compatible = "fsl,imx51-babbage", "fsl,imx51";
+
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ memory {
+ reg = <0x90000000 0x20000000>;
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx51.dtsi linux-3.14.40/arch/arm/boot/dts/imx51.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx51.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx51.dtsi 2015-05-01 14:57:57.419427001 -0500
+@@ -15,6 +15,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -43,21 +44,25 @@
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ ckih2 {
+ compatible = "fsl,imx-ckih2", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx53.dtsi linux-3.14.40/arch/arm/boot/dts/imx53.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx53.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx53.dtsi 2015-05-01 14:57:57.427427001 -0500
+@@ -15,6 +15,7 @@
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -59,21 +60,25 @@
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <22579200>;
+ };
+
+ ckih2 {
+ compatible = "fsl,imx-ckih2", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx53-mba53.dts linux-3.14.40/arch/arm/boot/dts/imx53-mba53.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx53-mba53.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx53-mba53.dts 2015-05-01 14:57:57.431427001 -0500
+@@ -25,6 +25,10 @@
+ enable-active-low;
+ };
+
++ chosen {
++ stdout-path = &uart2;
++ };
++
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm2 0 50000>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-dfi-fs700-m60.dts 2015-05-01 14:57:57.431427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __DTS_V1__
++#define __DTS_V1__
++/dts-v1/;
++#endif
++
++#include "imx6dl.dtsi"
++#include "imx6qdl-dfi-fs700-m60.dtsi"
++
++/ {
++ model = "DFI FS700-M60-6DL i.MX6dl Q7 Board";
++ compatible = "dfi,fs700-m60-6dl", "dfi,fs700e-m60", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl.dtsi linux-3.14.40/arch/arm/boot/dts/imx6dl.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl.dtsi 2015-05-01 14:57:57.431427001 -0500
+@@ -8,6 +8,7 @@
+ *
+ */
+
++#include <dt-bindings/interrupt-controller/irq.h>
+ #include "imx6dl-pinfunc.h"
+ #include "imx6qdl.dtsi"
+
+@@ -21,6 +22,26 @@
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
++ operating-points = <
++ /* kHz uV */
++ 996000 1275000
++ 792000 1175000
++ 396000 1075000
++ >;
++ fsl,soc-operating-points = <
++ /* ARM kHz SOC-PU uV */
++ 996000 1175000
++ 792000 1175000
++ 396000 1175000
++ >;
++ clock-latency = <61036>; /* two CLK32 periods */
++ clocks = <&clks 104>, <&clks 6>, <&clks 16>,
++ <&clks 17>, <&clks 170>;
++ clock-names = "arm", "pll2_pfd2_396m", "step",
++ "pll1_sw", "pll1_sys";
++ arm-supply = <&reg_arm>;
++ pu-supply = <&reg_pu>;
++ soc-supply = <&reg_soc>;
+ };
+
+ cpu@1 {
+@@ -32,40 +53,124 @@
+ };
+
+ soc {
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>, <&clks 22> , <&clks 8>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc", "axi_sel", "pll3_pfd1_540m";
++ interrupts = <0 107 0x04>, <0 112 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1";
++ fsl,max_ddr_freq = <400000000>;
++ };
++
++ gpu@00130000 {
++ compatible = "fsl,imx6dl-gpu", "fsl,imx6q-gpu";
++ reg = <0x00130000 0x4000>, <0x00134000 0x4000>,
++ <0x0 0x0>;
++ reg-names = "iobase_3d", "iobase_2d",
++ "phys_baseaddr";
++ interrupts = <0 9 0x04>, <0 10 0x04>;
++ interrupt-names = "irq_3d", "irq_2d";
++ clocks = <&clks 143>, <&clks 27>,
++ <&clks 121>, <&clks 122>,
++ <&clks 0>;
++ clock-names = "gpu2d_axi_clk", "gpu3d_axi_clk",
++ "gpu2d_clk", "gpu3d_clk",
++ "gpu3d_shader_clk";
++ resets = <&src 0>, <&src 3>;
++ reset-names = "gpu3d", "gpu2d";
++ pu-supply = <&reg_pu>;
++ };
++
+ ocram: sram@00900000 {
+ compatible = "mmio-sram";
+ reg = <0x00900000 0x20000>;
+ clocks = <&clks 142>;
+ };
+
++ hdmi_core: hdmi_core@00120000 {
++ compatible = "fsl,imx6dl-hdmi-core";
++ reg = <0x00120000 0x9000>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_video: hdmi_video@020e0000 {
++ compatible = "fsl,imx6dl-hdmi-video";
++ reg = <0x020e0000 0x1000>;
++ reg-names = "hdmi_gpr";
++ interrupts = <0 115 0x04>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_audio: hdmi_audio@00120000 {
++ compatible = "fsl,imx6dl-hdmi-audio";
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ dmas = <&sdma 2 23 0>;
++ dma-names = "tx";
++ status = "disabled";
++ };
++
++ hdmi_cec: hdmi_cec@00120000 {
++ compatible = "fsl,imx6dl-hdmi-cec";
++ interrupts = <0 115 0x04>;
++ status = "disabled";
++ };
++
+ aips1: aips-bus@02000000 {
++ vpu@02040000 {
++ iramsize = <0>;
++ status = "okay";
++ };
++
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6dl-iomuxc";
+ };
+
+ pxp: pxp@020f0000 {
++ compatible = "fsl,imx6dl-pxp-dma";
+ reg = <0x020f0000 0x4000>;
+- interrupts = <0 98 0x04>;
++ interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 133>;
++ clock-names = "pxp-axi";
++ status = "disabled";
+ };
+
+ epdc: epdc@020f4000 {
+ reg = <0x020f4000 0x4000>;
+- interrupts = <0 97 0x04>;
++ interrupts = <0 97 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lcdif: lcdif@020f8000 {
+ reg = <0x020f8000 0x4000>;
+- interrupts = <0 39 0x04>;
++ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ aips2: aips-bus@02100000 {
++ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6dl-mipi-dsi";
++ reg = <0x021e0000 0x4000>;
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks 138>, <&clks 209>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
++ status = "disabled";
++ };
++
+ i2c4: i2c@021f8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "fsl,imx1-i2c";
++ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021f8000 0x4000>;
+- interrupts = <0 35 0x04>;
++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 116>;
+ status = "disabled";
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw51xx.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-gw51xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw51xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-gw51xx.dts 2015-05-01 14:57:57.431427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw51xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW51XX";
++ compatible = "gw,imx6dl-gw51xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw52xx.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-gw52xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw52xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-gw52xx.dts 2015-05-01 14:57:57.431427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw52xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW52XX";
++ compatible = "gw,imx6dl-gw52xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw53xx.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-gw53xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw53xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-gw53xx.dts 2015-05-01 14:57:57.431427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw53xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW53XX";
++ compatible = "gw,imx6dl-gw53xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw54xx.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-gw54xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-gw54xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-gw54xx.dts 2015-05-01 14:57:57.431427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-gw54xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 DualLite GW54XX";
++ compatible = "gw,imx6dl-gw54xx", "gw,ventana", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-hummingboard.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-hummingboard.dts 2015-05-01 14:57:57.435427001 -0500
+@@ -1,163 +1,13 @@
+ /*
+- * Copyright (C) 2013,2014 Russell King
++ * Copyright (C) 2014 Rabeeh Khoury (rabeeh@solid-run.com)
++ * Based on work by Russell King
+ */
+ /dts-v1/;
+
+ #include "imx6dl.dtsi"
+-#include "imx6qdl-microsom.dtsi"
+-#include "imx6qdl-microsom-ar8035.dtsi"
++#include "imx6qdl-hummingboard.dtsi"
+
+ / {
+- model = "SolidRun HummingBoard DL/Solo";
+- compatible = "solidrun,hummingboard", "fsl,imx6dl";
+-
+- ir_recv: ir-receiver {
+- compatible = "gpio-ir-receiver";
+- gpios = <&gpio1 2 1>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_gpio1_2>;
+- };
+-
+- regulators {
+- compatible = "simple-bus";
+-
+- reg_3p3v: 3p3v {
+- compatible = "regulator-fixed";
+- regulator-name = "3P3V";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- reg_usbh1_vbus: usb-h1-vbus {
+- compatible = "regulator-fixed";
+- enable-active-high;
+- gpio = <&gpio1 0 0>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
+- regulator-name = "usb_h1_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- };
+-
+- reg_usbotg_vbus: usb-otg-vbus {
+- compatible = "regulator-fixed";
+- enable-active-high;
+- gpio = <&gpio3 22 0>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
+- regulator-name = "usb_otg_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- };
+- };
+-
+- sound-spdif {
+- compatible = "fsl,imx-audio-spdif";
+- model = "imx-spdif";
+- /* IMX6 doesn't implement this yet */
+- spdif-controller = <&spdif>;
+- spdif-out;
+- };
+-};
+-
+-&can1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_flexcan1>;
+- status = "okay";
+-};
+-
+-&i2c1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
+-
+- /*
+- * Not fitted on Carrier-1 board... yet
+- status = "okay";
+-
+- rtc: pcf8523@68 {
+- compatible = "nxp,pcf8523";
+- reg = <0x68>;
+- };
+- */
+-};
+-
+-&iomuxc {
+- hummingboard {
+- pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CLK__FLEXCAN1_RX 0x80000000
+- MX6QDL_PAD_SD3_CMD__FLEXCAN1_TX 0x80000000
+- >;
+- };
+-
+- pinctrl_hummingboard_gpio1_2: hummingboard-gpio1_2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
+- >;
+- };
+-
+- pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_hummingboard_spdif: hummingboard-spdif {
+- fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
+- };
+-
+- pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
+- fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
+- };
+-
+- pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
+- fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
+- };
+-
+- pinctrl_hummingboard_usdhc2_aux: hummingboard-usdhc2-aux {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
+- >;
+- };
+-
+- pinctrl_hummingboard_usdhc2: hummingboard-usdhc2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+- >;
+- };
+- };
+-};
+-
+-&spdif {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hummingboard_spdif>;
+- status = "okay";
+-};
+-
+-&usbh1 {
+- vbus-supply = <&reg_usbh1_vbus>;
+- status = "okay";
+-};
+-
+-&usbotg {
+- vbus-supply = <&reg_usbotg_vbus>;
+- status = "okay";
+-};
+-
+-&usdhc2 {
+- pinctrl-names = "default";
+- pinctrl-0 = <
+- &pinctrl_hummingboard_usdhc2_aux
+- &pinctrl_hummingboard_usdhc2
+- >;
+- vmmc-supply = <&reg_3p3v>;
+- cd-gpios = <&gpio1 4 0>;
+- status = "okay";
++ model = "SolidRun HummingBoard Solo/DualLite";
++ compatible = "solidrun,hummingboard/dl", "fsl,imx6dl";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-nitrogen6x.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-nitrogen6x.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-nitrogen6x.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-nitrogen6x.dts 2015-05-01 14:57:57.439427001 -0500
+@@ -0,0 +1,21 @@
++/*
++ * Copyright 2013 Boundary Devices, Inc.
++ * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-nitrogen6x.dtsi"
++
++/ {
++ model = "Freescale i.MX6 DualLite Nitrogen6x Board";
++ compatible = "fsl,imx6dl-nitrogen6x", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-phytec-pbab01.dts 2015-05-01 14:57:57.439427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl-phytec-pfla02.dtsi"
++#include "imx6qdl-phytec-pbab01.dtsi"
++
++/ {
++ model = "Phytec phyFLEX-i.MX6 DualLite/Solo Carrier-Board";
++ compatible = "phytec,imx6dl-pbab01", "phytec,imx6dl-pfla02", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi linux-3.14.40/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-phytec-pfla02.dtsi 2015-05-01 14:57:57.439427001 -0500
+@@ -0,0 +1,22 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "imx6dl.dtsi"
++#include "imx6qdl-phytec-pfla02.dtsi"
++
++/ {
++ model = "Phytec phyFLEX-i.MX6 DualLite/Solo";
++ compatible = "phytec,imx6dl-pfla02", "fsl,imx6dl";
++
++ memory {
++ reg = <0x10000000 0x20000000>;
++ };
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-pinfunc.h linux-3.14.40/arch/arm/boot/dts/imx6dl-pinfunc.h
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-pinfunc.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-pinfunc.h 2015-05-01 14:57:57.439427001 -0500
+@@ -755,6 +755,7 @@
+ #define MX6QDL_PAD_GPIO_5__I2C3_SCL 0x230 0x600 0x878 0x6 0x2
+ #define MX6QDL_PAD_GPIO_5__ARM_EVENTI 0x230 0x600 0x000 0x7 0x0
+ #define MX6QDL_PAD_GPIO_6__ESAI_TX_CLK 0x234 0x604 0x840 0x0 0x1
++#define MX6QDL_PAD_GPIO_6__ENET_IRQ 0x234 0x604 0x03c 0x11 0xff000609
+ #define MX6QDL_PAD_GPIO_6__I2C3_SDA 0x234 0x604 0x87c 0x2 0x2
+ #define MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x234 0x604 0x000 0x5 0x0
+ #define MX6QDL_PAD_GPIO_6__SD2_LCTL 0x234 0x604 0x000 0x6 0x0
+@@ -950,6 +951,7 @@
+ #define MX6QDL_PAD_RGMII_TXC__GPIO6_IO19 0x2d8 0x6c0 0x000 0x5 0x0
+ #define MX6QDL_PAD_RGMII_TXC__XTALOSC_REF_CLK_24M 0x2d8 0x6c0 0x000 0x7 0x0
+ #define MX6QDL_PAD_SD1_CLK__SD1_CLK 0x2dc 0x6c4 0x928 0x0 0x1
++#define MX6QDL_PAD_SD1_CLK__OSC32K_32K_OUT 0x2dc 0x6c4 0x000 0x2 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPT_CLKIN 0x2dc 0x6c4 0x000 0x3 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPIO1_IO20 0x2dc 0x6c4 0x000 0x5 0x0
+ #define MX6QDL_PAD_SD1_CMD__SD1_CMD 0x2e0 0x6c8 0x000 0x0 0x0
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabreauto.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-sabreauto.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabreauto.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-sabreauto.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -15,3 +15,16 @@
+ model = "Freescale i.MX6 DualLite/Solo SABRE Automotive Board";
+ compatible = "fsl,imx6dl-sabreauto", "fsl,imx6dl";
+ };
++
++&ldb {
++ ipu_id = <0>;
++ sec_ipu_id = <0>;
++};
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabrelite.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-sabrelite.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabrelite.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-sabrelite.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -0,0 +1,20 @@
++/*
++ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6dl.dtsi"
++#include "imx6qdl-sabrelite.dtsi"
++
++/ {
++ model = "Freescale i.MX6 DualLite SABRE Lite Board";
++ compatible = "fsl,imx6dl-sabrelite", "fsl,imx6dl";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabresd.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-sabresd.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabresd.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-sabresd.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -15,3 +15,20 @@
+ model = "Freescale i.MX6 DualLite SABRE Smart Device Board";
+ compatible = "fsl,imx6dl-sabresd", "fsl,imx6dl";
+ };
++
++&ldb {
++ ipu_id = <0>;
++ sec_ipu_id = <0>;
++};
++
++&pxp {
++ status = "okay";
++};
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts linux-3.14.40/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6dl-sabresd-hdcp.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "imx6dl-sabresd.dts"
++
++&hdmi_video {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_hdcp>;
++ fsl,hdcp;
++};
++
++&i2c2 {
++ status = "disable";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-arm2.dts linux-3.14.40/arch/arm/boot/dts/imx6q-arm2.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-arm2.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-arm2.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -23,14 +23,27 @@
+
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_3p3v: 3p3v {
++ reg_3p3v: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
++
++ reg_usb_otg_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
+ };
+
+ leds {
+@@ -46,7 +59,7 @@
+
+ &gpmi {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_gpmi_nand_1>;
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ status = "disabled"; /* gpmi nand conflicts with SD */
+ };
+
+@@ -54,28 +67,131 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6q-arm2 {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x80000000
+ >;
+ };
+- };
+
+- arm2 {
+- pinctrl_usdhc3_arm2: usdhc3grp-arm2 {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_RX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D28__UART2_DTE_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_cdwp: usdhc3cdwp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x80000000
+ MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x80000000
+ >;
+ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
+ };
+ };
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_2>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
+ status = "okay";
+ };
+
+@@ -84,8 +200,8 @@
+ wp-gpios = <&gpio6 14 0>;
+ vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_1
+- &pinctrl_usdhc3_arm2>;
++ pinctrl-0 = <&pinctrl_usdhc3
++ &pinctrl_usdhc3_cdwp>;
+ status = "okay";
+ };
+
+@@ -93,13 +209,13 @@
+ non-removable;
+ vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc4_1>;
++ pinctrl-0 = <&pinctrl_usdhc4>;
+ status = "okay";
+ };
+
+ &uart2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart2_2>;
++ pinctrl-0 = <&pinctrl_uart2>;
+ fsl,dte-mode;
+ fsl,uart-has-rtscts;
+ status = "okay";
+@@ -107,6 +223,6 @@
+
+ &uart4 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart4_1>;
++ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-arm2-hsic.dts linux-3.14.40/arch/arm/boot/dts/imx6q-arm2-hsic.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-arm2-hsic.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-arm2-hsic.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2013 Freescale Semiconductor, Inc.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "imx6q-arm2.dts"
++
++&fec {
++ status = "disabled";
++};
++
++&usbh2 {
++ pinctrl-names = "idle", "active";
++ pinctrl-0 = <&pinctrl_usbh2_1>;
++ pinctrl-1 = <&pinctrl_usbh2_2>;
++ osc-clkgate-delay = <0x3>;
++ status = "okay";
++};
++
++&usbh3 {
++ pinctrl-names = "idle", "active";
++ pinctrl-0 = <&pinctrl_usbh3_1>;
++ pinctrl-1 = <&pinctrl_usbh3_2>;
++ osc-clkgate-delay = <0x3>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-cm-fx6.dts linux-3.14.40/arch/arm/boot/dts/imx6q-cm-fx6.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-cm-fx6.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-cm-fx6.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -0,0 +1,107 @@
++/*
++ * Copyright 2013 CompuLab Ltd.
++ *
++ * Author: Valentin Raevsky <valentin@compulab.co.il>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++
++/ {
++ model = "CompuLab CM-FX6";
++ compatible = "compulab,cm-fx6", "fsl,imx6q";
++
++ memory {
++ reg = <0x10000000 0x80000000>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ heartbeat-led {
++ label = "Heartbeat";
++ gpios = <&gpio2 31 0>;
++ linux,default-trigger = "heartbeat";
++ };
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&iomuxc {
++ imx6q-cm-fx6 {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++ };
++};
++
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-cubox-i.dts linux-3.14.40/arch/arm/boot/dts/imx6q-cubox-i.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-cubox-i.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-cubox-i.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -13,4 +13,8 @@
+
+ &sata {
+ status = "okay";
++ fsl,transmit-level-mV = <1104>;
++ fsl,transmit-boost-mdB = <0>;
++ fsl,transmit-atten-16ths = <9>;
++ fsl,no-spread-spectrum;
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts linux-3.14.40/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-dfi-fs700-m60.dts 2015-05-01 14:57:57.443427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __DTS_V1__
++#define __DTS_V1__
++/dts-v1/;
++#endif
++
++#include "imx6q.dtsi"
++#include "imx6qdl-dfi-fs700-m60.dtsi"
++
++/ {
++ model = "DFI FS700-M60-6QD i.MX6qd Q7 Board";
++ compatible = "dfi,fs700-m60-6qd", "dfi,fs700e-m60", "fsl,imx6q";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-05-01 14:57:57.443427001 -0500
+@@ -5,11 +5,33 @@
+ #include "imx6qdl-microsom-ar8035.dtsi"
+
+ / {
++ chosen {
++ bootargs = "quiet console=ttymxc0,115200 root=/dev/mmcblk0p2 rw";
++ };
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ };
++
+ ir_recv: ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio3 9 1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_ir>;
++ linux,rc-map-name = "rc-rc6-mce";
++ };
++
++ pwmleds {
++ compatible = "pwm-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
++
++ front {
++ active-low;
++ label = "imx6:red:front";
++ max-brightness = <248>;
++ pwms = <&pwm1 0 50000>;
++ };
+ };
+
+ regulators {
+@@ -49,10 +71,62 @@
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+ model = "imx-spdif";
+- /* IMX6 doesn't implement this yet */
+ spdif-controller = <&spdif>;
+ spdif-out;
+ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_hdmi>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_i2c2>;
++
++ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
+ };
+
+ &i2c3 {
+@@ -69,6 +143,19 @@
+
+ &iomuxc {
+ cubox_i {
++ pinctrl_cubox_i_hdmi: cubox-i-hdmi {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_cubox_i_i2c2: cubox-i-i2c2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
+ pinctrl_cubox_i_i2c3: cubox-i-i2c3 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+@@ -82,16 +169,35 @@
+ >;
+ };
+
++ pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
++ fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
++ };
++
+ pinctrl_cubox_i_spdif: cubox-i-spdif {
+ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
+ };
+
++ pinctrl_cubox_i_usbh1: cubox-i-usbh1 {
++ fsl,pins = <MX6QDL_PAD_GPIO_3__USB_H1_OC 0x1b0b0>;
++ };
++
+ pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
+- fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
++ fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
++ };
++
++ pinctrl_cubox_i_usbotg: cubox-i-usbotg {
++ /*
++ * The Cubox-i pulls ID low, but as it's pointless
++ * leaving it as a pull-up, even if it is just 10uA.
++ */
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ >;
+ };
+
+ pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
+- fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
++ fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
+ };
+
+ pinctrl_cubox_i_usdhc2_aux: cubox-i-usdhc2-aux {
+@@ -111,29 +217,76 @@
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+ >;
+ };
++
++ pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9
++ >;
++ };
++
++ pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9
++ >;
++ };
+ };
+ };
+
+ &spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_spdif>;
++ clocks = <&clks 197>, <&clks 0>,
++ <&clks 197>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>;
++ clock-names = "core", "rxtx0",
++ "rxtx1", "rxtx2",
++ "rxtx3", "rxtx4",
++ "rxtx5", "rxtx6",
++ "rxtx7";
+ status = "okay";
+ };
+
+ &usbh1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_usbh1>;
+ vbus-supply = <&reg_usbh1_vbus>;
+ status = "okay";
+ };
+
+ &usbotg {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_cubox_i_usbotg>;
+ vbus-supply = <&reg_usbotg_vbus>;
+ status = "okay";
+ };
+
+ &usdhc2 {
+- pinctrl-names = "default";
++ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
++ pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>;
++ pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>;
+ vmmc-supply = <&reg_3p3v>;
+ cd-gpios = <&gpio1 4 0>;
++ no-1-8-v;
+ status = "okay";
+ };
++
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi 2015-05-01 14:57:57.443427001 -0500
+@@ -0,0 +1,199 @@
++/ {
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dummy_reg: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "dummy-supply";
++ };
++
++ reg_usb_otg_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ chosen {
++ stdout-path = &uart1;
++ };
++};
++
++&ecspi3 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio4 24 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi3>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "sst,sst25vf040b", "m25p80";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ status = "okay";
++ phy-mode = "rgmii";
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-dfi-fs700-m60 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x80000000
++ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x80000000 /* PMIC irq */
++ MX6QDL_PAD_EIM_D26__GPIO3_IO26 0x80000000 /* MAX11801 irq */
++ MX6QDL_PAD_NANDF_D5__GPIO2_IO05 0x000030b0 /* Backlight enable */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x80000000 /* card detect */
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_ecspi3: ecspi3grp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
++ MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
++ MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
++ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
++ >;
++ };
++ };
++};
++
++&i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ dr_mode = "host";
++ status = "okay";
++};
++
++&usdhc2 { /* module slot */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ cd-gpios = <&gpio2 2 0>;
++ status = "okay";
++};
++
++&usdhc3 { /* baseboard slot */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++};
++
++&usdhc4 { /* eMMC */
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <8>;
++ non-removable;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -10,10 +10,16 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
+ #include "skeleton.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+
+ / {
+ aliases {
++ ethernet0 = &fec;
++ can0 = &can1;
++ can1 = &can2;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -24,6 +30,11 @@
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
++ ipu0 = &ipu1;
++ mmc0 = &usdhc1;
++ mmc1 = &usdhc2;
++ mmc2 = &usdhc3;
++ mmc3 = &usdhc4;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+@@ -33,13 +44,13 @@
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ spi3 = &ecspi4;
++ usbphy0 = &usbphy1;
++ usbphy1 = &usbphy2;
+ };
+
+ intc: interrupt-controller@00a01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+- #address-cells = <1>;
+- #size-cells = <1>;
+ interrupt-controller;
+ reg = <0x00a01000 0x1000>,
+ <0x00a00100 0x100>;
+@@ -51,20 +62,27 @@
+
+ ckil {
+ compatible = "fsl,imx-ckil", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ckih1 {
+ compatible = "fsl,imx-ckih1", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+
++ pu_dummy: pudummy_reg {
++ compatible = "fsl,imx6-dummy-pureg"; /* only used in ldo-bypass */
++ };
++
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -75,7 +93,10 @@
+ dma_apbh: dma-apbh@00110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+- interrupts = <0 13 0x04>, <0 13 0x04>, <0 13 0x04>, <0 13 0x04>;
++ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
++ <0 13 IRQ_TYPE_LEVEL_HIGH>,
++ <0 13 IRQ_TYPE_LEVEL_HIGH>,
++ <0 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
+ #dma-cells = <1>;
+ dma-channels = <4>;
+@@ -88,7 +109,7 @@
+ #size-cells = <1>;
+ reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
+ reg-names = "gpmi-nand", "bch";
+- interrupts = <0 15 0x04>;
++ interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "bch";
+ clocks = <&clks 152>, <&clks 153>, <&clks 151>,
+ <&clks 150>, <&clks 149>;
+@@ -109,11 +130,13 @@
+ L2: l2-cache@00a02000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x00a02000 0x1000>;
+- interrupts = <0 92 0x04>;
++ interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
+ cache-unified;
+ cache-level = <2>;
+ arm,tag-latency = <4 2 3>;
+ arm,data-latency = <4 2 3>;
++ arm,dynamic-clk-gating;
++ arm,standby-mode;
+ };
+
+ pcie: pcie@0x01000000 {
+@@ -126,15 +149,22 @@
+ 0x81000000 0 0 0x01f80000 0 0x00010000 /* downstream I/O */
+ 0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
+ num-lanes = <1>;
+- interrupts = <0 123 0x04>;
+- clocks = <&clks 189>, <&clks 187>, <&clks 206>, <&clks 144>;
+- clock-names = "pcie_ref_125m", "sata_ref_100m", "lvds_gate", "pcie_axi";
++ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pme";
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 0x7>;
++ interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 144>, <&clks 221>, <&clks 189>, <&clks 187>;
++ clock-names = "pcie_axi", "lvds_gate", "pcie_ref_125m", "sata_ref_100m";
+ status = "disabled";
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+- interrupts = <0 94 0x04>;
++ interrupts = <0 94 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ aips-bus@02000000 { /* AIPS1 */
+@@ -154,7 +184,7 @@
+ spdif: spdif@02004000 {
+ compatible = "fsl,imx35-spdif";
+ reg = <0x02004000 0x4000>;
+- interrupts = <0 52 0x04>;
++ interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 14 18 0>,
+ <&sdma 15 18 0>;
+ dma-names = "rx", "tx";
+@@ -176,9 +206,11 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02008000 0x4000>;
+- interrupts = <0 31 0x04>;
++ interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 112>, <&clks 112>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 3 7 1>, <&sdma 4 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+@@ -187,9 +219,11 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x0200c000 0x4000>;
+- interrupts = <0 32 0x04>;
++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 113>, <&clks 113>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 5 7 1>, <&sdma 6 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+@@ -198,9 +232,11 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02010000 0x4000>;
+- interrupts = <0 33 0x04>;
++ interrupts = <0 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 114>, <&clks 114>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 7 7 1>, <&sdma 8 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+@@ -209,16 +245,18 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02014000 0x4000>;
+- interrupts = <0 34 0x04>;
++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 115>, <&clks 115>;
+ clock-names = "ipg", "per";
++ dmas = <&sdma 9 7 1>, <&sdma 10 7 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart1: serial@02020000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02020000 0x4000>;
+- interrupts = <0 26 0x04>;
++ interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 25 4 0>, <&sdma 26 4 0>;
+@@ -227,15 +265,23 @@
+ };
+
+ esai: esai@02024000 {
++ compatible = "fsl,imx6q-esai";
+ reg = <0x02024000 0x4000>;
+- interrupts = <0 51 0x04>;
++ interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 118>;
++ fsl,esai-dma-events = <24 23>;
++ fsl,flags = <1>;
++ status = "disabled";
+ };
+
+ ssi1: ssi@02028000 {
+- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6q-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02028000 0x4000>;
+- interrupts = <0 46 0x04>;
+- clocks = <&clks 178>;
++ interrupts = <0 46 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 178>, <&clks 157>;
++ clock-names = "ipg", "baud";
+ dmas = <&sdma 37 1 0>,
+ <&sdma 38 1 0>;
+ dma-names = "rx", "tx";
+@@ -245,10 +291,13 @@
+ };
+
+ ssi2: ssi@0202c000 {
+- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6q-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x0202c000 0x4000>;
+- interrupts = <0 47 0x04>;
+- clocks = <&clks 179>;
++ interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 179>, <&clks 158>;
++ clock-names = "ipg", "baud";
+ dmas = <&sdma 41 1 0>,
+ <&sdma 42 1 0>;
+ dma-names = "rx", "tx";
+@@ -258,10 +307,13 @@
+ };
+
+ ssi3: ssi@02030000 {
+- compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6q-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02030000 0x4000>;
+- interrupts = <0 48 0x04>;
+- clocks = <&clks 180>;
++ interrupts = <0 48 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 180>, <&clks 159>;
++ clock-names = "ipg", "baud";
+ dmas = <&sdma 45 1 0>,
+ <&sdma 46 1 0>;
+ dma-names = "rx", "tx";
+@@ -271,8 +323,25 @@
+ };
+
+ asrc: asrc@02034000 {
++ compatible = "fsl,imx53-asrc";
+ reg = <0x02034000 0x4000>;
+- interrupts = <0 50 0x04>;
++ interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 107>, <&clks 156>;
++ clock-names = "core", "dma";
++ dmas = <&sdma 17 20 1>, <&sdma 18 20 1>, <&sdma 19 20 1>,
++ <&sdma 20 20 1>, <&sdma 21 20 1>, <&sdma 22 20 1>;
++ dma-names = "rxa", "rxb", "rxc",
++ "txa", "txb", "txc";
++ status = "okay";
++ };
++
++ asrc_p2p: asrc_p2p {
++ compatible = "fsl,imx6q-asrc-p2p";
++ fsl,output-rate = <48000>;
++ fsl,output-width = <16>;
++ fsl,asrc-dma-rx-events = <17 18 19>;
++ fsl,asrc-dma-tx-events = <20 21 22>;
++ status = "okay";
+ };
+
+ spba@0203c000 {
+@@ -281,8 +350,19 @@
+ };
+
+ vpu: vpu@02040000 {
++ compatible = "fsl,imx6-vpu";
+ reg = <0x02040000 0x3c000>;
+- interrupts = <0 3 0x04 0 12 0x04>;
++ reg-names = "vpu_regs";
++ interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
++ <0 12 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "vpu_jpu_irq", "vpu_ipi_irq";
++ clocks = <&clks 168>, <&clks 140>, <&clks 142>;
++ clock-names = "vpu_clk", "mmdc_ch0_axi", "ocram";
++ iramsize = <0x21000>;
++ iram = <&ocram>;
++ resets = <&src 1>;
++ pu-supply = <&reg_pu>;
++ status = "disabled";
+ };
+
+ aipstz@0207c000 { /* AIPSTZ1 */
+@@ -293,7 +373,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02080000 0x4000>;
+- interrupts = <0 83 0x04>;
++ interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 145>;
+ clock-names = "ipg", "per";
+ };
+@@ -302,7 +382,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02084000 0x4000>;
+- interrupts = <0 84 0x04>;
++ interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 146>;
+ clock-names = "ipg", "per";
+ };
+@@ -311,7 +391,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x02088000 0x4000>;
+- interrupts = <0 85 0x04>;
++ interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 147>;
+ clock-names = "ipg", "per";
+ };
+@@ -320,7 +400,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+ reg = <0x0208c000 0x4000>;
+- interrupts = <0 86 0x04>;
++ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 62>, <&clks 148>;
+ clock-names = "ipg", "per";
+ };
+@@ -328,23 +408,25 @@
+ can1: flexcan@02090000 {
+ compatible = "fsl,imx6q-flexcan";
+ reg = <0x02090000 0x4000>;
+- interrupts = <0 110 0x04>;
++ interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 108>, <&clks 109>;
+ clock-names = "ipg", "per";
++ status = "disabled";
+ };
+
+ can2: flexcan@02094000 {
+ compatible = "fsl,imx6q-flexcan";
+ reg = <0x02094000 0x4000>;
+- interrupts = <0 111 0x04>;
++ interrupts = <0 111 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 110>, <&clks 111>;
+ clock-names = "ipg", "per";
++ status = "disabled";
+ };
+
+ gpt: gpt@02098000 {
+ compatible = "fsl,imx6q-gpt", "fsl,imx31-gpt";
+ reg = <0x02098000 0x4000>;
+- interrupts = <0 55 0x04>;
++ interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 119>, <&clks 120>;
+ clock-names = "ipg", "per";
+ };
+@@ -352,7 +434,8 @@
+ gpio1: gpio@0209c000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x0209c000 0x4000>;
+- interrupts = <0 66 0x04 0 67 0x04>;
++ interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>,
++ <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -362,7 +445,8 @@
+ gpio2: gpio@020a0000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a0000 0x4000>;
+- interrupts = <0 68 0x04 0 69 0x04>;
++ interrupts = <0 68 IRQ_TYPE_LEVEL_HIGH>,
++ <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -372,7 +456,8 @@
+ gpio3: gpio@020a4000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a4000 0x4000>;
+- interrupts = <0 70 0x04 0 71 0x04>;
++ interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>,
++ <0 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -382,7 +467,8 @@
+ gpio4: gpio@020a8000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020a8000 0x4000>;
+- interrupts = <0 72 0x04 0 73 0x04>;
++ interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>,
++ <0 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -392,7 +478,8 @@
+ gpio5: gpio@020ac000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020ac000 0x4000>;
+- interrupts = <0 74 0x04 0 75 0x04>;
++ interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>,
++ <0 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -402,7 +489,8 @@
+ gpio6: gpio@020b0000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020b0000 0x4000>;
+- interrupts = <0 76 0x04 0 77 0x04>;
++ interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>,
++ <0 77 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -412,7 +500,8 @@
+ gpio7: gpio@020b4000 {
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+ reg = <0x020b4000 0x4000>;
+- interrupts = <0 78 0x04 0 79 0x04>;
++ interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>,
++ <0 79 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -421,20 +510,20 @@
+
+ kpp: kpp@020b8000 {
+ reg = <0x020b8000 0x4000>;
+- interrupts = <0 82 0x04>;
++ interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ wdog1: wdog@020bc000 {
+ compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+ reg = <0x020bc000 0x4000>;
+- interrupts = <0 80 0x04>;
++ interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 0>;
+ };
+
+ wdog2: wdog@020c0000 {
+ compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+ reg = <0x020c0000 0x4000>;
+- interrupts = <0 81 0x04>;
++ interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 0>;
+ status = "disabled";
+ };
+@@ -442,14 +531,17 @@
+ clks: ccm@020c4000 {
+ compatible = "fsl,imx6q-ccm";
+ reg = <0x020c4000 0x4000>;
+- interrupts = <0 87 0x04 0 88 0x04>;
++ interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
++ <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
+
+ anatop: anatop@020c8000 {
+ compatible = "fsl,imx6q-anatop", "syscon", "simple-bus";
+ reg = <0x020c8000 0x1000>;
+- interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
++ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>,
++ <0 54 IRQ_TYPE_LEVEL_HIGH>,
++ <0 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+@@ -495,7 +587,7 @@
+
+ reg_arm: regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+- regulator-name = "cpu";
++ regulator-name = "vddarm";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+@@ -515,7 +607,6 @@
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+- regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+@@ -547,23 +638,38 @@
+
+ tempmon: tempmon {
+ compatible = "fsl,imx6q-tempmon";
+- interrupts = <0 49 0x04>;
++ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,tempmon = <&anatop>;
+ fsl,tempmon-data = <&ocotp>;
++ clocks = <&clks 172>;
+ };
+
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+- interrupts = <0 44 0x04>;
++ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 182>;
++ fsl,anatop = <&anatop>;
+ };
+
+ usbphy2: usbphy@020ca000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020ca000 0x1000>;
+- interrupts = <0 45 0x04>;
++ interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 183>;
++ fsl,anatop = <&anatop>;
++ };
++
++ usbphy_nop1: usbphy_nop1 {
++ compatible = "usb-nop-xceiv";
++ clocks = <&clks 182>;
++ clock-names = "main_clk";
++ };
++
++ usbphy_nop2: usbphy_nop2 {
++ compatible = "usb-nop-xceiv";
++ clocks = <&clks 182>;
++ clock-names = "main_clk";
+ };
+
+ snvs@020cc000 {
+@@ -575,31 +681,39 @@
+ snvs-rtc-lp@34 {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ reg = <0x34 0x58>;
+- interrupts = <0 19 0x04 0 20 0x04>;
++ interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
++ <0 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ epit1: epit@020d0000 { /* EPIT1 */
+ reg = <0x020d0000 0x4000>;
+- interrupts = <0 56 0x04>;
++ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ epit2: epit@020d4000 { /* EPIT2 */
+ reg = <0x020d4000 0x4000>;
+- interrupts = <0 57 0x04>;
++ interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ src: src@020d8000 {
+ compatible = "fsl,imx6q-src", "fsl,imx51-src";
+ reg = <0x020d8000 0x4000>;
+- interrupts = <0 91 0x04 0 96 0x04>;
++ interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>,
++ <0 96 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+ };
+
+ gpc: gpc@020dc000 {
+ compatible = "fsl,imx6q-gpc";
+ reg = <0x020dc000 0x4000>;
+- interrupts = <0 89 0x04 0 90 0x04>;
++ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
++ <0 90 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 122>, <&clks 74>, <&clks 121>,
++ <&clks 26>, <&clks 143>, <&clks 168>, <&clks 62>;
++ clock-names = "gpu3d_core", "gpu3d_shader", "gpu2d_core",
++ "gpu2d_axi", "openvg_axi", "vpu_axi", "ipg";
++ pu-supply = <&reg_pu>;
+ };
+
+ gpr: iomuxc-gpr@020e0000 {
+@@ -610,778 +724,40 @@
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6dl-iomuxc", "fsl,imx6q-iomuxc";
+ reg = <0x020e0000 0x4000>;
+-
+- audmux {
+- pinctrl_audmux_1: audmux-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x80000000
+- MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x80000000
+- MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x80000000
+- MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x80000000
+- >;
+- };
+-
+- pinctrl_audmux_2: audmux-2 {
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x80000000
+- MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x80000000
+- MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x80000000
+- MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x80000000
+- >;
+- };
+-
+- pinctrl_audmux_3: audmux-3 {
+- fsl,pins = <
+- MX6QDL_PAD_DISP0_DAT16__AUD5_TXC 0x80000000
+- MX6QDL_PAD_DISP0_DAT18__AUD5_TXFS 0x80000000
+- MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x80000000
+- >;
+- };
+- };
+-
+- ecspi1 {
+- pinctrl_ecspi1_1: ecspi1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+- MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+- MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+- >;
+- };
+-
+- pinctrl_ecspi1_2: ecspi1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1
+- MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1
+- MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1
+- >;
+- };
+- };
+-
+- ecspi3 {
+- pinctrl_ecspi3_1: ecspi3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
+- MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
+- MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
+- >;
+- };
+- };
+-
+- enet {
+- pinctrl_enet_1: enetgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+- MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+- MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+- >;
+- };
+-
+- pinctrl_enet_2: enetgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
+- MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+- >;
+- };
+-
+- pinctrl_enet_3: enetgrp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+- MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+- MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+- >;
+- };
+- };
+-
+- esai {
+- pinctrl_esai_1: esaigrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RXD0__ESAI_TX_HF_CLK 0x1b030
+- MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
+- MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS 0x1b030
+- MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
+- MX6QDL_PAD_ENET_TXD1__ESAI_TX2_RX3 0x1b030
+- MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1 0x1b030
+- MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0 0x1b030
+- MX6QDL_PAD_NANDF_CS2__ESAI_TX0 0x1b030
+- MX6QDL_PAD_NANDF_CS3__ESAI_TX1 0x1b030
+- >;
+- };
+-
+- pinctrl_esai_2: esaigrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
+- MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS 0x1b030
+- MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
+- MX6QDL_PAD_GPIO_5__ESAI_TX2_RX3 0x1b030
+- MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1 0x1b030
+- MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0 0x1b030
+- MX6QDL_PAD_GPIO_17__ESAI_TX0 0x1b030
+- MX6QDL_PAD_NANDF_CS3__ESAI_TX1 0x1b030
+- MX6QDL_PAD_ENET_MDIO__ESAI_RX_CLK 0x1b030
+- MX6QDL_PAD_GPIO_9__ESAI_RX_FS 0x1b030
+- >;
+- };
+- };
+-
+- flexcan1 {
+- pinctrl_flexcan1_1: flexcan1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
+- MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x80000000
+- >;
+- };
+-
+- pinctrl_flexcan1_2: flexcan1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_7__FLEXCAN1_TX 0x80000000
+- MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
+- >;
+- };
+- };
+-
+- flexcan2 {
+- pinctrl_flexcan2_1: flexcan2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x80000000
+- MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x80000000
+- >;
+- };
+- };
+-
+- gpmi-nand {
+- pinctrl_gpmi_nand_1: gpmi-nand-1 {
+- fsl,pins = <
+- MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
+- MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
+- MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
+- MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
+- MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
+- MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
+- MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
+- MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
+- MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
+- MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
+- MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
+- MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
+- MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
+- MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
+- MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
+- MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
+- MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
+- >;
+- };
+- };
+-
+- hdmi_hdcp {
+- pinctrl_hdmi_hdcp_1: hdmihdcpgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL3__HDMI_TX_DDC_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_hdmi_hdcp_2: hdmihdcpgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__HDMI_TX_DDC_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D16__HDMI_TX_DDC_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_hdmi_hdcp_3: hdmihdcpgrp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__HDMI_TX_DDC_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
+- >;
+- };
+- };
+-
+- hdmi_cec {
+- pinctrl_hdmi_cec_1: hdmicecgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1f8b0
+- >;
+- };
+-
+- pinctrl_hdmi_cec_2: hdmicecgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
+- >;
+- };
+- };
+-
+- i2c1 {
+- pinctrl_i2c1_1: i2c1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c1_2: i2c1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
+- MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
+- >;
+- };
+- };
+-
+- i2c2 {
+- pinctrl_i2c2_1: i2c2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c2_2: i2c2grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c2_3: i2c2grp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
+- MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+- >;
+- };
+- };
+-
+- i2c3 {
+- pinctrl_i2c3_1: i2c3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c3_2: i2c3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c3_3: i2c3grp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
+- >;
+- };
+-
+- pinctrl_i2c3_4: i2c3grp-4 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+- MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+- >;
+- };
+- };
+-
+- ipu1 {
+- pinctrl_ipu1_1: ipu1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+- MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
+- MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
+- MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
+- MX6QDL_PAD_DI0_PIN4__IPU1_DI0_PIN04 0x80000000
+- MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
+- MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
+- MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
+- MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
+- MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
+- MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
+- MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
+- MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
+- MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
+- MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
+- MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
+- MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
+- MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
+- MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
+- MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
+- MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
+- MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
+- MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
+- MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
+- MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
+- MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
+- MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
+- MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
+- MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
+- >;
+- };
+-
+- pinctrl_ipu1_2: ipu1grp-2 { /* parallel camera */
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x80000000
+- MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x80000000
+- MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x80000000
+- MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x80000000
+- MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x80000000
+- MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x80000000
+- MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x80000000
+- MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x80000000
+- MX6QDL_PAD_CSI0_DATA_EN__IPU1_CSI0_DATA_EN 0x80000000
+- MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x80000000
+- MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x80000000
+- MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x80000000
+- >;
+- };
+-
+- pinctrl_ipu1_3: ipu1grp-3 { /* parallel port 16-bit */
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT4__IPU1_CSI0_DATA04 0x80000000
+- MX6QDL_PAD_CSI0_DAT5__IPU1_CSI0_DATA05 0x80000000
+- MX6QDL_PAD_CSI0_DAT6__IPU1_CSI0_DATA06 0x80000000
+- MX6QDL_PAD_CSI0_DAT7__IPU1_CSI0_DATA07 0x80000000
+- MX6QDL_PAD_CSI0_DAT8__IPU1_CSI0_DATA08 0x80000000
+- MX6QDL_PAD_CSI0_DAT9__IPU1_CSI0_DATA09 0x80000000
+- MX6QDL_PAD_CSI0_DAT10__IPU1_CSI0_DATA10 0x80000000
+- MX6QDL_PAD_CSI0_DAT11__IPU1_CSI0_DATA11 0x80000000
+- MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x80000000
+- MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x80000000
+- MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x80000000
+- MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x80000000
+- MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x80000000
+- MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x80000000
+- MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x80000000
+- MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x80000000
+- MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x80000000
+- MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x80000000
+- MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x80000000
+- >;
+- };
+- };
+-
+- mlb {
+- pinctrl_mlb_1: mlbgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_3__MLB_CLK 0x71
+- MX6QDL_PAD_GPIO_6__MLB_SIG 0x71
+- MX6QDL_PAD_GPIO_2__MLB_DATA 0x71
+- >;
+- };
+-
+- pinctrl_mlb_2: mlbgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_TXD1__MLB_CLK 0x71
+- MX6QDL_PAD_GPIO_6__MLB_SIG 0x71
+- MX6QDL_PAD_GPIO_2__MLB_DATA 0x71
+- >;
+- };
+- };
+-
+- pwm0 {
+- pinctrl_pwm0_1: pwm0grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
+- >;
+- };
+- };
+-
+- pwm3 {
+- pinctrl_pwm3_1: pwm3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
+- >;
+- };
+- };
+-
+- spdif {
+- pinctrl_spdif_1: spdifgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL3__SPDIF_IN 0x1b0b0
+- >;
+- };
+-
+- pinctrl_spdif_2: spdifgrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_16__SPDIF_IN 0x1b0b0
+- MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0
+- >;
+- };
+-
+- pinctrl_spdif_3: spdifgrp-3 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RXD0__SPDIF_OUT 0x1b0b0
+- >;
+- };
+- };
+-
+- uart1 {
+- pinctrl_uart1_1: uart1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+- MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+- >;
+- };
+- };
+-
+- uart2 {
+- pinctrl_uart2_1: uart2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
+- >;
+- };
+-
+- pinctrl_uart2_2: uart2grp-2 { /* DTE mode */
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D26__UART2_RX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D27__UART2_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D28__UART2_DTE_CTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x1b0b1
+- >;
+- };
+- };
+-
+- uart3 {
+- pinctrl_uart3_1: uart3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_CLK__UART3_RX_DATA 0x1b0b1
+- MX6QDL_PAD_SD4_CMD__UART3_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
+- >;
+- };
+-
+- pinctrl_uart3_2: uart3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+- MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
+- MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
+- >;
+- };
+- };
+-
+- uart4 {
+- pinctrl_uart4_1: uart4grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+- MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+- >;
+- };
+- };
+-
+- usbotg {
+- pinctrl_usbotg_1: usbotggrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg_2: usbotggrp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+- >;
+- };
+- };
+-
+- usbh2 {
+- pinctrl_usbh2_1: usbh2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_TXC__USB_H2_DATA 0x40013030
+- MX6QDL_PAD_RGMII_TX_CTL__USB_H2_STROBE 0x40013030
+- >;
+- };
+-
+- pinctrl_usbh2_2: usbh2grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_TX_CTL__USB_H2_STROBE 0x40017030
+- >;
+- };
+- };
+-
+- usbh3 {
+- pinctrl_usbh3_1: usbh3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_RX_CTL__USB_H3_DATA 0x40013030
+- MX6QDL_PAD_RGMII_RXC__USB_H3_STROBE 0x40013030
+- >;
+- };
+-
+- pinctrl_usbh3_2: usbh3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_RGMII_RXC__USB_H3_STROBE 0x40017030
+- >;
+- };
+- };
+-
+- usdhc1 {
+- pinctrl_usdhc1_1: usdhc1grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+- MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+- MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+- MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+- MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+- MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+- MX6QDL_PAD_NANDF_D0__SD1_DATA4 0x17059
+- MX6QDL_PAD_NANDF_D1__SD1_DATA5 0x17059
+- MX6QDL_PAD_NANDF_D2__SD1_DATA6 0x17059
+- MX6QDL_PAD_NANDF_D3__SD1_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc1_2: usdhc1grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+- MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+- MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+- MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+- MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+- MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+- >;
+- };
+- };
+-
+- usdhc2 {
+- pinctrl_usdhc2_1: usdhc2grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+- MX6QDL_PAD_NANDF_D4__SD2_DATA4 0x17059
+- MX6QDL_PAD_NANDF_D5__SD2_DATA5 0x17059
+- MX6QDL_PAD_NANDF_D6__SD2_DATA6 0x17059
+- MX6QDL_PAD_NANDF_D7__SD2_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc2_2: usdhc2grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+- >;
+- };
+- };
+-
+- usdhc3 {
+- pinctrl_usdhc3_1: usdhc3grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+- MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+- MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+- MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+- MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc3_1_100mhz: usdhc3grp-1-100mhz { /* 100Mhz */
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+- MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
+- MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
+- MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
+- MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
+- >;
+- };
+-
+- pinctrl_usdhc3_1_200mhz: usdhc3grp-1-200mhz { /* 200Mhz */
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+- MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
+- MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
+- MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
+- MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
+- >;
+- };
+-
+- pinctrl_usdhc3_2: usdhc3grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+- MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+- MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+- MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+- MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+- MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+- >;
+- };
+- };
+-
+- usdhc4 {
+- pinctrl_usdhc4_1: usdhc4grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
+- MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
+- MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
+- MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
+- MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
+- MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
+- MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
+- MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
+- MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
+- MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc4_2: usdhc4grp-2 {
+- fsl,pins = <
+- MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
+- MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
+- MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
+- MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
+- MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
+- MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
+- >;
+- };
+- };
+-
+- weim {
+- pinctrl_weim_cs0_1: weim_cs0grp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_CS0__EIM_CS0_B 0xb0b1
+- >;
+- };
+-
+- pinctrl_weim_nor_1: weim_norgrp-1 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_OE__EIM_OE_B 0xb0b1
+- MX6QDL_PAD_EIM_RW__EIM_RW 0xb0b1
+- MX6QDL_PAD_EIM_WAIT__EIM_WAIT_B 0xb060
+- /* data */
+- MX6QDL_PAD_EIM_D16__EIM_DATA16 0x1b0b0
+- MX6QDL_PAD_EIM_D17__EIM_DATA17 0x1b0b0
+- MX6QDL_PAD_EIM_D18__EIM_DATA18 0x1b0b0
+- MX6QDL_PAD_EIM_D19__EIM_DATA19 0x1b0b0
+- MX6QDL_PAD_EIM_D20__EIM_DATA20 0x1b0b0
+- MX6QDL_PAD_EIM_D21__EIM_DATA21 0x1b0b0
+- MX6QDL_PAD_EIM_D22__EIM_DATA22 0x1b0b0
+- MX6QDL_PAD_EIM_D23__EIM_DATA23 0x1b0b0
+- MX6QDL_PAD_EIM_D24__EIM_DATA24 0x1b0b0
+- MX6QDL_PAD_EIM_D25__EIM_DATA25 0x1b0b0
+- MX6QDL_PAD_EIM_D26__EIM_DATA26 0x1b0b0
+- MX6QDL_PAD_EIM_D27__EIM_DATA27 0x1b0b0
+- MX6QDL_PAD_EIM_D28__EIM_DATA28 0x1b0b0
+- MX6QDL_PAD_EIM_D29__EIM_DATA29 0x1b0b0
+- MX6QDL_PAD_EIM_D30__EIM_DATA30 0x1b0b0
+- MX6QDL_PAD_EIM_D31__EIM_DATA31 0x1b0b0
+- /* address */
+- MX6QDL_PAD_EIM_A23__EIM_ADDR23 0xb0b1
+- MX6QDL_PAD_EIM_A22__EIM_ADDR22 0xb0b1
+- MX6QDL_PAD_EIM_A21__EIM_ADDR21 0xb0b1
+- MX6QDL_PAD_EIM_A20__EIM_ADDR20 0xb0b1
+- MX6QDL_PAD_EIM_A19__EIM_ADDR19 0xb0b1
+- MX6QDL_PAD_EIM_A18__EIM_ADDR18 0xb0b1
+- MX6QDL_PAD_EIM_A17__EIM_ADDR17 0xb0b1
+- MX6QDL_PAD_EIM_A16__EIM_ADDR16 0xb0b1
+- MX6QDL_PAD_EIM_DA15__EIM_AD15 0xb0b1
+- MX6QDL_PAD_EIM_DA14__EIM_AD14 0xb0b1
+- MX6QDL_PAD_EIM_DA13__EIM_AD13 0xb0b1
+- MX6QDL_PAD_EIM_DA12__EIM_AD12 0xb0b1
+- MX6QDL_PAD_EIM_DA11__EIM_AD11 0xb0b1
+- MX6QDL_PAD_EIM_DA10__EIM_AD10 0xb0b1
+- MX6QDL_PAD_EIM_DA9__EIM_AD09 0xb0b1
+- MX6QDL_PAD_EIM_DA8__EIM_AD08 0xb0b1
+- MX6QDL_PAD_EIM_DA7__EIM_AD07 0xb0b1
+- MX6QDL_PAD_EIM_DA6__EIM_AD06 0xb0b1
+- MX6QDL_PAD_EIM_DA5__EIM_AD05 0xb0b1
+- MX6QDL_PAD_EIM_DA4__EIM_AD04 0xb0b1
+- MX6QDL_PAD_EIM_DA3__EIM_AD03 0xb0b1
+- MX6QDL_PAD_EIM_DA2__EIM_AD02 0xb0b1
+- MX6QDL_PAD_EIM_DA1__EIM_AD01 0xb0b1
+- MX6QDL_PAD_EIM_DA0__EIM_AD00 0xb0b1
+- >;
+- };
+- };
+ };
+
+ ldb: ldb@020e0008 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+ compatible = "fsl,imx6q-ldb", "fsl,imx53-ldb";
+- gpr = <&gpr>;
++ reg = <0x020e0000 0x4000>;
++ clocks = <&clks 135>, <&clks 136>,
++ <&clks 39>, <&clks 40>,
++ <&clks 41>, <&clks 42>,
++ <&clks 184>, <&clks 185>,
++ <&clks 210>, <&clks 211>,
++ <&clks 212>, <&clks 213>;
++ clock-names = "ldb_di0", "ldb_di1",
++ "ipu1_di0_sel", "ipu1_di1_sel",
++ "ipu2_di0_sel", "ipu2_di1_sel",
++ "di0_div_3_5", "di1_div_3_5",
++ "di0_div_7", "di1_div_7",
++ "di0_div_sel", "di1_div_sel";
+ status = "disabled";
+-
+- lvds-channel@0 {
+- reg = <0>;
+- status = "disabled";
+- };
+-
+- lvds-channel@1 {
+- reg = <1>;
+- status = "disabled";
+- };
+ };
+
+ dcic1: dcic@020e4000 {
+ reg = <0x020e4000 0x4000>;
+- interrupts = <0 124 0x04>;
++ interrupts = <0 124 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dcic2: dcic@020e8000 {
+ reg = <0x020e8000 0x4000>;
+- interrupts = <0 125 0x04>;
++ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdma: sdma@020ec000 {
+ compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
+ reg = <0x020ec000 0x4000>;
+- interrupts = <0 2 0x04>;
++ interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 155>, <&clks 155>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+@@ -1396,9 +772,29 @@
+ reg = <0x02100000 0x100000>;
+ ranges;
+
+- caam@02100000 {
+- reg = <0x02100000 0x40000>;
+- interrupts = <0 105 0x04 0 106 0x04>;
++ crypto: caam@02100000 {
++ compatible = "fsl,sec-v4.0";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x2100000 0x40000>;
++ ranges = <0 0x2100000 0x40000>;
++ interrupt-parent = <&intc>; /* interrupts = <0 92 0x4>; */
++ clocks = <&clks 214>, <&clks 215>, <&clks 216>, <&clks 196>;
++ clock-names = "caam_mem", "caam_aclk", "caam_ipg", "caam_emi_slow";
++
++ sec_jr0: jr0@1000 {
++ compatible = "fsl,sec-v4.0-job-ring";
++ reg = <0x1000 0x1000>;
++ interrupt-parent = <&intc>;
++ interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr1@2000 {
++ compatible = "fsl,sec-v4.0-job-ring";
++ reg = <0x2000 0x1000>;
++ interrupt-parent = <&intc>;
++ interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
++ };
+ };
+
+ aipstz@0217c000 { /* AIPSTZ2 */
+@@ -1408,7 +804,7 @@
+ usbotg: usb@02184000 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184000 0x200>;
+- interrupts = <0 43 0x04>;
++ interrupts = <0 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc 0>;
+@@ -1418,7 +814,7 @@
+ usbh1: usb@02184200 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184200 0x200>;
+- interrupts = <0 40 0x04>;
++ interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbphy = <&usbphy2>;
+ fsl,usbmisc = <&usbmisc 1>;
+@@ -1428,18 +824,24 @@
+ usbh2: usb@02184400 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184400 0x200>;
+- interrupts = <0 41 0x04>;
++ interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbmisc = <&usbmisc 2>;
++ phy_type = "hsic";
++ fsl,usbphy = <&usbphy_nop1>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+ usbh3: usb@02184600 {
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184600 0x200>;
+- interrupts = <0 42 0x04>;
++ interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 162>;
+ fsl,usbmisc = <&usbmisc 3>;
++ phy_type = "hsic";
++ fsl,usbphy = <&usbphy_nop2>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+@@ -1453,7 +855,9 @@
+ fec: ethernet@02188000 {
+ compatible = "fsl,imx6q-fec";
+ reg = <0x02188000 0x4000>;
+- interrupts = <0 118 0x04 0 119 0x04>;
++ interrupts-extended =
++ <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 117>, <&clks 117>, <&clks 190>;
+ clock-names = "ipg", "ahb", "ptp";
+ status = "disabled";
+@@ -1461,13 +865,15 @@
+
+ mlb@0218c000 {
+ reg = <0x0218c000 0x4000>;
+- interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
++ interrupts = <0 53 IRQ_TYPE_LEVEL_HIGH>,
++ <0 117 IRQ_TYPE_LEVEL_HIGH>,
++ <0 126 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ usdhc1: usdhc@02190000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02190000 0x4000>;
+- interrupts = <0 22 0x04>;
++ interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 163>, <&clks 163>, <&clks 163>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1477,7 +883,7 @@
+ usdhc2: usdhc@02194000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02194000 0x4000>;
+- interrupts = <0 23 0x04>;
++ interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 164>, <&clks 164>, <&clks 164>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1487,7 +893,7 @@
+ usdhc3: usdhc@02198000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x02198000 0x4000>;
+- interrupts = <0 24 0x04>;
++ interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 165>, <&clks 165>, <&clks 165>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1497,7 +903,7 @@
+ usdhc4: usdhc@0219c000 {
+ compatible = "fsl,imx6q-usdhc";
+ reg = <0x0219c000 0x4000>;
+- interrupts = <0 25 0x04>;
++ interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 166>, <&clks 166>, <&clks 166>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+@@ -1509,7 +915,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a0000 0x4000>;
+- interrupts = <0 36 0x04>;
++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 125>;
+ status = "disabled";
+ };
+@@ -1519,7 +925,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a4000 0x4000>;
+- interrupts = <0 37 0x04>;
++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 126>;
+ status = "disabled";
+ };
+@@ -1529,7 +935,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+ reg = <0x021a8000 0x4000>;
+- interrupts = <0 38 0x04>;
++ interrupts = <0 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 127>;
+ status = "disabled";
+ };
+@@ -1538,6 +944,11 @@
+ reg = <0x021ac000 0x4000>;
+ };
+
++ mmdc0-1@021b0000 { /* MMDC0-1 */
++ compatible = "fsl,imx6q-mmdc-combine";
++ reg = <0x021b0000 0x8000>;
++ };
++
+ mmdc0: mmdc@021b0000 { /* MMDC0 */
+ compatible = "fsl,imx6q-mmdc";
+ reg = <0x021b0000 0x4000>;
+@@ -1550,23 +961,29 @@
+ weim: weim@021b8000 {
+ compatible = "fsl,imx6q-weim";
+ reg = <0x021b8000 0x4000>;
+- interrupts = <0 14 0x04>;
++ interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 196>;
+ };
+
+- ocotp: ocotp@021bc000 {
+- compatible = "fsl,imx6q-ocotp", "syscon";
++ ocotp: ocotp-ctrl@021bc000 {
++ compatible = "syscon";
+ reg = <0x021bc000 0x4000>;
+ };
+
++ ocotp-fuse@021bc000 {
++ compatible = "fsl,imx6q-ocotp";
++ reg = <0x021bc000 0x4000>;
++ clocks = <&clks 128>;
++ };
++
+ tzasc@021d0000 { /* TZASC1 */
+ reg = <0x021d0000 0x4000>;
+- interrupts = <0 108 0x04>;
++ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ tzasc@021d4000 { /* TZASC2 */
+ reg = <0x021d4000 0x4000>;
+- interrupts = <0 109 0x04>;
++ interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ audmux: audmux@021d8000 {
+@@ -1575,23 +992,32 @@
+ status = "disabled";
+ };
+
+- mipi@021dc000 { /* MIPI-CSI */
++ mipi_csi: mipi_csi@021dc000 {
++ compatible = "fsl,imx6q-mipi-csi2";
+ reg = <0x021dc000 0x4000>;
+- };
+-
+- mipi@021e0000 { /* MIPI-DSI */
+- reg = <0x021e0000 0x4000>;
++ interrupts = <0 100 0x04>, <0 101 0x04>;
++ clocks = <&clks 138>, <&clks 53>, <&clks 204>;
++			/* Note: clks 138 is hsi_tx; however, the dphy_c,
++			 * hsi_tx and pll_refclk share the same clk gate.
++			 * The current clk driver opens/closes that gate
++			 * through hsi_tx as a temporary debug measure.
++			 */
++ clock-names = "dphy_clk", "pixel_clk", "cfg_clk";
++ status = "disabled";
+ };
+
+ vdoa@021e4000 {
++ compatible = "fsl,imx6q-vdoa";
+ reg = <0x021e4000 0x4000>;
+- interrupts = <0 18 0x04>;
++ interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 202>;
++ iram = <&ocram>;
+ };
+
+ uart2: serial@021e8000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021e8000 0x4000>;
+- interrupts = <0 27 0x04>;
++ interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 27 4 0>, <&sdma 28 4 0>;
+@@ -1602,7 +1028,7 @@
+ uart3: serial@021ec000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021ec000 0x4000>;
+- interrupts = <0 28 0x04>;
++ interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 29 4 0>, <&sdma 30 4 0>;
+@@ -1613,7 +1039,7 @@
+ uart4: serial@021f0000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021f0000 0x4000>;
+- interrupts = <0 29 0x04>;
++ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 31 4 0>, <&sdma 32 4 0>;
+@@ -1624,7 +1050,7 @@
+ uart5: serial@021f4000 {
+ compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x021f4000 0x4000>;
+- interrupts = <0 30 0x04>;
++ interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 160>, <&clks 161>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma 33 4 0>, <&sdma 34 4 0>;
+@@ -1634,13 +1060,18 @@
+ };
+
+ ipu1: ipu@02400000 {
+- #crtc-cells = <1>;
+ compatible = "fsl,imx6q-ipu";
+ reg = <0x02400000 0x400000>;
+- interrupts = <0 6 0x4 0 5 0x4>;
+- clocks = <&clks 130>, <&clks 131>, <&clks 132>;
+- clock-names = "bus", "di0", "di1";
++ interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>,
++ <0 5 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 130>, <&clks 131>, <&clks 132>,
++ <&clks 39>, <&clks 40>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
+ resets = <&src 2>;
++ bypass_reset = <0>;
+ };
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,374 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ can0 = &can1;
++ ethernet0 = &fec;
++ led0 = &led0;
++ led1 = &led1;
++ nand = &gpmi;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x20000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_5p0v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "5P0V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pmic: ltc3676@3c {
++ compatible = "ltc,ltc3676";
++ reg = <0x3c>;
++
++ regulators {
++ sw1_reg: ltc3676__sw1 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw2_reg: ltc3676__sw2 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3_reg: ltc3676__sw3 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: ltc3676__sw4 {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo2_reg: ltc3676__ldo2 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo4_reg: ltc3676__ldo4 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw51xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* MEZZ_DIO0 */
++ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* MEZZ_DIO1 */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* PHY Reset */
++ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x80000000 /* PCIE_RST# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 0 0>;
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart3>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,527 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ ethernet0 = &fec;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ nand = &gpmi;
++ ssi0 = &ssi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 - MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x20000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ /* remove this fixed regulator once ltc3676__sw2 driver available */
++ reg_1p8v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "1P8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_5p0v: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "5P0V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@4 {
++ compatible = "regulator-fixed";
++ reg = <4>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pciswitch: pex8609@3f {
++ compatible = "plx,pex8609";
++ reg = <0x3f>;
++ };
++
++ pmic: ltc3676@3c {
++ compatible = "ltc,ltc3676";
++ reg = <0x3c>;
++
++ regulators {
++ sw1_reg: ltc3676__sw1 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw2_reg: ltc3676__sw2 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3_reg: ltc3676__sw3 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: ltc3676__sw4 {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo2_reg: ltc3676__ldo2 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo3_reg: ltc3676__ldo3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ ldo4_reg: ltc3676__ldo4 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: fxos8700@1e {
++ compatible = "fsl,fxos8700";
++ reg = <0x13>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 169>;
++ VDDA-supply = <&reg_1p8v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio7>;
++ interrupts = <12 2>; /* gpio7_12 active low */
++ wakeup-gpios = <&gpio7 12 0>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw52xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* MEZZ_DIO0 */
++ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* MEZZ_DIO1 */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x80000000 /* VIDDEC_PDN# */
++ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* PHY Reset */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE_RST# */
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000 /* GPS_PWDN */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* USB_SEL_PCI */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD2_CMD__GPIO1_IO11 0x80000000 /* LVDS_TCH# */
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* SD3_CD# */
++ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x80000000 /* UART2_EN# */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,572 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++	/* these are used by the bootloader for disabling nodes */
++ aliases {
++ can0 = &can1;
++ ethernet0 = &fec;
++ ethernet1 = &eth1;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ nand = &gpmi;
++ sky2 = &eth1;
++ ssi0 = &ssi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ /* remove when pmic 1p8 regulator available */
++ reg_1p8v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "1P8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@4 {
++ compatible = "regulator-fixed";
++ reg = <4>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&can1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_flexcan1>;
++ status = "okay";
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pciclkgen: si53156@6b {
++ compatible = "sil,si53156";
++ reg = <0x6b>;
++ };
++
++ pciswitch: pex8606@3f {
++ compatible = "plx,pex8606";
++ reg = <0x3f>;
++ };
++
++ pmic: ltc3676@3c {
++ compatible = "ltc,ltc3676";
++ reg = <0x3c>;
++
++ regulators {
++ /* VDD_SOC */
++ sw1_reg: ltc3676__sw1 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_1P8 */
++ sw2_reg: ltc3676__sw2 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_ARM */
++ sw3_reg: ltc3676__sw3 {
++ regulator-min-microvolt = <1175000>;
++ regulator-max-microvolt = <1175000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_DDR */
++ sw4_reg: ltc3676__sw4 {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_2P5 */
++ ldo2_reg: ltc3676__ldo2 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_1P8 */
++ ldo3_reg: ltc3676__ldo3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ /* VDD_HIGH */
++ ldo4_reg: ltc3676__ldo4 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ };
++ };
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: fxos8700@1e {
++ compatible = "fsl,fxos8700";
++ reg = <0x1e>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&reg_1p8v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ hdmiin: adv7611@4c {
++ compatible = "adi,adv7611";
++ reg = <0x4c>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio1>;
++ interrupts = <11 2>; /* gpio1_11 active low */
++ wakeup-gpios = <&gpio1 11 0>;
++ };
++
++ videoout: adv7393@2a {
++ compatible = "adi,adv7393";
++ reg = <0x2a>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw53xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x80000000 /* PCIE6EXP_DIO0 */
++ MX6QDL_PAD_EIM_A20__GPIO2_IO18 0x80000000 /* PCIE6EXP_DIO1 */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000 /* GPS_SHDN */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* CAN_STBY */
++ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x80000000 /* PMIC_IRQ# */
++ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x80000000 /* HUB_RST# */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* PCIE_WDIS# */
++ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x80000000 /* ACCEL_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x80000000 /* USBOTG_OC# */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD2_CMD__GPIO1_IO11 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000 /* SD3_DET# */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_flexcan1: flexcan1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
++ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x80000000
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@1 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++
++ eth1: sky2@8 { /* MAC/PHY on bus 8 */
++ compatible = "marvell,sky2";
++ };
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,599 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++	/* these are used by the bootloader for disabling nodes */
++ aliases {
++ can0 = &can1;
++ ethernet0 = &fec;
++ ethernet1 = &eth1;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ nand = &gpmi;
++ sky2 = &eth1;
++ ssi0 = &ssi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 7 0>; /* 103 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 26 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>; /* AUD4<->sgtl5000 */
++ status = "okay";
++};
++
++&can1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_flexcan1>;
++ status = "okay";
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3950000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ pciswitch: pex8609@3f {
++ compatible = "plx,pex8609";
++ reg = <0x3f>;
++ };
++
++ pciclkgen: si52147@6b {
++ compatible = "sil,si52147";
++ reg = <0x6b>;
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: fxos8700@1e {
++ compatible = "fsl,fxos8700";
++ reg = <0x1e>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&sw4_reg>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ hdmiin: adv7611@4c {
++ compatible = "adi,adv7611";
++ reg = <0x4c>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio7>;
++ interrupts = <12 2>; /* gpio7_12 active low */
++ wakeup-gpios = <&gpio7 12 0>;
++ };
++
++ videoout: adv7393@2a {
++ compatible = "adi,adv7393";
++ reg = <0x2a>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6qdl-gw54xx {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000 /* SPINOR_CS0# */
++ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* CAN_STBY */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x80000000 /* USBHUB_RST# */
++ MX6QDL_PAD_SD1_DAT3__GPIO1_IO21 0x80000000 /* MIPI_DIO */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_flexcan1: flexcan1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x80000000
++ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x80000000
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@1 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++
++ eth1: sky2@8 { /* MAC/PHY on bus 8 */
++ compatible = "marvell,sky2";
++ };
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&ssi2 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,367 @@
++/*
++ * Copyright (C) 2013,2014 Russell King
++ */
++#include "imx6qdl-microsom.dtsi"
++#include "imx6qdl-microsom-ar8035.dtsi"
++
++/ {
++ chosen {
++ bootargs = "quiet console=ttymxc0,115200 root=/dev/mmcblk0p2 rw";
++ };
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ };
++
++ ir_recv: ir-receiver {
++ compatible = "gpio-ir-receiver";
++ gpios = <&gpio3 5 1>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_gpio3_5>;
++ linux,rc-map-name = "rc-rc6-mce";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++
++ reg_3p3v: 3p3v {
++ compatible = "regulator-fixed";
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usbh1_vbus: usb-h1-vbus {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio1 0 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ };
++
++ reg_usbotg_vbus: usb-otg-vbus {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio3 22 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ };
++ };
++
++ sound-sgtl5000 {
++ audio-codec = <&sgtl5000>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ compatible = "fsl,imx-audio-sgtl5000";
++ model = "On-board Codec";
++ mux-ext-port = <5>;
++ mux-int-port = <1>;
++ ssi-controller = <&ssi1>;
++ };
++
++ sound-spdif {
++ compatible = "fsl,imx-audio-spdif";
++ model = "imx-spdif";
++ spdif-controller = <&spdif>;
++ spdif-out;
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++		mode_str = "1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_hdmi>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_i2c2>;
++ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&audmux {
++ status = "okay";
++};
++
++&can1 {
++ pinctrl-names = "default";
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
++ status = "okay";
++
++ /* Pro model */
++ rtc: pcf8523@68 {
++ compatible = "nxp,pcf8523";
++ reg = <0x68>;
++ };
++
++ /* Pro model */
++ sgtl5000: sgtl5000@0a {
++ clocks = <&clks 201>;
++ compatible = "fsl,sgtl5000";
++ pinctrl-0 = <&pinctrl_hummingboard_sgtl5000>;
++ pinctrl-names = "default";
++ reg = <0x0a>;
++ VDDA-supply = <&reg_3p3v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++ hummingboard {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /*
++				 * 26-pin header GPIO description. The pin
++				 * numbering is as follows:
++ * GPIO number | GPIO (bank,num) | PIN number
++ * ------------+-----------------+------------
++ * gpio1 | (1,1) | IO7
++ * gpio73 | (3,9) | IO11
++ * gpio72 | (3,8) | IO12
++ * gpio71 | (3,7) | IO13
++ * gpio70 | (3,6) | IO15
++ * gpio194 | (7,2) | IO16
++ * gpio195 | (7,3) | IO18
++ * gpio67 | (3,3) | IO22
++ *
++				 * Notice the gpioX and GPIO (Y,Z) mapping formula:
++ * X = (Y-1) * 32 + Z
++ */
++ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x400130b1
++ MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x400130b1
++ MX6QDL_PAD_EIM_DA8__GPIO3_IO08 0x400130b1
++ MX6QDL_PAD_EIM_DA7__GPIO3_IO07 0x400130b1
++ MX6QDL_PAD_EIM_DA6__GPIO3_IO06 0x400130b1
++ MX6QDL_PAD_SD3_CMD__GPIO7_IO02 0x400130b1
++ MX6QDL_PAD_SD3_CLK__GPIO7_IO03 0x400130b1
++ MX6QDL_PAD_EIM_DA3__GPIO3_IO03 0x400130b1
++ >;
++ };
++
++ pinctrl_hummingboard_gpio3_5: hummingboard-gpio3_5 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_DA5__GPIO3_IO05 0x80000000
++ >;
++ };
++
++ pinctrl_hummingboard_hdmi: hummingboard-hdmi {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_hummingboard_i2c2: hummingboard-i2c2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_hummingboard_sgtl5000: hummingboard-sgtl5000 {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0 /*brk*/
++ MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0 /*ok*/
++ MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x110b0 /*brk*/
++ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0 /*ok*/
++ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
++ >;
++ };
++
++ pinctrl_hummingboard_spdif: hummingboard-spdif {
++ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
++ };
++
++ pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
++ fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
++ };
++
++ pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
++ /*
++ * Similar to pinctrl_usbotg_2, but we want it
++ * pulled down for a fixed host connection.
++ */
++ fsl,pins = <MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x13059>;
++ };
++
++ pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
++ fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
++ };
++
++ pinctrl_hummingboard_usdhc2_aux: hummingboard-usdhc2-aux {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
++ >;
++ };
++
++ pinctrl_hummingboard_usdhc2: hummingboard-usdhc2 {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
++ >;
++ };
++
++ pinctrl_hummingboard_pcie_reset: hummingboard-pcie-reset {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x80000000
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ };
++};
++
++&spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_spdif>;
++ clocks = <&clks 197>, <&clks 0>,
++ <&clks 197>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>, <&clks 0>,
++ <&clks 0>;
++ clock-names = "core", "rxtx0",
++ "rxtx1", "rxtx2",
++ "rxtx3", "rxtx4",
++ "rxtx5", "rxtx6",
++ "rxtx7";
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&usbh1 {
++ disable-over-current;
++ vbus-supply = <&reg_usbh1_vbus>;
++ status = "okay";
++};
++
++&usbotg {
++ disable-over-current;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
++ vbus-supply = <&reg_usbotg_vbus>;
++ status = "okay";
++};
++
++&usdhc2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <
++ &pinctrl_hummingboard_usdhc2_aux
++ &pinctrl_hummingboard_usdhc2
++ >;
++ vmmc-supply = <&reg_3p3v>;
++ cd-gpios = <&gpio1 4 0>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
++
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <
++ &pinctrl_hummingboard_pcie_reset
++ >;
++ reset-gpio = <&gpio3 4 0>;
++ status = "okay";
++ no-msi;
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&pwm2 {
++ pinctrl-names = "default";
++ status = "okay";
++};
++
++&pwm3 {
++ status = "disabled";
++};
++
++&pwm4 {
++ status = "disabled";
++};
++
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -17,7 +17,7 @@
+ enet {
+ pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 {
+ fsl,pins = <
+- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ /* AR8035 reset */
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x130b0
+@@ -26,25 +26,25 @@
+ /* GPIO16 -> AR8035 25MHz */
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0xc0000000
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x80000000
+- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x0a0b1
+ /* AR8035 pin strapping: IO voltage: pull up */
+- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ /* AR8035 pin strapping: PHYADDR#0: pull down */
+- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x130b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030
+ /* AR8035 pin strapping: PHYADDR#1: pull down */
+- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x130b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030
+ /* AR8035 pin strapping: MODE#1: pull up */
+- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ /* AR8035 pin strapping: MODE#3: pull up */
+- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ /* AR8035 pin strapping: MODE#0: pull down */
+- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x130b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030
+
+ /*
+ * As the RMII pins are also connected to RGMII
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -1,9 +1,69 @@
+ /*
+ * Copyright (C) 2013,2014 Russell King
+ */
++#include <dt-bindings/gpio/gpio.h>
++/ {
++ regulators {
++ compatible = "simple-bus";
++
++ reg_brcm_osc: brcm-osc-reg {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio5 5 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_osc_reg>;
++ regulator-name = "brcm_osc_reg";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ regulator-boot-on;
++ };
++
++ reg_brcm: brcm-reg {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_reg>;
++ regulator-name = "brcm_reg";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ startup-delay-us = <200000>;
++ };
++ };
++};
+
+ &iomuxc {
+ microsom {
++ pinctrl_microsom_brcm_bt: microsom-brcm-bt {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070
++ MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070
++ MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_osc_reg: microsom-brcm-osc-reg {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_reg: microsom-brcm-reg {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_wifi: microsom-brcm-wifi {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0
++ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070
++ MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070
++ MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070
++ >;
++ };
++
+ pinctrl_microsom_uart1: microsom-uart1 {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+@@ -11,12 +71,24 @@
+ >;
+ };
+
+- pinctrl_microsom_usbotg: microsom-usbotg {
+- /*
+- * Similar to pinctrl_usbotg_2, but we want it
+- * pulled down for a fixed host connection.
+- */
+- fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
++ pinctrl_microsom_uart4_1: microsom-uart4 {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_microsom_usdhc1: microsom-usdhc1 {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ >;
+ };
+ };
+ };
+@@ -27,7 +99,23 @@
+ status = "okay";
+ };
+
+-&usbotg {
++/* UART4 - Connected to optional BRCM Wifi/BT/FM */
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4_1>;
++ fsl,uart-has-rtscts;
++ status = "okay";
++};
++
++/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */
++&usdhc1 {
++ card-external-vcc-supply = <&reg_brcm>;
++ card-reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>, <&gpio6 0 GPIO_ACTIVE_LOW>;
++ keep-power-in-suspend;
++ non-removable;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_microsom_usbotg>;
++ pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>;
++ vmmc-supply = <&reg_brcm>;
++ status = "okay";
+ };
++
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,426 @@
++/*
++ * Copyright 2013 Boundary Devices, Inc.
++ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
++/ {
++ chosen {
++ stdout-path = &uart2;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_2p5v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "2P5V";
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_POWER>;
++ gpio-key,wakeup;
++ };
++
++ menu {
++ label = "Menu";
++ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_MENU>;
++ };
++
++ home {
++ label = "Home";
++ gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_HOME>;
++ };
++
++ back {
++ label = "Back";
++ gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_BACK>;
++ };
++
++ volume-up {
++ label = "Volume Up";
++ gpios = <&gpio7 13 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEUP>;
++ };
++
++ volume-down {
++ label = "Volume Down";
++ gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEDOWN>;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-nitrogen6x-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-nitrogen6x-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <3>;
++ };
++
++ backlight_lcd {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++
++ backlight_lvds {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "sst,sst25vf016b";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 27 0>;
++ txen-skew-ps = <0>;
++ txc-skew-ps = <3000>;
++ rxdv-skew-ps = <0>;
++ rxc-skew-ps = <3000>;
++ rxd0-skew-ps = <0>;
++ rxd1-skew-ps = <0>;
++ rxd2-skew-ps = <0>;
++ rxd3-skew-ps = <0>;
++ txd0-skew-ps = <0>;
++ txd1-skew-ps = <0>;
++ txd2-skew-ps = <0>;
++ txd3-skew-ps = <0>;
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&reg_2p5v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-nitrogen6x {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /* SGTL5000 sys_mclk */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x030b0
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1 /* CS */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ /* Phy reset */
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x000b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ /* Power Button */
++ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x1b0b0
++ /* Menu Button */
++ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0
++ /* Home Button */
++ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x1b0b0
++ /* Back Button */
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x1b0b0
++ /* Volume Up Button */
++ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b0b0
++ /* Volume Down Button */
++ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b0
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm3: pwm3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT1__PWM3_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ /* power enable, high active */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x000b0
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 /* CD */
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x1b0b0 /* CD */
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ status = "okay";
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&pwm3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm3>;
++ status = "okay";
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ cd-gpios = <&gpio2 6 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,98 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/ {
++ chosen {
++ linux,stdout-path = &uart4;
++ };
++};
++
++&fec {
++ status = "okay";
++};
++
++&gpmi {
++ status = "okay";
++};
++
++&i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ clock-frequency = <100000>;
++ status = "okay";
++
++ tlv320@18 {
++ compatible = "ti,tlv320aic3x";
++ reg = <0x18>;
++ };
++
++ stmpe@41 {
++ compatible = "st,stmpe811";
++ reg = <0x41>;
++ };
++
++ rtc@51 {
++ compatible = "nxp,rtc8564";
++ reg = <0x51>;
++ };
++
++ adc@64 {
++ compatible = "maxim,max1037";
++ reg = <0x64>;
++ };
++};
++
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ clock-frequency = <100000>;
++ status = "okay";
++};
++
++&uart3 {
++ status = "okay";
++};
++
++&uart4 {
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ status = "okay";
++};
++
++&usdhc2 {
++ status = "okay";
++};
++
++&usdhc3 {
++ status = "okay";
++};
++
++&iomuxc {
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
++ >;
++ };
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi 2015-05-01 14:57:57.447427001 -0500
+@@ -0,0 +1,356 @@
++/*
++ * Copyright 2013 Christian Hemp, Phytec Messtechnik GmbH
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <dt-bindings/gpio/gpio.h>
++
++/ {
++	model = "Phytec phyFLEX-i.MX6 Quad";
++ compatible = "phytec,imx6q-pfla02", "fsl,imx6q";
++
++ memory {
++ reg = <0x10000000 0x80000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_usb_otg_vbus: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio4 15 0>;
++ };
++
++ reg_usb_h1_vbus: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio1 0 0>;
++ };
++ };
++
++ gpio_leds: leds {
++ compatible = "gpio-leds";
++
++ green {
++ label = "phyflex:green";
++ gpios = <&gpio1 30 0>;
++ };
++
++ red {
++ label = "phyflex:red";
++ gpios = <&gpio2 31 0>;
++ };
++ };
++};
++
++&ecspi3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi3>;
++ status = "okay";
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio4 24 0>;
++
++ flash@0 {
++ compatible = "m25p80";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&i2c1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom@50 {
++ compatible = "atmel,24c32";
++ reg = <0x50>;
++ };
++
++ pmic@58 {
++ compatible = "dialog,da9063";
++ reg = <0x58>;
++ interrupt-parent = <&gpio4>;
++ interrupts = <17 0x8>; /* active-low GPIO4_17 */
++
++ regulators {
++ vddcore_reg: bcore1 {
++ regulator-min-microvolt = <730000>;
++ regulator-max-microvolt = <1380000>;
++ regulator-always-on;
++ };
++
++ vddsoc_reg: bcore2 {
++ regulator-min-microvolt = <730000>;
++ regulator-max-microvolt = <1380000>;
++ regulator-always-on;
++ };
++
++ vdd_ddr3_reg: bpro {
++ regulator-min-microvolt = <1500000>;
++ regulator-max-microvolt = <1500000>;
++ regulator-always-on;
++ };
++
++ vdd_3v3_reg: bperi {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vdd_buckmem_reg: bmem {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vdd_eth_reg: bio {
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <1200000>;
++ regulator-always-on;
++ };
++
++ vdd_eth_io_reg: ldo4 {
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ vdd_mx6_snvs_reg: ldo5 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-always-on;
++ };
++
++ vdd_3v3_pmic_io_reg: ldo6 {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vdd_sd0_reg: ldo9 {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vdd_sd1_reg: ldo10 {
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vdd_mx6_high_reg: ldo11 {
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-always-on;
++ };
++ };
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-phytec-pfla02 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
++ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
++ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x80000000 /* PMIC interrupt */
++ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* Green LED */
++ MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* Red LED */
++ >;
++ };
++
++ pinctrl_ecspi3: ecspi3grp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
++ MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
++ MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D30__UART3_RTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbh1: usbh1grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_0__USB_H1_PWR 0x80000000
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_cdwp: usdhc3cdwp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
++ >;
++ };
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
++ status = "disabled";
++};
++
++&gpmi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ nand-on-flash-bbt;
++ status = "disabled";
++};
++
++&uart3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart3>;
++ status = "disabled";
++};
++
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "disabled";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbh1>;
++ status = "disabled";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "disabled";
++};
++
++&usdhc2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ cd-gpios = <&gpio1 4 0>;
++ wp-gpios = <&gpio1 2 0>;
++ status = "disabled";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3
++ &pinctrl_usdhc3_cdwp>;
++ cd-gpios = <&gpio1 27 0>;
++ wp-gpios = <&gpio1 29 0>;
++ status = "disabled";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi 2015-05-01 14:57:57.451427001 -0500
+@@ -10,17 +10,146 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <dt-bindings/gpio/gpio.h>
++
+ / {
++ aliases {
++ mxcfb0 = &mxcfb1;
++ mxcfb1 = &mxcfb2;
++ mxcfb2 = &mxcfb3;
++ mxcfb3 = &mxcfb4;
++ };
++
+ memory {
+ reg = <0x10000000 0x80000000>;
+ };
++
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
++
++ user {
++ label = "debug";
++ gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
++ };
++ };
++
++ sound-spdif {
++ compatible = "fsl,imx-audio-spdif",
++ "fsl,imx-sabreauto-spdif";
++ model = "imx-spdif";
++ spdif-controller = <&spdif>;
++ spdif-in;
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm3 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ status = "okay";
++ };
++
++ max7310_reset: max7310-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <1>;
++ #reset-cells = <0>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb2: fb@1 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb3: fb@2 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "lcd";
++ interface_pix_fmt = "RGB565";
++ mode_str ="CLAA-WVGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb4: fb@3 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm3 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ reg_audio: cs42888_supply {
++ compatible = "regulator-fixed";
++ regulator-name = "cs42888_supply";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++
++ sound-cs42888 {
++ compatible = "fsl,imx6-sabreauto-cs42888",
++ "fsl,imx-audio-cs42888";
++ model = "imx-cs42888";
++ esai-controller = <&esai>;
++ asrc-controller = <&asrc_p2p>;
++ audio-codec = <&codec>;
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ clocks {
++ codec_osc: anaclk2 {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24576000>;
++ };
++ };
+ };
+
+ &ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 0>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_1 &pinctrl_ecspi1_sabreauto>;
++ pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs>;
+ status = "disabled"; /* pin conflict with WEIM NOR */
+
+ flash: m25p80@0 {
+@@ -34,51 +163,481 @@
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_2>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+ &gpmi {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_gpmi_nand_1>;
++ pinctrl-0 = <&pinctrl_gpmi_nand>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio2>;
++ interrupts = <28 2>;
++ wakeup-gpios = <&gpio2 28 0>;
++ };
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ codec: cs42888@048 {
++ compatible = "cirrus,cs42888";
++ reg = <0x048>;
++ clocks = <&codec_osc 0>;
++ clock-names = "codec_osc";
++ VA-supply = <&reg_audio>;
++ VD-supply = <&reg_audio>;
++ VLS-supply = <&reg_audio>;
++ VLC-supply = <&reg_audio>;
++ };
++
++ hdmi: edid@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ status = "okay";
++
++ max7310_a: gpio@30 {
++ compatible = "maxim,max7310";
++ reg = <0x30>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ resets = <&max7310_reset>;
++ };
++
++ max7310_b: gpio@32 {
++ compatible = "maxim,max7310";
++ reg = <0x32>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ max7310_c: gpio@34 {
++ compatible = "maxim,max7310";
++ reg = <0x34>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
+ };
+
+ &iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6qdl-sabreauto {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x80000000
+ MX6QDL_PAD_SD2_DAT2__GPIO1_IO13 0x80000000
++ MX6QDL_PAD_EIM_A24__GPIO5_IO04 0x80000000
++ MX6QDL_PAD_SD2_DAT0__GPIO1_IO15 0x80000000
+ MX6QDL_PAD_GPIO_18__SD3_VSELECT 0x17059
+ >;
+ };
+- };
+
+- ecspi1 {
+- pinctrl_ecspi1_sabreauto: ecspi1-sabreauto {
++ pinctrl_esai1: esai1grp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
++ MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS 0x1b030
++ MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
++ MX6QDL_PAD_GPIO_5__ESAI_TX2_RX3 0x1b030
++ MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1 0x1b030
++ MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0 0x1b030
++ MX6QDL_PAD_GPIO_17__ESAI_TX0 0x1b030
++ MX6QDL_PAD_NANDF_CS3__ESAI_TX1 0x1b030
++ MX6QDL_PAD_ENET_MDIO__ESAI_RX_CLK 0x1b030
++ MX6QDL_PAD_GPIO_9__ESAI_RX_FS 0x1b030
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_ecspi1_cs: ecspi1cs {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000
+ >;
+ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_KEY_COL2__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpio_leds: gpioledsgrp {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15 0x80000000
++ >;
++ };
++
++ pinctrl_gpmi_nand: gpminandgrp {
++ fsl,pins = <
++ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
++ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
++ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
++ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
++ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
++ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
++ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
++ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
++ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
++ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
++ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
++ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
++ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
++ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
++ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
++ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
++ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
++ >;
++ };
++
++ pinctrl_hdmi_cec_2: hdmicecgrp-2 {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_spdif: spdifgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__SPDIF_IN 0x1b0b0
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CLK__UART3_RX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_CMD__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
++ >;
++ };
++
++ pinctrl_weim_cs0: weimcs0grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_CS0__EIM_CS0_B 0xb0b1
++ >;
++ };
++
++ pinctrl_weim_nor: weimnorgrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_OE__EIM_OE_B 0xb0b1
++ MX6QDL_PAD_EIM_RW__EIM_RW 0xb0b1
++ MX6QDL_PAD_EIM_WAIT__EIM_WAIT_B 0xb060
++ MX6QDL_PAD_EIM_D16__EIM_DATA16 0x1b0b0
++ MX6QDL_PAD_EIM_D17__EIM_DATA17 0x1b0b0
++ MX6QDL_PAD_EIM_D18__EIM_DATA18 0x1b0b0
++ MX6QDL_PAD_EIM_D19__EIM_DATA19 0x1b0b0
++ MX6QDL_PAD_EIM_D20__EIM_DATA20 0x1b0b0
++ MX6QDL_PAD_EIM_D21__EIM_DATA21 0x1b0b0
++ MX6QDL_PAD_EIM_D22__EIM_DATA22 0x1b0b0
++ MX6QDL_PAD_EIM_D23__EIM_DATA23 0x1b0b0
++ MX6QDL_PAD_EIM_D24__EIM_DATA24 0x1b0b0
++ MX6QDL_PAD_EIM_D25__EIM_DATA25 0x1b0b0
++ MX6QDL_PAD_EIM_D26__EIM_DATA26 0x1b0b0
++ MX6QDL_PAD_EIM_D27__EIM_DATA27 0x1b0b0
++ MX6QDL_PAD_EIM_D28__EIM_DATA28 0x1b0b0
++ MX6QDL_PAD_EIM_D29__EIM_DATA29 0x1b0b0
++ MX6QDL_PAD_EIM_D30__EIM_DATA30 0x1b0b0
++ MX6QDL_PAD_EIM_D31__EIM_DATA31 0x1b0b0
++ MX6QDL_PAD_EIM_A23__EIM_ADDR23 0xb0b1
++ MX6QDL_PAD_EIM_A22__EIM_ADDR22 0xb0b1
++ MX6QDL_PAD_EIM_A21__EIM_ADDR21 0xb0b1
++ MX6QDL_PAD_EIM_A20__EIM_ADDR20 0xb0b1
++ MX6QDL_PAD_EIM_A19__EIM_ADDR19 0xb0b1
++ MX6QDL_PAD_EIM_A18__EIM_ADDR18 0xb0b1
++ MX6QDL_PAD_EIM_A17__EIM_ADDR17 0xb0b1
++ MX6QDL_PAD_EIM_A16__EIM_ADDR16 0xb0b1
++ MX6QDL_PAD_EIM_DA15__EIM_AD15 0xb0b1
++ MX6QDL_PAD_EIM_DA14__EIM_AD14 0xb0b1
++ MX6QDL_PAD_EIM_DA13__EIM_AD13 0xb0b1
++ MX6QDL_PAD_EIM_DA12__EIM_AD12 0xb0b1
++ MX6QDL_PAD_EIM_DA11__EIM_AD11 0xb0b1
++ MX6QDL_PAD_EIM_DA10__EIM_AD10 0xb0b1
++ MX6QDL_PAD_EIM_DA9__EIM_AD09 0xb0b1
++ MX6QDL_PAD_EIM_DA8__EIM_AD08 0xb0b1
++ MX6QDL_PAD_EIM_DA7__EIM_AD07 0xb0b1
++ MX6QDL_PAD_EIM_DA6__EIM_AD06 0xb0b1
++ MX6QDL_PAD_EIM_DA5__EIM_AD05 0xb0b1
++ MX6QDL_PAD_EIM_DA4__EIM_AD04 0xb0b1
++ MX6QDL_PAD_EIM_DA3__EIM_AD03 0xb0b1
++ MX6QDL_PAD_EIM_DA2__EIM_AD02 0xb0b1
++ MX6QDL_PAD_EIM_DA1__EIM_AD01 0xb0b1
++ MX6QDL_PAD_EIM_DA0__EIM_AD00 0xb0b1
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
+ };
+ };
+
++&pcie {
++ status = "okay";
++};
++
++&pwm3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_spdif>;
++ status = "okay";
++};
++
++&uart3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart3>;
++ pinctrl-assert-gpios = <&max7310_b 4 GPIO_ACTIVE_HIGH>, /* CTS */
++ <&max7310_c 3 GPIO_ACTIVE_HIGH>; /* RXD and TXD */
++ fsl,uart-has-rtscts;
++ status = "okay";
++};
++
+ &uart4 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart4_1>;
++ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc3_1>;
+- pinctrl-1 = <&pinctrl_usdhc3_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc3_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ cd-gpios = <&gpio6 15 0>;
+ wp-gpios = <&gpio1 13 0>;
+ status = "okay";
+@@ -86,7 +645,7 @@
+
+ &weim {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_weim_nor_1 &pinctrl_weim_cs0_1>;
++ pinctrl-0 = <&pinctrl_weim_nor &pinctrl_weim_cs0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x08000000>;
+@@ -102,3 +661,48 @@
+ 0x0000c000 0x1404a38e 0x00000000>;
+ };
+ };
++
++&ldb {
++ ipu_id = <1>;
++ disp_id = <0>;
++ ext_ref = <1>;
++ mode = "sep0";
++ sec_ipu_id = <1>;
++ sec_disp_id = <1>;
++ status = "okay";
++};
++
++&esai {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_esai1>;
++ status = "okay";
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <1>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_cec_2>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi 2015-05-01 14:57:57.451427001 -0500
+@@ -0,0 +1,427 @@
++/*
++ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
++/ {
++ chosen {
++ stdout-path = &uart2;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_2p5v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "2P5V";
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_POWER>;
++ gpio-key,wakeup;
++ };
++
++ menu {
++ label = "Menu";
++ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_MENU>;
++ };
++
++ home {
++ label = "Home";
++ gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_HOME>;
++ };
++
++ back {
++ label = "Back";
++ gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_BACK>;
++ };
++
++ volume-up {
++ label = "Volume Up";
++ gpios = <&gpio7 13 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEUP>;
++ };
++
++ volume-down {
++ label = "Volume Down";
++ gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
++ linux,code = <KEY_VOLUMEDOWN>;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++
++ backlight_lcd {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++
++ backlight_lvds {
++ compatible = "pwm-backlight";
++ pwms = <&pwm4 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ power-supply = <&reg_3p3v>;
++ status = "okay";
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "sst,sst25vf016b";
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
++ txen-skew-ps = <0>;
++ txc-skew-ps = <3000>;
++ rxdv-skew-ps = <0>;
++ rxc-skew-ps = <3000>;
++ rxd0-skew-ps = <0>;
++ rxd1-skew-ps = <0>;
++ rxd2-skew-ps = <0>;
++ rxd3-skew-ps = <0>;
++ txd0-skew-ps = <0>;
++ txd1-skew-ps = <0>;
++ txd2-skew-ps = <0>;
++ txd3-skew-ps = <0>;
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&reg_2p5v>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-sabrelite {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /* SGTL5000 sys_mclk */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x030b0
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1 /* CS */
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x100b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x100b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x100b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x100b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x100b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x100b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x100b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ /* Phy reset */
++ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x000b0
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ /* Power Button */
++ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x1b0b0
++ /* Menu Button */
++ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0
++ /* Home Button */
++ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x1b0b0
++ /* Back Button */
++ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x1b0b0
++ /* Volume Up Button */
++ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b0b0
++ /* Volume Down Button */
++ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b0
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm3: pwm3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT1__PWM3_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_pwm4: pwm4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0
++ /* power enable, high active */
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x000b0
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 /* CD */
++ MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x1f0b0 /* WP */
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x1b0b0 /* CD */
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++
++ lvds-channel@0 {
++ fsl,data-mapping = "spwg";
++ fsl,data-width = <18>;
++ status = "okay";
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: hsd100pxn1 {
++ clock-frequency = <65000000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hback-porch = <220>;
++ hfront-porch = <40>;
++ vback-porch = <21>;
++ vfront-porch = <7>;
++ hsync-len = <60>;
++ vsync-len = <10>;
++ };
++ };
++ };
++};
++
++&pcie {
++ status = "okay";
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&pwm3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm3>;
++ status = "okay";
++};
++
++&pwm4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm4>;
++ status = "okay";
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&usbh1 {
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ wp-gpios = <&gpio7 1 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ cd-gpios = <&gpio2 6 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-sabresd.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-sabresd.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-sabresd.dtsi 2015-05-01 14:57:57.451427001 -0500
+@@ -10,16 +10,33 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
++
+ / {
++ aliases {
++ mxcfb0 = &mxcfb1;
++ mxcfb1 = &mxcfb2;
++ mxcfb2 = &mxcfb3;
++ mxcfb3 = &mxcfb4;
++ };
++
++ chosen {
++ stdout-path = &uart1;
++ };
++
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_usb_otg_vbus: usb_otg_vbus {
++ reg_usb_otg_vbus: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -27,8 +44,9 @@
+ enable-active-high;
+ };
+
+- reg_usb_h1_vbus: usb_h1_vbus {
++ reg_usb_h1_vbus: regulator@1 {
+ compatible = "regulator-fixed";
++ reg = <1>;
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -36,29 +54,46 @@
+ enable-active-high;
+ };
+
+- reg_audio: wm8962_supply {
++ reg_audio: regulator@2 {
+ compatible = "regulator-fixed";
++ reg = <2>;
+ regulator-name = "wm8962-supply";
+ gpio = <&gpio4 10 0>;
+ enable-active-high;
+ };
++
++ reg_mipi_dsi_pwr_on: mipi_dsi_pwr_on {
++ compatible = "regulator-fixed";
++ regulator-name = "mipi_dsi_pwr_on";
++ gpio = <&gpio6 14 0>;
++ enable-active-high;
++ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_keys>;
++
++ power {
++ label = "Power Button";
++ gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
++ gpio-key,wakeup;
++ linux,code = <KEY_POWER>;
++ };
+
+ volume-up {
+ label = "Volume Up";
+- gpios = <&gpio1 4 0>;
++ gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ gpio-key,wakeup;
+- linux,code = <115>; /* KEY_VOLUMEUP */
++ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ volume-down {
+ label = "Volume Down";
+- gpios = <&gpio1 5 0>;
++ gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
+ gpio-key,wakeup;
+- linux,code = <114>; /* KEY_VOLUMEDOWN */
++ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+
+@@ -88,11 +123,107 @@
+ default-brightness-level = <7>;
+ status = "okay";
+ };
++
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
++
++ red {
++ gpios = <&gpio1 2 0>;
++ default-state = "on";
++ };
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb2: fb@1 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <24>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb3: fb@2 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "lcd";
++ interface_pix_fmt = "RGB565";
++ mode_str ="CLAA-WVGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ mxcfb4: fb@3 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "disabled";
++ };
++
++ lcd@0 {
++ compatible = "fsl,lcd";
++ ipu_id = <0>;
++ disp_id = <0>;
++ default_ifmt = "RGB565";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1>;
++ status = "okay";
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <7>;
++ };
++
++ v4l2_out {
++ compatible = "fsl,mxc_v4l2_output";
++ status = "okay";
++ };
++
++ lvds_cabc_ctrl {
++ lvds0-gpios = <&gpio6 15 0>;
++ lvds1-gpios = <&gpio6 16 0>;
++ };
++
++ mipi_dsi_reset: mipi-dsi-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <50>;
++ #reset-cells = <0>;
++ };
+ };
+
+ &audmux {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_audmux_2>;
++ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+ };
+
+@@ -100,7 +231,7 @@
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio4 9 0>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_2>;
++ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+@@ -114,7 +245,7 @@
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio1 25 0>;
+ status = "okay";
+@@ -123,7 +254,7 @@
+ &i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c1_2>;
++ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ codec: wm8962@1a {
+@@ -149,10 +280,121 @@
+ };
+ };
+
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ hdmi: edid@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++};
++
+ &i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c3_2>;
++ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ egalax_ts@04 {
+@@ -168,11 +410,9 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6qdl-sabresd {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+- MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x80000000
+- MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000
+ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x80000000
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x80000000
+ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x80000000
+@@ -182,6 +422,202 @@
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x80000000
++ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x80000000
++ MX6QDL_PAD_NANDF_CS3__GPIO6_IO16 0x80000000
++ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x80000000
++ MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x80000000
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_gpio_keys: gpio_keysgrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x80000000
++ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x80000000
++ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000
++ >;
++ };
++
++ pinctrl_hdmi_cec: hdmi_cecgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
++ >;
++ };
++
++ pinctrl_hdmi_hdcp: hdmi_hdcpgrp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__HDMI_TX_DDC_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
++ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_ipu1: ipu1grp {
++ fsl,pins = <
++ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
++ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
++ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
++ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
++ MX6QDL_PAD_DI0_PIN4__IPU1_DI0_PIN04 0x80000000
++ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
++ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
++ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
++ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
++ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
++ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
++ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
++ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
++ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
++ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
++ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
++ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
++ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
++ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
++ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
++ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
++ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
++ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
++ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
++ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
++ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
++ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
++ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
++ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
++ >;
++ };
++
++ pinctrl_pcie: pciegrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000
++ >;
++ };
++
++ pinctrl_pwm1: pwm1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ MX6QDL_PAD_NANDF_D4__SD2_DATA4 0x17059
++ MX6QDL_PAD_NANDF_D5__SD2_DATA5 0x17059
++ MX6QDL_PAD_NANDF_D6__SD2_DATA6 0x17059
++ MX6QDL_PAD_NANDF_D7__SD2_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
++ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
++ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
++ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++ };
++
++ gpio_leds {
++ pinctrl_gpio_leds: gpioledsgrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
+ >;
+ };
+ };
+@@ -212,9 +648,33 @@
+ };
+ };
+
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pcie>;
++ reset-gpio = <&gpio7 12 0>;
++ status = "okay";
++};
++
++&pcie {
++ power-on-gpio = <&gpio3 19 0>;
++ reset-gpio = <&gpio7 12 0>;
++ status = "okay";
++};
++
++
+ &pwm1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_pwm0_1>;
++ pinctrl-0 = <&pinctrl_pwm1>;
++ status = "okay";
++};
++
++&ldb {
++ ipu_id = <1>;
++ disp_id = <1>;
++ ext_ref = <1>;
++ mode = "sep1";
++ sec_ipu_id = <1>;
++ sec_disp_id = <0>;
+ status = "okay";
+ };
+
+@@ -225,7 +685,16 @@
+
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&mipi_dsi {
++ dev_id = <0>;
++ disp_id = <0>;
++ lcd_panel = "TRULY-WVGA";
++ disp-power-on-supply = <&reg_mipi_dsi_pwr_on>;
++ resets = <&mipi_dsi_reset>;
+ status = "okay";
+ };
+
+@@ -237,14 +706,14 @@
+ &usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_2>;
++ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+ };
+
+ &usdhc2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc2_1>;
++ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <8>;
+ cd-gpios = <&gpio2 2 0>;
+ wp-gpios = <&gpio2 3 0>;
+@@ -253,9 +722,47 @@
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_1>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ cd-gpios = <&gpio2 0 0>;
+ wp-gpios = <&gpio2 1 0>;
+ status = "okay";
+ };
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <8>;
++ non-removable;
++ no-1-8-v;
++ status = "okay";
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&hdmi_cec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_cec>;
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-wandboard.dtsi linux-3.14.40/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6qdl-wandboard.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6qdl-wandboard.dtsi 2015-05-01 14:57:57.491427001 -0500
+@@ -12,17 +12,21 @@
+ / {
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_2p5v: 2p5v {
++ reg_2p5v: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "2P5V";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+- reg_3p3v: 3p3v {
++ reg_3p3v: regulator@1 {
+ compatible = "regulator-fixed";
++ reg = <1>;
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+@@ -54,14 +58,14 @@
+
+ &audmux {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_audmux_2>;
++ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+ };
+
+ &i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c2_2>;
++ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ codec: sgtl5000@0a {
+@@ -77,7 +81,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6qdl-wandboard {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0
+@@ -91,20 +95,121 @@
+ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x80000000
+ >;
+ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
++ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
++ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
++ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_spdif: spdifgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RXD0__SPDIF_OUT 0x1b0b0
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart3: uart3grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
++ MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc1: usdhc1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
+ };
+ };
+
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio3 29 0>;
++ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+ &spdif {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_spdif_3>;
++ pinctrl-0 = <&pinctrl_spdif>;
+ status = "okay";
+ };
+
+@@ -115,13 +220,13 @@
+
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+ };
+
+ &uart3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart3_2>;
++ pinctrl-0 = <&pinctrl_uart3>;
+ fsl,uart-has-rtscts;
+ status = "okay";
+ };
+@@ -132,7 +237,7 @@
+
+ &usbotg {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_1>;
++ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ dr_mode = "peripheral";
+ status = "okay";
+@@ -140,21 +245,21 @@
+
+ &usdhc1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc1_2>;
++ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio1 2 0>;
+ status = "okay";
+ };
+
+ &usdhc2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc2_2>;
++ pinctrl-0 = <&pinctrl_usdhc2>;
+ non-removable;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ cd-gpios = <&gpio3 9 0>;
+ status = "okay";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts linux-3.14.40/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,432 @@
++/*
++ * Copyright 2013 Data Modul AG
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++
++#include <dt-bindings/gpio/gpio.h>
++#include "imx6q.dtsi"
++
++/ {
++ model = "Data Modul eDM-QMX6 Board";
++ compatible = "dmo,imx6q-edmqmx6", "fsl,imx6q";
++
++ chosen {
++ stdout-path = &uart2;
++ };
++
++ aliases {
++ gpio7 = &stmpe_gpio1;
++ gpio8 = &stmpe_gpio2;
++ stmpe-i2c0 = &stmpe1;
++ stmpe-i2c1 = &stmpe2;
++ };
++
++ memory {
++ reg = <0x10000000 0x80000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_switch: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "usb_otg_switch";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio7 12 0>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ reg_usb_host1: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_host1_en";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ gpio = <&gpio3 31 0>;
++ enable-active-high;
++ };
++ };
++
++ gpio-leds {
++ compatible = "gpio-leds";
++
++ led-blue {
++ label = "blue";
++ gpios = <&stmpe_gpio1 8 GPIO_ACTIVE_HIGH>;
++ linux,default-trigger = "heartbeat";
++ };
++
++ led-green {
++ label = "green";
++ gpios = <&stmpe_gpio1 9 GPIO_ACTIVE_HIGH>;
++ };
++
++ led-pink {
++ label = "pink";
++ gpios = <&stmpe_gpio1 10 GPIO_ACTIVE_HIGH>;
++ };
++
++ led-red {
++ label = "red";
++ gpios = <&stmpe_gpio1 11 GPIO_ACTIVE_HIGH>;
++ };
++ };
++};
++
++&ecspi5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi5>;
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio1 12 0>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "m25p80";
++ spi-max-frequency = <40000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio3 23 0>;
++ phy-supply = <&vgen2_1v2_eth>;
++ status = "okay";
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2
++ &pinctrl_stmpe1
++ &pinctrl_stmpe2
++ &pinctrl_pfuze>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++ interrupt-parent = <&gpio3>;
++ interrupts = <20 8>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-always-on;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ regulator-always-on;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_1v2_eth: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vdd_high_in: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ stmpe1: stmpe1601@40 {
++ compatible = "st,stmpe1601";
++ reg = <0x40>;
++ interrupts = <30 0>;
++ interrupt-parent = <&gpio3>;
++ vcc-supply = <&sw2_reg>;
++ vio-supply = <&sw2_reg>;
++
++ stmpe_gpio1: stmpe_gpio {
++ #gpio-cells = <2>;
++ compatible = "st,stmpe-gpio";
++ };
++ };
++
++ stmpe2: stmpe1601@44 {
++ compatible = "st,stmpe1601";
++ reg = <0x44>;
++ interrupts = <2 0>;
++ interrupt-parent = <&gpio5>;
++ vcc-supply = <&sw2_reg>;
++ vio-supply = <&sw2_reg>;
++
++ stmpe_gpio2: stmpe_gpio {
++ #gpio-cells = <2>;
++ compatible = "st,stmpe-gpio";
++ };
++ };
++
++ temp1: ad7414@4c {
++ compatible = "ad,ad7414";
++ reg = <0x4c>;
++ };
++
++ temp2: ad7414@4d {
++ compatible = "ad,ad7414";
++ reg = <0x4d>;
++ };
++
++ rtc: m41t62@68 {
++ compatible = "stm,m41t62";
++ reg = <0x68>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-dmo-edmqmx6 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x80000000
++ MX6QDL_PAD_EIM_A17__GPIO2_IO21 0x80000000
++ >;
++ };
++
++ pinctrl_ecspi5: ecspi5rp-1 {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_DAT0__ECSPI5_MISO 0x80000000
++ MX6QDL_PAD_SD1_CMD__ECSPI5_MOSI 0x80000000
++ MX6QDL_PAD_SD1_CLK__ECSPI5_SCLK 0x80000000
++ MX6QDL_PAD_SD2_DAT3__GPIO1_IO12 0x80000000
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_pfuze: pfuze100grp1 {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x80000000
++ >;
++ };
++
++ pinctrl_stmpe1: stmpe1grp {
++ fsl,pins = <MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x80000000>;
++ };
++
++ pinctrl_stmpe2: stmpe2grp {
++ fsl,pins = <MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x80000000>;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
++ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
++ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
++ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
++ >;
++ };
++ };
++};
++
++&sata {
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_host1>;
++ disable-over-current;
++ dr_mode = "host";
++ status = "okay";
++};
++
++&usbotg {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ vmmc-supply = <&reg_3p3v>;
++ non-removable;
++ bus-width = <8>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q.dtsi linux-3.14.40/arch/arm/boot/dts/imx6q.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q.dtsi 2015-05-01 14:57:57.491427001 -0500
+@@ -8,10 +8,16 @@
+ *
+ */
+
++#include <dt-bindings/interrupt-controller/irq.h>
+ #include "imx6q-pinfunc.h"
+ #include "imx6qdl.dtsi"
+
+ / {
++ aliases {
++ ipu1 = &ipu2;
++ spi4 = &ecspi5;
++ };
++
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -25,8 +31,17 @@
+ /* kHz uV */
+ 1200000 1275000
+ 996000 1250000
++ 852000 1250000
+ 792000 1150000
+- 396000 950000
++ 396000 975000
++ >;
++ fsl,soc-operating-points = <
++ /* ARM kHz SOC-PU uV */
++ 1200000 1275000
++ 996000 1250000
++ 852000 1250000
++ 792000 1175000
++ 396000 1175000
+ >;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clks 104>, <&clks 6>, <&clks 16>,
+@@ -61,12 +76,77 @@
+ };
+
+ soc {
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc";
++ interrupts = <0 107 0x04>, <0 112 0x4>, <0 113 0x4>, <0 114 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1", "irq_busfreq_2", "irq_busfreq_3";
++ fsl,max_ddr_freq = <528000000>;
++ };
++
++ gpu@00130000 {
++ compatible = "fsl,imx6q-gpu";
++ reg = <0x00130000 0x4000>, <0x00134000 0x4000>,
++ <0x02204000 0x4000>, <0x0 0x0>;
++ reg-names = "iobase_3d", "iobase_2d",
++ "iobase_vg", "phys_baseaddr";
++ interrupts = <0 9 0x04>, <0 10 0x04>,<0 11 0x04>;
++ interrupt-names = "irq_3d", "irq_2d", "irq_vg";
++ clocks = <&clks 26>, <&clks 143>,
++ <&clks 27>, <&clks 121>,
++ <&clks 122>, <&clks 74>;
++ clock-names = "gpu2d_axi_clk", "openvg_axi_clk",
++ "gpu3d_axi_clk", "gpu2d_clk",
++ "gpu3d_clk", "gpu3d_shader_clk";
++ resets = <&src 0>, <&src 3>, <&src 3>;
++ reset-names = "gpu3d", "gpu2d", "gpuvg";
++ pu-supply = <&reg_pu>;
++ };
++
+ ocram: sram@00900000 {
+ compatible = "mmio-sram";
+ reg = <0x00900000 0x40000>;
+ clocks = <&clks 142>;
+ };
+
++ hdmi_core: hdmi_core@00120000 {
++ compatible = "fsl,imx6q-hdmi-core";
++ reg = <0x00120000 0x9000>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_video: hdmi_video@020e0000 {
++ compatible = "fsl,imx6q-hdmi-video";
++ reg = <0x020e0000 0x1000>;
++ reg-names = "hdmi_gpr";
++ interrupts = <0 115 0x04>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_audio: hdmi_audio@00120000 {
++ compatible = "fsl,imx6q-hdmi-audio";
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ dmas = <&sdma 2 23 0>;
++ dma-names = "tx";
++ status = "disabled";
++ };
++
++ hdmi_cec: hdmi_cec@00120000 {
++ compatible = "fsl,imx6q-hdmi-cec";
++ interrupts = <0 115 0x04>;
++ status = "disabled";
++ };
++
++
+ aips-bus@02000000 { /* AIPS1 */
+ spba-bus@02000000 {
+ ecspi5: ecspi@02018000 {
+@@ -74,13 +154,17 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02018000 0x4000>;
+- interrupts = <0 35 0x04>;
++ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 116>, <&clks 116>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+ };
+
++ vpu@02040000 {
++ status = "okay";
++ };
++
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6q-iomuxc";
+
+@@ -122,40 +206,40 @@
+ };
+ };
+
++ aips-bus@02100000 { /* AIPS2 */
++ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6q-mipi-dsi";
++ reg = <0x021e0000 0x4000>;
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks 138>, <&clks 209>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
++ status = "disabled";
++ };
++ };
++
+ sata: sata@02200000 {
+ compatible = "fsl,imx6q-ahci";
+ reg = <0x02200000 0x4000>;
+- interrupts = <0 39 0x04>;
++ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 154>, <&clks 187>, <&clks 105>;
+ clock-names = "sata", "sata_ref", "ahb";
+ status = "disabled";
+ };
+
+ ipu2: ipu@02800000 {
+- #crtc-cells = <1>;
+ compatible = "fsl,imx6q-ipu";
+ reg = <0x02800000 0x400000>;
+- interrupts = <0 8 0x4 0 7 0x4>;
+- clocks = <&clks 133>, <&clks 134>, <&clks 137>;
+- clock-names = "bus", "di0", "di1";
++ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
++ <0 7 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 133>, <&clks 134>, <&clks 137>,
++ <&clks 41>, <&clks 42>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
+ resets = <&src 4>;
++ bypass_reset = <0>;
+ };
+ };
+ };
+-
+-&ldb {
+- clocks = <&clks 33>, <&clks 34>,
+- <&clks 39>, <&clks 40>, <&clks 41>, <&clks 42>,
+- <&clks 135>, <&clks 136>;
+- clock-names = "di0_pll", "di1_pll",
+- "di0_sel", "di1_sel", "di2_sel", "di3_sel",
+- "di0", "di1";
+-
+- lvds-channel@0 {
+- crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
+- };
+-
+- lvds-channel@1 {
+- crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
+- };
+-};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gk802.dts linux-3.14.40/arch/arm/boot/dts/imx6q-gk802.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gk802.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-gk802.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,229 @@
++/*
++ * Copyright (C) 2013 Philipp Zabel
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++
++/ {
++ model = "Zealz GK802";
++ compatible = "zealz,imx6q-gk802", "fsl,imx6q";
++
++ aliases {
++ mxcfb0 = &mxcfb1;
++ };
++
++ chosen {
++ stdout-path = &uart4;
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: usb_h1_vbus {
++ compatible = "regulator-fixed";
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio2 0 0>;
++ };
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++
++ recovery-button {
++ label = "recovery";
++ gpios = <&gpio3 16 1>;
++ linux,code = <0x198>; /* KEY_RESTART */
++ gpio-key,wakeup;
++ };
++
++ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str ="1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++
++/* Internal I2C */
++&i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ clock-frequency = <100000>;
++ status = "okay";
++
++ /* SDMC DM2016 1024 bit EEPROM + 128 bit OTP */
++ eeprom: dm2016@51 {
++ compatible = "sdmc,dm2016";
++ reg = <0x51>;
++ };
++};
++
++/* External I2C via HDMI */
++&i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ clock-frequency = <100000>;
++ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-gk802 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /* Recovery button, active-low */
++ MX6QDL_PAD_EIM_D16__GPIO3_IO16 0x100b1
++ /* RTL8192CU enable GPIO, active-low */
++ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x1b0b0
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart4: uart4grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc4: usdhc4grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
++ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
++ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
++ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
++ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
++ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&uart2 {
++ status = "okay";
++};
++
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart4>;
++ status = "okay";
++};
++
++/* External USB-A port (USBOTG) */
++&usbotg {
++ phy_type = "utmi";
++ dr_mode = "host";
++ disable-over-current;
++ status = "okay";
++};
++
++/* Internal USB port (USBH1), connected to RTL8192CU */
++&usbh1 {
++ phy_type = "utmi";
++ dr_mode = "host";
++ vbus-supply = <&reg_usb_h1_vbus>;
++ disable-over-current;
++ status = "okay";
++};
++
++/* External microSD */
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ bus-width = <4>;
++ cd-gpios = <&gpio6 11 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
++
++/* Internal microSD */
++&usdhc4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc4>;
++ bus-width = <4>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw51xx.dts linux-3.14.40/arch/arm/boot/dts/imx6q-gw51xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw51xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-gw51xx.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,19 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw54xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW51XX";
++ compatible = "gw,imx6q-gw51xx", "gw,ventana", "fsl,imx6q";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw52xx.dts linux-3.14.40/arch/arm/boot/dts/imx6q-gw52xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw52xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-gw52xx.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw52xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW52XX";
++ compatible = "gw,imx6q-gw52xx", "gw,ventana", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw53xx.dts linux-3.14.40/arch/arm/boot/dts/imx6q-gw53xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw53xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-gw53xx.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw53xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW53XX";
++ compatible = "gw,imx6q-gw53xx", "gw,ventana", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw5400-a.dts linux-3.14.40/arch/arm/boot/dts/imx6q-gw5400-a.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw5400-a.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-gw5400-a.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,543 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++
++/ {
++ model = "Gateworks Ventana GW5400-A";
++ compatible = "gw,imx6q-gw5400-a", "gw,ventana", "fsl,imx6q";
++
++ /* these are used by bootloader for disabling nodes */
++ aliases {
++ ethernet0 = &fec;
++ ethernet1 = &eth1;
++ i2c0 = &i2c1;
++ i2c1 = &i2c2;
++ i2c2 = &i2c3;
++ led0 = &led0;
++ led1 = &led1;
++ led2 = &led2;
++ sky2 = &eth1;
++ ssi0 = &ssi1;
++ spi0 = &ecspi1;
++ usb0 = &usbh1;
++ usb1 = &usbotg;
++ usdhc2 = &usdhc3;
++ };
++
++ chosen {
++ bootargs = "console=ttymxc1,115200";
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ led0: user1 {
++ label = "user1";
++ gpios = <&gpio4 6 0>; /* 102 -> MX6_PANLEDG */
++ default-state = "on";
++ linux,default-trigger = "heartbeat";
++ };
++
++ led1: user2 {
++ label = "user2";
++ gpios = <&gpio4 10 0>; /* 106 -> MX6_PANLEDR */
++ default-state = "off";
++ };
++
++ led2: user3 {
++ label = "user3";
++ gpios = <&gpio4 15 1>; /* 111 -> MX6_LOCLED# */
++ default-state = "off";
++ };
++ };
++
++ memory {
++ reg = <0x10000000 0x40000000>;
++ };
++
++ pps {
++ compatible = "pps-gpio";
++ gpios = <&gpio1 5 0>;
++ status = "okay";
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_1p0v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "1P0V";
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_usb_h1_vbus: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "usb_h1_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-always-on;
++ };
++
++ reg_usb_otg_vbus: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "usb_otg_vbus";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ gpio = <&gpio3 22 0>;
++ enable-active-high;
++ };
++ };
++
++ sound {
++ compatible = "fsl,imx6q-sabrelite-sgtl5000",
++ "fsl,imx-audio-sgtl5000";
++ model = "imx6q-sabrelite-sgtl5000";
++ ssi-controller = <&ssi1>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "MIC_IN", "Mic Jack",
++ "Mic Jack", "Mic Bias",
++ "Headphone Jack", "HP_OUT";
++ mux-int-port = <1>;
++ mux-ext-port = <4>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux>;
++ status = "okay";
++};
++
++&ecspi1 {
++ fsl,spi-num-chipselects = <1>;
++ cs-gpios = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ecspi1>;
++ status = "okay";
++
++ flash: m25p80@0 {
++ compatible = "sst,w25q256";
++ spi-max-frequency = <30000000>;
++ reg = <0>;
++ };
++};
++
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ phy-reset-gpios = <&gpio1 30 0>;
++ status = "okay";
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ eeprom1: eeprom@50 {
++ compatible = "atmel,24c02";
++ reg = <0x50>;
++ pagesize = <16>;
++ };
++
++ eeprom2: eeprom@51 {
++ compatible = "atmel,24c02";
++ reg = <0x51>;
++ pagesize = <16>;
++ };
++
++ eeprom3: eeprom@52 {
++ compatible = "atmel,24c02";
++ reg = <0x52>;
++ pagesize = <16>;
++ };
++
++ eeprom4: eeprom@53 {
++ compatible = "atmel,24c02";
++ reg = <0x53>;
++ pagesize = <16>;
++ };
++
++ gpio: pca9555@23 {
++ compatible = "nxp,pca9555";
++ reg = <0x23>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ hwmon: gsc@29 {
++ compatible = "gw,gsp";
++ reg = <0x29>;
++ };
++
++ rtc: ds1672@68 {
++ compatible = "dallas,ds1672";
++ reg = <0x68>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3950000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ pciswitch: pex8609@3f {
++ compatible = "plx,pex8609";
++ reg = <0x3f>;
++ };
++
++ pciclkgen: si52147@6b {
++ compatible = "sil,si52147";
++ reg = <0x6b>;
++ };
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3>;
++ status = "okay";
++
++ accelerometer: mma8450@1c {
++ compatible = "fsl,mma8450";
++ reg = <0x1c>;
++ };
++
++ codec: sgtl5000@0a {
++ compatible = "fsl,sgtl5000";
++ reg = <0x0a>;
++ clocks = <&clks 201>;
++ VDDA-supply = <&sw4_reg>;
++ VDDIO-supply = <&reg_3p3v>;
++ };
++
++ hdmiin: adv7611@4c {
++ compatible = "adi,adv7611";
++ reg = <0x4c>;
++ };
++
++ touchscreen: egalax_ts@04 {
++ compatible = "eeti,egalax_ts";
++ reg = <0x04>;
++ interrupt-parent = <&gpio7>;
++ interrupts = <12 2>; /* gpio7_12 active low */
++ wakeup-gpios = <&gpio7 12 0>;
++ };
++
++ videoout: adv7393@2a {
++ compatible = "adi,adv7393";
++ reg = <0x2a>;
++ };
++
++ videoin: adv7180@20 {
++ compatible = "adi,adv7180";
++ reg = <0x20>;
++ };
++};
++
++&iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ imx6q-gw5400-a {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000 /* OTG_PWR_EN */
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000 /* SPINOR_CS0# */
++ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x80000000 /* PCIE IRQ */
++ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000 /* PCIE RST */
++ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x000130b0 /* AUD4_MCK */
++ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x80000000 /* GPS_PPS */
++ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000 /* TOUCH_IRQ# */
++ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x80000000 /* user1 led */
++ MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x80000000 /* user2 led */
++ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x80000000 /* user3 led */
++ MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x80000000 /* USBHUB_RST# */
++ MX6QDL_PAD_SD1_DAT3__GPIO1_IO21 0x80000000 /* MIPI_DIO */
++ >;
++ };
++
++ pinctrl_audmux: audmuxgrp {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x130b0
++ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x130b0
++ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
++ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
++ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
++ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
++ >;
++ };
++
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
++ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
++ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_i2c3: i2c3grp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
++ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_uart5: uart5grp {
++ fsl,pins = <
++ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
++ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
++&ldb {
++ status = "okay";
++};
++
++&pcie {
++ reset-gpio = <&gpio1 29 0>;
++ status = "okay";
++
++ eth1: sky2@8 { /* MAC/PHY on bus 8 */
++ compatible = "marvell,sky2";
++ };
++};
++
++&ssi1 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&uart1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart1>;
++ status = "okay";
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart2>;
++ status = "okay";
++};
++
++&uart5 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_uart5>;
++ status = "okay";
++};
++
++&usbotg {
++ vbus-supply = <&reg_usb_otg_vbus>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg>;
++ disable-over-current;
++ status = "okay";
++};
++
++&usbh1 {
++ vbus-supply = <&reg_usb_h1_vbus>;
++ status = "okay";
++};
++
++&usdhc3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ cd-gpios = <&gpio7 0 0>;
++ vmmc-supply = <&reg_3p3v>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw54xx.dts linux-3.14.40/arch/arm/boot/dts/imx6q-gw54xx.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-gw54xx.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-gw54xx.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2013 Gateworks Corporation
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-gw54xx.dtsi"
++
++/ {
++ model = "Gateworks Ventana i.MX6 Quad GW54XX";
++ compatible = "gw,imx6q-gw54xx", "gw,ventana", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-hummingboard.dts linux-3.14.40/arch/arm/boot/dts/imx6q-hummingboard.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-hummingboard.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-hummingboard.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,21 @@
++/*
++ * Copyright (C) 2014 Rabeeh Khoury (rabeeh@solid-run.com)
++ * Based on work by Russell King
++ */
++/dts-v1/;
++
++#include "imx6q.dtsi"
++#include "imx6qdl-hummingboard.dtsi"
++
++/ {
++ model = "SolidRun HummingBoard Dual/Quad";
++ compatible = "solidrun,hummingboard/q", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++ fsl,transmit-level-mV = <1104>;
++ fsl,transmit-boost-mdB = <0>;
++ fsl,transmit-atten-16ths = <9>;
++ fsl,no-spread-spectrum;
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-nitrogen6x.dts linux-3.14.40/arch/arm/boot/dts/imx6q-nitrogen6x.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-nitrogen6x.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-nitrogen6x.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -0,0 +1,25 @@
++/*
++ * Copyright 2013 Boundary Devices, Inc.
++ * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/dts-v1/;
++#include "imx6q.dtsi"
++#include "imx6qdl-nitrogen6x.dtsi"
++
++/ {
++ model = "Freescale i.MX6 Quad Nitrogen6x Board";
++ compatible = "fsl,imx6q-nitrogen6x", "fsl,imx6q";
++};
++
++&sata {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-phytec-pbab01.dts linux-3.14.40/arch/arm/boot/dts/imx6q-phytec-pbab01.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-phytec-pbab01.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-phytec-pbab01.dts 2015-05-01 14:57:57.491427001 -0500
+@@ -11,24 +11,17 @@
+
+ /dts-v1/;
+ #include "imx6q-phytec-pfla02.dtsi"
++#include "imx6qdl-phytec-pbab01.dtsi"
+
+ / {
+ model = "Phytec phyFLEX-i.MX6 Quad Carrier-Board";
+ compatible = "phytec,imx6q-pbab01", "phytec,imx6q-pfla02", "fsl,imx6q";
+-};
+-
+-&fec {
+- status = "okay";
+-};
+-
+-&uart4 {
+- status = "okay";
+-};
+
+-&usdhc2 {
+- status = "okay";
++ chosen {
++ stdout-path = &uart4;
++ };
+ };
+
+-&usdhc3 {
+- status = "okay";
++&sata {
++ status = "okay";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi linux-3.14.40/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-phytec-pfla02.dtsi 2015-05-01 14:57:57.495427001 -0500
+@@ -10,171 +10,13 @@
+ */
+
+ #include "imx6q.dtsi"
++#include "imx6qdl-phytec-pfla02.dtsi"
+
+ / {
+- model = "Phytec phyFLEX-i.MX6 Ouad";
++ model = "Phytec phyFLEX-i.MX6 Quad";
+ compatible = "phytec,imx6q-pfla02", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x80000000>;
+ };
+ };
+-
+-&ecspi3 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi3_1>;
+- status = "okay";
+- fsl,spi-num-chipselects = <1>;
+- cs-gpios = <&gpio4 24 0>;
+-
+- flash@0 {
+- compatible = "m25p80";
+- spi-max-frequency = <20000000>;
+- reg = <0>;
+- };
+-};
+-
+-&i2c1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c1_1>;
+- status = "okay";
+-
+- eeprom@50 {
+- compatible = "atmel,24c32";
+- reg = <0x50>;
+- };
+-
+- pmic@58 {
+- compatible = "dialog,da9063";
+- reg = <0x58>;
+- interrupt-parent = <&gpio4>;
+- interrupts = <17 0x8>; /* active-low GPIO4_17 */
+-
+- regulators {
+- vddcore_reg: bcore1 {
+- regulator-min-microvolt = <730000>;
+- regulator-max-microvolt = <1380000>;
+- regulator-always-on;
+- };
+-
+- vddsoc_reg: bcore2 {
+- regulator-min-microvolt = <730000>;
+- regulator-max-microvolt = <1380000>;
+- regulator-always-on;
+- };
+-
+- vdd_ddr3_reg: bpro {
+- regulator-min-microvolt = <1500000>;
+- regulator-max-microvolt = <1500000>;
+- regulator-always-on;
+- };
+-
+- vdd_3v3_reg: bperi {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- vdd_buckmem_reg: bmem {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- vdd_eth_reg: bio {
+- regulator-min-microvolt = <1200000>;
+- regulator-max-microvolt = <1200000>;
+- regulator-always-on;
+- };
+-
+- vdd_eth_io_reg: ldo4 {
+- regulator-min-microvolt = <2500000>;
+- regulator-max-microvolt = <2500000>;
+- regulator-always-on;
+- };
+-
+- vdd_mx6_snvs_reg: ldo5 {
+- regulator-min-microvolt = <3000000>;
+- regulator-max-microvolt = <3000000>;
+- regulator-always-on;
+- };
+-
+- vdd_3v3_pmic_io_reg: ldo6 {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- vdd_sd0_reg: ldo9 {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- };
+-
+- vdd_sd1_reg: ldo10 {
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- };
+-
+- vdd_mx6_high_reg: ldo11 {
+- regulator-min-microvolt = <3000000>;
+- regulator-max-microvolt = <3000000>;
+- regulator-always-on;
+- };
+- };
+- };
+-};
+-
+-&iomuxc {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hog>;
+-
+- hog {
+- pinctrl_hog: hoggrp {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
+- MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
+- MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x80000000 /* PMIC interrupt */
+- >;
+- };
+- };
+-
+- pfla02 {
+- pinctrl_usdhc3_pfla02: usdhc3grp-pfla02 {
+- fsl,pins = <
+- MX6QDL_PAD_ENET_RXD0__GPIO1_IO27 0x80000000
+- MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x80000000
+- >;
+- };
+- };
+-};
+-
+-&fec {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_3>;
+- phy-mode = "rgmii";
+- phy-reset-gpios = <&gpio3 23 0>;
+- status = "disabled";
+-};
+-
+-&uart4 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart4_1>;
+- status = "disabled";
+-};
+-
+-&usdhc2 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc2_2>;
+- cd-gpios = <&gpio1 4 0>;
+- wp-gpios = <&gpio1 2 0>;
+- status = "disabled";
+-};
+-
+-&usdhc3 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2
+- &pinctrl_usdhc3_pfla02>;
+- cd-gpios = <&gpio1 27 0>;
+- wp-gpios = <&gpio1 29 0>;
+- status = "disabled";
+-};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-pinfunc.h linux-3.14.40/arch/arm/boot/dts/imx6q-pinfunc.h
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-pinfunc.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-pinfunc.h 2015-05-01 14:57:57.495427001 -0500
+@@ -673,6 +673,7 @@
+ #define MX6QDL_PAD_GPIO_3__USB_H1_OC 0x22c 0x5fc 0x948 0x6 0x1
+ #define MX6QDL_PAD_GPIO_3__MLB_CLK 0x22c 0x5fc 0x900 0x7 0x1
+ #define MX6QDL_PAD_GPIO_6__ESAI_TX_CLK 0x230 0x600 0x870 0x0 0x1
++#define MX6QDL_PAD_GPIO_6__ENET_IRQ 0x230 0x600 0x03c 0x11 0xff000609
+ #define MX6QDL_PAD_GPIO_6__I2C3_SDA 0x230 0x600 0x8ac 0x2 0x1
+ #define MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x230 0x600 0x000 0x5 0x0
+ #define MX6QDL_PAD_GPIO_6__SD2_LCTL 0x230 0x600 0x000 0x6 0x0
+@@ -1024,6 +1025,7 @@
+ #define MX6QDL_PAD_SD1_DAT2__WDOG1_RESET_B_DEB 0x34c 0x734 0x000 0x6 0x0
+ #define MX6QDL_PAD_SD1_CLK__SD1_CLK 0x350 0x738 0x000 0x0 0x0
+ #define MX6QDL_PAD_SD1_CLK__ECSPI5_SCLK 0x350 0x738 0x828 0x1 0x0
++#define MX6QDL_PAD_SD1_CLK__OSC32K_32K_OUT 0x350 0x738 0x000 0x2 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPT_CLKIN 0x350 0x738 0x000 0x3 0x0
+ #define MX6QDL_PAD_SD1_CLK__GPIO1_IO20 0x350 0x738 0x000 0x5 0x0
+ #define MX6QDL_PAD_SD2_CLK__SD2_CLK 0x354 0x73c 0x000 0x0 0x0
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabreauto.dts linux-3.14.40/arch/arm/boot/dts/imx6q-sabreauto.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabreauto.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-sabreauto.dts 2015-05-01 14:57:57.495427001 -0500
+@@ -20,6 +20,22 @@
+ compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
+ };
+
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
++
++&mxcfb3 {
++ status = "okay";
++};
++
++&mxcfb4 {
++ status = "okay";
++};
++
+ &sata {
+ status = "okay";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabrelite.dts linux-3.14.40/arch/arm/boot/dts/imx6q-sabrelite.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabrelite.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-sabrelite.dts 2015-05-01 14:57:57.495427001 -0500
+@@ -12,189 +12,13 @@
+
+ /dts-v1/;
+ #include "imx6q.dtsi"
++#include "imx6qdl-sabrelite.dtsi"
+
+ / {
+ model = "Freescale i.MX6 Quad SABRE Lite Board";
+ compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
+-
+- memory {
+- reg = <0x10000000 0x40000000>;
+- };
+-
+- regulators {
+- compatible = "simple-bus";
+-
+- reg_2p5v: 2p5v {
+- compatible = "regulator-fixed";
+- regulator-name = "2P5V";
+- regulator-min-microvolt = <2500000>;
+- regulator-max-microvolt = <2500000>;
+- regulator-always-on;
+- };
+-
+- reg_3p3v: 3p3v {
+- compatible = "regulator-fixed";
+- regulator-name = "3P3V";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-always-on;
+- };
+-
+- reg_usb_otg_vbus: usb_otg_vbus {
+- compatible = "regulator-fixed";
+- regulator-name = "usb_otg_vbus";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- gpio = <&gpio3 22 0>;
+- enable-active-high;
+- };
+- };
+-
+- sound {
+- compatible = "fsl,imx6q-sabrelite-sgtl5000",
+- "fsl,imx-audio-sgtl5000";
+- model = "imx6q-sabrelite-sgtl5000";
+- ssi-controller = <&ssi1>;
+- audio-codec = <&codec>;
+- audio-routing =
+- "MIC_IN", "Mic Jack",
+- "Mic Jack", "Mic Bias",
+- "Headphone Jack", "HP_OUT";
+- mux-int-port = <1>;
+- mux-ext-port = <4>;
+- };
+-};
+-
+-&audmux {
+- status = "okay";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_audmux_1>;
+-};
+-
+-&ecspi1 {
+- fsl,spi-num-chipselects = <1>;
+- cs-gpios = <&gpio3 19 0>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_1>;
+- status = "okay";
+-
+- flash: m25p80@0 {
+- compatible = "sst,sst25vf016b";
+- spi-max-frequency = <20000000>;
+- reg = <0>;
+- };
+-};
+-
+-&fec {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
+- phy-mode = "rgmii";
+- phy-reset-gpios = <&gpio3 23 0>;
+- status = "okay";
+-};
+-
+-&i2c1 {
+- status = "okay";
+- clock-frequency = <100000>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_i2c1_1>;
+-
+- codec: sgtl5000@0a {
+- compatible = "fsl,sgtl5000";
+- reg = <0x0a>;
+- clocks = <&clks 201>;
+- VDDA-supply = <&reg_2p5v>;
+- VDDIO-supply = <&reg_3p3v>;
+- };
+-};
+-
+-&iomuxc {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_hog>;
+-
+- hog {
+- pinctrl_hog: hoggrp {
+- fsl,pins = <
+- MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x80000000
+- MX6QDL_PAD_NANDF_D7__GPIO2_IO07 0x80000000
+- MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x80000000
+- MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x80000000
+- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
+- MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x80000000
+- MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x1f0b0
+- MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x80000000
+- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
+- >;
+- };
+- };
+-};
+-
+-&ldb {
+- status = "okay";
+-
+- lvds-channel@0 {
+- fsl,data-mapping = "spwg";
+- fsl,data-width = <18>;
+- status = "okay";
+-
+- display-timings {
+- native-mode = <&timing0>;
+- timing0: hsd100pxn1 {
+- clock-frequency = <65000000>;
+- hactive = <1024>;
+- vactive = <768>;
+- hback-porch = <220>;
+- hfront-porch = <40>;
+- vback-porch = <21>;
+- vfront-porch = <7>;
+- hsync-len = <60>;
+- vsync-len = <10>;
+- };
+- };
+- };
+ };
+
+ &sata {
+ status = "okay";
+ };
+-
+-&ssi1 {
+- fsl,mode = "i2s-slave";
+- status = "okay";
+-};
+-
+-&uart2 {
+- status = "okay";
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart2_1>;
+-};
+-
+-&usbh1 {
+- status = "okay";
+-};
+-
+-&usbotg {
+- vbus-supply = <&reg_usb_otg_vbus>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_1>;
+- disable-over-current;
+- status = "okay";
+-};
+-
+-&usdhc3 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
+- cd-gpios = <&gpio7 0 0>;
+- wp-gpios = <&gpio7 1 0>;
+- vmmc-supply = <&reg_3p3v>;
+- status = "okay";
+-};
+-
+-&usdhc4 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc4_2>;
+- cd-gpios = <&gpio2 6 0>;
+- wp-gpios = <&gpio2 7 0>;
+- vmmc-supply = <&reg_3p3v>;
+- status = "okay";
+-};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabresd.dts linux-3.14.40/arch/arm/boot/dts/imx6q-sabresd.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabresd.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-sabresd.dts 2015-05-01 14:57:57.495427001 -0500
+@@ -23,3 +23,19 @@
+ &sata {
+ status = "okay";
+ };
++
++&mxcfb1 {
++ status = "okay";
++};
++
++&mxcfb2 {
++ status = "okay";
++};
++
++&mxcfb3 {
++ status = "okay";
++};
++
++&mxcfb4 {
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts linux-3.14.40/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-sabresd-hdcp.dts 2015-05-01 14:57:57.495427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include "imx6q-sabresd.dts"
++
++&hdmi_video {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hdmi_hdcp>;
++ fsl,hdcp;
++};
++
++&i2c2 {
++ status = "disable";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sbc6x.dts linux-3.14.40/arch/arm/boot/dts/imx6q-sbc6x.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-sbc6x.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-sbc6x.dts 2015-05-01 14:57:57.495427001 -0500
+@@ -17,28 +17,78 @@
+ };
+ };
+
++
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_enet_1>;
++ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
++&iomuxc {
++ imx6q-sbc6x {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg: usbotggrp {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+ };
+
+ &usbotg {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg_1>;
++ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ status = "okay";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6q-udoo.dts linux-3.14.40/arch/arm/boot/dts/imx6q-udoo.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6q-udoo.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6q-udoo.dts 2015-05-01 14:57:57.495427001 -0500
+@@ -16,24 +16,78 @@
+ model = "Udoo i.MX6 Quad Board";
+ compatible = "udoo,imx6q-udoo", "fsl,imx6q";
+
++ chosen {
++ stdout-path = &uart2;
++ };
++
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+ };
+
++&fec {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_enet>;
++ phy-mode = "rgmii";
++ status = "okay";
++};
++
++&iomuxc {
++ imx6q-udoo {
++ pinctrl_enet: enetgrp {
++ fsl,pins = <
++ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
++ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
++ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
++ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
++ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
++ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
++ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
++ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
++ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
++ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
++ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
++ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
++ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
++ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
++ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
++ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
++ >;
++ };
++
++ pinctrl_uart2: uart2grp {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
++ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++ };
++};
++
+ &sata {
+ status = "okay";
+ };
+
+ &uart2 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart2_1>;
++ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+ };
+
+ &usdhc3 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usdhc3_2>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
+ non-removable;
+ status = "okay";
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6sl.dtsi linux-3.14.40/arch/arm/boot/dts/imx6sl.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6sl.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6sl.dtsi 2015-05-01 14:57:57.495427001 -0500
+@@ -7,12 +7,14 @@
+ *
+ */
+
++#include <dt-bindings/interrupt-controller/irq.h>
+ #include "skeleton.dtsi"
+ #include "imx6sl-pinfunc.h"
+ #include <dt-bindings/clock/imx6sl-clock.h>
+
+ / {
+ aliases {
++ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+@@ -27,25 +29,46 @@
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ spi3 = &ecspi4;
++ usbphy0 = &usbphy1;
++ usbphy1 = &usbphy2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu@0 {
++ cpu0: cpu@0 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
++ operating-points = <
++ /* kHz uV */
++ 996000 1275000
++ 792000 1175000
++ 396000 975000
++ >;
++ fsl,soc-operating-points = <
++ /* ARM kHz SOC-PU uV */
++ 996000 1225000
++ 792000 1175000
++ 396000 1175000
++ >;
++ clock-latency = <61036>; /* two CLK32 periods */
++ clocks = <&clks IMX6SL_CLK_ARM>, <&clks IMX6SL_CLK_PLL2_PFD2>,
++ <&clks IMX6SL_CLK_STEP>, <&clks IMX6SL_CLK_PLL1_SW>,
++ <&clks IMX6SL_CLK_PLL1_SYS>;
++ clock-names = "arm", "pll2_pfd2_396m", "step",
++ "pll1_sw", "pll1_sys";
++ arm-supply = <&reg_arm>;
++ pu-supply = <&reg_pu>;
++ soc-supply = <&reg_soc>;
+ };
+ };
+
+ intc: interrupt-controller@00a01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+- #address-cells = <1>;
+- #size-cells = <1>;
+ interrupt-controller;
+ reg = <0x00a01000 0x1000>,
+ <0x00a00100 0x100>;
+@@ -57,15 +80,21 @@
+
+ ckil {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ osc {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+
++ pu_dummy: pudummy_reg {
++ compatible = "fsl,imx6-dummy-pureg"; /* only used in ldo-bypass */
++ };
++
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -73,19 +102,45 @@
+ interrupt-parent = <&intc>;
+ ranges;
+
++ ocram: sram@00900000 {
++ compatible = "mmio-sram";
++ reg = <0x00900000 0x20000>;
++ clocks = <&clks IMX6SL_CLK_OCRAM>;
++ };
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks IMX6SL_CLK_PLL2_BUS>, <&clks IMX6SL_CLK_PLL2_PFD2>,
++ <&clks IMX6SL_CLK_PLL2_198M>, <&clks IMX6SL_CLK_ARM>,
++ <&clks IMX6SL_CLK_PLL3_USB_OTG>, <&clks IMX6SL_CLK_PERIPH>,
++ <&clks IMX6SL_CLK_PRE_PERIPH_SEL>, <&clks IMX6SL_CLK_PERIPH_CLK2>,
++ <&clks IMX6SL_CLK_PERIPH_CLK2_SEL>, <&clks IMX6SL_CLK_OSC>,
++ <&clks IMX6SL_CLK_PLL1_SYS>, <&clks IMX6SL_CLK_PERIPH2>,
++ <&clks IMX6SL_CLK_AHB>, <&clks IMX6SL_CLK_OCRAM>,
++ <&clks IMX6SL_CLK_PLL1_SW>, <&clks IMX6SL_CLK_PRE_PERIPH2_SEL>,
++ <&clks IMX6SL_CLK_PERIPH2_CLK2_SEL>, <&clks IMX6SL_CLK_PERIPH2_CLK2>,
++ <&clks IMX6SL_CLK_STEP>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc", "pll1_sys", "periph2", "ahb", "ocram", "pll1_sw",
++ "periph2_pre", "periph2_clk2_sel", "periph2_clk2", "step";
++ fsl,max_ddr_freq = <400000000>;
++ };
++
+ L2: l2-cache@00a02000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x00a02000 0x1000>;
+- interrupts = <0 92 0x04>;
++ interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
+ cache-unified;
+ cache-level = <2>;
+ arm,tag-latency = <4 2 3>;
+ arm,data-latency = <4 2 3>;
++ arm,dynamic-clk-gating;
++ arm,standby-mode;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+- interrupts = <0 94 0x04>;
++ interrupts = <0 94 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ aips1: aips-bus@02000000 {
+@@ -104,7 +159,7 @@
+
+ spdif: spdif@02004000 {
+ reg = <0x02004000 0x4000>;
+- interrupts = <0 52 0x04>;
++ interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ ecspi1: ecspi@02008000 {
+@@ -112,7 +167,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02008000 0x4000>;
+- interrupts = <0 31 0x04>;
++ interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI1>,
+ <&clks IMX6SL_CLK_ECSPI1>;
+ clock-names = "ipg", "per";
+@@ -124,7 +179,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x0200c000 0x4000>;
+- interrupts = <0 32 0x04>;
++ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI2>,
+ <&clks IMX6SL_CLK_ECSPI2>;
+ clock-names = "ipg", "per";
+@@ -136,7 +191,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02010000 0x4000>;
+- interrupts = <0 33 0x04>;
++ interrupts = <0 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI3>,
+ <&clks IMX6SL_CLK_ECSPI3>;
+ clock-names = "ipg", "per";
+@@ -148,7 +203,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-ecspi", "fsl,imx51-ecspi";
+ reg = <0x02014000 0x4000>;
+- interrupts = <0 34 0x04>;
++ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ECSPI4>,
+ <&clks IMX6SL_CLK_ECSPI4>;
+ clock-names = "ipg", "per";
+@@ -159,7 +214,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02018000 0x4000>;
+- interrupts = <0 30 0x04>;
++ interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -172,7 +227,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02020000 0x4000>;
+- interrupts = <0 26 0x04>;
++ interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -185,7 +240,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02024000 0x4000>;
+- interrupts = <0 27 0x04>;
++ interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -195,9 +250,11 @@
+ };
+
+ ssi1: ssi@02028000 {
+- compatible = "fsl,imx6sl-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6sl-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02028000 0x4000>;
+- interrupts = <0 46 0x04>;
++ interrupts = <0 46 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SSI1>;
+ dmas = <&sdma 37 1 0>,
+ <&sdma 38 1 0>;
+@@ -207,9 +264,11 @@
+ };
+
+ ssi2: ssi@0202c000 {
+- compatible = "fsl,imx6sl-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6sl-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x0202c000 0x4000>;
+- interrupts = <0 47 0x04>;
++ interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SSI2>;
+ dmas = <&sdma 41 1 0>,
+ <&sdma 42 1 0>;
+@@ -219,9 +278,11 @@
+ };
+
+ ssi3: ssi@02030000 {
+- compatible = "fsl,imx6sl-ssi","fsl,imx21-ssi";
++ compatible = "fsl,imx6sl-ssi",
++ "fsl,imx51-ssi",
++ "fsl,imx21-ssi";
+ reg = <0x02030000 0x4000>;
+- interrupts = <0 48 0x04>;
++ interrupts = <0 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SSI3>;
+ dmas = <&sdma 45 1 0>,
+ <&sdma 46 1 0>;
+@@ -234,7 +295,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02034000 0x4000>;
+- interrupts = <0 28 0x04>;
++ interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -247,7 +308,7 @@
+ compatible = "fsl,imx6sl-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02038000 0x4000>;
+- interrupts = <0 29 0x04>;
++ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_UART>,
+ <&clks IMX6SL_CLK_UART_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -261,7 +322,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x02080000 0x4000>;
+- interrupts = <0 83 0x04>;
++ interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM1>,
+ <&clks IMX6SL_CLK_PWM1>;
+ clock-names = "ipg", "per";
+@@ -271,7 +332,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x02084000 0x4000>;
+- interrupts = <0 84 0x04>;
++ interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM2>,
+ <&clks IMX6SL_CLK_PWM2>;
+ clock-names = "ipg", "per";
+@@ -281,7 +342,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x02088000 0x4000>;
+- interrupts = <0 85 0x04>;
++ interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM3>,
+ <&clks IMX6SL_CLK_PWM3>;
+ clock-names = "ipg", "per";
+@@ -291,7 +352,7 @@
+ #pwm-cells = <2>;
+ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
+ reg = <0x0208c000 0x4000>;
+- interrupts = <0 86 0x04>;
++ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_PWM4>,
+ <&clks IMX6SL_CLK_PWM4>;
+ clock-names = "ipg", "per";
+@@ -300,7 +361,7 @@
+ gpt: gpt@02098000 {
+ compatible = "fsl,imx6sl-gpt";
+ reg = <0x02098000 0x4000>;
+- interrupts = <0 55 0x04>;
++ interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_GPT>,
+ <&clks IMX6SL_CLK_GPT_SERIAL>;
+ clock-names = "ipg", "per";
+@@ -309,7 +370,8 @@
+ gpio1: gpio@0209c000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x0209c000 0x4000>;
+- interrupts = <0 66 0x04 0 67 0x04>;
++ interrupts = <0 66 IRQ_TYPE_LEVEL_HIGH>,
++ <0 67 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -319,7 +381,8 @@
+ gpio2: gpio@020a0000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020a0000 0x4000>;
+- interrupts = <0 68 0x04 0 69 0x04>;
++ interrupts = <0 68 IRQ_TYPE_LEVEL_HIGH>,
++ <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -329,7 +392,8 @@
+ gpio3: gpio@020a4000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020a4000 0x4000>;
+- interrupts = <0 70 0x04 0 71 0x04>;
++ interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>,
++ <0 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -339,7 +403,8 @@
+ gpio4: gpio@020a8000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020a8000 0x4000>;
+- interrupts = <0 72 0x04 0 73 0x04>;
++ interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>,
++ <0 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -349,7 +414,8 @@
+ gpio5: gpio@020ac000 {
+ compatible = "fsl,imx6sl-gpio", "fsl,imx35-gpio";
+ reg = <0x020ac000 0x4000>;
+- interrupts = <0 74 0x04 0 75 0x04>;
++ interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>,
++ <0 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -357,21 +423,23 @@
+ };
+
+ kpp: kpp@020b8000 {
++ compatible = "fsl,imx6sl-kpp", "fsl,imx21-kpp";
+ reg = <0x020b8000 0x4000>;
+- interrupts = <0 82 0x04>;
++ interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ };
+
+ wdog1: wdog@020bc000 {
+ compatible = "fsl,imx6sl-wdt", "fsl,imx21-wdt";
+ reg = <0x020bc000 0x4000>;
+- interrupts = <0 80 0x04>;
++ interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ };
+
+ wdog2: wdog@020c0000 {
+ compatible = "fsl,imx6sl-wdt", "fsl,imx21-wdt";
+ reg = <0x020c0000 0x4000>;
+- interrupts = <0 81 0x04>;
++ interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ status = "disabled";
+ };
+@@ -379,7 +447,8 @@
+ clks: ccm@020c4000 {
+ compatible = "fsl,imx6sl-ccm";
+ reg = <0x020c4000 0x4000>;
+- interrupts = <0 87 0x04 0 88 0x04>;
++ interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
++ <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
+
+@@ -388,7 +457,9 @@
+ "fsl,imx6q-anatop",
+ "syscon", "simple-bus";
+ reg = <0x020c8000 0x1000>;
+- interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
++ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>,
++ <0 54 IRQ_TYPE_LEVEL_HIGH>,
++ <0 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+@@ -434,7 +505,7 @@
+
+ reg_arm: regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+- regulator-name = "cpu";
++ regulator-name = "vddarm";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+@@ -454,7 +525,6 @@
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+- regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+@@ -484,18 +554,34 @@
+ };
+ };
+
++ tempmon: tempmon {
++ compatible = "fsl,imx6sl-tempmon", "fsl,imx6q-tempmon";
++ interrupts = <0 49 0x04>;
++ fsl,tempmon = <&anatop>;
++ fsl,tempmon-data = <&ocotp>;
++ clocks = <&clks IMX6SL_CLK_PLL3_USB_OTG>;
++ };
++
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6sl-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+- interrupts = <0 44 0x04>;
++ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBPHY1>;
++ fsl,anatop = <&anatop>;
+ };
+
+ usbphy2: usbphy@020ca000 {
+ compatible = "fsl,imx6sl-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020ca000 0x1000>;
+- interrupts = <0 45 0x04>;
++ interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBPHY2>;
++ fsl,anatop = <&anatop>;
++ };
++
++ usbphy_nop1: usbphy_nop1 {
++ compatible = "usb-nop-xceiv";
++ clocks = <&clks IMX6SL_CLK_USBPHY1>;
++ clock-names = "main_clk";
+ };
+
+ snvs@020cc000 {
+@@ -507,271 +593,165 @@
+ snvs-rtc-lp@34 {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ reg = <0x34 0x58>;
+- interrupts = <0 19 0x04 0 20 0x04>;
++ interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
++ <0 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+- };
+-
+- epit1: epit@020d0000 {
+- reg = <0x020d0000 0x4000>;
+- interrupts = <0 56 0x04>;
+- };
+
+- epit2: epit@020d4000 {
+- reg = <0x020d4000 0x4000>;
+- interrupts = <0 57 0x04>;
+- };
+-
+- src: src@020d8000 {
+- compatible = "fsl,imx6sl-src", "fsl,imx51-src";
+- reg = <0x020d8000 0x4000>;
+- interrupts = <0 91 0x04 0 96 0x04>;
+- #reset-cells = <1>;
+- };
+-
+- gpc: gpc@020dc000 {
+- compatible = "fsl,imx6sl-gpc", "fsl,imx6q-gpc";
+- reg = <0x020dc000 0x4000>;
+- interrupts = <0 89 0x04>;
+- };
+-
+- gpr: iomuxc-gpr@020e0000 {
+- compatible = "fsl,imx6sl-iomuxc-gpr",
+- "fsl,imx6q-iomuxc-gpr", "syscon";
+- reg = <0x020e0000 0x38>;
+- };
+-
+- iomuxc: iomuxc@020e0000 {
+- compatible = "fsl,imx6sl-iomuxc";
+- reg = <0x020e0000 0x4000>;
+-
+- ecspi1 {
+- pinctrl_ecspi1_1: ecspi1grp-1 {
++ csi {
++ pinctrl_csi_0: csigrp-0 {
+ fsl,pins = <
+- MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO 0x100b1
+- MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x100b1
+- MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x100b1
++ MX6SL_PAD_EPDC_GDRL__CSI_MCLK 0x110b0
++ MX6SL_PAD_EPDC_GDCLK__CSI_PIXCLK 0x110b0
++ MX6SL_PAD_EPDC_GDSP__CSI_VSYNC 0x110b0
++ MX6SL_PAD_EPDC_GDOE__CSI_HSYNC 0x110b0
++ MX6SL_PAD_EPDC_SDLE__CSI_DATA09 0x110b0
++ MX6SL_PAD_EPDC_SDCLK__CSI_DATA08 0x110b0
++ MX6SL_PAD_EPDC_D7__CSI_DATA07 0x110b0
++ MX6SL_PAD_EPDC_D6__CSI_DATA06 0x110b0
++ MX6SL_PAD_EPDC_D5__CSI_DATA05 0x110b0
++ MX6SL_PAD_EPDC_D4__CSI_DATA04 0x110b0
++ MX6SL_PAD_EPDC_D3__CSI_DATA03 0x110b0
++ MX6SL_PAD_EPDC_D2__CSI_DATA02 0x110b0
++ MX6SL_PAD_EPDC_D1__CSI_DATA01 0x110b0
++ MX6SL_PAD_EPDC_D0__CSI_DATA00 0x110b0
++ MX6SL_PAD_EPDC_SDSHR__GPIO1_IO26 0x80000000
++ MX6SL_PAD_EPDC_SDOE__GPIO1_IO25 0x80000000
+ >;
+ };
+ };
+
+- fec {
+- pinctrl_fec_1: fecgrp-1 {
++ i2c1 {
++ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <
+- MX6SL_PAD_FEC_MDC__FEC_MDC 0x1b0b0
+- MX6SL_PAD_FEC_MDIO__FEC_MDIO 0x1b0b0
+- MX6SL_PAD_FEC_CRS_DV__FEC_RX_DV 0x1b0b0
+- MX6SL_PAD_FEC_RXD0__FEC_RX_DATA0 0x1b0b0
+- MX6SL_PAD_FEC_RXD1__FEC_RX_DATA1 0x1b0b0
+- MX6SL_PAD_FEC_TX_EN__FEC_TX_EN 0x1b0b0
+- MX6SL_PAD_FEC_TXD0__FEC_TX_DATA0 0x1b0b0
+- MX6SL_PAD_FEC_TXD1__FEC_TX_DATA1 0x1b0b0
+- MX6SL_PAD_FEC_REF_CLK__FEC_REF_OUT 0x4001b0a8
++ MX6SL_PAD_I2C1_SCL__I2C1_SCL 0x4001b8b1
++ MX6SL_PAD_I2C1_SDA__I2C1_SDA 0x4001b8b1
+ >;
+ };
+ };
+
+- uart1 {
+- pinctrl_uart1_1: uart1grp-1 {
++ i2c2 {
++ pinctrl_i2c2_1: i2c2grp-1 {
+ fsl,pins = <
+- MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
+- MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
++ MX6SL_PAD_I2C2_SCL__I2C2_SCL 0x4001b8b1
++ MX6SL_PAD_I2C2_SDA__I2C2_SDA 0x4001b8b1
+ >;
+ };
+ };
+
+- usbotg1 {
+- pinctrl_usbotg1_1: usbotg1grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_EPDC_PWRCOM__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_2: usbotg1grp-2 {
++ i2c3 {
++ pinctrl_i2c3_1: i2c3grp-1 {
+ fsl,pins = <
+- MX6SL_PAD_FEC_RXD0__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_3: usbotg1grp-3 {
+- fsl,pins = <
+- MX6SL_PAD_LCD_DAT1__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_4: usbotg1grp-4 {
+- fsl,pins = <
+- MX6SL_PAD_REF_CLK_32K__USB_OTG1_ID 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg1_5: usbotg1grp-5 {
+- fsl,pins = <
+- MX6SL_PAD_SD3_DAT0__USB_OTG1_ID 0x17059
++ MX6SL_PAD_EPDC_SDCE2__I2C3_SCL 0x4001b8b1
++ MX6SL_PAD_EPDC_SDCE3__I2C3_SDA 0x4001b8b1
+ >;
+ };
+ };
+
+- usbotg2 {
+- pinctrl_usbotg2_1: usbotg2grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_ECSPI1_SCLK__USB_OTG2_OC 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg2_2: usbotg2grp-2 {
++ lcdif {
++ pinctrl_lcdif_dat_0: lcdifdatgrp-0 {
+ fsl,pins = <
+- MX6SL_PAD_ECSPI2_SCLK__USB_OTG2_OC 0x17059
++ MX6SL_PAD_LCD_DAT0__LCD_DATA00 0x1b0b0
++ MX6SL_PAD_LCD_DAT1__LCD_DATA01 0x1b0b0
++ MX6SL_PAD_LCD_DAT2__LCD_DATA02 0x1b0b0
++ MX6SL_PAD_LCD_DAT3__LCD_DATA03 0x1b0b0
++ MX6SL_PAD_LCD_DAT4__LCD_DATA04 0x1b0b0
++ MX6SL_PAD_LCD_DAT5__LCD_DATA05 0x1b0b0
++ MX6SL_PAD_LCD_DAT6__LCD_DATA06 0x1b0b0
++ MX6SL_PAD_LCD_DAT7__LCD_DATA07 0x1b0b0
++ MX6SL_PAD_LCD_DAT8__LCD_DATA08 0x1b0b0
++ MX6SL_PAD_LCD_DAT9__LCD_DATA09 0x1b0b0
++ MX6SL_PAD_LCD_DAT10__LCD_DATA10 0x1b0b0
++ MX6SL_PAD_LCD_DAT11__LCD_DATA11 0x1b0b0
++ MX6SL_PAD_LCD_DAT12__LCD_DATA12 0x1b0b0
++ MX6SL_PAD_LCD_DAT13__LCD_DATA13 0x1b0b0
++ MX6SL_PAD_LCD_DAT14__LCD_DATA14 0x1b0b0
++ MX6SL_PAD_LCD_DAT15__LCD_DATA15 0x1b0b0
++ MX6SL_PAD_LCD_DAT16__LCD_DATA16 0x1b0b0
++ MX6SL_PAD_LCD_DAT17__LCD_DATA17 0x1b0b0
++ MX6SL_PAD_LCD_DAT18__LCD_DATA18 0x1b0b0
++ MX6SL_PAD_LCD_DAT19__LCD_DATA19 0x1b0b0
++ MX6SL_PAD_LCD_DAT20__LCD_DATA20 0x1b0b0
++ MX6SL_PAD_LCD_DAT21__LCD_DATA21 0x1b0b0
++ MX6SL_PAD_LCD_DAT22__LCD_DATA22 0x1b0b0
++ MX6SL_PAD_LCD_DAT23__LCD_DATA23 0x1b0b0
+ >;
+ };
+
+- pinctrl_usbotg2_3: usbotg2grp-3 {
++ pinctrl_lcdif_ctrl_0: lcdifctrlgrp-0 {
+ fsl,pins = <
+- MX6SL_PAD_KEY_ROW5__USB_OTG2_OC 0x17059
+- >;
+- };
+-
+- pinctrl_usbotg2_4: usbotg2grp-4 {
+- fsl,pins = <
+- MX6SL_PAD_SD3_DAT2__USB_OTG2_OC 0x17059
++ MX6SL_PAD_LCD_CLK__LCD_CLK 0x1b0b0
++ MX6SL_PAD_LCD_ENABLE__LCD_ENABLE 0x1b0b0
++ MX6SL_PAD_LCD_HSYNC__LCD_HSYNC 0x1b0b0
++ MX6SL_PAD_LCD_VSYNC__LCD_VSYNC 0x1b0b0
++ MX6SL_PAD_LCD_RESET__LCD_RESET 0x1b0b0
+ >;
+ };
+ };
+
+- usdhc1 {
+- pinctrl_usdhc1_1: usdhc1grp-1 {
++ pwm1 {
++ pinctrl_pwm1_0: pwm1grp-0 {
+ fsl,pins = <
+- MX6SL_PAD_SD1_CMD__SD1_CMD 0x17059
+- MX6SL_PAD_SD1_CLK__SD1_CLK 0x10059
+- MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+- MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+- MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+- MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+- MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x17059
+- MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x17059
+- MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x17059
+- MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x17059
++ MX6SL_PAD_PWM1__PWM1_OUT 0x110b0
+ >;
+ };
+-
+- pinctrl_usdhc1_1_100mhz: usdhc1grp-1-100mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9
+- MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9
+- MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170b9
+- MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170b9
+- MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170b9
+- MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170b9
+- MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170b9
+- MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170b9
+- MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170b9
+- MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170b9
+- >;
+- };
+-
+- pinctrl_usdhc1_1_200mhz: usdhc1grp-1-200mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9
+- MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9
+- MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+- MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+- MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+- MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+- MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170f9
+- MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170f9
+- MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170f9
+- MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170f9
+- >;
+- };
+-
+-
+ };
++ };
+
+- usdhc2 {
+- pinctrl_usdhc2_1: usdhc2grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_SD2_CMD__SD2_CMD 0x17059
+- MX6SL_PAD_SD2_CLK__SD2_CLK 0x10059
+- MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+- MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+- MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+- MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+- >;
+- };
+-
+- pinctrl_usdhc2_1_100mhz: usdhc2grp-1-100mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9
+- MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9
+- MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+- MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+- MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+- MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+- >;
+- };
++ epit1: epit@020d0000 {
++ reg = <0x020d0000 0x4000>;
++ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
++ };
+
+- pinctrl_usdhc2_1_200mhz: usdhc2grp-1-200mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9
+- MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9
+- MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+- MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+- MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+- MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
+- >;
+- };
++ epit2: epit@020d4000 {
++ reg = <0x020d4000 0x4000>;
++ interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
++ };
+
+- };
++ src: src@020d8000 {
++ compatible = "fsl,imx6sl-src", "fsl,imx51-src";
++ reg = <0x020d8000 0x4000>;
++ interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>,
++ <0 96 IRQ_TYPE_LEVEL_HIGH>;
++ #reset-cells = <1>;
++ };
+
+- usdhc3 {
+- pinctrl_usdhc3_1: usdhc3grp-1 {
+- fsl,pins = <
+- MX6SL_PAD_SD3_CMD__SD3_CMD 0x17059
+- MX6SL_PAD_SD3_CLK__SD3_CLK 0x10059
+- MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+- MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+- MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+- MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+- >;
+- };
++ gpc: gpc@020dc000 {
++ compatible = "fsl,imx6sl-gpc", "fsl,imx6q-gpc";
++ reg = <0x020dc000 0x4000>;
++ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_GPU2D_PODF>, <&clks IMX6SL_CLK_GPU2D_OVG>,
++ <&clks IMX6SL_CLK_IPG>;
++ clock-names = "gpu2d_podf", "gpu2d_ovg", "ipg";
++ pu-supply = <&reg_pu>;
++ };
+
+- pinctrl_usdhc3_1_100mhz: usdhc3grp-1-100mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9
+- MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9
+- MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+- MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+- MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+- MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+- >;
+- };
++ gpr: iomuxc-gpr@020e0000 {
++ compatible = "fsl,imx6sl-iomuxc-gpr",
++ "fsl,imx6q-iomuxc-gpr", "syscon";
++ reg = <0x020e0000 0x38>;
++ };
+
+- pinctrl_usdhc3_1_200mhz: usdhc3grp-1-200mhz {
+- fsl,pins = <
+- MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9
+- MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9
+- MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+- MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+- MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+- MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+- >;
+- };
+- };
++ iomuxc: iomuxc@020e0000 {
++ compatible = "fsl,imx6sl-iomuxc";
++ reg = <0x020e0000 0x4000>;
+ };
+
+ csi: csi@020e4000 {
++ compatible = "fsl,imx6sl-csi";
+ reg = <0x020e4000 0x4000>;
+- interrupts = <0 7 0x04>;
++ interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
+ };
+
+ spdc: spdc@020e8000 {
+ reg = <0x020e8000 0x4000>;
+- interrupts = <0 6 0x04>;
++ interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdma: sdma@020ec000 {
+ compatible = "fsl,imx6sl-sdma", "fsl,imx35-sdma";
+ reg = <0x020ec000 0x4000>;
+- interrupts = <0 2 0x04>;
++ interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_SDMA>,
+ <&clks IMX6SL_CLK_SDMA>;
+ clock-names = "ipg", "ahb";
+@@ -781,23 +761,32 @@
+ };
+
+ pxp: pxp@020f0000 {
++ compatible = "fsl,imx6sl-pxp-dma", "fsl,imx6dl-pxp-dma";
+ reg = <0x020f0000 0x4000>;
+- interrupts = <0 98 0x04>;
++ interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 111>;
++ clock-names = "pxp-axi";
++ status = "disabled";
+ };
+
+ epdc: epdc@020f4000 {
+ reg = <0x020f4000 0x4000>;
+- interrupts = <0 97 0x04>;
++ interrupts = <0 97 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lcdif: lcdif@020f8000 {
++ compatible = "fsl,imx6sl-lcdif", "fsl,imx28-lcdif";
+ reg = <0x020f8000 0x4000>;
+- interrupts = <0 39 0x04>;
++ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_LCDIF_PIX>,
++ <&clks IMX6SL_CLK_LCDIF_AXI>;
++ clock-names = "pix", "axi";
++ status = "disabled";
+ };
+
+ dcp: dcp@020fc000 {
+ reg = <0x020fc000 0x4000>;
+- interrupts = <0 99 0x04>;
++ interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+@@ -811,7 +800,7 @@
+ usbotg1: usb@02184000 {
+ compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
+ reg = <0x02184000 0x200>;
+- interrupts = <0 43 0x04>;
++ interrupts = <0 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBOH3>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc 0>;
+@@ -821,7 +810,7 @@
+ usbotg2: usb@02184200 {
+ compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
+ reg = <0x02184200 0x200>;
+- interrupts = <0 42 0x04>;
++ interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBOH3>;
+ fsl,usbphy = <&usbphy2>;
+ fsl,usbmisc = <&usbmisc 1>;
+@@ -831,9 +820,12 @@
+ usbh: usb@02184400 {
+ compatible = "fsl,imx6sl-usb", "fsl,imx27-usb";
+ reg = <0x02184400 0x200>;
+- interrupts = <0 40 0x04>;
++ interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBOH3>;
+ fsl,usbmisc = <&usbmisc 2>;
++ phy_type = "hsic";
++ fsl,usbphy = <&usbphy_nop1>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+@@ -847,7 +839,7 @@
+ fec: ethernet@02188000 {
+ compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
+ reg = <0x02188000 0x4000>;
+- interrupts = <0 114 0x04>;
++ interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_ENET_REF>,
+ <&clks IMX6SL_CLK_ENET_REF>;
+ clock-names = "ipg", "ahb";
+@@ -857,7 +849,7 @@
+ usdhc1: usdhc@02190000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x02190000 0x4000>;
+- interrupts = <0 22 0x04>;
++ interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC1>,
+ <&clks IMX6SL_CLK_USDHC1>,
+ <&clks IMX6SL_CLK_USDHC1>;
+@@ -869,7 +861,7 @@
+ usdhc2: usdhc@02194000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x02194000 0x4000>;
+- interrupts = <0 23 0x04>;
++ interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC2>,
+ <&clks IMX6SL_CLK_USDHC2>,
+ <&clks IMX6SL_CLK_USDHC2>;
+@@ -881,7 +873,7 @@
+ usdhc3: usdhc@02198000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x02198000 0x4000>;
+- interrupts = <0 24 0x04>;
++ interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC3>,
+ <&clks IMX6SL_CLK_USDHC3>,
+ <&clks IMX6SL_CLK_USDHC3>;
+@@ -893,7 +885,7 @@
+ usdhc4: usdhc@0219c000 {
+ compatible = "fsl,imx6sl-usdhc", "fsl,imx6q-usdhc";
+ reg = <0x0219c000 0x4000>;
+- interrupts = <0 25 0x04>;
++ interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USDHC4>,
+ <&clks IMX6SL_CLK_USDHC4>,
+ <&clks IMX6SL_CLK_USDHC4>;
+@@ -907,7 +899,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-i2c", "fsl,imx21-i2c";
+ reg = <0x021a0000 0x4000>;
+- interrupts = <0 36 0x04>;
++ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_I2C1>;
+ status = "disabled";
+ };
+@@ -917,7 +909,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-i2c", "fsl,imx21-i2c";
+ reg = <0x021a4000 0x4000>;
+- interrupts = <0 37 0x04>;
++ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_I2C2>;
+ status = "disabled";
+ };
+@@ -927,7 +919,7 @@
+ #size-cells = <0>;
+ compatible = "fsl,imx6sl-i2c", "fsl,imx21-i2c";
+ reg = <0x021a8000 0x4000>;
+- interrupts = <0 38 0x04>;
++ interrupts = <0 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_I2C3>;
+ status = "disabled";
+ };
+@@ -939,17 +931,23 @@
+
+ rngb: rngb@021b4000 {
+ reg = <0x021b4000 0x4000>;
+- interrupts = <0 5 0x04>;
++ interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ weim: weim@021b8000 {
+ reg = <0x021b8000 0x4000>;
+- interrupts = <0 14 0x04>;
++ interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ ocotp: ocotp-ctrl@021bc000 {
++ compatible = "syscon";
++ reg = <0x021bc000 0x4000>;
+ };
+
+- ocotp: ocotp@021bc000 {
+- compatible = "fsl,imx6sl-ocotp";
++ ocotp-fuse@021bc000 {
++ compatible = "fsl,imx6sl-ocotp", "fsl,imx6q-ocotp";
+ reg = <0x021bc000 0x4000>;
++ clocks = <&clks IMX6SL_CLK_OCOTP>;
+ };
+
+ audmux: audmux@021d8000 {
+@@ -957,6 +955,25 @@
+ reg = <0x021d8000 0x4000>;
+ status = "disabled";
+ };
++
++ gpu: gpu@02200000 {
++ compatible = "fsl,imx6sl-gpu", "fsl,imx6q-gpu";
++ reg = <0x02200000 0x4000>, <0x02204000 0x4000>,
++ <0x80000000 0x0>;
++ reg-names = "iobase_2d", "iobase_vg",
++ "phys_baseaddr";
++ interrupts = <0 10 0x04>, <0 11 0x04>;
++ interrupt-names = "irq_2d", "irq_vg";
++ clocks = <&clks IMX6SL_CLK_MMDC_ROOT>,
++ <&clks IMX6SL_CLK_MMDC_ROOT>,
++ <&clks IMX6SL_CLK_GPU2D_OVG>;
++ clock-names = "gpu2d_axi_clk", "openvg_axi_clk",
++ "gpu2d_clk";
++ resets = <&src 3>, <&src 3>;
++ reset-names = "gpu2d", "gpuvg";
++ pu-supply = <&reg_pu>;
++ };
++
+ };
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6sl-evk-csi.dts linux-3.14.40/arch/arm/boot/dts/imx6sl-evk-csi.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6sl-evk-csi.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/imx6sl-evk-csi.dts 2015-05-01 14:57:57.499427001 -0500
+@@ -0,0 +1,27 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "imx6sl-evk.dts"
++
++/ {
++ csi_v4l2_cap {
++ status = "okay";
++ };
++};
++
++&csi {
++ status = "okay";
++};
++
++&i2c3 {
++ status = "okay";
++};
++
++&epdc {
++ status = "disabled";
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/imx6sl-evk.dts linux-3.14.40/arch/arm/boot/dts/imx6sl-evk.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/imx6sl-evk.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/imx6sl-evk.dts 2015-05-01 14:57:57.499427001 -0500
+@@ -8,6 +8,8 @@
+
+ /dts-v1/;
+
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include "imx6sl.dtsi"
+
+ / {
+@@ -18,11 +20,26 @@
+ reg = <0x80000000 0x40000000>;
+ };
+
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_led>;
++
++ user {
++ label = "debug";
++ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
++ linux,default-trigger = "heartbeat";
++ };
++ };
++
+ regulators {
+ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- reg_usb_otg1_vbus: usb_otg1_vbus {
++ reg_usb_otg1_vbus: regulator@0 {
+ compatible = "regulator-fixed";
++ reg = <0>;
+ regulator-name = "usb_otg1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+@@ -30,22 +47,63 @@
+ enable-active-high;
+ };
+
+- reg_usb_otg2_vbus: usb_otg2_vbus {
++ reg_usb_otg2_vbus: regulator@1 {
+ compatible = "regulator-fixed";
++ reg = <1>;
+ regulator-name = "usb_otg2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 2 0>;
+ enable-active-high;
+ };
++
++ reg_aud3v: regulator@2 {
++ compatible = "regulator-fixed";
++ reg = <2>;
++ regulator-name = "wm8962-supply-3v15";
++ regulator-min-microvolt = <3150000>;
++ regulator-max-microvolt = <3150000>;
++ regulator-boot-on;
++ };
++
++ reg_aud4v: regulator@3 {
++ compatible = "regulator-fixed";
++ reg = <3>;
++ regulator-name = "wm8962-supply-4v2";
++ regulator-min-microvolt = <4325000>;
++ regulator-max-microvolt = <4325000>;
++ regulator-boot-on;
++ };
+ };
++
++ sound {
++ compatible = "fsl,imx6sl-evk-wm8962", "fsl,imx-audio-wm8962";
++ model = "wm8962-audio";
++ ssi-controller = <&ssi2>;
++ audio-codec = <&codec>;
++ audio-routing =
++ "Headphone Jack", "HPOUTL",
++ "Headphone Jack", "HPOUTR",
++ "Ext Spk", "SPKOUTL",
++ "Ext Spk", "SPKOUTR",
++ "AMIC", "MICBIAS",
++ "IN3R", "AMIC";
++ mux-int-port = <2>;
++ mux-ext-port = <3>;
++ };
++};
++
++&audmux {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_audmux3>;
++ status = "okay";
+ };
+
+ &ecspi1 {
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio4 11 0>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ecspi1_1>;
++ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash: m25p80@0 {
+@@ -57,18 +115,326 @@
+ };
+ };
+
++&csi {
++ status = "okay";
++};
++
++&cpu0 {
++ arm-supply = <&sw1a_reg>;
++ soc-supply = <&sw1c_reg>;
++ pu-supply = <&pu_dummy>; /* use pu_dummy if VDDSOC share with VDDPU */
++};
++
+ &fec {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_fec_1>;
++ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ regulator-always-on;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ regulators {
++ compatible = "simple-bus";
++
++ reg_lcd_3v3: lcd-3v3 {
++ compatible = "regulator-fixed";
++ regulator-name = "lcd-3v3";
++ gpio = <&gpio4 3 0>;
++ enable-active-high;
++ };
++ };
++
++ backlight {
++ compatible = "pwm-backlight";
++ pwms = <&pwm1 0 5000000>;
++ brightness-levels = <0 4 8 16 32 64 128 255>;
++ default-brightness-level = <6>;
++ };
++
++ csi_v4l2_cap {
++ compatible = "fsl,imx6sl-csi-v4l2";
++ status = "okay";
++ };
++
++ pxp_v4l2_out {
++ compatible = "fsl,imx6sl-pxp-v4l2";
++ status = "okay";
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2>;
++ status = "okay";
++
++ codec: wm8962@1a {
++ compatible = "wlf,wm8962";
++ reg = <0x1a>;
++ clocks = <&clks IMX6SL_CLK_EXTERN_AUDIO>;
++ DCVDD-supply = <&vgen3_reg>;
++ DBVDD-supply = <&reg_aud3v>;
++ AVDD-supply = <&vgen3_reg>;
++ CPVDD-supply = <&vgen3_reg>;
++ MICVDD-supply = <&reg_aud3v>;
++ PLLVDD-supply = <&vgen3_reg>;
++ SPKVDD1-supply = <&reg_aud4v>;
++ SPKVDD2-supply = <&reg_aud4v>;
++ };
++};
++
++&i2c1 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1_1>;
++ status = "okay";
++
++ pmic: pfuze100@08 {
++ compatible = "fsl,pfuze100";
++ reg = <0x08>;
++
++ regulators {
++ sw1a_reg: sw1ab {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw1c_reg: sw1c {
++ regulator-min-microvolt = <300000>;
++ regulator-max-microvolt = <1875000>;
++ regulator-boot-on;
++ regulator-always-on;
++ regulator-ramp-delay = <6250>;
++ };
++
++ sw2_reg: sw2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3a_reg: sw3a {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw3b_reg: sw3b {
++ regulator-min-microvolt = <400000>;
++ regulator-max-microvolt = <1975000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ sw4_reg: sw4 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
++ swbst_reg: swbst {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5150000>;
++ };
++
++ snvs_reg: vsnvs {
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <3000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vref_reg: vrefddr {
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ vgen1_reg: vgen1 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen2_reg: vgen2 {
++ regulator-min-microvolt = <800000>;
++ regulator-max-microvolt = <1550000>;
++ };
++
++ vgen3_reg: vgen3 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen4_reg: vgen4 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen5_reg: vgen5 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ vgen6_reg: vgen6 {
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++ };
++ };
++
++ mma8450@1c {
++ compatible = "fsl,mma8450";
++ reg = <0x1c>;
++ };
++};
++
++&i2c2 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2_1>;
++ status = "okay";
++};
++
++&i2c3 {
++ clock-frequency = <100000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3_1>;
++ status = "okay";
++
++ ov564x: ov564x@3c {
++ compatible = "ovti,ov564x";
++ reg = <0x3c>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_csi_0>;
++ clocks = <&clks IMX6SL_CLK_CSI>;
++ clock-names = "csi_mclk";
++ AVDD-supply = <&vgen6_reg>; /* 2.8v */
++ DVDD-supply = <&vgen2_reg>; /* 1.5v*/
++ pwn-gpios = <&gpio1 25 1>;
++ rst-gpios = <&gpio1 26 0>;
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ };
++};
++
+ &iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+- hog {
++ imx6sl-evk {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6SL_PAD_KEY_ROW7__GPIO4_IO07 0x17059
+@@ -78,21 +444,270 @@
+ MX6SL_PAD_REF_CLK_32K__GPIO3_IO22 0x17059
+ MX6SL_PAD_KEY_COL4__GPIO4_IO00 0x80000000
+ MX6SL_PAD_KEY_COL5__GPIO4_IO02 0x80000000
++ MX6SL_PAD_AUD_MCLK__AUDIO_CLK_OUT 0x4130b0
++ >;
++ };
++
++ pinctrl_audmux3: audmux3grp {
++ fsl,pins = <
++ MX6SL_PAD_AUD_RXD__AUD3_RXD 0x4130b0
++ MX6SL_PAD_AUD_TXC__AUD3_TXC 0x4130b0
++ MX6SL_PAD_AUD_TXD__AUD3_TXD 0x4110b0
++ MX6SL_PAD_AUD_TXFS__AUD3_TXFS 0x4130b0
++ >;
++ };
++
++ pinctrl_ecspi1: ecspi1grp {
++ fsl,pins = <
++ MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO 0x100b1
++ MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x100b1
++ MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x100b1
++ MX6SL_PAD_ECSPI1_SS0__GPIO4_IO11 0x80000000
++ >;
++ };
++
++ pinctrl_fec: fecgrp {
++ fsl,pins = <
++ MX6SL_PAD_FEC_MDC__FEC_MDC 0x1b0b0
++ MX6SL_PAD_FEC_MDIO__FEC_MDIO 0x1b0b0
++ MX6SL_PAD_FEC_CRS_DV__FEC_RX_DV 0x1b0b0
++ MX6SL_PAD_FEC_RXD0__FEC_RX_DATA0 0x1b0b0
++ MX6SL_PAD_FEC_RXD1__FEC_RX_DATA1 0x1b0b0
++ MX6SL_PAD_FEC_TX_EN__FEC_TX_EN 0x1b0b0
++ MX6SL_PAD_FEC_TXD0__FEC_TX_DATA0 0x1b0b0
++ MX6SL_PAD_FEC_TXD1__FEC_TX_DATA1 0x1b0b0
++ MX6SL_PAD_FEC_REF_CLK__FEC_REF_OUT 0x4001b0a8
++ >;
++ };
++
++ pinctrl_i2c1: i2c1grp {
++ fsl,pins = <
++ MX6SL_PAD_I2C1_SCL__I2C1_SCL 0x4001b8b1
++ MX6SL_PAD_I2C1_SDA__I2C1_SDA 0x4001b8b1
++ >;
++ };
++
++
++ pinctrl_i2c2: i2c2grp {
++ fsl,pins = <
++ MX6SL_PAD_I2C2_SCL__I2C2_SCL 0x4001b8b1
++ MX6SL_PAD_I2C2_SDA__I2C2_SDA 0x4001b8b1
++ >;
++ };
++
++ pinctrl_led: ledgrp {
++ fsl,pins = <
++ MX6SL_PAD_HSIC_STROBE__GPIO3_IO20 0x17059
++ >;
++ };
++
++ pinctrl_kpp: kppgrp {
++ fsl,pins = <
++ MX6SL_PAD_KEY_ROW0__KEY_ROW0 0x1b010
++ MX6SL_PAD_KEY_ROW1__KEY_ROW1 0x1b010
++ MX6SL_PAD_KEY_ROW2__KEY_ROW2 0x1b0b0
++ MX6SL_PAD_KEY_COL0__KEY_COL0 0x110b0
++ MX6SL_PAD_KEY_COL1__KEY_COL1 0x110b0
++ MX6SL_PAD_KEY_COL2__KEY_COL2 0x110b0
++ >;
++ };
++
++ pinctrl_uart1: uart1grp {
++ fsl,pins = <
++ MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
++ MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
++ >;
++ };
++
++ pinctrl_usbotg1: usbotg1grp {
++ fsl,pins = <
++ MX6SL_PAD_EPDC_PWRCOM__USB_OTG1_ID 0x17059
++ >;
++ };
++
++ pinctrl_usdhc1: usdhc1grp {
++ fsl,pins = <
++ MX6SL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6SL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x17059
++ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x17059
++ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x17059
++ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x17059
++ >;
++ };
++
++ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD1_CMD__SD1_CMD 0x170b9
++ MX6SL_PAD_SD1_CLK__SD1_CLK 0x100b9
++ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170b9
++ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170b9
++ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170b9
++ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170b9
++ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170b9
++ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170b9
++ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170b9
++ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD1_CMD__SD1_CMD 0x170f9
++ MX6SL_PAD_SD1_CLK__SD1_CLK 0x100f9
++ MX6SL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
++ MX6SL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
++ MX6SL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
++ MX6SL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
++ MX6SL_PAD_SD1_DAT4__SD1_DATA4 0x170f9
++ MX6SL_PAD_SD1_DAT5__SD1_DATA5 0x170f9
++ MX6SL_PAD_SD1_DAT6__SD1_DATA6 0x170f9
++ MX6SL_PAD_SD1_DAT7__SD1_DATA7 0x170f9
++ >;
++ };
++
++ pinctrl_usdhc2: usdhc2grp {
++ fsl,pins = <
++ MX6SL_PAD_SD2_CMD__SD2_CMD 0x17059
++ MX6SL_PAD_SD2_CLK__SD2_CLK 0x10059
++ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x17059
++ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x17059
++ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x17059
++ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD2_CMD__SD2_CMD 0x170b9
++ MX6SL_PAD_SD2_CLK__SD2_CLK 0x100b9
++ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
++ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
++ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
++ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD2_CMD__SD2_CMD 0x170f9
++ MX6SL_PAD_SD2_CLK__SD2_CLK 0x100f9
++ MX6SL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
++ MX6SL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
++ MX6SL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
++ MX6SL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
++ >;
++ };
++
++ pinctrl_usdhc3: usdhc3grp {
++ fsl,pins = <
++ MX6SL_PAD_SD3_CMD__SD3_CMD 0x17059
++ MX6SL_PAD_SD3_CLK__SD3_CLK 0x10059
++ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x17059
++ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x17059
++ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x17059
++ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x17059
++ >;
++ };
++
++ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD3_CMD__SD3_CMD 0x170b9
++ MX6SL_PAD_SD3_CLK__SD3_CLK 0x100b9
++ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
++ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
++ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
++ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
++ >;
++ };
++
++ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
++ fsl,pins = <
++ MX6SL_PAD_SD3_CMD__SD3_CMD 0x170f9
++ MX6SL_PAD_SD3_CLK__SD3_CLK 0x100f9
++ MX6SL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
++ MX6SL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
++ MX6SL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
++ MX6SL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ >;
+ };
+ };
+ };
+
++&kpp {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_kpp>;
++ linux,keymap = <
++ MATRIX_KEY(0x0, 0x0, KEY_UP) /* ROW0, COL0 */
++ MATRIX_KEY(0x0, 0x1, KEY_DOWN) /* ROW0, COL1 */
++ MATRIX_KEY(0x0, 0x2, KEY_ENTER) /* ROW0, COL2 */
++ MATRIX_KEY(0x1, 0x0, KEY_HOME) /* ROW1, COL0 */
++ MATRIX_KEY(0x1, 0x1, KEY_RIGHT) /* ROW1, COL1 */
++ MATRIX_KEY(0x1, 0x2, KEY_LEFT) /* ROW1, COL2 */
++ MATRIX_KEY(0x2, 0x0, KEY_VOLUMEDOWN) /* ROW2, COL0 */
++ MATRIX_KEY(0x2, 0x1, KEY_VOLUMEUP) /* ROW2, COL1 */
++ >;
++ status = "okay";
++};
++
++&ssi2 {
++ fsl,mode = "i2s-slave";
++ status = "okay";
++};
++
++&lcdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_lcdif_dat_0
++ &pinctrl_lcdif_ctrl_0>;
++ lcd-supply = <&reg_lcd_3v3>;
++ display = <&display>;
++ status = "okay";
++
++ display: display {
++ bits-per-pixel = <16>;
++ bus-width = <24>;
++
++ display-timings {
++ native-mode = <&timing0>;
++ timing0: timing0 {
++ clock-frequency = <33500000>;
++ hactive = <800>;
++ vactive = <480>;
++ hback-porch = <89>;
++ hfront-porch = <164>;
++ vback-porch = <23>;
++ vfront-porch = <10>;
++ hsync-len = <10>;
++ vsync-len = <10>;
++ hsync-active = <0>;
++ vsync-active = <0>;
++ de-active = <1>;
++ pixelclk-active = <0>;
++ };
++ };
++ };
++};
++
++&pwm1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm1_0>;
++ status = "okay";
++};
++
+ &uart1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_uart1_1>;
++ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+ };
+
+ &usbotg1 {
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_usbotg1_1>;
++ pinctrl-0 = <&pinctrl_usbotg1>;
+ disable-over-current;
+ status = "okay";
+ };
+@@ -106,9 +721,9 @@
+
+ &usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc1_1>;
+- pinctrl-1 = <&pinctrl_usdhc1_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc1_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc1>;
++ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ bus-width = <8>;
+ cd-gpios = <&gpio4 7 0>;
+ wp-gpios = <&gpio4 6 0>;
+@@ -117,9 +732,9 @@
+
+ &usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc2_1>;
+- pinctrl-1 = <&pinctrl_usdhc2_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc2_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc2>;
++ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ cd-gpios = <&gpio5 0 0>;
+ wp-gpios = <&gpio4 29 0>;
+ status = "okay";
+@@ -127,9 +742,26 @@
+
+ &usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+- pinctrl-0 = <&pinctrl_usdhc3_1>;
+- pinctrl-1 = <&pinctrl_usdhc3_1_100mhz>;
+- pinctrl-2 = <&pinctrl_usdhc3_1_200mhz>;
++ pinctrl-0 = <&pinctrl_usdhc3>;
++ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
++ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ cd-gpios = <&gpio3 22 0>;
+ status = "okay";
+ };
++
++&pxp {
++ status = "okay";
++};
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++ fsl,ldo-bypass; /* use ldo-bypass, u-boot will check it and configure */
++ pu-supply = <&pu_dummy>; /* ldo-bypass:use pu_dummy if VDDSOC share with VDDPU */
++};
++
++&gpu {
++ pu-supply = <&pu_dummy>; /* ldo-bypass:use pu_dummy if VDDSOC share with VDDPU */
++};
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/Makefile linux-3.14.40/arch/arm/boot/dts/Makefile
+--- linux-3.14.40.orig/arch/arm/boot/dts/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/Makefile 2015-05-01 14:57:57.507427001 -0500
+@@ -154,16 +154,37 @@
+ imx53-qsb.dtb \
+ imx53-smd.dtb \
+ imx6dl-cubox-i.dtb \
++ imx6dl-dfi-fs700-m60.dtb \
++ imx6dl-gw51xx.dtb \
++ imx6dl-gw52xx.dtb \
++ imx6dl-gw53xx.dtb \
++ imx6dl-gw54xx.dtb \
+ imx6dl-hummingboard.dtb \
++ imx6dl-nitrogen6x.dtb \
++ imx6dl-phytec-pbab01.dtb \
+ imx6dl-sabreauto.dtb \
++ imx6dl-sabrelite.dtb \
+ imx6dl-sabresd.dtb \
++ imx6dl-sabresd-hdcp.dtb \
+ imx6dl-wandboard.dtb \
+ imx6q-arm2.dtb \
++ imx6q-cm-fx6.dtb \
+ imx6q-cubox-i.dtb \
++ imx6q-hummingboard.dtb \
++ imx6q-dfi-fs700-m60.dtb \
++ imx6q-dmo-edmqmx6.dtb \
++ imx6q-gk802.dtb \
++ imx6q-gw51xx.dtb \
++ imx6q-gw52xx.dtb \
++ imx6q-gw53xx.dtb \
++ imx6q-gw5400-a.dtb \
++ imx6q-gw54xx.dtb \
++ imx6q-nitrogen6x.dtb \
+ imx6q-phytec-pbab01.dtb \
+ imx6q-sabreauto.dtb \
+ imx6q-sabrelite.dtb \
+ imx6q-sabresd.dtb \
++ imx6q-sabresd-hdcp.dtb \
+ imx6q-sbc6x.dtb \
+ imx6q-udoo.dtb \
+ imx6q-wandboard.dtb \
+@@ -312,7 +333,14 @@
+ dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
+ vexpress-v2p-ca9.dtb \
+ vexpress-v2p-ca15-tc1.dtb \
+- vexpress-v2p-ca15_a7.dtb
++ vexpress-v2p-ca15_a7.dtb \
++ rtsm_ve-cortex_a9x2.dtb \
++ rtsm_ve-cortex_a9x4.dtb \
++ rtsm_ve-cortex_a15x1.dtb \
++ rtsm_ve-cortex_a15x2.dtb \
++ rtsm_ve-cortex_a15x4.dtb \
++ rtsm_ve-v2p-ca15x1-ca7x1.dtb \
++ rtsm_ve-v2p-ca15x4-ca7x4.dtb
+ dtb-$(CONFIG_ARCH_VIRT) += xenvm-4.2.dtb
+ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
+ wm8505-ref.dtb \
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/marco.dtsi linux-3.14.40/arch/arm/boot/dts/marco.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/marco.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/marco.dtsi 2015-05-01 14:57:57.507427001 -0500
+@@ -36,7 +36,7 @@
+ ranges = <0x40000000 0x40000000 0xa0000000>;
+
+ l2-cache-controller@c0030000 {
+- compatible = "sirf,marco-pl310-cache", "arm,pl310-cache";
++ compatible = "arm,pl310-cache";
+ reg = <0xc0030000 0x1000>;
+ interrupts = <0 59 0>;
+ arm,tag-latency = <1 1 1>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/prima2.dtsi linux-3.14.40/arch/arm/boot/dts/prima2.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/prima2.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/prima2.dtsi 2015-05-01 14:57:57.507427001 -0500
+@@ -48,7 +48,7 @@
+ ranges = <0x40000000 0x40000000 0x80000000>;
+
+ l2-cache-controller@80040000 {
+- compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache";
++ compatible = "arm,pl310-cache";
+ reg = <0x80040000 0x1000>;
+ interrupts = <59>;
+ arm,tag-latency = <1 1 1>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a15x1.dts 2015-05-01 14:57:57.507427001 -0500
+@@ -0,0 +1,159 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x1CT
++ *
++ * RTSM_VE_Cortex_A15x1.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA15x1";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x1", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a15x2.dts 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,165 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x2CT
++ *
++ * RTSM_VE_Cortex_A15x2.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA15x2";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x2", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a15x4.dts 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,177 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x4CT
++ *
++ * RTSM_VE_Cortex_A15x4.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA15x4";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x4", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ };
++
++ cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <2>;
++ };
++
++ cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <3>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a9x2.dts 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,171 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA9MPx2CT
++ *
++ * RTSM_VE_Cortex_A9x2.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA9x2";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a9x2", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <1>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x80000000 0x80000000>;
++ };
++
++ scu@2c000000 {
++ compatible = "arm,cortex-a9-scu";
++ reg = <0x2c000000 0x58>;
++ };
++
++ timer@2c000600 {
++ compatible = "arm,cortex-a9-twd-timer";
++ reg = <0x2c000600 0x20>;
++ interrupts = <1 13 0xf04>;
++ };
++
++ watchdog@2c000620 {
++ compatible = "arm,cortex-a9-twd-wdt";
++ reg = <0x2c000620 0x20>;
++ interrupts = <1 14 0xf04>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x2c001000 0x1000>,
++ <0x2c000100 0x100>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0x08000000 0x04000000>,
++ <1 0 0x14000000 0x04000000>,
++ <2 0 0x18000000 0x04000000>,
++ <3 0 0x1c000000 0x04000000>,
++ <4 0 0x0c000000 0x04000000>,
++ <5 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-cortex_a9x4.dts 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,183 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA9MPx4CT
++ *
++ * RTSM_VE_Cortex_A9x4.lisa
++ */
++
++/dts-v1/;
++
++/ {
++ model = "RTSM_VE_CortexA9x4";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a9x4", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <0>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <1>;
++ };
++
++ cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <2>;
++ };
++
++ cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a9";
++ reg = <3>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x80000000 0x80000000>;
++ };
++
++ scu@2c000000 {
++ compatible = "arm,cortex-a9-scu";
++ reg = <0x2c000000 0x58>;
++ };
++
++ timer@2c000600 {
++ compatible = "arm,cortex-a9-twd-timer";
++ reg = <0x2c000600 0x20>;
++ interrupts = <1 13 0xf04>;
++ };
++
++ watchdog@2c000620 {
++ compatible = "arm,cortex-a9-twd-wdt";
++ reg = <0x2c000620 0x20>;
++ interrupts = <1 14 0xf04>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x2c001000 0x1000>,
++ <0x2c000100 0x100>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0x08000000 0x04000000>,
++ <1 0 0x14000000 0x04000000>,
++ <2 0 0x18000000 0x04000000>,
++ <3 0 0x1c000000 0x04000000>,
++ <4 0 0x0c000000 0x04000000>,
++ <5 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi linux-3.14.40/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-motherboard.dtsi 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,231 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * Motherboard component
++ *
++ * VEMotherBoard.lisa
++ */
++
++ motherboard {
++ compatible = "arm,vexpress,v2m-p1", "simple-bus";
++ arm,hbi = <0x190>;
++ arm,vexpress,site = <0>;
++ arm,v2m-memory-map = "rs1";
++ #address-cells = <2>; /* SMB chipselect number and offset */
++ #size-cells = <1>;
++ #interrupt-cells = <1>;
++ ranges;
++
++ flash@0,00000000 {
++ compatible = "arm,vexpress-flash", "cfi-flash";
++ reg = <0 0x00000000 0x04000000>,
++ <4 0x00000000 0x04000000>;
++ bank-width = <4>;
++ };
++
++ vram@2,00000000 {
++ compatible = "arm,vexpress-vram";
++ reg = <2 0x00000000 0x00800000>;
++ };
++
++ ethernet@2,02000000 {
++ compatible = "smsc,lan91c111";
++ reg = <2 0x02000000 0x10000>;
++ interrupts = <15>;
++ };
++
++ iofpga@3,00000000 {
++ compatible = "arm,amba-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0 3 0 0x200000>;
++
++ v2m_sysreg: sysreg@010000 {
++ compatible = "arm,vexpress-sysreg";
++ reg = <0x010000 0x1000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ };
++
++ v2m_sysctl: sysctl@020000 {
++ compatible = "arm,sp810", "arm,primecell";
++ reg = <0x020000 0x1000>;
++ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
++ clock-names = "refclk", "timclk", "apb_pclk";
++ #clock-cells = <1>;
++ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
++ };
++
++ aaci@040000 {
++ compatible = "arm,pl041", "arm,primecell";
++ reg = <0x040000 0x1000>;
++ interrupts = <11>;
++ clocks = <&smbclk>;
++ clock-names = "apb_pclk";
++ };
++
++ mmci@050000 {
++ compatible = "arm,pl180", "arm,primecell";
++ reg = <0x050000 0x1000>;
++ interrupts = <9 10>;
++ cd-gpios = <&v2m_sysreg 0 0>;
++ wp-gpios = <&v2m_sysreg 1 0>;
++ max-frequency = <12000000>;
++ vmmc-supply = <&v2m_fixed_3v3>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "mclk", "apb_pclk";
++ };
++
++ kmi@060000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x060000 0x1000>;
++ interrupts = <12>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ kmi@070000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x070000 0x1000>;
++ interrupts = <13>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ v2m_serial0: uart@090000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x090000 0x1000>;
++ interrupts = <5>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ v2m_serial1: uart@0a0000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0a0000 0x1000>;
++ interrupts = <6>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ v2m_serial2: uart@0b0000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0b0000 0x1000>;
++ interrupts = <7>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ v2m_serial3: uart@0c0000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0c0000 0x1000>;
++ interrupts = <8>;
++ clocks = <&v2m_clk24mhz>, <&smbclk>;
++ clock-names = "uartclk", "apb_pclk";
++ };
++
++ wdt@0f0000 {
++ compatible = "arm,sp805", "arm,primecell";
++ reg = <0x0f0000 0x1000>;
++ interrupts = <0>;
++ clocks = <&v2m_refclk32khz>, <&smbclk>;
++ clock-names = "wdogclk", "apb_pclk";
++ };
++
++ v2m_timer01: timer@110000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x110000 0x1000>;
++ interrupts = <2>;
++ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&smbclk>;
++ clock-names = "timclken1", "timclken2", "apb_pclk";
++ };
++
++ v2m_timer23: timer@120000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x120000 0x1000>;
++ interrupts = <3>;
++ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&smbclk>;
++ clock-names = "timclken1", "timclken2", "apb_pclk";
++ };
++
++ rtc@170000 {
++ compatible = "arm,pl031", "arm,primecell";
++ reg = <0x170000 0x1000>;
++ interrupts = <4>;
++ clocks = <&smbclk>;
++ clock-names = "apb_pclk";
++ };
++
++ clcd@1f0000 {
++ compatible = "arm,pl111", "arm,primecell";
++ reg = <0x1f0000 0x1000>;
++ interrupts = <14>;
++ clocks = <&v2m_oscclk1>, <&smbclk>;
++ clock-names = "v2m:oscclk1", "apb_pclk";
++ mode = "VGA";
++ use_dma = <0>;
++ framebuffer = <0x18000000 0x00180000>;
++ };
++
++ virtio_block@0130000 {
++ compatible = "virtio,mmio";
++ reg = <0x130000 0x200>;
++ interrupts = <42>;
++ };
++
++ };
++
++ v2m_fixed_3v3: fixedregulator@0 {
++ compatible = "regulator-fixed";
++ regulator-name = "3V3";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ v2m_clk24mhz: clk24mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ clock-output-names = "v2m:clk24mhz";
++ };
++
++ v2m_refclk1mhz: refclk1mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <1000000>;
++ clock-output-names = "v2m:refclk1mhz";
++ };
++
++ v2m_refclk32khz: refclk32khz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <32768>;
++ clock-output-names = "v2m:refclk32khz";
++ };
++
++ mcc {
++ compatible = "simple-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ v2m_oscclk1: osc@1 {
++ /* CLCD clock */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <23750000 63500000>;
++ #clock-cells = <0>;
++ clock-output-names = "v2m:oscclk1";
++ };
++
++ muxfpga@0 {
++ compatible = "arm,vexpress-muxfpga";
++ arm,vexpress-sysreg,func = <7 0>;
++ };
++
++ shutdown@0 {
++ compatible = "arm,vexpress-shutdown";
++ arm,vexpress-sysreg,func = <8 0>;
++ };
++ };
++ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-v2p-ca15x1-ca7x1.dts 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,233 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x4CT
++ * ARMCortexA7x4CT
++ * RTSM_VE_Cortex_A15x1_A7x1.lisa
++ */
++
++/dts-v1/;
++
++/memreserve/ 0xff000000 0x01000000;
++
++/ {
++ model = "RTSM_VE_CortexA15x1-A7x1";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x1_a7x1", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ clusters {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cluster0: cluster@0 {
++ reg = <0>;
++// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core0: core@0 {
++ reg = <0>;
++ };
++
++ };
++ };
++
++ cluster1: cluster@1 {
++ reg = <1>;
++// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core1: core@0 {
++ reg = <0>;
++ };
++
++ };
++ };
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ cluster = <&cluster0>;
++ core = <&core0>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x100>;
++ cluster = <&cluster1>;
++ core = <&core1>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ cci@2c090000 {
++ compatible = "arm,cci-400", "arm,cci";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0 0x2c090000 0 0x1000>;
++ ranges = <0x0 0x0 0x2c090000 0x10000>;
++
++ cci_control1: slave-if@4000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x4000 0x1000>;
++ };
++
++ cci_control2: slave-if@5000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x5000 0x1000>;
++ };
++ };
++
++ dcscb@60000000 {
++ compatible = "arm,rtsm,dcscb";
++ reg = <0 0x60000000 0 0x1000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts linux-3.14.40/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/boot/dts/rtsm_ve-v2p-ca15x4-ca7x4.dts 2015-05-01 14:57:57.511427001 -0500
+@@ -0,0 +1,317 @@
++/*
++ * ARM Ltd. Fast Models
++ *
++ * Versatile Express (VE) system model
++ * ARMCortexA15x4CT
++ * ARMCortexA7x4CT
++ * RTSM_VE_Cortex_A15x4_A7x4.lisa
++ */
++
++/dts-v1/;
++
++/memreserve/ 0xff000000 0x01000000;
++
++/ {
++ model = "RTSM_VE_CortexA15x4-A7x4";
++ arm,vexpress,site = <0xf>;
++ compatible = "arm,rtsm_ve,cortex_a15x4_a7x4", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ clusters {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cluster0: cluster@0 {
++ reg = <0>;
++// freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core0: core@0 {
++ reg = <0>;
++ };
++
++ core1: core@1 {
++ reg = <1>;
++ };
++
++ core2: core@2 {
++ reg = <2>;
++ };
++
++ core3: core@3 {
++ reg = <3>;
++ };
++
++ };
++ };
++
++ cluster1: cluster@1 {
++ reg = <1>;
++// freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core4: core@0 {
++ reg = <0>;
++ };
++
++ core5: core@1 {
++ reg = <1>;
++ };
++
++ core6: core@2 {
++ reg = <2>;
++ };
++
++ core7: core@3 {
++ reg = <3>;
++ };
++
++ };
++ };
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ cluster = <&cluster0>;
++ core = <&core0>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ cluster = <&cluster0>;
++ core = <&core1>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <2>;
++ cluster = <&cluster0>;
++ core = <&core2>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <3>;
++ cluster = <&cluster0>;
++ core = <&core3>;
++// clock-frequency = <1000000000>;
++ cci-control-port = <&cci_control1>;
++ };
++
++ cpu4: cpu@4 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x100>;
++ cluster = <&cluster1>;
++ core = <&core4>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++
++ cpu5: cpu@5 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x101>;
++ cluster = <&cluster1>;
++ core = <&core5>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++
++ cpu6: cpu@6 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x102>;
++ cluster = <&cluster1>;
++ core = <&core6>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++
++ cpu7: cpu@7 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a7";
++ reg = <0x103>;
++ cluster = <&cluster1>;
++ core = <&core7>;
++// clock-frequency = <800000000>;
++ cci-control-port = <&cci_control2>;
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x80000000>;
++ };
++
++ cci@2c090000 {
++ compatible = "arm,cci-400", "arm,cci";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0 0x2c090000 0 0x1000>;
++ ranges = <0x0 0x0 0x2c090000 0x10000>;
++
++ cci_control1: slave-if@4000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x4000 0x1000>;
++ };
++
++ cci_control2: slave-if@5000 {
++ compatible = "arm,cci-400-ctrl-if";
++ interface-type = "ace";
++ reg = <0x5000 0x1000>;
++ };
++ };
++
++ dcscb@60000000 {
++ compatible = "arm,rtsm,dcscb";
++ reg = <0 0x60000000 0 0x1000>;
++ };
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0 0x2c001000 0 0x1000>,
++ <0 0x2c002000 0 0x1000>,
++ <0 0x2c004000 0 0x2000>,
++ <0 0x2c006000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv7-timer";
++ interrupts = <1 13 0xf08>,
++ <1 14 0xf08>,
++ <1 11 0xf08>,
++ <1 10 0xf08>;
++ };
++
++ dcc {
++ compatible = "arm,vexpress,config-bus";
++ arm,vexpress,config-bridge = <&v2m_sysreg>;
++
++ osc@0 {
++ /* ACLK clock to the AXI master port on the test chip */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 0>;
++ freq-range = <30000000 50000000>;
++ #clock-cells = <0>;
++ clock-output-names = "extsaxiclk";
++ };
++
++ oscclk1: osc@1 {
++ /* Reference clock for the CLCD */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 1>;
++ freq-range = <10000000 80000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clcdclk";
++ };
++
++ smbclk: oscclk2: osc@2 {
++ /* Reference clock for the test chip internal PLLs */
++ compatible = "arm,vexpress-osc";
++ arm,vexpress-sysreg,func = <1 2>;
++ freq-range = <33000000 100000000>;
++ #clock-cells = <0>;
++ clock-output-names = "tcrefclk";
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2m.dtsi linux-3.14.40/arch/arm/boot/dts/vexpress-v2m.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2m.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vexpress-v2m.dtsi 2015-05-01 14:57:57.515427001 -0500
+@@ -227,6 +227,7 @@
+ };
+
+ clcd@1f000 {
++ status = "disabled";
+ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x1f000 0x1000>;
+ interrupts = <14>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi linux-3.14.40/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi 2015-05-01 14:57:57.559427001 -0500
+@@ -228,6 +228,7 @@
+ };
+
+ clcd@1f0000 {
++ status = "disabled";
+ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x1f0000 0x1000>;
+ interrupts = <14>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts 2015-05-01 14:57:57.559427001 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0xff000000 0x01000000;
++
+ / {
+ model = "V2P-CA15_CA7";
+ arm,hbi = <0x249>;
+@@ -29,29 +31,60 @@
+ i2c1 = &v2m_i2c_pcie;
+ };
+
+- cpus {
++ clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu0: cpu@0 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a15";
++ cluster0: cluster@0 {
+ reg = <0>;
+- cci-control-port = <&cci_control1>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core0: core@0 {
++ reg = <0>;
++ };
++
++ core1: core@1 {
++ reg = <1>;
++ };
++
++ };
+ };
+
+- cpu1: cpu@1 {
+- device_type = "cpu";
+- compatible = "arm,cortex-a15";
++ cluster1: cluster@1 {
+ reg = <1>;
+- cci-control-port = <&cci_control1>;
++ cores {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ core2: core@0 {
++ reg = <0>;
++ };
++
++ core3: core@1 {
++ reg = <1>;
++ };
++
++ core4: core@2 {
++ reg = <2>;
++ };
++ };
+ };
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ cci-control-port = <&cci_control2>;
++ cluster = <&cluster1>;
++ core = <&core2>;
++ clock-frequency = <800000000>;
+ };
+
+ cpu3: cpu@3 {
+@@ -59,6 +92,9 @@
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ cci-control-port = <&cci_control2>;
++ cluster = <&cluster1>;
++ core = <&core3>;
++ clock-frequency = <800000000>;
+ };
+
+ cpu4: cpu@4 {
+@@ -66,12 +102,35 @@
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ cci-control-port = <&cci_control2>;
++ cluster = <&cluster1>;
++ core = <&core4>;
++ clock-frequency = <800000000>;
++ };
++
++ cpu0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <0>;
++ cci-control-port = <&cci_control1>;
++ cluster = <&cluster0>;
++ core = <&core0>;
++ clock-frequency = <1000000000>;
++ };
++
++ cpu1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a15";
++ reg = <1>;
++ cci-control-port = <&cci_control1>;
++ cluster = <&cluster0>;
++ core = <&core1>;
++ clock-frequency = <1000000000>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+- reg = <0 0x80000000 0 0x40000000>;
++ reg = <0 0x80000000 0 0x80000000>;
+ };
+
+ wdt@2a490000 {
+@@ -86,6 +145,8 @@
+ compatible = "arm,hdlcd";
+ reg = <0 0x2b000000 0 0x1000>;
+ interrupts = <0 85 4>;
++ mode = "1024x768-16@60";
++ framebuffer = <0 0xff000000 0 0x01000000>;
+ clocks = <&oscclk5>;
+ clock-names = "pxlclk";
+ };
+@@ -127,6 +188,16 @@
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
++
++ pmu@9000 {
++ compatible = "arm,cci-400-pmu";
++ reg = <0x9000 0x5000>;
++ interrupts = <0 101 4>,
++ <0 102 4>,
++ <0 103 4>,
++ <0 104 4>,
++ <0 105 4>;
++ };
+ };
+
+ memory-controller@7ffd0000 {
+@@ -164,12 +235,21 @@
+ <1 10 0xf08>;
+ };
+
+- pmu {
++ pmu_a15 {
+ compatible = "arm,cortex-a15-pmu";
++ cluster = <&cluster0>;
+ interrupts = <0 68 4>,
+ <0 69 4>;
+ };
+
++ pmu_a7 {
++ compatible = "arm,cortex-a7-pmu";
++ cluster = <&cluster1>;
++ interrupts = <0 128 4>,
++ <0 129 4>,
++ <0 130 4>;
++ };
++
+ oscclk6a: oscclk6a {
+ /* Reference 24MHz clock */
+ compatible = "fixed-clock";
+@@ -178,6 +258,19 @@
+ clock-output-names = "oscclk6a";
+ };
+
++/* PSCI requires support from firmware and is not present in the normal TC2
++ * distribution, so this node is commented out by default...
++
++ psci {
++ compatible = "arm,psci";
++ method = "smc";
++ cpu_suspend = <0x80100001>;
++ cpu_off = <0x80100002>;
++ cpu_on = <0x80100003>;
++ migrate = <0x80100004>;
++ };
++*/
++
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts 2015-05-01 14:57:57.559427001 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0xbf000000 0x01000000;
++
+ / {
+ model = "V2P-CA15";
+ arm,hbi = <0x237>;
+@@ -57,6 +59,8 @@
+ interrupts = <0 85 4>;
+ clocks = <&oscclk5>;
+ clock-names = "pxlclk";
++ mode = "1024x768-16@60";
++ framebuffer = <0 0xbf000000 0 0x01000000>;
+ };
+
+ memory-controller@2b0a0000 {
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca5s.dts linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca5s.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca5s.dts 2015-05-01 14:57:57.559427001 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/memreserve/ 0xbf000000 0x01000000;
++
+ / {
+ model = "V2P-CA5s";
+ arm,hbi = <0x225>;
+@@ -59,6 +61,8 @@
+ interrupts = <0 85 4>;
+ clocks = <&oscclk3>;
+ clock-names = "pxlclk";
++ mode = "640x480-16@60";
++ framebuffer = <0xbf000000 0x01000000>;
+ };
+
+ memory-controller@2a150000 {
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca9.dts linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/vexpress-v2p-ca9.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vexpress-v2p-ca9.dts 2015-05-01 14:57:57.559427001 -0500
+@@ -9,6 +9,8 @@
+
+ /dts-v1/;
+
++/include/ "clcd-panels.dtsi"
++
+ / {
+ model = "V2P-CA9";
+ arm,hbi = <0x191>;
+@@ -73,6 +75,8 @@
+ interrupts = <0 44 4>;
+ clocks = <&oscclk1>, <&oscclk2>;
+ clock-names = "clcdclk", "apb_pclk";
++ mode = "XVGA";
++ use_dma = <1>;
+ };
+
+ memory-controller@100e0000 {
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vf610.dtsi linux-3.14.40/arch/arm/boot/dts/vf610.dtsi
+--- linux-3.14.40.orig/arch/arm/boot/dts/vf610.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vf610.dtsi 2015-05-01 14:57:57.563427001 -0500
+@@ -44,11 +44,13 @@
+
+ sxosc {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ fxosc {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/boot/dts/vf610-twr.dts linux-3.14.40/arch/arm/boot/dts/vf610-twr.dts
+--- linux-3.14.40.orig/arch/arm/boot/dts/vf610-twr.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/boot/dts/vf610-twr.dts 2015-05-01 14:57:57.563427001 -0500
+@@ -25,11 +25,13 @@
+ clocks {
+ audio_ext {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ enet_ext {
+ compatible = "fixed-clock";
++ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/common/Makefile linux-3.14.40/arch/arm/common/Makefile
+--- linux-3.14.40.orig/arch/arm/common/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/common/Makefile 2015-05-01 14:57:57.579427001 -0500
+@@ -13,6 +13,7 @@
+ obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
+ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
++CFLAGS_REMOVE_mcpm_entry.o = -pg
+ AFLAGS_mcpm_head.o := -march=armv7-a
+ AFLAGS_vlock.o := -march=armv7-a
+ obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
+diff -Nur linux-3.14.40.orig/arch/arm/configs/imx_v6_v7_defconfig linux-3.14.40/arch/arm/configs/imx_v6_v7_defconfig
+--- linux-3.14.40.orig/arch/arm/configs/imx_v6_v7_defconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/configs/imx_v6_v7_defconfig 2015-05-01 14:57:57.587427001 -0500
+@@ -45,6 +45,9 @@
+ CONFIG_AEABI=y
+ CONFIG_HIGHMEM=y
+ CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_ARM_IMX6Q_CPUFREQ=y
++CONFIG_CPU_IDLE=y
+ CONFIG_VFP=y
+ CONFIG_NEON=y
+ CONFIG_BINFMT_MISC=m
+@@ -70,6 +73,8 @@
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
+ # CONFIG_STANDALONE is not set
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=256
+ CONFIG_IMX_WEIM=y
+ CONFIG_CONNECTOR=y
+ CONFIG_MTD=y
+@@ -154,7 +159,12 @@
+ CONFIG_SPI_IMX=y
+ CONFIG_GPIO_SYSFS=y
+ CONFIG_GPIO_MC9S08DZ60=y
++CONFIG_GPIO_PCA953X=y
+ # CONFIG_HWMON is not set
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
+ CONFIG_WATCHDOG=y
+ CONFIG_IMX2_WDT=y
+ CONFIG_MFD_DA9052_I2C=y
+@@ -170,32 +180,44 @@
+ CONFIG_REGULATOR_PFUZE100=y
+ CONFIG_MEDIA_SUPPORT=y
+ CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
+ CONFIG_MEDIA_RC_SUPPORT=y
+ CONFIG_RC_DEVICES=y
+ CONFIG_IR_GPIO_CIR=y
+ CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
+ CONFIG_SOC_CAMERA=y
+ CONFIG_VIDEO_MX3=y
+ CONFIG_V4L_MEM2MEM_DRIVERS=y
+ CONFIG_VIDEO_CODA=y
+ CONFIG_SOC_CAMERA_OV2640=y
+ CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
+ CONFIG_BACKLIGHT_LCD_SUPPORT=y
+ CONFIG_LCD_CLASS_DEVICE=y
+ CONFIG_LCD_L4F00242T03=y
+ CONFIG_LCD_PLATFORM=y
+ CONFIG_BACKLIGHT_CLASS_DEVICE=y
+ CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FB_MXC_MIPI_DSI=y
++CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL=y
+ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_LOGO=y
+ CONFIG_SOUND=y
+ CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
+ CONFIG_SND_SOC=y
+ CONFIG_SND_IMX_SOC=y
+ CONFIG_SND_SOC_PHYCORE_AC97=y
+ CONFIG_SND_SOC_EUKREA_TLV320=y
+ CONFIG_SND_SOC_IMX_WM8962=y
+ CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_CS42888=y
+ CONFIG_SND_SOC_IMX_SPDIF=y
+ CONFIG_SND_SOC_IMX_MC13783=y
+ CONFIG_USB=y
+@@ -208,12 +230,18 @@
+ CONFIG_NOP_USB_XCEIV=y
+ CONFIG_USB_MXS_PHY=y
+ CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
+ CONFIG_USB_ETH=m
+ CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
+ CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
+ CONFIG_MMC_SDHCI=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+ CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_GPIO=y
+@@ -229,16 +257,10 @@
+ CONFIG_RTC_DRV_MXC=y
+ CONFIG_RTC_DRV_SNVS=y
+ CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
+ CONFIG_IMX_SDMA=y
+ CONFIG_MXS_DMA=y
+ CONFIG_STAGING=y
+-CONFIG_DRM_IMX=y
+-CONFIG_DRM_IMX_FB_HELPER=y
+-CONFIG_DRM_IMX_PARALLEL_DISPLAY=y
+-CONFIG_DRM_IMX_TVE=y
+-CONFIG_DRM_IMX_LDB=y
+-CONFIG_DRM_IMX_IPUV3_CORE=y
+-CONFIG_DRM_IMX_IPUV3=y
+ CONFIG_COMMON_CLK_DEBUG=y
+ # CONFIG_IOMMU_SUPPORT is not set
+ CONFIG_PWM=y
+diff -Nur linux-3.14.40.orig/arch/arm/configs/imx_v7_cbi_hb_base_defconfig linux-3.14.40/arch/arm/configs/imx_v7_cbi_hb_base_defconfig
+--- linux-3.14.40.orig/arch/arm/configs/imx_v7_cbi_hb_base_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/configs/imx_v7_cbi_hb_base_defconfig 2015-05-01 14:57:57.587427001 -0500
+@@ -0,0 +1,367 @@
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_KERNEL_LZO=y
++CONFIG_SYSVIPC=y
++CONFIG_FHANDLE=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_ZSWAP=y
++CONFIG_ZSMALLOC=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_PCI=y
++CONFIG_PCIE_DW=y
++CONFIG_PCI_IMX6=y
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_HIGHMEM=y
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_VFPv3=y
++CONFIG_NEON=y
++CONFIG_KERNEL_MODE_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_IOSCHED_BFQ=y
++CONFIG_CGROUP_BFQIO=y
++CONFIG_DEFAULT_BFQ=y
++CONFIG_DEFAULT_IOSCHED="bfq"
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_SPY=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=y
++CONFIG_ETHERNET=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++# CONFIG_NET_VENDOR_CIRRUS is not set
++# CONFIG_NET_VENDOR_FARADAY
++# CONFIG_NET_VENDOR_INTEL
++# CONFIG_NET_VENDOR_I825XX
++# CONFIG_NET_VENDOR_MARVELL
++# CONFIG_NET_VENDOR_MICROCHIP
++# CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++# CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NET_VENDOR_8390=y
++# CONFIG_AX88796 is not set
++# CONFIG_ETHOC is not set
++# CONFIG_SH_ETH is not set
++# CONFIG_NET_VENDOR_SEEQ=y
++# CONFIG_NET_VENDOR_SMSC=y
++# CONFIG_SMC91X is not set
++# CONFIG_SMC911X is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++# CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_NET_VENDOR_FREESCALE=y
++CONFIG_FEC=y
++CONFIG_PHYLIB=y
++CONFIG_AT803X_PHY=y
++CONFIG_WLAN=y
++CONFIG_BRCMUTIL=m
++CONFIG_BRCMFMAC=m
++CONFIG_BRCMFMAC_SDIO=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_DMA_CMA=y
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=256
++CONFIG_CONNECTOR=y
++# CONFIG_MTD is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_NETDEVICES=y
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_MOUSE_PS2 is not set
++CONFIG_INPUT_MISC=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++CONFIG_GPIO_MXC=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++# CONFIG_MEDIA_RADIO_SUPPORT is not set
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++# CONFIG_MEDIA_USB_SUPPORT is not set
++# CONFIG_USB_VIDEO_CLASS is not set
++# CONFIG_RADIO_ADAPTERS is not set
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++# CONFIG_FB_MX3 is not set
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_HDMI_CEC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_MXC_MLB150=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_RTC_DRV_PCF8523=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_SRAM=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++CONFIG_PWM_IMX=y
++CONFIG_IRQCHIP=y
++CONFIG_ARM_GIC=y
++# CONFIG_IPACK_BUS is not set
++CONFIG_ARCH_HAS_RESET_CONTROLLER=y
++CONFIG_RESET_CONTROLLER=y
++CONFIG_RESET_GPIO=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT23=y
++CONFIG_EXT4_FS_XATTR=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++CONFIG_CRYPTO_XTS=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRYPTO_RMD128=y
++CONFIG_CRYPTO_RMD160=y
++CONFIG_CRYPTO_RMD256=y
++CONFIG_CRYPTO_RMD320=y
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=y
++CONFIG_CRYPTO_WP512=y
++CONFIG_CRYPTO_BLOWFISH=y
++CONFIG_CRYPTO_CAMELLIA=y
++CONFIG_CRYPTO_DES=y
++CONFIG_CRYPTO_TWOFISH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRYPTO_AES_ARM_BS=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
++# CONFIG_MXC_MMA8451 is not set
++CONFIG_RC_CORE=m
++CONFIG_RC_DECODERS=y
++CONFIG_LIRC=m
++CONFIG_RC_LOOPBACK=m
++CONFIG_RC_MAP=m
++CONFIG_RC_DEVICES=y
++CONFIG_RC_ATI_REMOTE=m
++CONFIG_IR_NEC_DECODER=m
++CONFIG_IR_RC5_DECODER=m
++CONFIG_IR_RC6_DECODER=m
++CONFIG_IR_JVC_DECODER=m
++CONFIG_IR_SONY_DECODER=m
++CONFIG_IR_RC5_SZ_DECODER=m
++CONFIG_IR_SANYO_DECODER=m
++CONFIG_IR_MCE_KBD_DECODER=m
++CONFIG_IR_LIRC_CODEC=m
++CONFIG_IR_IMON=m
++CONFIG_IR_MCEUSB=m
++CONFIG_IR_ITE_CIR=m
++CONFIG_IR_NUVOTON=m
++CONFIG_IR_FINTEK=m
++CONFIG_IR_REDRAT3=m
++CONFIG_IR_ENE=m
++CONFIG_IR_STREAMZAP=m
++CONFIG_IR_WINBOND_CIR=m
++CONFIG_IR_IGUANA=m
++CONFIG_IR_TTUSBIR=m
++CONFIG_IR_GPIO_CIR=m
+diff -Nur linux-3.14.40.orig/arch/arm/configs/imx_v7_cbi_hb_defconfig linux-3.14.40/arch/arm/configs/imx_v7_cbi_hb_defconfig
+--- linux-3.14.40.orig/arch/arm/configs/imx_v7_cbi_hb_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/configs/imx_v7_cbi_hb_defconfig 2015-05-01 14:57:57.591427001 -0500
+@@ -0,0 +1,5138 @@
++#
++# Automatically generated make config: don't edit
++#
++CONFIG_MMU=y
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
++# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
++CONFIG_LOCALVERSION=""
++CONFIG_CROSS_COMPILE=""
++CONFIG_DEFAULT_HOSTNAME="(none)"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_HOTPLUG=y
++CONFIG_UEVENT_HELPER_PATH=""
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++
++CONFIG_BUILD_DOCSRC=y
++
++#
++# General setup
++#
++CONFIG_KERNEL_LZO=y
++# CONFIG_KERNEL_BZIP2 is not set
++# CONFIG_KERNEL_LZMA is not set
++CONFIG_SWAP=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++# CONFIG_COMPILE_TEST is not set
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++CONFIG_SYSCTL=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_IOSCHED_BFQ=y
++CONFIG_CGROUP_BFQIO=y
++CONFIG_DEFAULT_BFQ=y
++CONFIG_DEFAULT_IOSCHED="bfq"
++# CONFIG_CHECKPOINT_RESTORE is not set
++CONFIG_NAMESPACES=y
++CONFIG_PID_NS=y
++CONFIG_UTS_NS=y
++CONFIG_IPC_NS=y
++CONFIG_NET_NS=y
++CONFIG_USER_NS=y
++# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
++CONFIG_SYSVIPC=y
++CONFIG_FHANDLE=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++
++CONFIG_POSIX_MQUEUE=y
++CONFIG_PREEMPT_VOLUNTARY=y
++
++CONFIG_SLUB=y
++CONFIG_SLUB_CPU_PARTIAL=y
++# CONFIG_SLUB_STATS is not set
++# CONFIG_SLUB_DEBUG_ON is not set
++
++# CONFIG_AD525X_DPOT is not set
++# CONFIG_ATMEL_PWM is not set
++# CONFIG_IWMC3200TOP is not set
++# CONFIG_BLK_DEV_BSG is not set
++
++# MX6 specific kernel configuration
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_PCI=y
++CONFIG_PCIE_DW=y
++CONFIG_PCI_IMX6=y
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_HIGHMEM=y
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_VFPv3=y
++CONFIG_NEON=y
++CONFIG_KERNEL_MODE_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++CONFIG_WIRELESS=y
++CONFIG_WIRELESS_EXT=y
++CONFIG_WEXT_CORE=y
++CONFIG_WEXT_PROC=y
++CONFIG_WEXT_SPY=y
++CONFIG_WEXT_PRIV=y
++CONFIG_CFG80211=y
++CONFIG_ETHERNET=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++# CONFIG_NET_VENDOR_CIRRUS is not set
++# CONFIG_NET_VENDOR_FARADAY
++# CONFIG_NET_VENDOR_INTEL
++# CONFIG_NET_VENDOR_I825XX
++# CONFIG_NET_VENDOR_MARVELL
++# CONFIG_NET_VENDOR_MICROCHIP
++# CONFIG_NET_VENDOR_MICROCHIP=y
++# CONFIG_ENC28J60 is not set
++# CONFIG_NET_VENDOR_NATSEMI=y
++# CONFIG_NET_VENDOR_8390=y
++# CONFIG_AX88796 is not set
++# CONFIG_ETHOC is not set
++# CONFIG_SH_ETH is not set
++# CONFIG_NET_VENDOR_SEEQ=y
++# CONFIG_NET_VENDOR_SMSC=y
++# CONFIG_SMC91X is not set
++# CONFIG_SMC911X is not set
++# CONFIG_SMSC911X is not set
++# CONFIG_NET_VENDOR_STMICRO=y
++# CONFIG_STMMAC_ETH is not set
++# CONFIG_NET_VENDOR_VIA=y
++# CONFIG_VIA_VELOCITY is not set
++# CONFIG_NET_VENDOR_WIZNET=y
++CONFIG_NET_VENDOR_FREESCALE=y
++CONFIG_FEC=y
++CONFIG_PHYLIB=y
++CONFIG_AT803X_PHY=y
++CONFIG_WLAN=y
++CONFIG_BRCMUTIL=m
++CONFIG_BRCMFMAC=m
++CONFIG_BRCMFMAC_SDIO=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_DMA_CMA=y
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=256
++CONFIG_CONNECTOR=y
++# CONFIG_MTD is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_NETDEVICES=y
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_MOUSE_PS2 is not set
++CONFIG_INPUT_MISC=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++CONFIG_GPIO_MXC=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++# CONFIG_MEDIA_RADIO_SUPPORT is not set
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
++# CONFIG_RADIO_ADAPTERS is not set
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++# CONFIG_FB_MX3 is not set
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_HDMI_CEC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_MXC_MLB150=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_RTC_DRV_PCF8523=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_SRAM=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_SYSFS=y
++CONFIG_PWM_IMX=y
++CONFIG_IRQCHIP=y
++CONFIG_ARM_GIC=y
++CONFIG_ARCH_HAS_RESET_CONTROLLER=y
++CONFIG_RESET_CONTROLLER=y
++CONFIG_RESET_GPIO=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_USE_FOR_EXT23=y
++CONFIG_EXT4_FS_XATTR=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++CONFIG_CRYPTO_XTS=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRYPTO_RMD128=y
++CONFIG_CRYPTO_RMD160=y
++CONFIG_CRYPTO_RMD256=y
++CONFIG_CRYPTO_RMD320=y
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=y
++CONFIG_CRYPTO_WP512=y
++CONFIG_CRYPTO_BLOWFISH=y
++CONFIG_CRYPTO_CAMELLIA=y
++CONFIG_CRYPTO_DES=y
++CONFIG_CRYPTO_TWOFISH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRYPTO_AES_ARM_BS=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
++# CONFIG_MXC_MMA8451 is not set
++
++#
++# Loadable module support
++#
++# CONFIG_MODULE_FORCE_LOAD is not set
++# -- MODULE_FORCE_UNLOAD is controlled by config-debug/nodebug
++
++# CONFIG_PCI_DEBUG is not set
++CONFIG_PCI_STUB=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCI_PASID=y
++CONFIG_HT_IRQ=y
++CONFIG_PCI_MSI=y
++# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
++CONFIG_PCIEPORTBUS=y
++CONFIG_PCIEAER=y
++CONFIG_PCIEASPM=y
++# CONFIG_PCIEASPM_DEBUG is not set
++CONFIG_PCIE_ECRC=y
++CONFIG_PCIEAER_INJECT=m
++CONFIG_HOTPLUG_PCI_PCIE=y
++CONFIG_HOTPLUG_PCI_FAKE=m
++
++# CONFIG_SGI_IOC4 is not set
++
++# CONFIG_ISA is not set
++# CONFIG_SCx200 is not set
++
++#
++# PCMCIA/CardBus support
++# FIXME: Deprecate Cardbus ?
++#
++CONFIG_PCMCIA=y
++CONFIG_PCMCIA_LOAD_CIS=y
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_YENTA=m
++CONFIG_CARDBUS=y
++CONFIG_I82092=m
++CONFIG_PD6729=m
++
++CONFIG_PCCARD=y
++CONFIG_SDIO_UART=m
++# CONFIG_MMC_TEST is not set
++# CONFIG_MMC_DEBUG is not set
++# https://lists.fedoraproject.org/pipermail/kernel/2014-February/004889.html
++# CONFIG_MMC_CLKGATE is not set
++CONFIG_MMC_BLOCK=y
++CONFIG_MMC_BLOCK_MINORS=8
++CONFIG_MMC_BLOCK_BOUNCE=y
++CONFIG_MMC_SDHCI_PCI=m
++CONFIG_MMC_SDHCI_ACPI=m
++CONFIG_MMC_SDRICOH_CS=m
++CONFIG_MMC_TIFM_SD=m
++CONFIG_MMC_WBSD=m
++CONFIG_MMC_VIA_SDMMC=m
++CONFIG_MMC_CB710=m
++CONFIG_MMC_RICOH_MMC=y
++CONFIG_MMC_USHC=m
++CONFIG_MMC_REALTEK_PCI=m
++CONFIG_MMC_VUB300=m
++# CONFIG_MMC_SDHCI_PXAV2 is not set
++# CONFIG_MMC_SDHCI_PXAV3 is not set
++# CONFIG_MMC_SDHCI_OF_ARASAN is not set
++
++
++CONFIG_CB710_CORE=m
++# CONFIG_CB710_DEBUG is not set
++
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_MTHCA=m
++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
++CONFIG_INFINIBAND_IPOIB_CM=y
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_SRPT=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++# CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING is not set #staging
++CONFIG_INFINIBAND_IPATH=m
++CONFIG_INFINIBAND_ISER=m
++CONFIG_INFINIBAND_ISERT=m
++CONFIG_INFINIBAND_AMSO1100=m
++# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set
++CONFIG_INFINIBAND_CXGB3=m
++CONFIG_INFINIBAND_CXGB4=m
++CONFIG_SCSI_CXGB3_ISCSI=m
++CONFIG_SCSI_CXGB4_ISCSI=m
++# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
++CONFIG_MLX4_INFINIBAND=m
++CONFIG_MLX5_INFINIBAND=m
++CONFIG_INFINIBAND_NES=m
++# CONFIG_INFINIBAND_NES_DEBUG is not set
++CONFIG_INFINIBAND_QIB=m
++CONFIG_INFINIBAND_QIB_DCA=y
++# CONFIG_INFINIBAND_OCRDMA is not set
++# CONFIG_INFINIBAND_USNIC is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
++# CONFIG_BINFMT_AOUT is not set
++CONFIG_BINFMT_SCRIPT=y
++
++#
++# Device Drivers
++#
++
++# CONFIG_COMMON_CLK_SI5351 is not set
++
++#
++# Generic Driver Options
++#
++CONFIG_FW_LOADER=y
++# CONFIG_FIRMWARE_IN_KERNEL is not set
++CONFIG_EXTRA_FIRMWARE=""
++
++# Give this a try in rawhide for now
++# CONFIG_FW_LOADER_USER_HELPER is not set
++
++
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD_TESTS is not set
++# CONFIG_MTD_REDBOOT_PARTS is not set
++# CONFIG_MTD_AR7_PARTS is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++# CONFIG_MTD_CHAR is not set
++# CONFIG_MTD_BLKDEVS is not set
++# CONFIG_MTD_BLOCK is not set
++# CONFIG_MTD_BLOCK_RO is not set
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_SM_FTL is not set
++# CONFIG_MTD_OOPS is not set
++# CONFIG_MTD_SWAP is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++# CONFIG_MTD_CFI is not set
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_RAM is not set
++# CONFIG_MTD_ROM is not set
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_TS5500 is not set
++# CONFIG_MTD_INTEL_VR_NOR is not set
++# CONFIG_MTD_PLATRAM is not set
++
++# Self-contained MTD device drivers
++# CONFIG_MTD_PMC551 is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOCG3 is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_BCH is not set
++# CONFIG_MTD_NAND_MUSEUM_IDS is not set
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_LPDDR is not set
++CONFIG_MTD_UBI=m
++CONFIG_MTD_UBI_WL_THRESHOLD=4096
++CONFIG_MTD_UBI_BEB_LIMIT=20
++# CONFIG_MTD_UBI_FASTMAP is not set
++# CONFIG_MTD_UBI_GLUEBI is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_1284=y
++# CONFIG_PARPORT_AX88796 is not set
++
++CONFIG_ACPI_PCI_SLOT=y
++CONFIG_HOTPLUG_PCI_ACPI=y
++CONFIG_HOTPLUG_PCI_ACPI_IBM=m
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV=y
++CONFIG_BLK_DEV_NULL_BLK=m
++CONFIG_BLK_DEV_FD=m
++# CONFIG_PARIDE is not set
++CONFIG_ZRAM=m
++# CONFIG_ZRAM_DEBUG is not set
++CONFIG_ENHANCEIO=m
++
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
++CONFIG_BLK_DEV_DRBD=m
++CONFIG_BLK_DEV_UMEM=m
++CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
++# Fedora 18 util-linux is the last release that supports cryptoloop devices
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_NVME=m
++CONFIG_BLK_DEV_SKD=m # 64-bit only but easier to put here
++CONFIG_BLK_DEV_OSD=m
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_IO_TRACE=y
++
++CONFIG_BLK_DEV_BSGLIB=y
++CONFIG_BLK_DEV_INTEGRITY=y
++CONFIG_BLK_DEV_THROTTLING=y
++# CONFIG_BLK_CMDLINE_PARSER is not set
++
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++# CONFIG_BLK_DEV_HD is not set
++# CONFIG_BLK_DEV_RSXX is not set
++
++CONFIG_SCSI_VIRTIO=m
++CONFIG_VIRTIO_BLK=m
++CONFIG_VIRTIO_PCI=m
++CONFIG_VIRTIO_BALLOON=m
++CONFIG_VIRTIO_MMIO=m
++# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
++CONFIG_VIRTIO_NET=m
++CONFIG_HW_RANDOM_VIRTIO=m
++CONFIG_VIRTIO_CONSOLE=m
++CONFIG_VHOST_NET=m
++CONFIG_TCM_VHOST=m
++CONFIG_VHOST_SCSI=m
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++
++CONFIG_SCSI_ENCLOSURE=m
++CONFIG_SCSI_SRP=m
++CONFIG_SCSI_SRP_ATTRS=m
++CONFIG_SCSI_TGT=m
++CONFIG_SCSI_ISCI=m
++CONFIG_SCSI_CHELSIO_FCOE=m
++
++CONFIG_SCSI_DH=y
++CONFIG_SCSI_DH_RDAC=m
++CONFIG_SCSI_DH_HP_SW=m
++CONFIG_SCSI_DH_EMC=m
++CONFIG_SCSI_DH_ALUA=m
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++CONFIG_CHR_DEV_SCH=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_FC_TGT_ATTRS=y
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
++CONFIG_SCSI_SRP_TGT_ATTRS=y
++CONFIG_SCSI_SAS_LIBSAS=m
++CONFIG_SCSI_SAS_ATA=y
++CONFIG_SCSI_SAS_HOST_SMP=y
++CONFIG_RAID_ATTRS=m
++
++CONFIG_ISCSI_TCP=m
++CONFIG_ISCSI_BOOT_SYSFS=m
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++# http://lists.fedoraproject.org/pipermail/kernel/2013-February/004102.html
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=4
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC94XX=m
++# CONFIG_AIC94XX_DEBUG is not set
++# CONFIG_SCSI_ADVANSYS is not set
++CONFIG_SCSI_BFA_FC=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_ESAS2R=m
++CONFIG_SCSI_MVSAS=m
++# CONFIG_SCSI_MVSAS_DEBUG is not set
++CONFIG_SCSI_MVSAS_TASKLET=y
++CONFIG_SCSI_MPT2SAS=m
++CONFIG_SCSI_MPT2SAS_MAX_SGE=128
++CONFIG_SCSI_MPT2SAS_LOGGING=y
++CONFIG_SCSI_MPT3SAS=m
++CONFIG_SCSI_MPT3SAS_MAX_SGE=128
++CONFIG_SCSI_MPT3SAS_LOGGING=y
++
++CONFIG_SCSI_UFSHCD=m
++CONFIG_SCSI_UFSHCD_PCI=m
++# CONFIG_SCSI_UFSHCD_PLATFORM is not set
++
++CONFIG_SCSI_MVUMI=m
++
++CONFIG_SCSI_OSD_INITIATOR=m
++CONFIG_SCSI_OSD_ULD=m
++CONFIG_SCSI_OSD_DPRINT_SENSE=1
++# CONFIG_SCSI_OSD_DEBUG is not set
++
++CONFIG_SCSI_BNX2_ISCSI=m
++CONFIG_SCSI_BNX2X_FCOE=m
++CONFIG_BE2ISCSI=m
++CONFIG_SCSI_PMCRAID=m
++
++CONFIG_SCSI_HPSA=m
++CONFIG_SCSI_3W_SAS=m
++CONFIG_SCSI_PM8001=m
++CONFIG_VMWARE_PVSCSI=m
++CONFIG_VMWARE_BALLOON=m
++
++CONFIG_SCSI_ARCMSR=m
++CONFIG_SCSI_BUSLOGIC=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_FLASHPOINT=y
++CONFIG_SCSI_DMX3191D=m
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_EATA_PIO is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INIA100=m
++# CONFIG_SCSI_PPA is not set
++# CONFIG_SCSI_IMM is not set
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_STEX=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_DC395x=m
++# CONFIG_SCSI_NSP32 is not set
++CONFIG_SCSI_DEBUG=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_TCM_QLA2XXX=m
++CONFIG_SCSI_QLA_ISCSI=m
++CONFIG_SCSI_IPR=m
++CONFIG_SCSI_IPR_TRACE=y
++CONFIG_SCSI_IPR_DUMP=y
++# CONFIG_SCSI_DPT_I2O is not set
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_LPFC_DEBUG_FS is not set
++
++# PCMCIA SCSI adapter support
++# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
++
++CONFIG_ATA_BMDMA=y
++CONFIG_ATA_VERBOSE_ERROR=y
++CONFIG_ATA_SFF=y
++CONFIG_ATA_PIIX=y
++# CONFIG_SATA_HIGHBANK is not set
++CONFIG_ATA_ACPI=y
++CONFIG_BLK_DEV_SX8=m
++CONFIG_PDC_ADMA=m
++CONFIG_SATA_AHCI=y
++CONFIG_SATA_INIC162X=m
++CONFIG_SATA_MV=m
++CONFIG_SATA_NV=m
++CONFIG_SATA_PMP=y
++CONFIG_SATA_PROMISE=m
++CONFIG_SATA_QSTOR=m
++CONFIG_SATA_RCAR=m
++CONFIG_SATA_SIL=m
++CONFIG_SATA_SIL24=m
++CONFIG_SATA_SIS=m
++CONFIG_SATA_SVW=m
++CONFIG_SATA_SX4=m
++CONFIG_SATA_ULI=m
++CONFIG_SATA_VIA=m
++CONFIG_SATA_VITESSE=m
++# CONFIG_SATA_ZPODD is not set
++CONFIG_SATA_ACARD_AHCI=m
++
++# CONFIG_PATA_LEGACY is not set
++CONFIG_PATA_ACPI=m
++CONFIG_PATA_ALI=m
++CONFIG_PATA_AMD=m
++CONFIG_PATA_ARASAN_CF=m
++CONFIG_PATA_ARTOP=m
++CONFIG_PATA_ATIIXP=m
++CONFIG_PATA_CMD640_PCI=m
++CONFIG_PATA_CMD64X=m
++CONFIG_PATA_CS5520=m
++CONFIG_PATA_CS5530=m
++CONFIG_PATA_CS5535=m
++CONFIG_PATA_CS5536=m
++CONFIG_PATA_CYPRESS=m
++CONFIG_PATA_EFAR=m
++CONFIG_ATA_GENERIC=m
++CONFIG_PATA_HPT366=m
++CONFIG_PATA_HPT37X=m
++CONFIG_PATA_HPT3X2N=m
++CONFIG_PATA_HPT3X3=m
++# CONFIG_PATA_HPT3X3_DMA is not set
++CONFIG_PATA_IT821X=m
++CONFIG_PATA_IT8213=m
++CONFIG_PATA_JMICRON=m
++CONFIG_PATA_NINJA32=m
++CONFIG_PATA_MARVELL=m
++CONFIG_PATA_MPIIX=m
++CONFIG_PATA_NETCELL=m
++CONFIG_PATA_NS87410=m
++CONFIG_PATA_NS87415=m
++CONFIG_PATA_OLDPIIX=m
++CONFIG_PATA_OPTI=m
++CONFIG_PATA_OPTIDMA=m
++CONFIG_PATA_PCMCIA=m
++CONFIG_PATA_PDC_OLD=m
++# CONFIG_PATA_RADISYS is not set
++CONFIG_PATA_RDC=m
++# CONFIG_PATA_RZ1000 is not set
++# CONFIG_PATA_SC1200 is not set
++CONFIG_PATA_SERVERWORKS=m
++CONFIG_PATA_PDC2027X=m
++CONFIG_PATA_SCH=m
++CONFIG_PATA_SIL680=m
++CONFIG_PATA_SIS=m
++CONFIG_PATA_TOSHIBA=m
++CONFIG_PATA_TRIFLEX=m
++CONFIG_PATA_VIA=m
++CONFIG_PATA_WINBOND=m
++CONFIG_PATA_ATP867X=m
++
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_AUTODETECT=y
++CONFIG_MD_FAULTY=m
++CONFIG_MD_LINEAR=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++
++CONFIG_BCACHE=m
++# CONFIG_BCACHE_DEBUG is not set
++# CONFIG_BCACHE_EDEBUG is not set
++# CONFIG_BCACHE_CLOSURES_DEBUG is not set
++
++# CONFIG_MULTICORE_RAID456 is not set
++CONFIG_ASYNC_RAID6_TEST=m
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=m
++CONFIG_DM_DEBUG=y
++CONFIG_DM_DELAY=m
++CONFIG_DM_MIRROR=y
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_THIN_PROVISIONING=m
++CONFIG_DM_CACHE=m
++CONFIG_DM_CACHE_MQ=m
++CONFIG_DM_CACHE_CLEANER=m
++# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set
++# CONFIG_DM_DEBUG_SPACE_MAPS is not set
++CONFIG_DM_UEVENT=y
++CONFIG_DM_ZERO=y
++CONFIG_DM_LOG_USERSPACE=m
++CONFIG_DM_MULTIPATH_QL=m
++CONFIG_DM_MULTIPATH_ST=m
++CONFIG_DM_RAID=m
++CONFIG_DM_FLAKEY=m
++CONFIG_DM_VERITY=m
++CONFIG_DM_SWITCH=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_MAX_SGE=40
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_LOGGING=y
++
++#
++# IEEE 1394 (FireWire) support (JUJU alternative stack)
++#
++CONFIG_FIREWIRE=m
++CONFIG_FIREWIRE_OHCI=m
++CONFIG_FIREWIRE_SBP2=m
++CONFIG_FIREWIRE_NET=m
++CONFIG_FIREWIRE_OHCI_DEBUG=y
++CONFIG_FIREWIRE_NOSY=m
++# CONFIG_FIREWIRE_SERIAL is not set
++# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set
++
++#
++# Virtualization support drivers
++#
++# CONFIG_VIRT_DRIVERS is not set
++
++# Networking support
++#
++
++CONFIG_NET_DMA=y
++
++CONFIG_NETLINK_MMAP=y
++CONFIG_NETLINK_DIAG=m
++
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=y
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_ILLINOIS=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_VENO=m
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_YEAH=m
++
++CONFIG_TCP_MD5SIG=y
++
++#
++# Networking options
++#
++CONFIG_PACKET_DIAG=m
++CONFIG_UNIX_DIAG=m
++CONFIG_NET_KEY=m
++CONFIG_NET_KEY_MIGRATE=y
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_UDP_DIAG=m
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_FIB_TRIE_STATS=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_NF_SECURITY=m
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++CONFIG_NET_IPVTI=m
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETCONSOLE_DYNAMIC=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++CONFIG_IP_VS_PROTO_SCTP=y
++CONFIG_IP_VS_IPV6=y
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS SH scheduler
++#
++CONFIG_IP_VS_SH_TAB_BITS=8
++
++CONFIG_IP_VS_FTP=m
++CONFIG_IP_VS_PE_SIP=m
++
++CONFIG_IPV6_PRIVACY=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_IPV6_OPTIMISTIC_DAD=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_IPV6_MIP6=y
++CONFIG_IPV6_VTI=m
++CONFIG_IPV6_SIT=m
++CONFIG_IPV6_SIT_6RD=y
++CONFIG_IPV6_TUNNEL=m
++# CONFIG_IPV6_GRE is not set
++CONFIG_IPV6_SUBTREES=y
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++
++CONFIG_RDS=m
++# CONFIG_RDS_DEBUG is not set
++CONFIG_RDS_RDMA=m
++CONFIG_RDS_TCP=m
++
++CONFIG_NET_9P=m
++CONFIG_NET_9P_VIRTIO=m
++# CONFIG_NET_9P_DEBUG is not set
++CONFIG_NET_9P_RDMA=m
++
++# CONFIG_DECNET is not set
++CONFIG_BRIDGE=m
++CONFIG_BRIDGE_IGMP_SNOOPING=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
++
++# PHY timestamping adds overhead
++CONFIG_NETWORK_PHY_TIMESTAMPING=y
++
++CONFIG_NETFILTER_ADVANCED=y
++CONFIG_NF_CONNTRACK=m
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_ACCT=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NETFILTER_TPROXY=m
++CONFIG_NETFILTER_XTABLES=y
++CONFIG_NETFILTER_XT_SET=m
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_CONNMARK=m
++
++CONFIG_NETFILTER_XT_TARGET_AUDIT=m
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
++CONFIG_NETFILTER_XT_TARGET_CT=m
++CONFIG_NETFILTER_XT_TARGET_DSCP=m
++CONFIG_NETFILTER_XT_TARGET_HMARK=m
++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
++CONFIG_NETFILTER_XT_TARGET_LED=m
++CONFIG_NETFILTER_XT_TARGET_LOG=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_TARGET_NFLOG=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
++CONFIG_NETFILTER_XT_TARGET_RATEEST=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
++CONFIG_NETFILTER_XT_TARGET_TRACE=m
++CONFIG_NETFILTER_XT_TARGET_TEE=m
++CONFIG_NETFILTER_XT_TARGET_TPROXY=m
++
++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
++CONFIG_NETFILTER_XT_MATCH_BPF=m
++CONFIG_NETFILTER_XT_MATCH_CGROUP=m
++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CPU=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
++CONFIG_NETFILTER_XT_MATCH_DSCP=m
++CONFIG_NETFILTER_XT_MATCH_ECN=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_HL=m
++CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
++CONFIG_NETFILTER_XT_MATCH_IPVS=m
++CONFIG_NETFILTER_XT_MATCH_L2TP=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_NFACCT=m
++CONFIG_NETFILTER_XT_MATCH_OSF=m
++CONFIG_NETFILTER_XT_MATCH_OWNER=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_RATEEST=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_RECENT=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_SOCKET=m
++CONFIG_NETFILTER_XT_MATCH_STATE=y
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
++CONFIG_NETFILTER_XT_MATCH_TIME=m
++CONFIG_NETFILTER_XT_MATCH_U32=m
++
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++
++CONFIG_NF_CONNTRACK_MARK=y
++CONFIG_NF_CONNTRACK_SECMARK=y
++CONFIG_NF_CONNTRACK_EVENTS=y
++CONFIG_NF_CONNTRACK_ZONES=y
++CONFIG_NF_CONNTRACK_PROCFS=y # check if conntrack(8) in f17 supports netlink
++# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
++CONFIG_NF_CONNTRACK_AMANDA=m
++CONFIG_NF_CONNTRACK_FTP=m
++CONFIG_NF_CONNTRACK_H323=m
++CONFIG_NF_CONNTRACK_IRC=m
++CONFIG_NF_CONNTRACK_NETBIOS_NS=m
++CONFIG_NF_CONNTRACK_PPTP=m
++CONFIG_NF_CONNTRACK_SANE=m
++CONFIG_NF_CONNTRACK_SIP=m
++CONFIG_NF_CONNTRACK_TFTP=m
++CONFIG_NF_CONNTRACK_IPV4=y
++CONFIG_NF_CONNTRACK_IPV6=y
++# CONFIG_NF_CONNTRACK_TIMEOUT is not set
++CONFIG_NF_CONNTRACK_TIMESTAMP=y
++CONFIG_NF_CONNTRACK_SNMP=m
++CONFIG_NF_NAT=m
++CONFIG_NF_NAT_SNMP_BASIC=m
++CONFIG_NF_CT_PROTO_DCCP=m
++CONFIG_NF_CT_PROTO_SCTP=m
++CONFIG_NF_CT_NETLINK=m
++# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
++CONFIG_NF_CT_NETLINK_HELPER=m
++CONFIG_NF_CT_PROTO_UDPLITE=m
++
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_RPFILTER=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_REJECT=y
++CONFIG_IP_NF_TARGET_SYNPROXY=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_NF_NAT_IPV4=m
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_RAW=m
++
++CONFIG_IP_NF_IPTABLES=y
++CONFIG_IP_NF_FILTER=y
++
++#
++# IPv6: Netfilter Configuration
++#
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_MH=m
++CONFIG_IP6_NF_MATCH_RPFILTER=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_RAW=m
++CONFIG_IP6_NF_SECURITY=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_TARGET_SYNPROXY=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_NF_NAT_IPV6=m
++CONFIG_IP6_NF_TARGET_MASQUERADE=m
++# CONFIG_IP6_NF_TARGET_NPT is not set
++
++# nf_tables support
++CONFIG_NF_TABLES=m
++CONFIG_NF_TABLES_INET=m
++CONFIG_NFT_EXTHDR=m
++CONFIG_NFT_META=m
++CONFIG_NFT_CT=m
++CONFIG_NFT_RBTREE=m
++CONFIG_NFT_HASH=m
++CONFIG_NFT_COUNTER=m
++CONFIG_NFT_LOG=m
++CONFIG_NFT_LIMIT=m
++CONFIG_NFT_NAT=m
++CONFIG_NFT_QUEUE=m
++CONFIG_NFT_REJECT=m
++CONFIG_NFT_COMPAT=m
++
++CONFIG_NF_TABLES_IPV4=m
++CONFIG_NFT_REJECT_IPV4=m
++CONFIG_NFT_CHAIN_ROUTE_IPV4=m
++CONFIG_NFT_CHAIN_NAT_IPV4=m
++CONFIG_NF_TABLES_ARP=m
++
++CONFIG_NF_TABLES_IPV6=m
++CONFIG_NFT_CHAIN_ROUTE_IPV6=m
++CONFIG_NFT_CHAIN_NAT_IPV6=m
++
++CONFIG_NF_TABLES_BRIDGE=m
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_IP6=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_NFLOG=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_ULOG=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_XFRM=y
++CONFIG_XFRM_MIGRATE=y
++CONFIG_XFRM_SUB_POLICY=y
++CONFIG_XFRM_STATISTICS=y
++CONFIG_XFRM_USER=y
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
++CONFIG_INET6_XFRM_MODE_BEET=m
++
++CONFIG_IP_SET=m
++CONFIG_IP_SET_MAX=256
++CONFIG_IP_SET_BITMAP_IP=m
++CONFIG_IP_SET_BITMAP_IPMAC=m
++CONFIG_IP_SET_BITMAP_PORT=m
++CONFIG_IP_SET_HASH_IP=m
++CONFIG_IP_SET_HASH_IPPORT=m
++CONFIG_IP_SET_HASH_IPPORTIP=m
++CONFIG_IP_SET_HASH_IPPORTNET=m
++CONFIG_IP_SET_HASH_NETPORTNET=m
++CONFIG_IP_SET_HASH_NET=m
++CONFIG_IP_SET_HASH_NETNET=m
++CONFIG_IP_SET_HASH_NETPORT=m
++CONFIG_IP_SET_HASH_NETIFACE=m
++CONFIG_IP_SET_LIST_SET=m
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++CONFIG_NET_SCTPPROBE=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
++# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
++CONFIG_SCTP_COOKIE_HMAC_MD5=y
++CONFIG_SCTP_COOKIE_HMAC_SHA1=y
++CONFIG_ATM=m
++CONFIG_VLAN_8021Q_GVRP=y
++CONFIG_VLAN_8021Q_MVRP=y
++CONFIG_LLC=m
++# CONFIG_LLC2 is not set
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++CONFIG_WAN_ROUTER=m
++CONFIG_IP_DCCP=m
++CONFIG_IP_DCCP_CCID2=m
++# CONFIG_IP_DCCP_CCID2_DEBUG is not set
++CONFIG_IP_DCCP_CCID3=y
++# CONFIG_IP_DCCP_CCID3_DEBUG is not set
++# CONFIG_IP_DCCP_DEBUG is not set
++# CONFIG_NET_DCCPPROBE is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++CONFIG_TIPC=m
++CONFIG_TIPC_PORTS=8192
++# CONFIG_TIPC_MEDIA_IB is not set
++# CONFIG_TIPC_ADVANCED is not set
++# CONFIG_TIPC_DEBUG is not set
++
++CONFIG_NETLABEL=y
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_DRR=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_SFB=m
++CONFIG_NET_SCH_MQPRIO=m
++CONFIG_NET_SCH_MULTIQ=m
++CONFIG_NET_SCH_CHOKE=m
++CONFIG_NET_SCH_QFQ=m
++CONFIG_NET_SCH_CODEL=m
++CONFIG_NET_SCH_FQ_CODEL=m
++CONFIG_NET_SCH_FQ=m
++CONFIG_NET_SCH_HHF=m
++CONFIG_NET_SCH_PIE=m
++CONFIG_NET_SCH_PLUG=m
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_ACT=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_CGROUP=y
++CONFIG_NET_CLS_BPF=m
++CONFIG_NET_CLS_FLOW=m
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_IND=y
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_MARK=y
++CONFIG_CLS_U32_PERF=y
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_TEXT=m
++CONFIG_NET_EMATCH_IPSET=m
++CONFIG_NET_EMATCH_U32=m
++
++CONFIG_NET_ACT_CSUM=m
++CONFIG_NET_ACT_GACT=m
++CONFIG_GACT_PROB=y
++CONFIG_NET_ACT_IPT=m
++CONFIG_NET_ACT_MIRRED=m
++CONFIG_NET_ACT_NAT=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_ACT_POLICE=m
++CONFIG_NET_ACT_SIMP=m
++CONFIG_NET_ACT_SKBEDIT=m
++
++CONFIG_DCB=y
++CONFIG_DNS_RESOLVER=m
++CONFIG_BATMAN_ADV=m
++CONFIG_BATMAN_ADV_BLA=y
++CONFIG_BATMAN_ADV_DAT=y
++CONFIG_BATMAN_ADV_NC=y
++
++# CONFIG_BATMAN_ADV_DEBUG is not set
++CONFIG_OPENVSWITCH=m
++CONFIG_OPENVSWITCH_GRE=y
++CONFIG_OPENVSWITCH_VXLAN=y
++CONFIG_VSOCKETS=m
++
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++# CONFIG_NET_TCPPROBE is not set
++CONFIG_NET_DROP_MONITOR=y
++
++# disable later --kyle
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++CONFIG_IFB=m
++CONFIG_NET_TEAM=m
++CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
++CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
++CONFIG_NET_TEAM_MODE_LOADBALANCE=m
++CONFIG_NET_TEAM_MODE_BROADCAST=m
++CONFIG_NET_TEAM_MODE_RANDOM=m
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_MACVLAN=m
++CONFIG_MACVTAP=m
++CONFIG_VXLAN=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_VETH=m
++CONFIG_NLMON=m
++
++#
++# ATM
++#
++CONFIG_ATM_DRIVERS=y
++# CONFIG_ATM_DUMMY is not set
++CONFIG_ATM_CLIP=m
++CONFIG_ATM_LANE=m
++CONFIG_ATM_BR2684=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_ATM_TCP=m
++# CONFIG_ATM_LANAI is not set
++CONFIG_ATM_ENI=m
++CONFIG_ATM_FIRESTREAM=m
++# CONFIG_ATM_ZATM is not set
++# CONFIG_ATM_IDT77252 is not set
++# CONFIG_ATM_AMBASSADOR is not set
++# CONFIG_ATM_HORIZON is not set
++# CONFIG_ATM_FORE200E is not set
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++
++CONFIG_ATM_HE=m
++CONFIG_PPTP=m
++CONFIG_PPPOATM=m
++CONFIG_PPPOL2TP=m
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_IA is not set
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++# CONFIG_ATM_MPOA is not set
++# CONFIG_ATM_BR2684_IPFILTER is not set
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++# CONFIG_ATM_ZATM_DEBUG is not set
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++# CONFIG_ATM_HORIZON_DEBUG is not set
++# CONFIG_ATM_HE_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_SOLOS=m
++
++CONFIG_L2TP=m
++CONFIG_L2TP_V3=y
++CONFIG_L2TP_IP=m
++CONFIG_L2TP_ETH=m
++
++# CONFIG_CAIF is not set
++
++CONFIG_RFKILL=m
++CONFIG_RFKILL_GPIO=m
++CONFIG_RFKILL_INPUT=y
++
++
++#
++# Ethernet (10 or 100Mbit)
++#
++
++CONFIG_NET_VENDOR_ADAPTEC=y
++CONFIG_ADAPTEC_STARFIRE=m
++
++CONFIG_NET_VENDOR_ALTEON=y
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++
++CONFIG_NET_VENDOR_AMD=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++CONFIG_PCMCIA_NMCLAN=m
++
++CONFIG_NET_VENDOR_ARC=y
++CONFIG_ARC_EMAC=m
++
++CONFIG_NET_VENDOR_ATHEROS=y
++CONFIG_ALX=m
++CONFIG_ATL2=m
++CONFIG_ATL1=m
++CONFIG_ATL1C=m
++CONFIG_ATL1E=m
++CONFIG_NET_CADENCE=y
++CONFIG_ARM_AT91_ETHER=m
++CONFIG_MACB=m
++
++CONFIG_NET_VENDOR_BROCADE=y
++CONFIG_BNA=m
++CONFIG_NET_CALXEDA_XGMAC=m
++
++CONFIG_NET_VENDOR_CHELSIO=y
++CONFIG_CHELSIO_T1=m
++CONFIG_CHELSIO_T1_1G=y
++CONFIG_CHELSIO_T3=m
++CONFIG_CHELSIO_T4=m
++CONFIG_CHELSIO_T4VF=m
++
++CONFIG_NET_VENDOR_CISCO=y
++CONFIG_ENIC=m
++
++CONFIG_NET_VENDOR_DEC=y
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_DE2104X_DSL=0
++CONFIG_TULIP=m
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_TULIP_MWI is not set
++CONFIG_TULIP_MMIO=y
++# CONFIG_NI5010 is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_ULI526X=m
++
++CONFIG_NET_VENDOR_DLINK=y
++CONFIG_DE600=m
++CONFIG_DE620=m
++CONFIG_DL2K=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++
++CONFIG_NET_VENDOR_EMULEX=y
++CONFIG_BE2NET=m
++
++CONFIG_NET_VENDOR_EXAR=y
++CONFIG_S2IO=m
++CONFIG_VXGE=m
++# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
++
++# CONFIG_NET_VENDOR_FARADAY is not set
++# CONFIG_NET_VENDOR_FUJITSU is not set
++# CONFIG_NET_VENDOR_HP is not set
++CONFIG_NET_VENDOR_INTEL=y
++CONFIG_E100=m
++CONFIG_E1000=m
++CONFIG_E1000E=m
++CONFIG_IGB=m
++CONFIG_IGB_HWMON=y
++CONFIG_IGB_DCA=y
++CONFIG_IGB_PTP=y
++CONFIG_IGBVF=m
++CONFIG_IXGB=m
++CONFIG_IXGBEVF=m
++CONFIG_IXGBE=m
++CONFIG_IXGBE_DCA=y
++CONFIG_IXGBE_DCB=y
++CONFIG_IXGBE_HWMON=y
++CONFIG_IXGBE_PTP=y
++CONFIG_I40E=m
++# CONFIG_I40E_VXLAN is not set
++# CONFIG_I40E_DCB is not set
++# CONFIG_I40EVF is not set
++
++
++# CONFIG_NET_VENDOR_I825XX is not set
++CONFIG_NET_VENDOR_MARVELL=y
++CONFIG_MVMDIO=m
++CONFIG_SKGE=m
++# CONFIG_SKGE_DEBUG is not set
++CONFIG_SKGE_GENESIS=y
++CONFIG_SKY2=m
++# CONFIG_SKY2_DEBUG is not set
++
++CONFIG_NET_VENDOR_MICREL=y
++CONFIG_KSZ884X_PCI=m
++# CONFIG_KS8842 is not set
++# CONFIG_KS8851_MLL is not set
++
++CONFIG_NET_VENDOR_MYRI=y
++CONFIG_MYRI10GE=m
++CONFIG_MYRI10GE_DCA=y
++
++CONFIG_NATSEMI=m
++CONFIG_NS83820=m
++
++CONFIG_PCMCIA_AXNET=m
++CONFIG_NE2K_PCI=m
++CONFIG_NE3210=m
++CONFIG_PCMCIA_PCNET=m
++
++CONFIG_NET_VENDOR_NVIDIA=y
++CONFIG_FORCEDETH=m
++
++CONFIG_NET_VENDOR_OKI=y
++# CONFIG_PCH_GBE is not set
++# CONFIG_PCH_PTP is not set
++
++CONFIG_NET_PACKET_ENGINE=y
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++
++CONFIG_NET_VENDOR_QLOGIC=y
++CONFIG_QLA3XXX=m
++CONFIG_QLCNIC=m
++CONFIG_QLCNIC_SRIOV=y
++CONFIG_QLCNIC_DCB=y
++CONFIG_QLGE=m
++CONFIG_NETXEN_NIC=m
++
++CONFIG_NET_VENDOR_REALTEK=y
++CONFIG_ATP=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++# CONFIG_8139TOO_PIO is not set
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_R8169=m
++
++
++CONFIG_NET_VENDOR_RDC=y
++CONFIG_R6040=m
++
++
++CONFIG_NET_VENDOR_SILAN=y
++CONFIG_SC92031=m
++
++CONFIG_NET_VENDOR_SIS=y
++CONFIG_SIS900=m
++CONFIG_SIS190=m
++
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_EPIC100=m
++CONFIG_SMSC9420=m
++
++# CONFIG_STMMAC_PLATFORM is not set
++# CONFIG_STMMAC_PCI is not set
++# CONFIG_STMMAC_DA is not set
++# CONFIG_STMMAC_DUAL_MAC is not set
++# CONFIG_STMMAC_TIMER is not set
++# CONFIG_STMMAC_DEBUG_FS is not set
++
++CONFIG_NET_VENDOR_SUN=y
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NIU=m
++
++CONFIG_NET_VENDOR_TEHUTI=y
++CONFIG_TEHUTI=m
++
++CONFIG_NET_VENDOR_TI=y
++CONFIG_TLAN=m
++
++CONFIG_VIA_RHINE=m
++CONFIG_VIA_RHINE_MMIO=y
++
++CONFIG_WIZNET_W5100=m
++CONFIG_WIZNET_W5300=m
++CONFIG_NET_VENDOR_XIRCOM=y
++CONFIG_PCMCIA_XIRC2PS=m
++
++CONFIG_AMD_PHY=m
++CONFIG_BROADCOM_PHY=m
++CONFIG_BCM87XX_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_DP83640_PHY=m
++CONFIG_FIXED_PHY=y
++CONFIG_MDIO_BITBANG=m
++CONFIG_NATIONAL_PHY=m
++CONFIG_ICPLUS_PHY=m
++CONFIG_BCM63XX_PHY=m
++CONFIG_LSI_ET1011C_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_MARVELL_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_REALTEK_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_STE10XP=m
++CONFIG_VITESSE_PHY=m
++CONFIG_MICREL_PHY=m
++
++CONFIG_MII=m
++CONFIG_NET_CORE=y
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++CONFIG_DNET=m
++
++
++CONFIG_LNE390=m
++CONFIG_ES3210=m
++CONFIG_NET_PCI=y
++CONFIG_B44=m
++CONFIG_B44_PCI=y
++CONFIG_BNX2=m
++CONFIG_BNX2X=m
++CONFIG_BNX2X_SRIOV=y
++CONFIG_CNIC=m
++CONFIG_FEALNX=m
++CONFIG_NET_POCKET=y
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_TIGON3=m
++CONFIG_JME=m
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_IP1000 is not set
++# CONFIG_MLX4_EN is not set
++# CONFIG_SFC is not set
++
++# CONFIG_FDDI is not set
++# CONFIG_DEFXX is not set
++# CONFIG_SKFP is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PLIP is not set
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_IPPP_FILTER=y
++CONFIG_PPP_BSDCOMP=y
++CONFIG_PPPOE=m
++CONFIG_PPP_MPPE=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++# CONFIG_SLIP_MODE_SLIP6 is not set
++
++#
++# Wireless LAN
++#
++#
++# CONFIG_STRIP is not set
++# CONFIG_PCMCIA_RAYCS is not set
++
++CONFIG_CFG80211_WEXT=y
++# CONFIG_CFG80211_REG_DEBUG is not set
++# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
++CONFIG_CFG80211_DEFAULT_PS=y
++CONFIG_NL80211=y
++# CONFIG_NL80211_TESTMODE is not set
++# CONFIG_WIRELESS_EXT_SYSFS is not set
++CONFIG_LIB80211=m
++CONFIG_LIB80211_CRYPT_WEP=m
++CONFIG_LIB80211_CRYPT_CCMP=m
++CONFIG_LIB80211_CRYPT_TKIP=m
++# CONFIG_LIB80211_DEBUG is not set
++
++CONFIG_MAC80211=m
++CONFIG_MAC80211_RC_MINSTREL=y
++# CONFIG_MAC80211_RC_DEFAULT_PID is not set
++CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
++CONFIG_MAC80211_RC_DEFAULT="minstrel"
++CONFIG_MAC80211_MESH=y
++CONFIG_MAC80211_LEDS=y
++# CONFIG_MAC80211_DEBUG_MENU is not set
++
++# CONFIG_WIMAX is not set
++
++# CONFIG_ADM8211 is not set
++CONFIG_ATH_COMMON=m
++CONFIG_ATH_CARDS=m
++CONFIG_ATH5K=m
++CONFIG_ATH5K_DEBUG=y
++# CONFIG_ATH5K_TRACER is not set
++CONFIG_ATH6KL=m
++CONFIG_ATH6KL_DEBUG=y
++CONFIG_ATH6KL_SDIO=m
++CONFIG_ATH6KL_USB=m
++# CONFIG_ATH6KL_TRACING is not set
++CONFIG_AR5523=m
++CONFIG_ATH9K=m
++CONFIG_ATH9K_PCI=y
++CONFIG_ATH9K_AHB=y
++# CONFIG_ATH9K_DEBUG is not set
++# CONFIG_ATH9K_MAC_DEBUG is not set
++CONFIG_ATH9K_HTC=m
++CONFIG_ATH9K_BTCOEX_SUPPORT=y
++# CONFIG_ATH9K_LEGACY_RATE_CONTROL is not set
++# CONFIG_ATH9K_WOW is not set
++#
++CONFIG_ATH10K=m
++CONFIG_ATH10K_PCI=m
++# CONFIG_ATH10K_DEBUG is not set
++# CONFIG_ATH10K_TRACING is not set
++CONFIG_ATH10K_DEBUGFS=y
++CONFIG_WCN36XX=m
++# CONFIG_WCN36XX_DEBUGFS is not set
++CONFIG_WIL6210=m
++CONFIG_WIL6210_ISR_COR=y
++# CONFIG_WIL6210_TRACING is not set
++CONFIG_CARL9170=m
++CONFIG_CARL9170_LEDS=y
++# CONFIG_CARL9170_HWRNG is not set
++CONFIG_AT76C50X_USB=m
++# CONFIG_AIRO is not set
++# CONFIG_AIRO_CS is not set
++# CONFIG_ATMEL is not set
++CONFIG_B43=m
++CONFIG_B43_PCMCIA=y
++CONFIG_B43_SDIO=y
++CONFIG_B43_BCMA=y
++# CONFIG_B43_BCMA_EXTRA is not set
++CONFIG_B43_BCMA_PIO=y
++# CONFIG_B43_DEBUG is not set
++CONFIG_B43_PHY_LP=y
++CONFIG_B43_PHY_N=y
++CONFIG_B43_PHY_HT=y
++# CONFIG_B43_FORCE_PIO is not set
++CONFIG_B43LEGACY=m
++# CONFIG_B43LEGACY_DEBUG is not set
++CONFIG_B43LEGACY_DMA=y
++CONFIG_B43LEGACY_PIO=y
++CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
++# CONFIG_B43LEGACY_DMA_MODE is not set
++# CONFIG_B43LEGACY_PIO_MODE is not set
++CONFIG_BRCMSMAC=m
++# CONFIG_BRCMFMAC_SDIO_OOB is not set
++CONFIG_BRCMFMAC_USB=y
++# CONFIG_BRCM_TRACING is not set
++# CONFIG_BRCMISCAN is not set
++# CONFIG_BRCMDBG is not set
++CONFIG_HERMES=m
++CONFIG_HERMES_CACHE_FW_ON_INIT=y
++# CONFIG_HERMES_PRISM is not set
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_PCMCIA_HERMES=m
++CONFIG_ORINOCO_USB=m
++# CONFIG_TMD_HERMES is not set
++# CONFIG_PCMCIA_SPECTRUM is not set
++CONFIG_CW1200=m
++CONFIG_CW1200_WLAN_SDIO=m
++CONFIG_CW1200_WLAN_SPI=m
++# CONFIG_HOSTAP is not set
++# CONFIG_IPW2100 is not set
++# CONFIG_IPW2200 is not set
++# CONFIG_IPW2100_DEBUG is not set
++# CONFIG_IPW2200_DEBUG is not set
++# CONFIG_LIBIPW_DEBUG is not set
++CONFIG_LIBERTAS=m
++CONFIG_LIBERTAS_USB=m
++CONFIG_LIBERTAS_CS=m
++CONFIG_LIBERTAS_SDIO=m
++# CONFIG_LIBERTAS_DEBUG is not set
++# CONFIG_LIBERTAS_THINFIRM is not set
++CONFIG_LIBERTAS_MESH=y
++CONFIG_IWLWIFI=m
++CONFIG_IWLDVM=m
++CONFIG_IWLMVM=m
++CONFIG_IWLWIFI_DEBUG=y
++CONFIG_IWLWIFI_DEVICE_SVTOOL=y
++# CONFIG_IWLWIFI_EXPERIMENTAL_MFP is not set
++CONFIG_IWLWIFI_UCODE16=y
++# CONFIG_IWLWIFI_P2P is not set
++CONFIG_IWLEGACY=m
++CONFIG_IWLEGACY_DEBUG=y
++# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set
++CONFIG_IWL4965=y
++CONFIG_IWL3945=m
++# CONFIG_IWM is not set
++# CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is not set
++CONFIG_MAC80211_HWSIM=m
++CONFIG_P54_COMMON=m
++CONFIG_P54_USB=m
++CONFIG_P54_PCI=m
++CONFIG_MWL8K=m
++# CONFIG_PRISM54 is not set
++# CONFIG_PCMCIA_WL3501 is not set
++CONFIG_RT2X00=m
++# CONFIG_RT2X00_DEBUG is not set
++CONFIG_RT2400PCI=m
++CONFIG_RT2500PCI=m
++CONFIG_RT61PCI=m
++CONFIG_RT2500USB=m
++CONFIG_RT2800USB=m
++CONFIG_RT2800USB_RT33XX=y
++CONFIG_RT2800USB_RT35XX=y
++CONFIG_RT2800USB_RT3573=y
++CONFIG_RT2800USB_RT53XX=y
++CONFIG_RT2800USB_RT55XX=y
++CONFIG_RT2800USB_UNKNOWN=y
++CONFIG_RT2800PCI=m
++CONFIG_RT2800PCI_RT3290=y
++CONFIG_RT2800PCI_RT33XX=y
++CONFIG_RT2800PCI_RT35XX=y
++CONFIG_RT2800PCI_RT53XX=y
++CONFIG_RT73USB=m
++CONFIG_RTL8180=m
++CONFIG_RTL8187=m
++# CONFIG_USB_ZD1201 is not set
++# CONFIG_USB_NET_SR9800 is not set
++CONFIG_USB_NET_RNDIS_WLAN=m
++CONFIG_USB_NET_KALMIA=m
++CONFIG_USB_NET_QMI_WWAN=m
++CONFIG_USB_NET_SMSC75XX=m
++# CONFIG_WL_TI is not set
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++
++CONFIG_WL12XX=m
++CONFIG_WL12XX_SPI=m
++CONFIG_WL12XX_SDIO=m
++
++CONFIG_WL1251=m
++CONFIG_WL1251_SPI=m
++CONFIG_WL1251_SDIO=m
++
++CONFIG_RTL_CARDS=m
++CONFIG_RTLWIFI=m
++CONFIG_RTL8192CE=m
++CONFIG_RTL8192SE=m
++CONFIG_RTL8192CU=m
++CONFIG_RTL8192DE=m
++CONFIG_RTL8723AE=m
++CONFIG_RTL8188EE=m
++
++CONFIG_MWIFIEX=m
++CONFIG_MWIFIEX_SDIO=m
++CONFIG_MWIFIEX_PCIE=m
++CONFIG_MWIFIEX_USB=m
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++CONFIG_NET_FC=y
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++
++#
++# Amateur Radio support
++#
++CONFIG_HAMRADIO=y
++CONFIG_AX25=m
++CONFIG_AX25_DAMA_SLAVE=y
++
++# CONFIG_CAN is not set
++
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++
++CONFIG_NFC=m
++CONFIG_NFC_DIGITAL=m
++CONFIG_NFC_NCI=m
++CONFIG_NFC_HCI=m
++CONFIG_NFC_SHDLC=y
++CONFIG_NFC_LLCP=y
++CONFIG_NFC_SIM=m
++CONFIG_NFC_MRVL=m
++CONFIG_NFC_MRVL_USB=m
++
++#
++# Near Field Communication (NFC) devices
++#
++CONFIG_NFC_PORT100=m
++CONFIG_NFC_PN544=m
++CONFIG_NFC_PN544_I2C=m
++CONFIG_NFC_PN533=m
++CONFIG_NFC_MICROREAD=m
++CONFIG_NFC_MICROREAD_I2C=m
++
++#
++# IrDA (infrared) support
++#
++CONFIG_IRDA=m
++# CONFIG_IRDA_DEBUG is not set
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRTTY_SIR=m
++CONFIG_DONGLE=y
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++CONFIG_ESI_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_KINGSUN_DONGLE=m
++CONFIG_KSDAZZLE_DONGLE=m
++CONFIG_KS959_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++
++CONFIG_ALI_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++# CONFIG_TOSHIBA_FIR is not set
++CONFIG_USB_IRDA=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_WINBOND_FIR=m
++
++#
++# Bluetooth support
++#
++CONFIG_BT=m
++CONFIG_BT_L2CAP=y
++CONFIG_BT_SCO=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIBTUSB=m
++# Disable the BT_HCIUSB driver.
++# It sucks more power than BT_HCIBTUSB which has the same functionality.
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_ATH3K=y
++CONFIG_BT_HCIUART_3WIRE=y
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBTSDIO=m
++CONFIG_BT_HCIUART_LL=y
++CONFIG_BT_MRVL=m
++CONFIG_BT_MRVL_SDIO=m
++CONFIG_BT_ATH3K=m
++CONFIG_BT_WILINK=m
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=y
++CONFIG_MISDN=m
++CONFIG_MISDN_DSP=m
++CONFIG_MISDN_L1OIP=m
++CONFIG_MISDN_AVMFRITZ=m
++CONFIG_MISDN_SPEEDFAX=m
++CONFIG_MISDN_INFINEON=m
++CONFIG_MISDN_W6692=m
++CONFIG_MISDN_NETJET=m
++
++#
++# mISDN hardware drivers
++#
++CONFIG_MISDN_HFCPCI=m
++CONFIG_MISDN_HFCMULTI=m
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++CONFIG_MISDN_HFCUSB=m
++
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++# CONFIG_ISDN_PPP_BSDCOMP is not set
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_DE_AOC=y
++
++CONFIG_ISDN_AUDIO=y
++
++CONFIG_ISDN_DRV_HISAX=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++
++CONFIG_ISDN_CAPI_CAPIDRV=m
++CONFIG_ISDN_DIVERSION=m
++
++CONFIG_HISAX_EURO=y
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_ST5481=m
++# CONFIG_HISAX_HFCUSB is not set
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_NO_SENDCOMPLETE=y
++CONFIG_HISAX_NO_LLC=y
++CONFIG_HISAX_NO_KEYPAD=y
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_TELES_CS=m
++CONFIG_HISAX_HFC4S8S=m
++
++CONFIG_ISDN_DRV_LOOP=m
++CONFIG_HYSDN=m
++CONFIG_HYSDN_CAPI=y
++
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++# CONFIG_CAPI_TRACE is not set
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++
++#
++# Active Eicon DIVA Server cards
++#
++# CONFIG_CAPI_EICON is not set
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++
++CONFIG_ISDN_DRV_GIGASET=m
++CONFIG_GIGASET_CAPI=y
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M101=m
++CONFIG_GIGASET_M105=m
++# CONFIG_GIGASET_DEBUG is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++CONFIG_INPUT_FF_MEMLESS=m
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++# CONFIG_INPUT_MATRIXKMAP is not set
++
++CONFIG_INPUT_TABLET=y
++CONFIG_TABLET_USB_ACECAD=m
++CONFIG_TABLET_USB_AIPTEK=m
++CONFIG_TABLET_USB_GTCO=m
++CONFIG_TABLET_USB_HANWANG=m
++CONFIG_TABLET_USB_KBTAB=m
++CONFIG_TABLET_USB_WACOM=m
++
++CONFIG_INPUT_POWERMATE=m
++CONFIG_INPUT_YEALINK=m
++CONFIG_INPUT_CM109=m
++CONFIG_INPUT_POLLDEV=m
++CONFIG_INPUT_SPARSEKMAP=m
++# CONFIG_INPUT_ADXL34X is not set
++# CONFIG_INPUT_BMA150 is not set
++# CONFIG_INPUT_IMS_PCU is not set
++CONFIG_INPUT_CMA3000=m
++CONFIG_INPUT_CMA3000_I2C=m
++CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
++
++#
++# Input I/O drivers
++#
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_RAW=m
++CONFIG_SERIO_ALTERA_PS2=m
++# CONFIG_SERIO_PS2MULT is not set
++CONFIG_SERIO_ARC_PS2=m
++# CONFIG_SERIO_APBPS2 is not set
++
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_OLPC_APSP is not set
++# CONFIG_SERIO_PARKBD is not set
++# CONFIG_SERIO_PCIPS2 is not set
++# CONFIG_SERIO_LIBPS2 is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_SH_KEYSC is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_MATRIX is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_KEYBOARD_STOWAWAY is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_LM8323 is not set
++# CONFIG_KEYBOARD_LM8333 is not set
++# CONFIG_KEYBOARD_MAX7359 is not set
++# CONFIG_KEYBOARD_ADP5589 is not set
++# CONFIG_KEYBOARD_MPR121 is not set
++# CONFIG_KEYBOARD_QT1070 is not set
++# CONFIG_KEYBOARD_MCS is not set
++# CONFIG_KEYBOARD_OPENCORES is not set
++# CONFIG_KEYBOARD_SAMSUNG is not set
++# CONFIG_KEYBOARD_QT2160 is not set
++# CONFIG_KEYBOARD_TCA6416 is not set
++# CONFIG_KEYBOARD_TCA8418 is not set
++# CONFIG_KEYBOARD_OMAP4 is not set
++CONFIG_INPUT_MOUSE=y
++# CONFIG_MOUSE_PS2_TOUCHKIT is not set
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_MOUSE_PS2_SENTELIC=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_MOUSE_APPLETOUCH=m
++CONFIG_MOUSE_BCM5974=m
++CONFIG_MOUSE_SYNAPTICS_I2C=m
++CONFIG_MOUSE_SYNAPTICS_USB=m
++CONFIG_MOUSE_CYAPA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_WALKERA0701=m
++CONFIG_JOYSTICK_XPAD=m
++CONFIG_JOYSTICK_XPAD_FF=y
++CONFIG_JOYSTICK_XPAD_LEDS=y
++CONFIG_JOYSTICK_ZHENHUA=m
++# CONFIG_JOYSTICK_AS5011 is not set
++
++CONFIG_INPUT_TOUCHSCREEN=y
++# CONFIG_TOUCHSCREEN_AD7879 is not set
++CONFIG_TOUCHSCREEN_AD7879_I2C=m
++# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
++# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
++# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
++CONFIG_TOUCHSCREEN_DYNAPRO=m
++CONFIG_TOUCHSCREEN_EDT_FT5X06=m
++CONFIG_TOUCHSCREEN_EETI=m
++CONFIG_TOUCHSCREEN_EGALAX=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_FUJITSU=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
++CONFIG_TOUCHSCREEN_INEXIO=m
++CONFIG_TOUCHSCREEN_ILI210X=m
++CONFIG_TOUCHSCREEN_MMS114=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MCS5000=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_TOUCHSCREEN_PENMOUNT=m
++# CONFIG_TOUCHSCREEN_SUR40 is not set
++# CONFIG_TOUCHSCREEN_TPS6507X is not set
++CONFIG_TOUCHSCREEN_TSC_SERIO=m
++CONFIG_TOUCHSCREEN_TSC2007=m
++CONFIG_TOUCHSCREEN_TOUCHIT213=m
++CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
++CONFIG_TOUCHSCREEN_TOUCHWIN=m
++CONFIG_TOUCHSCREEN_PIXCIR=m
++CONFIG_TOUCHSCREEN_UCB1400=m
++CONFIG_TOUCHSCREEN_WACOM_W8001=m
++CONFIG_TOUCHSCREEN_WACOM_I2C=m
++CONFIG_TOUCHSCREEN_USB_E2I=y
++CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
++# CONFIG_TOUCHSCREEN_WM97XX is not set
++CONFIG_TOUCHSCREEN_W90X900=m
++# CONFIG_TOUCHSCREEN_BU21013 is not set
++CONFIG_TOUCHSCREEN_ST1232=m
++CONFIG_TOUCHSCREEN_ATMEL_MXT=m
++# CONFIG_TOUCHSCREEN_MAX11801 is not set
++CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
++CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
++CONFIG_TOUCHSCREEN_ZFORCE=m
++
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_RETU_PWRBUTTON=m
++CONFIG_INPUT_UINPUT=m
++CONFIG_INPUT_WISTRON_BTNS=m
++CONFIG_INPUT_ATLAS_BTNS=m
++
++CONFIG_INPUT_ATI_REMOTE2=m
++CONFIG_INPUT_KEYSPAN_REMOTE=m
++
++CONFIG_MAC_EMUMOUSEBTN=y
++
++CONFIG_INPUT_WM831X_ON=m
++
++
++# CONFIG_INPUT_AD714X is not set
++# CONFIG_INPUT_PCF8574 is not set
++CONFIG_INPUT_MMA8450=m
++CONFIG_INPUT_MPU3050=m
++CONFIG_INPUT_KXTJ9=m
++# CONFIG_INPUT_KXTJ9_POLLED_MODE is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_SERIAL_NONSTANDARD=y
++CONFIG_ROCKETPORT=m
++CONFIG_SYNCLINK=m
++CONFIG_SYNCLINKMP=m
++CONFIG_SYNCLINK_GT=m
++CONFIG_N_HDLC=m
++CONFIG_N_GSM=m
++# CONFIG_TRACE_SINK is not set
++# CONFIG_STALDRV is not set
++# CONFIG_DUMMY_IRQ is not set
++# CONFIG_IBM_ASM is not set
++CONFIG_TIFM_CORE=m
++CONFIG_TIFM_7XX1=m
++CONFIG_TCG_TPM=m
++CONFIG_TCG_TIS=m
++# CONFIG_TCG_TIS_I2C_INFINEON is not set
++# CONFIG_TCG_TIS_I2C_ATMEL is not set
++# CONFIG_TCG_TIS_I2C_NUVOTON is not set
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++# CONFIG_TCG_INFINEON is not set
++# CONFIG_TCG_ST33_I2C is not set
++# CONFIG_TCG_XEN is not set
++CONFIG_TELCLOCK=m
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_CS=m
++CONFIG_SERIAL_8250_NR_UARTS=32
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++CONFIG_SERIAL_8250_RSA=y
++# CONFIG_SERIAL_8250_DW is not set
++CONFIG_CYCLADES=m
++# CONFIG_CYZ_INTR is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_RIO is not set
++CONFIG_SERIAL_JSM=m
++# CONFIG_SERIAL_SCCNXP is not set
++# CONFIG_SERIAL_MFD_HSU is not set
++
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
++# CONFIG_SERIAL_ALTERA_UART is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_XILINX_PS_UART is not set
++# CONFIG_SERIAL_TIMBERDALE is not set
++CONFIG_SERIAL_ARC=m
++CONFIG_SERIAL_ARC_NR_PORTS=1
++# CONFIG_SERIAL_RP2 is not set
++# CONFIG_SERIAL_ST_ASC is not set
++# CONFIG_SERIAL_PCH_UART is not set
++
++CONFIG_UNIX98_PTYS=y
++CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
++CONFIG_PRINTER=m
++CONFIG_LP_CONSOLE=y
++CONFIG_PPDEV=m
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++# CONFIG_I2C_MUX is not set
++# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
++# CONFIG_I2C_MUX_PCA954x is not set
++# CONFIG_I2C_MUX_GPIO is not set
++# CONFIG_I2C_MUX_PCA9541 is not set
++# CONFIG_I2C_MUX_PINCTRL is not set
++#
++
++#
++# I2C Algorithms
++#
++# CONFIG_I2C_DEBUG_ALGO is not set
++CONFIG_I2C_ALGOBIT=m
++
++#
++# I2C Hardware Bus support
++#
++
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD756_S4882 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_ISCH is not set
++# CONFIG_I2C_NFORCE2_S4985 is not set
++# CONFIG_I2C_INTEL_MID is not set
++# CONFIG_I2C_EG20T is not set
++# CONFIG_I2C_CBUS_GPIO is not set
++CONFIG_I2C_VIPERBOARD=m
++
++CONFIG_EEPROM_AT24=m
++CONFIG_EEPROM_LEGACY=m
++CONFIG_EEPROM_93CX6=m
++CONFIG_EEPROM_MAX6875=m
++
++CONFIG_I2C_NFORCE2=m
++# CONFIG_I2C_OCORES is not set
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
++CONFIG_I2C_PASEMI=m
++CONFIG_I2C_PCA_PLATFORM=m
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_SCx200_ACB is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++CONFIG_I2C_SIMTEC=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_TINY_USB=m
++# CONFIG_I2C_TAOS_EVM is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_DESIGNWARE is not set
++# CONFIG_I2C_XILINX is not set
++
++CONFIG_I2C_DIOLAN_U2C=m
++
++#
++# I2C Hardware Sensors Chip support
++#
++CONFIG_SENSORS_ATK0110=m
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ABITUGURU3=m
++CONFIG_SENSORS_AD7414=m
++CONFIG_SENSORS_AD7418=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1029=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ADT7310=m
++CONFIG_SENSORS_ADT7410=m
++CONFIG_SENSORS_ADS7828=m
++CONFIG_SENSORS_ADT7462=m
++CONFIG_SENSORS_ADT7470=m
++CONFIG_SENSORS_ADT7475=m
++CONFIG_SENSORS_APPLESMC=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_CORETEMP=m
++CONFIG_SENSORS_DME1737=m
++CONFIG_SENSORS_DS1621=m
++# CONFIG_DS1682 is not set
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_F71882FG=m
++CONFIG_SENSORS_F75375S=m
++CONFIG_SENSORS_FSCHMD=m
++CONFIG_SENSORS_G760A=m
++CONFIG_SENSORS_G762=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_HDAPS=m
++# CONFIG_SENSORS_HIH6130 is not set
++# CONFIG_SENSORS_HTU21 is not set
++# CONFIG_SENSORS_I5K_AMB is not set
++# FIXME: IBMAEM x86 only?
++CONFIG_SENSORS_IBMAEM=m
++CONFIG_SENSORS_IBMPEX=m
++# CONFIG_SENSORS_IIO_HWMON is not set
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_K8TEMP=m
++CONFIG_SENSORS_K10TEMP=m
++CONFIG_SENSORS_LIS3LV02D=m
++CONFIG_SENSORS_LIS3_SPI=m
++CONFIG_SENSORS_LIS3_I2C=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_LM93=m
++CONFIG_SENSORS_LM95234=m
++CONFIG_SENSORS_LTC4245=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_MAX6650=m
++CONFIG_SENSORS_MAX6697=m
++CONFIG_SENSORS_MCP3021=m
++CONFIG_SENSORS_NCT6775=m
++CONFIG_SENSORS_NTC_THERMISTOR=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_PC87427=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_SHT15=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_CHARGER_SMB347=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_THMC50=m
++CONFIG_SENSORS_TMP401=m
++CONFIG_APDS9802ALS=m
++CONFIG_ISL29020=m
++CONFIG_ISL29003=m
++CONFIG_SENSORS_BH1770=m
++CONFIG_SENSORS_APDS990X=m
++CONFIG_SENSORS_TSL2550=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VIA_CPUTEMP=m
++CONFIG_SENSORS_VT1211=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83L786NG=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83793=m
++CONFIG_SENSORS_LTC4215=m
++CONFIG_SENSORS_LM95241=m
++CONFIG_SENSORS_LM95245=m
++CONFIG_SENSORS_TMP421=m
++CONFIG_SENSORS_WM8350=m
++CONFIG_SENSORS_WM831X=m
++CONFIG_SENSORS_LM73=m
++CONFIG_SENSORS_AMC6821=m
++CONFIG_SENSORS_INA2XX=m
++CONFIG_SENSORS_INA209=m
++CONFIG_SENSORS_ADT7411=m
++CONFIG_SENSORS_ASC7621=m
++CONFIG_SENSORS_EMC1403=m
++CONFIG_SENSORS_TMP102=m
++CONFIG_SENSORS_LTC4261=m
++# CONFIG_SENSORS_BH1780 is not set
++# CONFIG_SENSORS_JC42 is not set
++# CONFIG_SENSORS_SMM665 is not set
++# CONFIG_SENSORS_EMC2103 is not set
++# CONFIG_SENSORS_GPIO_FAN is not set
++CONFIG_SENSORS_W83795=m
++# CONFIG_SENSORS_W83795_FANCTRL is not set
++CONFIG_SENSORS_DS620=m
++CONFIG_SENSORS_SHT21=m
++CONFIG_SENSORS_LINEAGE=m
++CONFIG_SENSORS_LTC4151=m
++CONFIG_SENSORS_MAX6639=m
++CONFIG_SENSORS_SCH5627=m
++CONFIG_SENSORS_SCH5636=m
++CONFIG_SENSORS_ADS1015=m
++CONFIG_SENSORS_MAX16065=m
++CONFIG_SENSORS_MAX6642=m
++CONFIG_SENSORS_ADM1275=m
++CONFIG_SENSORS_UCD9000=m
++CONFIG_SENSORS_UCD9200=m
++CONFIG_SENSORS_ZL6100=m
++CONFIG_SENSORS_EMC6W201=m
++
++CONFIG_PMBUS=m
++CONFIG_SENSORS_PMBUS=m
++CONFIG_SENSORS_MAX16064=m
++CONFIG_SENSORS_LM25066=m
++CONFIG_SENSORS_LTC2978=m
++CONFIG_SENSORS_MAX34440=m
++CONFIG_SENSORS_MAX8688=m
++CONFIG_SENSORS_MAX1668=m
++CONFIG_SENSORS_MAX197=m
++
++# Industrial I/O subsystem configuration
++CONFIG_IIO=m
++CONFIG_IIO_BUFFER=y
++CONFIG_IIO_BUFFER_CB=y
++# CONFIG_IIO_KFIFO_BUF is not set
++CONFIG_IIO_TRIGGERED_BUFFER=m
++CONFIG_IIO_TRIGGER=y
++CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
++CONFIG_IIO_INTERRUPT_TRIGGER=m
++CONFIG_HID_SENSOR_IIO_COMMON=m
++CONFIG_HID_SENSOR_IIO_TRIGGER=m
++CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS=y
++# CONFIG_IIO_SYSFS_TRIGGER is not set
++# CONFIG_AD5446 is not set
++# CONFIG_AD5380 is not set
++# CONFIG_AD5064 is not set
++# CONFIG_BMA180 is not set
++# CONFIG_MAX1363 is not set
++# CONFIG_MAX517 is not set
++# CONFIG_MCP4725 is not set
++# CONFIG_ITG3200 is not set
++# CONFIG_APDS9300 is not set
++# CONFIG_CM32181 is not set
++# CONFIG_CM36651 is not set
++# CONFIG_GP2AP020A00F is not set
++# CONFIG_TSL2583 is not set
++# CONFIG_TSL2x7x is not set
++# CONFIG_TCS3472 is not set
++# CONFIG_TSL4531 is not set
++# CONFIG_NAU7802 is not set
++# CONFIG_TI_ADC081C is not set
++# CONFIG_EXYNOS_ADC is not set
++# CONFIG_VIPERBOARD_ADC is not set
++# CONFIG_INV_MPU6050_IIO is not set
++CONFIG_IIO_ST_GYRO_3AXIS=m
++CONFIG_IIO_ST_MAGN_3AXIS=m
++CONFIG_IIO_ST_ACCEL_3AXIS=m
++CONFIG_HID_SENSOR_INCLINOMETER_3D=m
++# CONFIG_ADJD_S311 is not set
++# CONFIG_SENSORS_TSL2563 is not set
++# CONFIG_VCNL4000 is not set
++# CONFIG_AK8975 is not set
++# CONFIG_MAG3110 is not set
++# CONFIG_TMP006 is not set
++# CONFIG_IIO_ST_PRESS is not set
++# CONFIG_KXSD9 is not set
++# CONFIG_AD7266 is not set
++# CONFIG_AD7298 is not set
++# CONFIG_AD7476 is not set
++# CONFIG_AD7791 is not set
++# CONFIG_AD7793 is not set
++# CONFIG_AD7887 is not set
++# CONFIG_AD7923 is not set
++# CONFIG_MCP320X is not set
++# CONFIG_MCP3422 is not set
++# CONFIG_AD8366 is not set
++# CONFIG_AD5360 is not set
++# CONFIG_AD5421 is not set
++# CONFIG_AD5449 is not set
++# CONFIG_AD5504 is not set
++# CONFIG_AD5624R_SPI is not set
++# CONFIG_AD5686 is not set
++# CONFIG_AD5755 is not set
++# CONFIG_AD5764 is not set
++# CONFIG_AD5791 is not set
++# CONFIG_AD7303 is not set
++# CONFIG_AD9523 is not set
++# CONFIG_ADF4350 is not set
++# CONFIG_ADIS16080 is not set
++# CONFIG_ADIS16130 is not set
++# CONFIG_ADIS16136 is not set
++# CONFIG_ADIS16260 is not set
++# CONFIG_ADXRS450 is not set
++# CONFIG_ADIS16400 is not set
++# CONFIG_ADIS16480 is not set
++# CONFIG_DHT11 is not set
++# CONFIG_MPL3115 is not set
++
++# staging IIO drivers
++# CONFIG_AD7291 is not set
++# CONFIG_AD7606 is not set
++# CONFIG_AD799X is not set
++# CONFIG_ADT7316 is not set
++# CONFIG_AD7150 is not set
++# CONFIG_AD7152 is not set
++# CONFIG_AD7746 is not set
++# CONFIG_AD5933 is not set
++# CONFIG_ADE7854 is not set
++# CONFIG_SENSORS_ISL29018 is not set
++# CONFIG_SENSORS_ISL29028 is not set
++# CONFIG_SENSORS_HMC5843 is not set
++# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
++# CONFIG_IIO_SIMPLE_DUMMY is not set
++# CONFIG_ADIS16201 is not set
++# CONFIG_ADIS16203 is not set
++# CONFIG_ADIS16204 is not set
++# CONFIG_ADIS16209 is not set
++# CONFIG_ADIS16220 is not set
++# CONFIG_ADIS16240 is not set
++# CONFIG_LIS3L02DQ is not set
++# CONFIG_SCA3000 is not set
++# CONFIG_AD7780 is not set
++# CONFIG_AD7816 is not set
++# CONFIG_AD7192 is not set
++# CONFIG_AD7280 is not set
++# CONFIG_AD5930 is not set
++# CONFIG_AD9832 is not set
++# CONFIG_AD9834 is not set
++# CONFIG_AD9850 is not set
++# CONFIG_AD9852 is not set
++# CONFIG_AD9910 is not set
++# CONFIG_AD9951 is not set
++# CONFIG_ADIS16060 is not set
++# CONFIG_ADE7753 is not set
++# CONFIG_ADE7754 is not set
++# CONFIG_ADE7758 is not set
++# CONFIG_ADE7759 is not set
++# CONFIG_AD2S90 is not set
++# CONFIG_AD2S1200 is not set
++# CONFIG_AD2S1210 is not set
++
++
++
++# CONFIG_HMC6352 is not set
++# CONFIG_BMP085 is not set
++# CONFIG_BMP085_I2C is not set
++# CONFIG_PCH_PHUB is not set
++# CONFIG_USB_SWITCH_FSA9480 is not set
++
++CONFIG_W1=m
++CONFIG_W1_CON=y
++# CONFIG_W1_MASTER_MATROX is not set
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
++CONFIG_W1_MASTER_DS1WM=m
++CONFIG_W1_MASTER_GPIO=m
++# CONFIG_HDQ_MASTER_OMAP is not set
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2408=m
++# CONFIG_W1_SLAVE_DS2408_READBACK is not set
++CONFIG_W1_SLAVE_DS2413=m
++CONFIG_W1_SLAVE_DS2423=m
++CONFIG_W1_SLAVE_DS2431=m
++CONFIG_W1_SLAVE_DS2433=m
++CONFIG_W1_SLAVE_DS2433_CRC=y
++CONFIG_W1_SLAVE_DS2760=m
++CONFIG_W1_SLAVE_DS2780=m
++CONFIG_W1_SLAVE_DS2781=m
++CONFIG_W1_SLAVE_DS28E04=m
++CONFIG_W1_SLAVE_BQ27000=m
++
++#
++# Mice
++#
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG_CORE=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_WDTPCI=m
++# CONFIG_ACQUIRE_WDT is not set
++# CONFIG_ADVANTECH_WDT is not set
++# CONFIG_EUROTECH_WDT is not set
++CONFIG_IB700_WDT=m
++# CONFIG_SCx200_WDT is not set
++# CONFIG_60XX_WDT is not set
++CONFIG_W83877F_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_MACHZ_WDT=m
++# CONFIG_SC520_WDT is not set
++CONFIG_ALIM7101_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_IT87_WDT=m
++CONFIG_ITCO_WDT=m
++CONFIG_ITCO_VENDOR_SUPPORT=y
++# CONFIG_SC1200_WDT is not set
++# CONFIG_PC87413_WDT is not set
++# CONFIG_WAFER_WDT is not set
++# CONFIG_CPU5_WDT is not set
++CONFIG_I6300ESB_WDT=m
++CONFIG_IT8712F_WDT=m
++# CONFIG_SBC8360_WDT is not set
++# CONFIG_SBC7240_WDT is not set
++CONFIG_SMSC_SCH311X_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_USBPCWATCHDOG=m
++# CONFIG_SBC_EPX_C3_WATCHDOG is not set
++CONFIG_WM8350_WATCHDOG=m
++CONFIG_WM831X_WATCHDOG=m
++# CONFIG_MAX63XX_WATCHDOG is not set
++# CONFIG_DW_WATCHDOG is not set
++CONFIG_W83697UG_WDT=m
++# CONFIG_MEN_A21_WDT is not set
++# CONFIG_GPIO_WATCHDOG is not set
++
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_TIMERIOMEM=m
++CONFIG_HW_RANDOM_TPM=m
++# CONFIG_HW_RANDOM_ATMEL is not set
++# CONFIG_HW_RANDOM_EXYNOS is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_RTC_DEBUG is not set
++# CONFIG_GEN_RTC is not set
++CONFIG_RTC_HCTOSYS=y
++# CONFIG_RTC_SYSTOHC is not set
++CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
++CONFIG_RTC_INTF_SYSFS=y
++CONFIG_RTC_INTF_PROC=y
++CONFIG_RTC_INTF_DEV=y
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++CONFIG_RTC_DRV_CMOS=y
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1511=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_DS1374=m
++# CONFIG_RTC_DRV_EP93XX is not set
++CONFIG_RTC_DRV_FM3130=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_M41T80=m
++CONFIG_RTC_DRV_M41T80_WDT=y
++CONFIG_RTC_DRV_M48T59=m
++CONFIG_RTC_DRV_MAX6900=m
++# CONFIG_RTC_DRV_M48T86 is not set
++CONFIG_RTC_DRV_PCF2127=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_RS5C372=m
++# CONFIG_RTC_DRV_SA1100 is not set
++# CONFIG_RTC_DRV_TEST is not set
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_V3020=m
++CONFIG_RTC_DRV_DS2404=m
++CONFIG_RTC_DRV_STK17TA8=m
++# CONFIG_RTC_DRV_S35390A is not set
++CONFIG_RTC_DRV_RX8581=m
++CONFIG_RTC_DRV_RX8025=m
++CONFIG_RTC_DRV_DS1286=m
++CONFIG_RTC_DRV_M48T35=m
++CONFIG_RTC_DRV_BQ4802=m
++CONFIG_RTC_DRV_WM8350=m
++# CONFIG_RTC_DRV_AB3100 is not set
++CONFIG_RTC_DRV_WM831X=m
++CONFIG_RTC_DRV_BQ32K=m
++CONFIG_RTC_DRV_MSM6242=m
++CONFIG_RTC_DRV_RP5C01=m
++CONFIG_RTC_DRV_EM3027=m
++CONFIG_RTC_DRV_RV3029C2=m
++CONFIG_RTC_DRV_PCF50633=m
++CONFIG_RTC_DRV_DS3232=m
++CONFIG_RTC_DRV_ISL12022=m
++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
++# CONFIG_RTC_DRV_MOXART is not set
++# CONFIG_RTC_DRV_ISL12057 is not set
++
++CONFIG_R3964=m
++# CONFIG_APPLICOM is not set
++# CONFIG_SONYPI is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++CONFIG_AGP_ALI=y
++CONFIG_AGP_ATI=y
++CONFIG_AGP_AMD=y
++CONFIG_AGP_AMD64=y
++CONFIG_AGP_INTEL=y
++CONFIG_AGP_NVIDIA=y
++CONFIG_AGP_SIS=y
++CONFIG_AGP_SWORKS=y
++CONFIG_AGP_VIA=y
++CONFIG_AGP_EFFICEON=y
++
++CONFIG_VGA_ARB=y
++CONFIG_VGA_ARB_MAX_GPUS=16
++
++# CONFIG_STUB_POULSBO is not set
++
++#
++# PCMCIA character devices
++#
++# CONFIG_SYNCLINK_CS is not set
++
++CONFIG_CARDMAN_4000=m
++CONFIG_CARDMAN_4040=m
++
++CONFIG_MWAVE=m
++CONFIG_RAW_DRIVER=y
++CONFIG_MAX_RAW_DEVS=8192
++CONFIG_HANGCHECK_TIMER=m
++
++CONFIG_MEDIA_PCI_SUPPORT=y
++#
++# Multimedia devices
++#
++CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
++CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
++CONFIG_MEDIA_RC_SUPPORT=y
++CONFIG_MEDIA_CONTROLLER=y
++CONFIG_VIDEO_DEV=m
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
++CONFIG_VIDEO_V4L2=y
++CONFIG_VIDEO_V4L2_SUBDEV_API=y
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_USB_SI4713 is not set
++# CONFIG_PLATFORM_SI4713 is not set
++# CONFIG_I2C_SI4713 is not set
++# CONFIG_USB_RAREMONO is not set
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++CONFIG_V4L_USB_DRIVERS=y
++CONFIG_VIDEO_CAPTURE_DRIVERS=y
++CONFIG_V4L_PCI_DRIVERS=y
++CONFIG_VIDEO_AU0828=m
++CONFIG_VIDEO_AU0828_V4L2=y
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BT848_DVB=y
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_SR030PC30=m
++CONFIG_VIDEO_NOON010PC30=m
++CONFIG_VIDEO_CAFE_CCIC=m
++# CONFIG_VIDEO_CPIA is not set
++CONFIG_VIDEO_CPIA2=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_CX23885=m
++CONFIG_MEDIA_ALTERA_CI=m
++CONFIG_VIDEO_CX18=m
++CONFIG_VIDEO_CX18_ALSA=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_ENABLE_VP3054=y
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_EM28XX_V4L2=m
++CONFIG_VIDEO_EM28XX_ALSA=m
++CONFIG_VIDEO_EM28XX_DVB=m
++CONFIG_VIDEO_EM28XX_RC=y
++CONFIG_VIDEO_CX231XX=m
++CONFIG_VIDEO_CX231XX_ALSA=m
++CONFIG_VIDEO_CX231XX_DVB=m
++CONFIG_VIDEO_CX231XX_RC=y
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_IVTV=m
++# CONFIG_VIDEO_IVTV_ALSA is not set
++CONFIG_VIDEO_MEYE=m
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_PVRUSB2_DVB=y
++# CONFIG_VIDEO_PMS is not set
++CONFIG_VIDEO_HDPVR=m
++CONFIG_VIDEO_SAA6588=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_RC=y
++CONFIG_VIDEO_USBVISION=m
++CONFIG_VIDEO_STK1160_COMMON=m
++CONFIG_VIDEO_STK1160=m
++CONFIG_VIDEO_STK1160_AC97=y
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_ZR36060=m
++# CONFIG_V4L_ISA_PARPORT_DRIVERS is not set
++CONFIG_VIDEO_FB_IVTV=m
++CONFIG_VIDEO_SAA7164=m
++CONFIG_VIDEO_TM6000=m
++CONFIG_VIDEO_TM6000_ALSA=m
++CONFIG_VIDEO_TM6000_DVB=m
++CONFIG_VIDEO_TLG2300=m
++CONFIG_VIDEO_USBTV=m
++
++CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_SHARK=m
++CONFIG_RADIO_SHARK2=m
++CONFIG_RADIO_WL1273=m
++
++CONFIG_MEDIA_ATTACH=y
++
++#
++# V4L/DVB tuners
++# Selected automatically by not setting CONFIG_MEDIA_TUNER_CUSTOMISE
++#
++# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB_CAPTURE_DRIVERS=y
++CONFIG_DVB_CORE=m
++CONFIG_DVB_NET=y
++CONFIG_DVB_MAX_ADAPTERS=8
++CONFIG_DVB_DYNAMIC_MINORS=y
++
++#
++# DVB frontends
++# Selected automatically by not setting CONFIG_DVB_FE_CUSTOMISE
++#
++# CONFIG_DVB_FE_CUSTOMISE is not set
++
++#
++# Supported DVB bridge Modules
++#
++CONFIG_DVB_BT8XX=m
++CONFIG_DVB_BUDGET_CORE=m
++CONFIG_DVB_PLUTO2=m
++CONFIG_SMS_SIANO_MDTV=m
++CONFIG_SMS_SIANO_RC=y
++# CONFIG_SMS_SIANO_DEBUGFS is not set
++CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
++CONFIG_SMS_USB_DRV=m
++CONFIG_SMS_SDIO_DRV=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_USB_DTV5100=m
++CONFIG_DVB_USB_AF9015=m
++CONFIG_DVB_USB_ANYSEE=m
++CONFIG_DVB_USB_DW2102=m
++CONFIG_DVB_USB_FRIIO=m
++CONFIG_DVB_USB_EC168=m
++CONFIG_DVB_USB_PCTV452E=m
++CONFIG_DVB_USB_IT913X=m
++CONFIG_DVB_USB_MXL111SF=m
++CONFIG_DVB_DM1105=m
++CONFIG_DVB_FIREDTV=m
++CONFIG_DVB_NGENE=m
++CONFIG_DVB_DDBRIDGE=m
++CONFIG_DVB_USB_TECHNISAT_USB2=m
++CONFIG_DVB_USB_V2=m
++
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++CONFIG_DVB_TTUSB_BUDGET=m
++
++CONFIG_DVB_USB_CINERGY_T2=m
++CONFIG_DVB_B2C2_FLEXCOP=m
++# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
++
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_AF9005=m
++CONFIG_DVB_USB_AF9005_REMOTE=m
++CONFIG_DVB_USB_AU6610=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_DIB0700=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_USB_GL861=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_M920X=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_CE6230=m
++CONFIG_DVB_USB_OPERA1=m
++CONFIG_DVB_USB_TTUSB2=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_AZ6027=m
++CONFIG_DVB_USB_AZ6007=m
++CONFIG_DVB_USB_LME2510=m
++CONFIG_DVB_USB_RTL28XXU=m
++CONFIG_DVB_USB_AF9035=m
++
++CONFIG_DVB_PT1=m
++
++CONFIG_MANTIS_CORE=m
++CONFIG_DVB_MANTIS=m
++CONFIG_DVB_HOPPER=m
++
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_TVP5150=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++
++CONFIG_RC_CORE=m
++CONFIG_RC_DECODERS=y
++CONFIG_LIRC=m
++CONFIG_RC_LOOPBACK=m
++CONFIG_RC_MAP=m
++CONFIG_RC_DEVICES=y
++CONFIG_RC_ATI_REMOTE=m
++CONFIG_IR_NEC_DECODER=m
++CONFIG_IR_RC5_DECODER=m
++CONFIG_IR_RC6_DECODER=m
++CONFIG_IR_JVC_DECODER=m
++CONFIG_IR_SONY_DECODER=m
++CONFIG_IR_RC5_SZ_DECODER=m
++CONFIG_IR_SANYO_DECODER=m
++CONFIG_IR_MCE_KBD_DECODER=m
++CONFIG_IR_LIRC_CODEC=m
++CONFIG_IR_IMON=m
++CONFIG_IR_MCEUSB=m
++CONFIG_IR_ITE_CIR=m
++CONFIG_IR_NUVOTON=m
++CONFIG_IR_FINTEK=m
++CONFIG_IR_REDRAT3=m
++CONFIG_IR_ENE=m
++CONFIG_IR_STREAMZAP=m
++CONFIG_IR_WINBOND_CIR=m
++CONFIG_IR_IGUANA=m
++CONFIG_IR_TTUSBIR=m
++CONFIG_IR_GPIO_CIR=m
++
++CONFIG_V4L_MEM2MEM_DRIVERS=y
++# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set
++# CONFIG_VIDEO_SH_VEU is not set
++# CONFIG_VIDEO_RENESAS_VSP1 is not set
++# CONFIG_V4L_TEST_DRIVERS is not set
++
++# CONFIG_VIDEO_MEM2MEM_TESTDEV is not set
++
++#
++# Broadcom Crystal HD video decoder driver
++#
++CONFIG_CRYSTALHD=m
++
++#
++# Graphics support
++#
++
++CONFIG_DISPLAY_SUPPORT=m
++CONFIG_VIDEO_OUTPUT_CONTROL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_VGACON_SOFT_SCROLLBACK=y
++CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++
++#
++# Logo configuration
++#
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++
++#
++# Sound
++#
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SOUND_OSS_CORE_PRECLAIM=y
++# CONFIG_SND_DEBUG_VERBOSE is not set
++CONFIG_SND_VERBOSE_PROCFS=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_HRTIMER=y
++CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_RTCTIMER=y
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_MAX_CARDS=32
++# CONFIG_SND_SUPPORT_OLD_API is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_DUMMY=m
++CONFIG_SND_ALOOP=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_MTS64=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++CONFIG_SND_PORTMAN2X4=m
++CONFIG_SND_AC97_POWER_SAVE=y
++CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
++
++CONFIG_SND_DRIVERS=y
++
++#
++# ISA devices
++#
++CONFIG_SND_AD1889=m
++
++#
++# PCI devices
++#
++CONFIG_SND_PCI=y
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++# CONFIG_SND_AW2 is not set
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS5530=m
++CONFIG_SND_CS5535AUDIO=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_ES1968_INPUT=y
++CONFIG_SND_ES1968_RADIO=y
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X_BOOL=y
++CONFIG_SND_CTXFI=m
++CONFIG_SND_LX6464ES=m
++CONFIG_SND_HDA_INTEL=y
++CONFIG_SND_HDA_INPUT_BEEP=y
++CONFIG_SND_HDA_INPUT_BEEP_MODE=0
++CONFIG_SND_HDA_INPUT_JACK=y
++CONFIG_SND_HDA_PATCH_LOADER=y
++CONFIG_SND_HDA_HWDEP=y
++CONFIG_SND_HDA_CODEC_REALTEK=y
++CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS=y
++CONFIG_SND_HDA_CODEC_CA0110=y
++CONFIG_SND_HDA_CODEC_ANALOG=y
++CONFIG_SND_HDA_CODEC_SIGMATEL=y
++CONFIG_SND_HDA_CODEC_VIA=y
++CONFIG_SND_HDA_CODEC_CIRRUS=y
++CONFIG_SND_HDA_CODEC_CONEXANT=y
++CONFIG_SND_HDA_CODEC_CMEDIA=y
++CONFIG_SND_HDA_CODEC_SI3054=y
++CONFIG_SND_HDA_CODEC_HDMI=y
++CONFIG_SND_HDA_I915=y
++CONFIG_SND_HDA_CODEC_CA0132=y
++CONFIG_SND_HDA_CODEC_CA0132_DSP=y
++CONFIG_SND_HDA_GENERIC=y
++CONFIG_SND_HDA_POWER_SAVE=y
++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
++CONFIG_SND_HDA_RECONFIG=y
++CONFIG_SND_HDA_PREALLOC_SIZE=4096
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=y
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MAESTRO3_INPUT=y
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_OXYGEN=m
++CONFIG_SND_RME32=m
++CONFIG_SND_PCSP=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SIS7019=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VIRTUOSO=m
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
++CONFIG_SND_ASIHPI=m
++CONFIG_SND_LOLA=m
++
++#
++# ALSA USB devices
++#
++CONFIG_SND_USB=y
++CONFIG_SND_USB_CAIAQ=m
++CONFIG_SND_USB_CAIAQ_INPUT=y
++CONFIG_SND_USB_USX2Y=m
++CONFIG_SND_USB_US122L=m
++CONFIG_SND_USB_UA101=m
++CONFIG_SND_USB_6FIRE=m
++CONFIG_SND_USB_HIFACE=m
++
++#
++# PCMCIA devices
++#
++# CONFIG_SND_PCMCIA is not set
++
++CONFIG_SND_FIREWIRE=y
++CONFIG_SND_FIREWIRE_SPEAKERS=m
++CONFIG_SND_ISIGHT=m
++CONFIG_SND_SCS1X=m
++CONFIG_SND_DICE=m
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_SUPPORT=y
++# CONFIG_USB_DEBUG is not set
++
++# DEPRECATED: See bug 362221. Fix udev.
++# CONFIG_USB_DEVICE_CLASS is not set
++
++
++#
++# Miscellaneous USB options
++#
++
++# Deprecated.
++# CONFIG_USB_DEVICEFS is not set
++
++CONFIG_USB_DEFAULT_PERSIST=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++CONFIG_USB_SUSPEND=y
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++# CONFIG_USB_EHCI_MV is not set
++# CONFIG_USB_EHCI_HCD_PLATFORM is not set
++# CONFIG_USB_ISP116X_HCD is not set
++# CONFIG_USB_ISP1760_HCD is not set
++CONFIG_USB_ISP1362_HCD=m
++CONFIG_USB_FUSBH200_HCD=m
++# CONFIG_USB_FOTG210_HCD is not set
++# CONFIG_USB_GR_UDC is not set
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_OHCI_HCD_PCI=y
++# CONFIG_USB_OHCI_HCD_SSB is not set
++# CONFIG_USB_HCD_TEST_MODE is not set
++# CONFIG_USB_OHCI_HCD_PLATFORM is not set
++CONFIG_USB_UHCI_HCD=y
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_HCD_ISO=y
++# CONFIG_USB_SL811_CS is not set
++# CONFIG_USB_R8A66597_HCD is not set
++CONFIG_USB_XHCI_HCD=y
++# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
++
++#
++# USB Device Class drivers
++#
++
++#
++# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++CONFIG_USB_WDM=m
++CONFIG_USB_TMC=m
++# CONFIG_BLK_DEV_UB is not set
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_CYPRESS_ATACB=m
++CONFIG_USB_STORAGE_DATAFAB=m
++CONFIG_USB_STORAGE_FREECOM=m
++CONFIG_USB_STORAGE_ISD200=m
++CONFIG_USB_STORAGE_SDDR09=m
++CONFIG_USB_STORAGE_SDDR55=m
++CONFIG_USB_STORAGE_JUMPSHOT=m
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_ONETOUCH=m
++CONFIG_USB_STORAGE_ALAUDA=m
++CONFIG_USB_STORAGE_KARMA=m
++CONFIG_USB_STORAGE_REALTEK=m
++CONFIG_REALTEK_AUTOPM=y
++CONFIG_USB_STORAGE_ENE_UB6250=m
++# CONFIG_USB_LIBUSUAL is not set
++# CONFIG_USB_UAS is not set
++
++
++#
++# USB Human Interface Devices (HID)
++#
++CONFIG_USB_HID=y
++
++CONFIG_HID_SUPPORT=y
++
++CONFIG_HID=y
++CONFIG_I2C_HID=m
++CONFIG_HID_BATTERY_STRENGTH=y
++# debugging default is y upstream now
++CONFIG_HIDRAW=y
++CONFIG_UHID=m
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_HID_LOGITECH_DJ=m
++CONFIG_LOGIWII_FF=y
++CONFIG_LOGIRUMBLEPAD2_FF=y
++CONFIG_PANTHERLORD_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_HID_WACOM=m
++CONFIG_HID_WACOM_POWER_SUPPLY=y
++CONFIG_ZEROPLUS_FF=y
++CONFIG_USB_HIDDEV=y
++CONFIG_USB_IDMOUSE=m
++CONFIG_DRAGONRISE_FF=y
++CONFIG_GREENASIA_FF=y
++CONFIG_SMARTJOYPLUS_FF=y
++CONFIG_LOGIG940_FF=y
++CONFIG_LOGIWHEELS_FF=y
++CONFIG_HID_MAGICMOUSE=y
++CONFIG_HID_MULTITOUCH=m
++CONFIG_HID_NTRIG=y
++CONFIG_HID_QUANTA=y
++CONFIG_HID_PRIMAX=m
++CONFIG_HID_PS3REMOTE=m
++CONFIG_HID_PRODIKEYS=m
++CONFIG_HID_DRAGONRISE=m
++CONFIG_HID_GYRATION=m
++CONFIG_HID_ICADE=m
++CONFIG_HID_TWINHAN=m
++CONFIG_HID_ORTEK=m
++CONFIG_HID_PANTHERLORD=m
++CONFIG_HID_PETALYNX=m
++CONFIG_HID_PICOLCD=m
++CONFIG_HID_RMI=m
++CONFIG_HID_ROCCAT=m
++CONFIG_HID_ROCCAT_KONE=m
++CONFIG_HID_SAMSUNG=m
++CONFIG_HID_SONY=m
++CONFIG_SONY_FF=y
++CONFIG_HID_SUNPLUS=m
++CONFIG_HID_STEELSERIES=m
++CONFIG_HID_GREENASIA=m
++CONFIG_HID_SMARTJOYPLUS=m
++CONFIG_HID_TOPSEED=m
++CONFIG_HID_THINGM=m
++CONFIG_HID_THRUSTMASTER=m
++CONFIG_HID_XINMO=m
++CONFIG_HID_ZEROPLUS=m
++CONFIG_HID_ZYDACRON=m
++CONFIG_HID_SENSOR_HUB=m
++CONFIG_HID_SENSOR_GYRO_3D=m
++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
++CONFIG_HID_SENSOR_ALS=m
++CONFIG_HID_SENSOR_ACCEL_3D=m
++CONFIG_HID_EMS_FF=m
++CONFIG_HID_ELECOM=m
++CONFIG_HID_ELO=m
++CONFIG_HID_UCLOGIC=m
++CONFIG_HID_WALTOP=m
++CONFIG_HID_ROCCAT_PYRA=m
++CONFIG_HID_ROCCAT_KONEPLUS=m
++CONFIG_HID_ACRUX=m
++CONFIG_HID_ACRUX_FF=y
++CONFIG_HID_KEYTOUCH=m
++CONFIG_HID_LCPOWER=m
++CONFIG_HID_LENOVO_TPKBD=m
++CONFIG_HID_ROCCAT_ARVO=m
++CONFIG_HID_ROCCAT_ISKU=m
++CONFIG_HID_ROCCAT_KOVAPLUS=m
++CONFIG_HID_HOLTEK=m
++CONFIG_HOLTEK_FF=y
++CONFIG_HID_HUION=m
++CONFIG_HID_SPEEDLINK=m
++CONFIG_HID_WIIMOTE=m
++CONFIG_HID_WIIMOTE_EXT=y
++CONFIG_HID_KYE=m
++CONFIG_HID_SAITEK=m
++CONFIG_HID_TIVO=m
++CONFIG_HID_GENERIC=y
++CONFIG_HID_AUREAL=m
++CONFIG_HID_APPLEIR=m
++
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Multimedia devices
++#
++
++CONFIG_USB_DSBR=m
++# CONFIG_USB_ET61X251 is not set
++CONFIG_USB_M5602=m
++CONFIG_USB_STV06XX=m
++CONFIG_USB_GSPCA=m
++CONFIG_USB_GSPCA_MR97310A=m
++CONFIG_USB_GSPCA_BENQ=m
++CONFIG_USB_GSPCA_CONEX=m
++CONFIG_USB_GSPCA_CPIA1=m
++CONFIG_USB_GSPCA_ETOMS=m
++CONFIG_USB_GSPCA_FINEPIX=m
++CONFIG_USB_GSPCA_MARS=m
++CONFIG_USB_GSPCA_OV519=m
++CONFIG_USB_GSPCA_OV534=m
++CONFIG_USB_GSPCA_OV534_9=m
++CONFIG_USB_GSPCA_PAC207=m
++CONFIG_USB_GSPCA_PAC7311=m
++CONFIG_USB_GSPCA_SN9C2028=m
++CONFIG_USB_GSPCA_SN9C20X=m
++CONFIG_USB_GSPCA_SONIXB=m
++CONFIG_USB_GSPCA_SONIXJ=m
++CONFIG_USB_GSPCA_SPCA500=m
++CONFIG_USB_GSPCA_SPCA501=m
++CONFIG_USB_GSPCA_SPCA505=m
++CONFIG_USB_GSPCA_SPCA506=m
++CONFIG_USB_GSPCA_SPCA508=m
++CONFIG_USB_GSPCA_SPCA561=m
++CONFIG_USB_GSPCA_STK014=m
++CONFIG_USB_GSPCA_STK1135=m
++CONFIG_USB_GSPCA_SUNPLUS=m
++CONFIG_USB_GSPCA_T613=m
++CONFIG_USB_GSPCA_TOPRO=m
++CONFIG_USB_GSPCA_TV8532=m
++CONFIG_USB_GSPCA_VC032X=m
++CONFIG_USB_GSPCA_ZC3XX=m
++CONFIG_USB_GSPCA_SQ905=m
++CONFIG_USB_GSPCA_SQ905C=m
++CONFIG_USB_GSPCA_PAC7302=m
++CONFIG_USB_GSPCA_STV0680=m
++CONFIG_USB_GL860=m
++CONFIG_USB_GSPCA_JEILINJ=m
++CONFIG_USB_GSPCA_JL2005BCD=m
++CONFIG_USB_GSPCA_KONICA=m
++CONFIG_USB_GSPCA_XIRLINK_CIT=m
++CONFIG_USB_GSPCA_SPCA1528=m
++CONFIG_USB_GSPCA_SQ930X=m
++CONFIG_USB_GSPCA_NW80X=m
++CONFIG_USB_GSPCA_VICAM=m
++CONFIG_USB_GSPCA_KINECT=m
++CONFIG_USB_GSPCA_SE401=m
++
++CONFIG_USB_S2255=m
++# CONFIG_VIDEO_SH_MOBILE_CEU is not set
++# CONFIG_VIDEO_SH_MOBILE_CSI2 is not set
++# CONFIG_USB_SN9C102 is not set
++CONFIG_USB_ZR364XX=m
++
++#
++# USB Network adaptors
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_HSO=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_RTL8152=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_AX88179_178A=m
++CONFIG_USB_NET_DM9601=m
++CONFIG_USB_NET_SR9700=m
++CONFIG_USB_NET_SMSC95XX=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_MCS7830=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_NET_CDC_EEM=m
++CONFIG_USB_NET_CDC_NCM=m
++CONFIG_USB_NET_HUAWEI_CDC_NCM=m
++CONFIG_USB_NET_CDC_MBIM=m
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_NET_CX82310_ETH=m
++CONFIG_USB_NET_INT51X1=m
++CONFIG_USB_CDC_PHONET=m
++CONFIG_USB_IPHETH=m
++CONFIG_USB_SIERRA_NET=m
++CONFIG_USB_VL600=m
++
++#
++# USB Host-to-Host Cables
++#
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++
++#
++# Intelligent USB Devices/Gadgets
++#
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_KC2190=y
++
++# CONFIG_USB_MUSB_HDRC is not set
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=y
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_SIMPLE=m
++CONFIG_USB_SERIAL_AIRCABLE=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_CH341=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP210X=m
++CONFIG_USB_SERIAL_QUALCOMM=m
++CONFIG_USB_SERIAL_SYMBOL=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_EMPEG=m
++# CONFIG_USB_SERIAL_F81232 is not set
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_FUNSOFT=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_IUU=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++# CONFIG_USB_SERIAL_METRO is not set
++CONFIG_USB_SERIAL_MOS7720=m
++CONFIG_USB_SERIAL_MOS7715_PARPORT=y
++# CONFIG_USB_SERIAL_ZIO is not set
++# CONFIG_USB_SERIAL_WISHBONE is not set
++# CONFIG_USB_SERIAL_ZTE is not set
++CONFIG_USB_SERIAL_MOS7840=m
++CONFIG_USB_SERIAL_MOTOROLA=m
++# CONFIG_USB_SERIAL_MXUPORT is not set
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OTI6858=m
++CONFIG_USB_SERIAL_OPTICON=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_SERIAL_PL2303=m
++# CONFIG_USB_SERIAL_QUATECH2 is not set
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_SIEMENS_MPI=m
++CONFIG_USB_SERIAL_SPCP8X5=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_QCAUX=m
++CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
++CONFIG_USB_SERIAL_XSENS_MT=m
++CONFIG_USB_SERIAL_DEBUG=m
++CONFIG_USB_SERIAL_SSU100=m
++CONFIG_USB_SERIAL_QT2=m
++CONFIG_USB_SERIAL_FLASHLOADER=m
++CONFIG_USB_SERIAL_SUUNTO=m
++CONFIG_USB_SERIAL_CONSOLE=y
++
++CONFIG_USB_EZUSB=y
++CONFIG_USB_EMI62=m
++CONFIG_USB_LED=m
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++
++#
++# USB Miscellaneous drivers
++#
++
++CONFIG_USB_ADUTUX=m
++CONFIG_USB_SEVSEG=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_APPLEDISPLAY=m
++
++# Physical Layer USB driver
++# CONFIG_USB_OTG_FSM is not set
++
++# CONFIG_GENERIC_PHY is not set
++# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set
++# CONFIG_PHY_EXYNOS_DP_VIDEO is not set
++# CONFIG_OMAP_USB2 is not set
++# CONFIG_OMAP_USB3 is not set
++# CONFIG_OMAP_CONTROL_USB is not set
++# CONFIG_AM335X_PHY_USB is not set
++# CONFIG_SAMSUNG_USBPHY is not set
++# CONFIG_SAMSUNG_USB2PHY is not set
++# CONFIG_SAMSUNG_USB3PHY is not set
++# CONFIG_BCM_KONA_USB2_PHY is not set
++CONFIG_USB_RCAR_PHY=m
++CONFIG_USB_ATM=m
++CONFIG_USB_CXACRU=m
++# CONFIG_USB_C67X00_HCD is not set
++# CONFIG_USB_CYTHERM is not set
++CONFIG_USB_EMI26=m
++CONFIG_USB_FTDI_ELAN=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++# CONFIG_USB_DWC3 is not set
++# CONFIG_USB_GADGETFS is not set
++# CONFIG_USB_OXU210HP_HCD is not set
++CONFIG_USB_IOWARRIOR=m
++CONFIG_USB_ISIGHTFW=m
++CONFIG_USB_YUREX=m
++CONFIG_USB_EZUSB_FX2=m
++CONFIG_USB_HSIC_USB3503=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LD=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_MON=y
++CONFIG_USB_PWC=m
++CONFIG_USB_PWC_INPUT_EVDEV=y
++# CONFIG_USB_PWC_DEBUG is not set
++# CONFIG_USB_RIO500 is not set
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_SISUSBVGA_CON=y
++CONFIG_RADIO_SI470X=y
++CONFIG_USB_KEENE=m
++CONFIG_USB_MA901=m
++CONFIG_USB_SI470X=m
++CONFIG_I2C_SI470X=m
++CONFIG_RADIO_SI4713=m
++# CONFIG_RADIO_TEF6862 is not set
++CONFIG_USB_MR800=m
++CONFIG_USB_STKWEBCAM=m
++# CONFIG_USB_TEST is not set
++# CONFIG_USB_EHSET_TEST_FIXTURE is not set
++CONFIG_USB_TRANCEVIBRATOR=m
++CONFIG_USB_U132_HCD=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
++
++# CONFIG_USB_DWC2 is not set
++
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++
++# CONFIG_USB_ISP1301 is not set
++
++# CONFIG_USB_OTG is not set
++
++#
++# Sonics Silicon Backplane
++#
++CONFIG_SSB=m
++CONFIG_SSB_PCIHOST=y
++CONFIG_SSB_SDIOHOST=y
++CONFIG_SSB_PCMCIAHOST=y
++# CONFIG_SSB_SILENT is not set
++# CONFIG_SSB_DEBUG is not set
++CONFIG_SSB_DRIVER_PCICORE=y
++CONFIG_SSB_DRIVER_GPIO=y
++
++# Multifunction USB devices
++# CONFIG_MFD_PCF50633 is not set
++CONFIG_PCF50633_ADC=m
++CONFIG_PCF50633_GPIO=m
++# CONFIG_AB3100_CORE is not set
++CONFIG_INPUT_PCF50633_PMU=m
++CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
++
++CONFIG_MFD_SUPPORT=y
++CONFIG_MFD_VX855=m
++CONFIG_MFD_SM501=m
++CONFIG_MFD_SM501_GPIO=y
++CONFIG_MFD_RTSX_PCI=m
++# CONFIG_MFD_TI_AM335X_TSCADC is not set
++CONFIG_MFD_VIPERBOARD=m
++# CONFIG_MFD_RETU is not set
++# CONFIG_MFD_TC6393XB is not set
++# CONFIG_MFD_WM8400 is not set
++# CONFIG_MFD_WM8350_I2C is not set
++# CONFIG_MFD_WM8350 is not set
++# CONFIG_MFD_WM831X is not set
++# CONFIG_AB3100_OTP is not set
++# CONFIG_MFD_TIMBERDALE is not set
++# CONFIG_MFD_WM8994 is not set
++# CONFIG_MFD_88PM860X is not set
++# CONFIG_LPC_SCH is not set
++# CONFIG_LPC_ICH is not set
++# CONFIG_HTC_I2CPLD is not set
++# CONFIG_MFD_MAX8925 is not set
++# CONFIG_MFD_ASIC3 is not set
++# CONFIG_MFD_AS3722 is not set
++# CONFIG_HTC_EGPIO is not set
++# CONFIG_TPS6507X is not set
++# CONFIG_ABX500_CORE is not set
++# CONFIG_MFD_RDC321X is not set
++# CONFIG_MFD_JANZ_CMODIO is not set
++# CONFIG_MFD_KEMPLD is not set
++# CONFIG_MFD_WM831X_I2C is not set
++# CONFIG_MFD_CS5535 is not set
++# CONFIG_MFD_STMPE is not set
++# CONFIG_MFD_MAX8998 is not set
++# CONFIG_MFD_TPS6586X is not set
++# CONFIG_MFD_TC3589X is not set
++# CONFIG_MFD_WL1273_CORE is not set
++# CONFIG_MFD_TPS65217 is not set
++# CONFIG_MFD_LM3533 is not set
++# CONFIG_MFD_ARIZONA is not set
++# CONFIG_MFD_ARIZONA_I2C is not set
++# CONFIG_MFD_CROS_EC is not set
++# CONFIG_MFD_TPS65912 is not set
++# CONFIG_MFD_SYSCON is not set
++# CONFIG_MFD_DA9063 is not set
++# CONFIG_MFD_LP3943 is not set
++
++#
++# File systems
++#
++CONFIG_MISC_FILESYSTEMS=y
++
++# ext4 is used for ext2 and ext3 filesystems
++CONFIG_JBD2=y
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++# CONFIG_JFS_DEBUG is not set
++# CONFIG_JFS_STATISTICS is not set
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++CONFIG_XFS_FS=m
++# CONFIG_XFS_DEBUG is not set
++# CONFIG_XFS_RT is not set
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=y
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++# Autofsv3 is obsolete.
++# systemd is dependent upon AUTOFS, so build it in.
++# CONFIG_EXOFS_FS is not set
++# CONFIG_EXOFS_DEBUG is not set
++CONFIG_NILFS2_FS=m
++# CONFIG_LOGFS is not set
++CONFIG_CEPH_FS=m
++CONFIG_CEPH_FSCACHE=y
++CONFIG_BLK_DEV_RBD=m
++CONFIG_CEPH_LIB=m
++CONFIG_CEPH_FS_POSIX_ACL=y
++# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
++
++CONFIG_FSCACHE=m
++CONFIG_FSCACHE_STATS=y
++# CONFIG_FSCACHE_HISTOGRAM is not set
++# CONFIG_FSCACHE_DEBUG is not set
++CONFIG_FSCACHE_OBJECT_LIST=y
++
++CONFIG_CACHEFILES=m
++# CONFIG_CACHEFILES_DEBUG is not set
++# CONFIG_CACHEFILES_HISTOGRAM is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_VMCORE=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++# CONFIG_DEBUG_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++CONFIG_AFFS_FS=m
++CONFIG_ECRYPT_FS=m
++# CONFIG_ECRYPT_FS_MESSAGING is not set
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++# CONFIG_HFSPLUS_FS_POSIX_ACL is not set
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++
++CONFIG_CRAMFS=m
++CONFIG_SQUASHFS=m
++CONFIG_SQUASHFS_XATTR=y
++CONFIG_SQUASHFS_LZO=y
++CONFIG_SQUASHFS_XZ=y
++CONFIG_SQUASHFS_ZLIB=y
++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
++# CONFIG_SQUASHFS_EMBEDDED is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_QNX6FS_FS is not set
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++CONFIG_9P_FS=m
++CONFIG_9P_FSCACHE=y
++CONFIG_9P_FS_POSIX_ACL=y
++CONFIG_9P_FS_SECURITY=y
++# CONFIG_OMFS_FS is not set
++CONFIG_CUSE=m
++# CONFIG_F2FS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NETWORK_FILESYSTEMS=y
++# CONFIG_NFS_V2 is not set
++CONFIG_NFS_V3=y
++CONFIG_NFS_SWAP=y
++CONFIG_NFS_V4_1=y
++CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
++# CONFIG_NFS_V4_1_MIGRATION is not set
++CONFIG_NFS_V4_2=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_V4_SECURITY_LABEL=y
++CONFIG_NFS_FSCACHE=y
++# CONFIG_NFS_USE_LEGACY_DNS is not set
++CONFIG_PNFS_OBJLAYOUT=m
++CONFIG_PNFS_BLOCK=m
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_SUNRPC_XPRT_RDMA=m
++CONFIG_SUNRPC_DEBUG=y
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++# CONFIG_CIFS_STATS2 is not set
++CONFIG_CIFS_SMB2=y
++CONFIG_CIFS_UPCALL=y
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++CONFIG_CIFS_FSCACHE=y
++CONFIG_CIFS_ACL=y
++CONFIG_CIFS_WEAK_PW_HASH=y
++CONFIG_CIFS_DEBUG=y
++# CONFIG_CIFS_DEBUG2 is not set
++CONFIG_CIFS_DFS_UPCALL=y
++CONFIG_CIFS_NFSD_EXPORT=y
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++CONFIG_NCPFS_SMALLDOS=y
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_AFS_FS is not set
++# CONFIG_AF_RXRPC is not set
++
++CONFIG_OCFS2_FS=m
++# CONFIG_OCFS2_DEBUG_FS is not set
++# CONFIG_OCFS2_DEBUG_MASKLOG is not set
++CONFIG_OCFS2_FS_O2CB=m
++CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
++# CONFIG_OCFS2_FS_STATS is not set
++
++CONFIG_BTRFS_FS=m
++CONFIG_BTRFS_FS_POSIX_ACL=y
++# Maybe see if we want this on for debug kernels?
++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
++# CONFIG_BTRFS_DEBUG is not set
++# CONFIG_BTRFS_ASSERT is not set
++
++CONFIG_CONFIGFS_FS=y
++
++CONFIG_DLM=m
++CONFIG_DLM_DEBUG=y
++CONFIG_GFS2_FS=m
++CONFIG_GFS2_FS_LOCKING_DLM=y
++
++
++CONFIG_UBIFS_FS_XATTR=y
++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
++# CONFIG_UBIFS_FS_DEBUG is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_AIX_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_BSD_DISKLABEL=y
++CONFIG_EFI_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_OSF_PARTITION=y
++CONFIG_SGI_PARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_SUN_PARTITION=y
++# CONFIG_SYSV68_PARTITION is not set
++CONFIG_UNIXWARE_DISKLABEL=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_CMDLINE_PARTITION is not set
++
++CONFIG_NLS=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_MAC_ROMAN=m
++CONFIG_NLS_MAC_CELTIC=m
++CONFIG_NLS_MAC_CENTEURO=m
++CONFIG_NLS_MAC_CROATIAN=m
++CONFIG_NLS_MAC_CYRILLIC=m
++CONFIG_NLS_MAC_GAELIC=m
++CONFIG_NLS_MAC_GREEK=m
++CONFIG_NLS_MAC_ICELAND=m
++CONFIG_NLS_MAC_INUIT=m
++CONFIG_NLS_MAC_ROMANIAN=m
++CONFIG_NLS_MAC_TURKISH=m
++
++#
++# Profiling support
++#
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=m
++CONFIG_OPROFILE_EVENT_MULTIPLEX=y
++
++#
++# Kernel hacking
++#
++CONFIG_DEBUG_KERNEL=y
++CONFIG_FRAME_WARN=1024
++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0
++# CONFIG_DEBUG_INFO is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_HEADERS_CHECK=y
++# CONFIG_LKDTM is not set
++# CONFIG_NOTIFIER_ERROR_INJECTION is not set
++# CONFIG_READABLE_ASM is not set
++
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_LOCKDEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++
++# DEBUG options that don't get enabled/disabled with 'make debug/release'
++
++# This generates a huge amount of dmesg spew
++# CONFIG_DEBUG_KOBJECT is not set
++#
++# This breaks booting until the module patches are in-tree
++# CONFIG_DEBUG_KOBJECT_RELEASE is not set
++#
++#
++# These debug options are deliberately left on (even in 'make release' kernels).
++# They don't have much of a performance impact, and the value
++# of getting useful bug reports makes it worth leaving them on.
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_SHIRQ is not set
++CONFIG_BOOT_PRINTK_DELAY=y
++CONFIG_DEBUG_DEVRES=y
++CONFIG_DEBUG_RODATA_TEST=y
++CONFIG_DEBUG_NX_TEST=m
++CONFIG_DEBUG_SET_MODULE_RONX=y
++CONFIG_DEBUG_BOOT_PARAMS=y
++# CONFIG_DEBUG_VM is not set
++# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
++CONFIG_LOCKUP_DETECTOR=y
++# CONFIG_DEBUG_INFO_REDUCED is not set
++# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
++# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
++# CONFIG_PANIC_ON_OOPS is not set
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_ATOMIC64_SELFTEST=y
++CONFIG_MEMORY_FAILURE=y
++CONFIG_HWPOISON_INJECT=m
++CONFIG_CROSS_MEMORY_ATTACH=y
++# CONFIG_DEBUG_SECTION_MISMATCH is not set
++# CONFIG_BACKTRACE_SELF_TEST is not set
++CONFIG_RESOURCE_COUNTERS=y
++# CONFIG_DEBUG_VIRTUAL is not set
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
++CONFIG_EARLY_PRINTK_DBGP=y
++# CONFIG_PAGE_POISONING is not set
++# CONFIG_CRASH_DUMP is not set
++# CONFIG_CRASH is not set
++# CONFIG_GCOV_KERNEL is not set
++# CONFIG_RAMOOPS is not set
++
++
++#
++# Security options
++#
++CONFIG_SECURITY=y
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
++CONFIG_SECURITY_NETWORK=y
++CONFIG_SECURITY_NETWORK_XFRM=y
++# CONFIG_SECURITY_PATH is not set
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
++# CONFIG_SECURITY_SMACK is not set
++# CONFIG_SECURITY_TOMOYO is not set
++# CONFIG_SECURITY_APPARMOR is not set
++# CONFIG_SECURITY_YAMA is not set
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++# http://lists.fedoraproject.org/pipermail/kernel/2013-February/004125.html
++CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
++
++CONFIG_SECCOMP=y
++
++# CONFIG_SSBI is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_FIPS=y
++CONFIG_CRYPTO_USER_API_HASH=y
++CONFIG_CRYPTO_USER_API_SKCIPHER=y
++CONFIG_CRYPTO_MANAGER=y
++# Note, CONFIG_CRYPTO_MANAGER_DISABLE_TESTS needs to be unset, or FIPS will be disabled.
++# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
++CONFIG_CRYPTO_HW=y
++CONFIG_CRYPTO_BLKCIPHER=y
++# CONFIG_CRYPTO_CRYPTD is not set
++CONFIG_CRYPTO_AES=y
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_AUTHENC=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_CRC32C=y
++CONFIG_CRYPTO_CRC32=m
++CONFIG_CRYPTO_CTR=y
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_FCRYPT=m
++CONFIG_CRYPTO_GF128MUL=m
++CONFIG_CRYPTO_CMAC=m
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_LZO=m
++CONFIG_CRYPTO_LZ4=m
++CONFIG_CRYPTO_LZ4HC=m
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_PCBC=m
++CONFIG_CRYPTO_SALSA20=m
++CONFIG_CRYPTO_SALSA20_586=m
++CONFIG_CRYPTO_SEED=m
++CONFIG_CRYPTO_SEQIV=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_XCBC=m
++CONFIG_CRYPTO_VMAC=m
++CONFIG_CRYPTO_CRC32C_INTEL=m
++CONFIG_CRYPTO_GHASH=m
++CONFIG_CRYPTO_DEV_HIFN_795X=m
++CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
++CONFIG_CRYPTO_PCRYPT=m
++
++
++
++# Random number generation
++
++#
++# Library routines
++#
++CONFIG_CRC16=y
++CONFIG_CRC32=m
++# CONFIG_CRC32_SELFTEST is not set
++CONFIG_CRC_ITU_T=m
++CONFIG_CRC8=m
++# CONFIG_RANDOM32_SELFTEST is not set
++CONFIG_CORDIC=m
++# CONFIG_DDR is not set
++
++CONFIG_CRYPTO_ZLIB=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_KEYS=y
++CONFIG_PERSISTENT_KEYRINGS=y
++CONFIG_BIG_KEYS=y
++CONFIG_TRUSTED_KEYS=m
++CONFIG_ENCRYPTED_KEYS=m
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++
++CONFIG_ATA_OVER_ETH=m
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++# CONFIG_BACKLIGHT_GENERIC is not set
++CONFIG_BACKLIGHT_PROGEAR=m
++
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_PLATFORM=m
++
++CONFIG_FAIR_GROUP_SCHED=y
++CONFIG_CFS_BANDWIDTH=y
++CONFIG_SCHED_OMIT_FRAME_POINTER=y
++CONFIG_RT_GROUP_SCHED=y
++CONFIG_SCHED_AUTOGROUP=y
++
++CONFIG_CPUSETS=y
++CONFIG_PROC_PID_CPUSET=y
++
++# CONFIG_CGROUP_DEBUG is not set
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CGROUP_FREEZER=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_MEMCG=y
++CONFIG_MEMCG_SWAP=y
++CONFIG_MEMCG_SWAP_ENABLED=y
++CONFIG_MEMCG_KMEM=y
++# CONFIG_CGROUP_HUGETLB is not set
++CONFIG_CGROUP_PERF=y
++CONFIG_CGROUP_NET_PRIO=m
++# CONFIG_CGROUP_NET_CLASSID is not set
++CONFIG_BLK_CGROUP=y
++
++# CONFIG_SYSFS_DEPRECATED is not set
++# CONFIG_SYSFS_DEPRECATED_V2 is not set
++
++CONFIG_PRINTK_TIME=y
++
++CONFIG_ENABLE_MUST_CHECK=y
++# CONFIG_ENABLE_WARN_DEPRECATED is not set
++
++CONFIG_KEXEC=y
++
++CONFIG_HWMON=y
++# CONFIG_HWMON_DEBUG_CHIP is not set
++CONFIG_THERMAL_HWMON=y
++# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
++# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
++CONFIG_THERMAL_GOV_FAIR_SHARE=y
++# CONFIG_THERMAL_GOV_USER_SPACE is not set
++CONFIG_THERMAL_GOV_STEP_WISE=y
++# CONFIG_THERMAL_EMULATION is not set
++
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++
++#
++# Bus devices
++#
++# CONFIG_OMAP_OCP2SCP is not set
++CONFIG_PROC_EVENTS=y
++
++CONFIG_IBMASR=m
++
++CONFIG_PM=y
++CONFIG_PM_STD_PARTITION=""
++# CONFIG_DPM_WATCHDOG is not set # revisit this in debug
++CONFIG_PM_TRACE=y
++CONFIG_PM_TRACE_RTC=y
++# CONFIG_PM_OPP is not set
++# CONFIG_PM_AUTOSLEEP is not set
++# CONFIG_PM_WAKELOCKS is not set
++CONFIG_HIBERNATION=y
++# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
++CONFIG_SUSPEND=y
++
++CONFIG_CPU_FREQ_TABLE=y
++CONFIG_CPU_FREQ_STAT=m
++CONFIG_CPU_FREQ_STAT_DETAILS=y
++
++
++CONFIG_NET_VENDOR_SMC=y
++# CONFIG_IBMTR is not set
++# CONFIG_SKISA is not set
++# CONFIG_PROTEON is not set
++# CONFIG_SMCTR is not set
++
++# CONFIG_MOUSE_ATIXL is not set
++
++# CONFIG_MEDIA_PARPORT_SUPPORT is not set
++
++CONFIG_RADIO_TEA5764=m
++CONFIG_RADIO_SAA7706H=m
++CONFIG_RADIO_CADET=m
++CONFIG_RADIO_RTRACK=m
++CONFIG_RADIO_RTRACK2=m
++CONFIG_RADIO_AZTECH=m
++CONFIG_RADIO_GEMTEK=m
++CONFIG_RADIO_SF16FMI=m
++CONFIG_RADIO_SF16FMR2=m
++CONFIG_RADIO_TERRATEC=m
++CONFIG_RADIO_TRUST=m
++CONFIG_RADIO_TYPHOON=m
++CONFIG_RADIO_ZOLTRIX=m
++
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_INDIGOIOX=m
++CONFIG_SND_INDIGODJX=m
++
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_COMPACTION=y
++CONFIG_MIGRATION=y
++CONFIG_BOUNCE=y
++# CONFIG_LEDS_AMS_DELTA is not set
++# CONFIG_LEDS_LOCOMO is not set
++# CONFIG_LEDS_NET48XX is not set
++# CONFIG_LEDS_NET5501 is not set
++# CONFIG_LEDS_PCA9532 is not set
++# CONFIG_LEDS_PCA955X is not set
++# CONFIG_LEDS_BD2802 is not set
++# CONFIG_LEDS_S3C24XX is not set
++# CONFIG_LEDS_PCA9633 is not set
++CONFIG_LEDS_DELL_NETBOOKS=m
++# CONFIG_LEDS_TCA6507 is not set
++# CONFIG_LEDS_LM355x is not set
++# CONFIG_LEDS_OT200 is not set
++# CONFIG_LEDS_PWM is not set
++# CONFIG_LEDS_LP8501 is not set
++# CONFIG_LEDS_PCA963X is not set
++# CONFIG_LEDS_PCA9685 is not set
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_ONESHOT=m
++CONFIG_LEDS_TRIGGER_IDE_DISK=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
++CONFIG_LEDS_TRIGGER_BACKLIGHT=m
++# CONFIG_LEDS_TRIGGER_CPU is not set
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
++CONFIG_LEDS_TRIGGER_TRANSIENT=m
++CONFIG_LEDS_TRIGGER_CAMERA=m
++CONFIG_LEDS_ALIX2=m
++CONFIG_LEDS_CLEVO_MAIL=m
++CONFIG_LEDS_INTEL_SS4200=m
++CONFIG_LEDS_LM3530=m
++# CONFIG_LEDS_LM3642 is not set
++CONFIG_LEDS_LM3556=m
++CONFIG_LEDS_BLINKM=m
++CONFIG_LEDS_LP3944=m
++CONFIG_LEDS_LP5521=m
++CONFIG_LEDS_LP5523=m
++CONFIG_LEDS_LP5562=m
++CONFIG_LEDS_LT3593=m
++CONFIG_LEDS_REGULATOR=m
++CONFIG_LEDS_WM8350=m
++CONFIG_LEDS_WM831X_STATUS=m
++
++CONFIG_DMA_ENGINE=y
++CONFIG_DW_DMAC_CORE=m
++CONFIG_DW_DMAC=m
++CONFIG_DW_DMAC_PCI=m
++# CONFIG_DW_DMAC_BIG_ENDIAN_IO is not set
++# CONFIG_TIMB_DMA is not set
++# CONFIG_DMATEST is not set
++CONFIG_ASYNC_TX_DMA=y
++
++CONFIG_UNUSED_SYMBOLS=y
++
++CONFIG_UPROBE_EVENT=y
++
++CONFIG_DYNAMIC_FTRACE=y
++# CONFIG_IRQSOFF_TRACER is not set
++CONFIG_SCHED_TRACER=y
++CONFIG_CONTEXT_SWITCH_TRACER=y
++CONFIG_TRACER_SNAPSHOT=y
++# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_FTRACE_MCOUNT_RECORD=y
++# CONFIG_FTRACE_STARTUP_TEST is not set
++# CONFIG_TRACE_BRANCH_PROFILING is not set
++CONFIG_FUNCTION_PROFILER=y
++CONFIG_RING_BUFFER_BENCHMARK=m
++# CONFIG_RING_BUFFER_STARTUP_TEST is not set
++# CONFIG_RBTREE_TEST is not set
++# CONFIG_INTERVAL_TREE_TEST is not set
++CONFIG_FUNCTION_TRACER=y
++CONFIG_STACK_TRACER=y
++# CONFIG_FUNCTION_GRAPH_TRACER is not set
++
++CONFIG_KPROBES=y
++CONFIG_KPROBE_EVENT=y
++# CONFIG_KPROBES_SANITY_TEST is not set
++# CONFIG_JUMP_LABEL is not set
++CONFIG_OPTPROBES=y
++
++CONFIG_HZ_1000=y
++
++CONFIG_TIMER_STATS=y
++CONFIG_PERF_COUNTERS=y
++
++# Auxiliary displays
++CONFIG_KS0108=m
++CONFIG_KS0108_PORT=0x378
++CONFIG_KS0108_DELAY=2
++CONFIG_CFAG12864B=y
++CONFIG_CFAG12864B_RATE=20
++
++# CONFIG_PHANTOM is not set
++
++# CONFIG_POWER_SUPPLY_DEBUG is not set
++
++# CONFIG_TEST_POWER is not set
++CONFIG_APM_POWER=m
++# CONFIG_GENERIC_ADC_BATTERY is not set
++# CONFIG_WM831X_POWER is not set
++
++# CONFIG_BATTERY_DS2760 is not set
++# CONFIG_BATTERY_DS2781 is not set
++# CONFIG_BATTERY_DS2782 is not set
++# CONFIG_BATTERY_SBS is not set
++# CONFIG_BATTERY_BQ20Z75 is not set
++# CONFIG_BATTERY_DS2780 is not set
++# CONFIG_BATTERY_BQ27x00 is not set
++# CONFIG_BATTERY_MAX17040 is not set
++# CONFIG_BATTERY_MAX17042 is not set
++# CONFIG_BATTERY_GOLDFISH is not set
++
++# CONFIG_CHARGER_ISP1704 is not set
++# CONFIG_CHARGER_MAX8903 is not set
++# CONFIG_CHARGER_LP8727 is not set
++# CONFIG_CHARGER_GPIO is not set
++# CONFIG_CHARGER_PCF50633 is not set
++# CONFIG_CHARGER_BQ2415X is not set
++# CONFIG_CHARGER_BQ24190 is not set
++# CONFIG_CHARGER_BQ24735 is not set
++CONFIG_POWER_RESET=y
++
++# CONFIG_PDA_POWER is not set
++
++CONFIG_AUXDISPLAY=y
++
++CONFIG_UIO=m
++CONFIG_UIO_CIF=m
++# CONFIG_UIO_PDRV is not set
++# CONFIG_UIO_PDRV_GENIRQ is not set
++# CONFIG_UIO_DMEM_GENIRQ is not set
++CONFIG_UIO_AEC=m
++CONFIG_UIO_SERCOS3=m
++CONFIG_UIO_PCI_GENERIC=m
++# CONFIG_UIO_NETX is not set
++# CONFIG_UIO_MF624 is not set
++
++CONFIG_VFIO=m
++CONFIG_VFIO_IOMMU_TYPE1=m
++CONFIG_VFIO_PCI=m
++
++
++# LIRC
++CONFIG_LIRC_STAGING=y
++CONFIG_LIRC_BT829=m
++CONFIG_LIRC_IGORPLUGUSB=m
++CONFIG_LIRC_IMON=m
++CONFIG_LIRC_ZILOG=m
++CONFIG_LIRC_PARALLEL=m
++CONFIG_LIRC_SERIAL=m
++CONFIG_LIRC_SERIAL_TRANSMITTER=y
++CONFIG_LIRC_SASEM=m
++CONFIG_LIRC_SIR=m
++CONFIG_LIRC_TTUSBIR=m
++
++# CONFIG_SAMPLES is not set
++
++
++CONFIG_NOZOMI=m
++# CONFIG_TPS65010 is not set
++
++CONFIG_INPUT_APANEL=m
++CONFIG_INPUT_GP2A=m
++# CONFIG_INPUT_GPIO_TILT_POLLED is not set
++# CONFIG_INPUT_GPIO_BEEPER is not set
++
++# CONFIG_INTEL_MENLOW is not set
++CONFIG_ENCLOSURE_SERVICES=m
++CONFIG_IPWIRELESS=m
++
++# CONFIG_BLK_DEV_XIP is not set
++CONFIG_MEMSTICK=m
++# CONFIG_MEMSTICK_DEBUG is not set
++# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
++CONFIG_MSPRO_BLOCK=m
++# CONFIG_MS_BLOCK is not set
++CONFIG_MEMSTICK_TIFM_MS=m
++CONFIG_MEMSTICK_JMICRON_38X=m
++CONFIG_MEMSTICK_R592=m
++CONFIG_MEMSTICK_REALTEK_PCI=m
++
++CONFIG_ACCESSIBILITY=y
++CONFIG_A11Y_BRAILLE_CONSOLE=y
++
++# CONFIG_HTC_PASIC3 is not set
++
++# CONFIG_MT9V022_PCA9536_SWITCH is not set
++
++CONFIG_OPTIMIZE_INLINING=y
++
++# FIXME: This should be x86/ia64 only
++# CONFIG_HP_ILO is not set
++
++CONFIG_GPIOLIB=y
++# CONFIG_PINCTRL is not set
++# CONFIG_DEBUG_PINCTRL is not set
++# CONFIG_PINMUX is not set
++# CONFIG_PINCONF is not set
++
++CONFIG_NET_DSA=m
++CONFIG_NET_DSA_MV88E6060=m
++CONFIG_NET_DSA_MV88E6131=m
++CONFIG_NET_DSA_MV88E6123_61_65=m
++
++# Used by Maemo, we don't care.
++# CONFIG_PHONET is not set
++
++# CONFIG_ICS932S401 is not set
++# CONFIG_ATMEL_SSC is not set
++
++# CONFIG_C2PORT is not set
++
++# CONFIG_REGULATOR_DEBUG is not set
++
++CONFIG_WM8350_POWER=m
++
++# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
++
++CONFIG_USB_WUSB=m
++CONFIG_USB_WUSB_CBAF=m
++# CONFIG_USB_WUSB_CBAF_DEBUG is not set
++CONFIG_USB_WHCI_HCD=m
++CONFIG_USB_HWA_HCD=m
++# CONFIG_USB_HCD_BCMA is not set
++# CONFIG_USB_HCD_SSB is not set
++
++CONFIG_UWB=m
++CONFIG_UWB_HWA=m
++CONFIG_UWB_WHCI=m
++CONFIG_UWB_I1480U=m
++
++# CONFIG_ANDROID is not set
++CONFIG_STAGING_MEDIA=y
++# CONFIG_DVB_AS102 is not set
++# CONFIG_ET131X is not set
++# CONFIG_SLICOSS is not set
++# CONFIG_WLAGS49_H2 is not set
++# CONFIG_WLAGS49_H25 is not set
++# CONFIG_VIDEO_DT3155 is not set
++# CONFIG_TI_ST is not set
++# CONFIG_FB_XGI is not set
++# CONFIG_VIDEO_GO7007 is not set
++# CONFIG_I2C_BCM2048 is not set
++# CONFIG_VIDEO_TCM825X is not set
++# CONFIG_VIDEO_OMAP4 is not set
++# CONFIG_USB_MSI3101 is not set
++# CONFIG_DT3155 is not set
++# CONFIG_W35UND is not set
++# CONFIG_PRISM2_USB is not set
++# CONFIG_ECHO is not set
++CONFIG_USB_ATMEL=m
++# CONFIG_COMEDI is not set
++# CONFIG_ASUS_OLED is not set
++# CONFIG_PANEL is not set
++# CONFIG_TRANZPORT is not set
++# CONFIG_POHMELFS is not set
++# CONFIG_IDE_PHISON is not set
++# CONFIG_LINE6_USB is not set
++# CONFIG_VME_BUS is not set
++# CONFIG_RAR_REGISTER is not set
++# CONFIG_VT6656 is not set
++# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
++# Larry Finger maintains these (rhbz 913753)
++CONFIG_RTLLIB=m
++CONFIG_RTLLIB_CRYPTO_CCMP=m
++CONFIG_RTLLIB_CRYPTO_TKIP=m
++CONFIG_RTLLIB_CRYPTO_WEP=m
++CONFIG_RTL8192E=m
++# CONFIG_INPUT_GPIO is not set
++# CONFIG_VIDEO_CX25821 is not set
++# CONFIG_R8187SE is not set
++# CONFIG_R8188EU is not set
++# CONFIG_R8821AE is not set
++# CONFIG_RTL8192U is not set
++# CONFIG_FB_SM7XX is not set
++# CONFIG_SPECTRA is not set
++# CONFIG_EASYCAP is not set
++# CONFIG_SOLO6X10 is not set
++# CONFIG_ACPI_QUICKSTART is not set
++# CONFIG_LTE_GDM724X is not set
++# Larry Finger maintains this (rhbz 699618)
++CONFIG_R8712U=m
++# CONFIG_R8712_AP is not set
++# CONFIG_ATH6K_LEGACY is not set
++# CONFIG_USB_ENESTORAGE is not set
++# CONFIG_BCM_WIMAX is not set
++# CONFIG_USB_BTMTK is not set
++# CONFIG_FT1000 is not set
++# CONFIG_SPEAKUP is not set
++# CONFIG_DX_SEP is not set
++# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
++# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
++# CONFIG_RTS_PSTOR is not set
++CONFIG_ALTERA_STAPL=m
++# CONFIG_DVB_CXD2099 is not set
++# CONFIG_USBIP_CORE is not set
++# CONFIG_INTEL_MEI is not set
++# CONFIG_ZCACHE is not set
++# CONFIG_RTS5139 is not set
++# CONFIG_NVEC_LEDS is not set
++# CONFIG_VT6655 is not set
++# CONFIG_RAMSTER is not set
++# CONFIG_USB_WPAN_HCD is not set
++# CONFIG_WIMAX_GDM72XX is not set
++# CONFIG_IPACK_BUS is not set
++# CONFIG_CSR_WIFI is not set
++# CONFIG_ZCACHE2 is not set
++# CONFIG_NET_VENDOR_SILICOM is not set
++# CONFIG_SBYPASS is not set
++# CONFIG_BPCTL is not set
++# CONFIG_CED1401 is not set
++# CONFIG_DGRP is not set
++# CONFIG_SB105X is not set
++# CONFIG_LUSTRE_FS is not set
++# CONFIG_XILLYBUS is not set
++# CONFIG_DGAP is not set
++# CONFIG_DGNC is not set
++# CONFIG_RTS5208 is not set
++# END OF STAGING
++
++#
++# Remoteproc drivers (EXPERIMENTAL)
++#
++# CONFIG_STE_MODEM_RPROC is not set
++
++CONFIG_LIBFC=m
++CONFIG_LIBFCOE=m
++CONFIG_FCOE=m
++CONFIG_FCOE_FNIC=m
++
++
++# CONFIG_IMA is not set
++CONFIG_IMA_MEASURE_PCR_IDX=10
++CONFIG_IMA_AUDIT=y
++CONFIG_IMA_LSM_RULES=y
++
++# CONFIG_EVM is not set
++# CONFIG_PWM_PCA9685 is not set
++
++CONFIG_LSM_MMAP_MIN_ADDR=65536
++
++CONFIG_STRIP_ASM_SYMS=y
++
++# CONFIG_RCU_FANOUT_EXACT is not set
++# FIXME: Revisit FAST_NO_HZ after it's fixed
++# CONFIG_RCU_FAST_NO_HZ is not set
++# CONFIG_RCU_NOCB_CPU is not set
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_RCU_TRACE is not set
++# CONFIG_RCU_CPU_STALL_INFO is not set
++# CONFIG_RCU_USER_QS is not set
++
++CONFIG_KSM=y
++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
++
++CONFIG_FSNOTIFY=y
++CONFIG_FANOTIFY=y
++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
++
++CONFIG_IEEE802154=m
++CONFIG_IEEE802154_6LOWPAN=m
++CONFIG_IEEE802154_DRIVERS=m
++CONFIG_IEEE802154_FAKEHARD=m
++CONFIG_IEEE802154_FAKELB=m
++
++CONFIG_MAC802154=m
++CONFIG_NET_MPLS_GSO=m
++
++# CONFIG_HSR is not set
++
++# CONFIG_EXTCON is not set
++# CONFIG_EXTCON_ADC_JACK is not set
++# CONFIG_MEMORY is not set
++
++CONFIG_PPS=m
++# CONFIG_PPS_CLIENT_KTIMER is not set
++CONFIG_PPS_CLIENT_LDISC=m
++# CONFIG_PPS_DEBUG is not set
++CONFIG_PPS_CLIENT_PARPORT=m
++CONFIG_PPS_GENERATOR_PARPORT=m
++CONFIG_PPS_CLIENT_GPIO=m
++CONFIG_NTP_PPS=y
++
++CONFIG_PTP_1588_CLOCK=m
++CONFIG_PTP_1588_CLOCK_PCH=m
++
++CONFIG_CLEANCACHE=y
++CONFIG_FRONTSWAP=y
++CONFIG_ZSWAP=y
++CONFIG_ZSMALLOC=y
++# CONFIG_PGTABLE_MAPPING is not set
++
++# CONFIG_MDIO_GPIO is not set
++# CONFIG_KEYBOARD_GPIO_POLLED is not set
++# CONFIG_MOUSE_GPIO is not set
++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
++# CONFIG_I2C_DESIGNWARE_PCI is not set
++# CONFIG_I2C_GPIO is not set
++# CONFIG_DEBUG_GPIO is not set
++# CONFIG_GPIO_GENERIC_PLATFORM is not set
++# CONFIG_GPIO_CS5535 is not set
++# CONFIG_GPIO_IT8761E is not set
++# CONFIG_SB105X is not set
++# CONFIG_GPIO_TS5500 is not set
++CONFIG_GPIO_VIPERBOARD=m
++# CONFIG_UCB1400_CORE is not set
++# CONFIG_TPS6105X is not set
++# CONFIG_RADIO_MIROPCM20 is not set
++# CONFIG_USB_GPIO_VBUS is not set
++# CONFIG_GPIO_SCH is not set
++# CONFIG_GPIO_LANGWELL is not set
++# CONFIG_GPIO_RDC321X is not set
++# CONFIG_GPIO_VX855 is not set
++# CONFIG_GPIO_PCH is not set
++# CONFIG_GPIO_ML_IOH is not set
++# CONFIG_GPIO_AMD8111 is not set
++# CONFIG_GPIO_BT8XX is not set
++# CONFIG_GPIO_GRGPIO is not set
++# CONFIG_GPIO_PL061 is not set
++# CONFIG_GPIO_BCM_KONA is not set
++# CONFIG_GPIO_SCH311X is not set
++CONFIG_GPIO_MAX730X=m
++CONFIG_GPIO_MAX7300=m
++CONFIG_GPIO_MAX732X=m
++CONFIG_GPIO_PCF857X=m
++CONFIG_GPIO_SX150X=y
++CONFIG_GPIO_ADP5588=m
++CONFIG_GPIO_ADNP=m
++CONFIG_GPIO_MAX7301=m
++CONFIG_GPIO_MCP23S08=m
++CONFIG_GPIO_MC33880=m
++CONFIG_GPIO_74X164=m
++
++# FIXME: Why?
++CONFIG_EVENT_POWER_TRACING_DEPRECATED=y
++
++CONFIG_TEST_KSTRTOX=y
++CONFIG_XZ_DEC=y
++CONFIG_XZ_DEC_X86=y
++CONFIG_XZ_DEC_POWERPC=y
++# CONFIG_XZ_DEC_IA64 is not set
++CONFIG_XZ_DEC_ARM=y
++# CONFIG_XZ_DEC_ARMTHUMB is not set
++# CONFIG_XZ_DEC_SPARC is not set
++# CONFIG_XZ_DEC_TEST is not set
++
++# CONFIG_POWER_AVS is not set
++
++CONFIG_TARGET_CORE=m
++CONFIG_ISCSI_TARGET=m
++CONFIG_LOOPBACK_TARGET=m
++CONFIG_SBP_TARGET=m
++CONFIG_TCM_IBLOCK=m
++CONFIG_TCM_FILEIO=m
++CONFIG_TCM_PSCSI=m
++CONFIG_TCM_FC=m
++
++CONFIG_HWSPINLOCK=m
++
++CONFIG_PSTORE=y
++CONFIG_PSTORE_RAM=m
++# CONFIG_PSTORE_CONSOLE is not set
++# CONFIG_PSTORE_FTRACE is not set
++
++# CONFIG_TEST_MODULE is not set
++# CONFIG_TEST_USER_COPY is not set
++
++# CONFIG_AVERAGE is not set
++# CONFIG_VMXNET3 is not set
++
++# CONFIG_SIGMA is not set
++
++CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
++
++CONFIG_BCMA=m
++CONFIG_BCMA_BLOCKIO=y
++CONFIG_BCMA_HOST_PCI_POSSIBLE=y
++CONFIG_BCMA_HOST_PCI=y
++# CONFIG_BCMA_HOST_SOC is not set
++CONFIG_BCMA_DRIVER_GMAC_CMN=y
++CONFIG_BCMA_DRIVER_GPIO=y
++# CONFIG_BCMA_DEBUG is not set
++
++# CONFIG_GOOGLE_FIRMWARE is not set
++# CONFIG_INTEL_MID_PTI is not set
++
++# CONFIG_MAILBOX is not set
++
++CONFIG_FMC=m
++CONFIG_FMC_FAKEDEV=m
++CONFIG_FMC_TRIVIAL=m
++CONFIG_FMC_WRITE_EEPROM=m
++CONFIG_FMC_CHARDEV=m
++
++# CONFIG_GENWQE is not set
++
++# CONFIG_POWERCAP is not set
++
++# CONFIG_HSI is not set
++
++
++# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
++
++# CONFIG_PM_DEVFREQ is not set
++# CONFIG_MODULE_SIG is not set
++# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
++# CONFIG_MODULE_VERIFY_ELF is not set
++# CONFIG_CRYPTO_KEY_TYPE is not set
++# CONFIG_PGP_LIBRARY is not set
++# CONFIG_PGP_PRELOAD is not set
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_PROC_DEVICETREE=y
++
+diff -Nur linux-3.14.40.orig/arch/arm/configs/imx_v7_defconfig linux-3.14.40/arch/arm/configs/imx_v7_defconfig
+--- linux-3.14.40.orig/arch/arm/configs/imx_v7_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/configs/imx_v7_defconfig 2015-05-01 14:57:57.591427001 -0500
+@@ -0,0 +1,343 @@
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_KERNEL_LZO=y
++CONFIG_SYSVIPC=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_MACH_IMX51_DT=y
++CONFIG_MACH_EUKREA_CPUIMX51SD=y
++CONFIG_SOC_IMX53=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++CONFIG_SOC_VF610=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_PREEMPT=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_HIGHMEM=y
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++# CONFIG_WIRELESS is not set
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=320
++CONFIG_IMX_WEIM=y
++CONFIG_CONNECTOR=y
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_CFI_STAA=y
++CONFIG_MTD_PHYSMAP_OF=y
++CONFIG_MTD_DATAFLASH=y
++CONFIG_MTD_M25P80=y
++CONFIG_MTD_SST25L=y
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_GPMI_NAND=y
++CONFIG_MTD_NAND_MXC=y
++CONFIG_MTD_UBI=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_EEPROM_AT24=y
++CONFIG_EEPROM_AT25=y
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_PATA_IMX=y
++CONFIG_NETDEVICES=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++CONFIG_CS89x0=y
++CONFIG_CS89x0_PLATFORM=y
++# CONFIG_NET_VENDOR_FARADAY is not set
++# CONFIG_NET_VENDOR_INTEL is not set
++# CONFIG_NET_VENDOR_MARVELL is not set
++# CONFIG_NET_VENDOR_MICREL is not set
++# CONFIG_NET_VENDOR_MICROCHIP is not set
++# CONFIG_NET_VENDOR_NATSEMI is not set
++# CONFIG_NET_VENDOR_SEEQ is not set
++CONFIG_SMC91X=y
++CONFIG_SMC911X=y
++CONFIG_SMSC911X=y
++# CONFIG_NET_VENDOR_STMICRO is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_EVDEV=y
++CONFIG_INPUT_EVBUG=m
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++CONFIG_MOUSE_PS2=m
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_EGALAX=y
++CONFIG_TOUCHSCREEN_EGALAX_SINGLE_TOUCH=y
++CONFIG_TOUCHSCREEN_MAX11801=y
++CONFIG_TOUCHSCREEN_MC13783=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_MMA8450=y
++CONFIG_INPUT_ISL29023=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_SABRESD_MAX8903=y
++CONFIG_IMX6_USB_CHARGER=y
++CONFIG_SENSORS_MAG3110=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_DA9052=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_MC13783=y
++CONFIG_REGULATOR_MC13892=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_RADIO_SUPPORT=y
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_VIDEO_MX3=y
++CONFIG_RADIO_SI476X=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++CONFIG_FB_MXS=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_LCD_L4F00242T03=y
++CONFIG_LCD_PLATFORM=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_MIPI_DSI=y
++CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_EUKREA_TLV320=y
++CONFIG_SND_SOC_IMX_CS42888=y
++CONFIG_SND_SOC_IMX_WM8962=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_MC13783=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_SND_SOC_IMX_SI476X=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_MXC_MLB150=m
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++CONFIG_RTC_DRV_MC13XXX=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_IMX=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++CONFIG_CRYPTO_XTS=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRYPTO_RMD128=y
++CONFIG_CRYPTO_RMD160=y
++CONFIG_CRYPTO_RMD256=y
++CONFIG_CRYPTO_RMD320=y
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=y
++CONFIG_CRYPTO_SHA512=y
++CONFIG_CRYPTO_TGR192=y
++CONFIG_CRYPTO_WP512=y
++CONFIG_CRYPTO_BLOWFISH=y
++CONFIG_CRYPTO_CAMELLIA=y
++CONFIG_CRYPTO_DES=y
++CONFIG_CRYPTO_TWOFISH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
+diff -Nur linux-3.14.40.orig/arch/arm/configs/imx_v7_mfg_defconfig linux-3.14.40/arch/arm/configs/imx_v7_mfg_defconfig
+--- linux-3.14.40.orig/arch/arm/configs/imx_v7_mfg_defconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/configs/imx_v7_mfg_defconfig 2015-05-01 14:57:57.591427001 -0500
+@@ -0,0 +1,341 @@
++CONFIG_KERNEL_LZO=y
++CONFIG_SYSVIPC=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=18
++CONFIG_CGROUPS=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++# CONFIG_SLUB_DEBUG is not set
++# CONFIG_COMPAT_BRK is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_GPIO_PCA953X=y
++CONFIG_ARCH_MXC=y
++CONFIG_MXC_DEBUG_BOARD=y
++CONFIG_MACH_IMX51_DT=y
++CONFIG_MACH_EUKREA_CPUIMX51SD=y
++CONFIG_SOC_IMX53=y
++CONFIG_SOC_IMX6Q=y
++CONFIG_SOC_IMX6SL=y
++CONFIG_SOC_VF610=y
++# CONFIG_SWP_EMULATE is not set
++CONFIG_SMP=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_GOV_POWERSAVE=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
++CONFIG_ARM_IMX6_CPUFREQ=y
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_BINFMT_MISC=m
++CONFIG_PM_RUNTIME=y
++CONFIG_PM_DEBUG=y
++CONFIG_PM_TEST_SUSPEND=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++CONFIG_IPV6=y
++CONFIG_NETFILTER=y
++CONFIG_VLAN_8021Q=y
++CONFIG_CFG80211=y
++CONFIG_CFG80211_WEXT=y
++CONFIG_MAC80211=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_STANDALONE is not set
++CONFIG_CMA=y
++CONFIG_CMA_SIZE_MBYTES=320
++CONFIG_IMX_WEIM=y
++CONFIG_CONNECTOR=y
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_JEDECPROBE=y
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_CFI_STAA=y
++CONFIG_MTD_PHYSMAP_OF=y
++CONFIG_MTD_DATAFLASH=y
++CONFIG_MTD_M25P80=y
++CONFIG_MTD_SST25L=y
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_GPMI_NAND=y
++CONFIG_MTD_NAND_MXC=y
++CONFIG_MTD_UBI=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_EEPROM_AT24=y
++CONFIG_EEPROM_AT25=y
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++CONFIG_SCSI_SCAN_ASYNC=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_AHCI_IMX=y
++CONFIG_PATA_IMX=y
++CONFIG_NETDEVICES=y
++# CONFIG_NET_VENDOR_BROADCOM is not set
++CONFIG_CS89x0=y
++CONFIG_CS89x0_PLATFORM=y
++# CONFIG_NET_VENDOR_FARADAY is not set
++# CONFIG_NET_VENDOR_INTEL is not set
++# CONFIG_NET_VENDOR_MARVELL is not set
++# CONFIG_NET_VENDOR_MICREL is not set
++# CONFIG_NET_VENDOR_MICROCHIP is not set
++# CONFIG_NET_VENDOR_NATSEMI is not set
++# CONFIG_NET_VENDOR_SEEQ is not set
++CONFIG_SMC91X=y
++CONFIG_SMC911X=y
++CONFIG_SMSC911X=y
++# CONFIG_NET_VENDOR_STMICRO is not set
++CONFIG_ATH_CARDS=y
++CONFIG_ATH6KL=m
++CONFIG_ATH6KL_SDIO=m
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_EVDEV=y
++CONFIG_INPUT_EVBUG=m
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_IMX=y
++CONFIG_MOUSE_PS2=m
++CONFIG_MOUSE_PS2_ELANTECH=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_EGALAX=y
++CONFIG_TOUCHSCREEN_ELAN=y
++CONFIG_TOUCHSCREEN_MAX11801=y
++CONFIG_TOUCHSCREEN_MC13783=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_MMA8450=y
++CONFIG_INPUT_ISL29023=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_IMX=y
++CONFIG_SERIAL_IMX_CONSOLE=y
++CONFIG_SERIAL_FSL_LPUART=y
++CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
++CONFIG_FSL_OTP=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++# CONFIG_I2C_HELPER_AUTO is not set
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++CONFIG_I2C_IMX=y
++CONFIG_SPI=y
++CONFIG_SPI_IMX=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_SABRESD_MAX8903=y
++CONFIG_SENSORS_MAX17135=y
++CONFIG_SENSORS_MAG3110=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_IMX_THERMAL=y
++CONFIG_DEVICE_THERMAL=y
++CONFIG_WATCHDOG=y
++CONFIG_IMX2_WDT=y
++CONFIG_MFD_DA9052_I2C=y
++CONFIG_MFD_MC13XXX_SPI=y
++CONFIG_MFD_MC13XXX_I2C=y
++CONFIG_MFD_MAX17135=y
++CONFIG_MFD_SI476X_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_DA9052=y
++CONFIG_REGULATOR_ANATOP=y
++CONFIG_REGULATOR_MC13783=y
++CONFIG_REGULATOR_MC13892=y
++CONFIG_REGULATOR_MAX17135=y
++CONFIG_REGULATOR_PFUZE100=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_RADIO_SUPPORT=y
++CONFIG_VIDEO_V4L2_INT_DEVICE=y
++CONFIG_MEDIA_USB_SUPPORT=y
++CONFIG_USB_VIDEO_CLASS=m
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_MXC_OUTPUT=y
++CONFIG_VIDEO_MXC_CAPTURE=m
++CONFIG_VIDEO_MXC_CSI_CAMERA=m
++CONFIG_MXC_CAMERA_OV5640=m
++CONFIG_MXC_CAMERA_OV5642=m
++CONFIG_MXC_CAMERA_OV5640_MIPI=m
++CONFIG_MXC_TVIN_ADV7180=m
++CONFIG_MXC_IPU_DEVICE_QUEUE_SDC=m
++CONFIG_VIDEO_MXC_IPU_OUTPUT=y
++CONFIG_VIDEO_MXC_PXP_V4L2=y
++CONFIG_SOC_CAMERA=y
++CONFIG_VIDEO_MX3=y
++CONFIG_RADIO_SI476X=y
++CONFIG_SOC_CAMERA_OV2640=y
++CONFIG_DRM=y
++CONFIG_DRM_VIVANTE=y
++CONFIG_FB=y
++CONFIG_FB_MXS=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_LCD_L4F00242T03=y
++CONFIG_LCD_PLATFORM=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB_MXC_SYNC_PANEL=y
++CONFIG_FB_MXC_LDB=y
++CONFIG_FB_MXC_MIPI_DSI=y
++CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL=y
++CONFIG_FB_MXC_HDMI=y
++CONFIG_FB_MXC_EINK_PANEL=y
++CONFIG_FB_MXS_SII902X=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=y
++CONFIG_SND_IMX_SOC=y
++CONFIG_SND_SOC_EUKREA_TLV320=y
++CONFIG_SND_SOC_IMX_CS42888=y
++CONFIG_SND_SOC_IMX_WM8962=y
++CONFIG_SND_SOC_IMX_SGTL5000=y
++CONFIG_SND_SOC_IMX_SPDIF=y
++CONFIG_SND_SOC_IMX_MC13783=y
++CONFIG_SND_SOC_IMX_HDMI=y
++CONFIG_SND_SOC_IMX_SI476X=y
++CONFIG_USB=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_CHIPIDEA=y
++CONFIG_USB_CHIPIDEA_UDC=y
++CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_PHY=y
++CONFIG_USB_MXS_PHY=y
++CONFIG_USB_GADGET=y
++# CONFIG_USB_ZERO is not set
++# CONFIG_USB_AUDIO is not set
++# CONFIG_USB_ETH is not set
++# CONFIG_USB_G_NCM is not set
++# CONFIG_USB_GADGETFS is not set
++# CONFIG_USB_FUNCTIONFS is not set
++CONFIG_USB_MASS_STORAGE=y
++CONFIG_FSL_UTP=y
++# CONFIG_USB_G_SERIAL is not set
++# CONFIG_USB_MIDI_GADGET is not set
++# CONFIG_USB_G_PRINTER is not set
++# CONFIG_USB_CDC_COMPOSITE is not set
++# CONFIG_USB_G_ACM_MS is not set
++# CONFIG_USB_G_MULTI is not set
++# CONFIG_USB_G_HID is not set
++# CONFIG_USB_G_DBGP is not set
++# CONFIG_USB_G_WEBCAM is not set
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_ESDHC_IMX=y
++CONFIG_MXC_IPU=y
++CONFIG_MXC_GPU_VIV=y
++CONFIG_MXC_ASRC=y
++CONFIG_MXC_MIPI_CSI2=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++CONFIG_RTC_DRV_MC13XXX=y
++CONFIG_RTC_DRV_MXC=y
++CONFIG_RTC_DRV_SNVS=y
++CONFIG_DMADEVICES=y
++CONFIG_MXC_PXP_V2=y
++CONFIG_IMX_SDMA=y
++CONFIG_MXS_DMA=y
++CONFIG_STAGING=y
++CONFIG_COMMON_CLK_DEBUG=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_PWM=y
++CONFIG_PWM_IMX=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_EXT4_FS=y
++CONFIG_EXT4_FS_POSIX_ACL=y
++CONFIG_EXT4_FS_SECURITY=y
++CONFIG_QUOTA=y
++CONFIG_QUOTA_NETLINK_INTERFACE=y
++# CONFIG_PRINT_QUOTA_WARNING is not set
++CONFIG_AUTOFS4_FS=y
++CONFIG_FUSE_FS=y
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_UDF_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_UTF8=y
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_SCHED_DEBUG is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_FTRACE is not set
++CONFIG_SECURITYFS=y
++CONFIG_CRYPTO_USER=y
++CONFIG_CRYPTO_CCM=y
++CONFIG_CRYPTO_GCM=y
++CONFIG_CRYPTO_CBC=y
++CONFIG_CRYPTO_CTS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_LRW=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRYPTO_DEV_FSL_CAAM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST=y
++CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO=y
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC7=m
++CONFIG_LIBCRC32C=m
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/arch_timer.h linux-3.14.40/arch/arm/include/asm/arch_timer.h
+--- linux-3.14.40.orig/arch/arm/include/asm/arch_timer.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/arch_timer.h 2015-05-01 14:57:57.599427001 -0500
+@@ -107,7 +107,6 @@
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+- | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ arch_timer_set_cntkctl(cntkctl);
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/atomic.h linux-3.14.40/arch/arm/include/asm/atomic.h
+--- linux-3.14.40.orig/arch/arm/include/asm/atomic.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/atomic.h 2015-05-01 14:57:57.611427001 -0500
+@@ -60,6 +60,7 @@
+ int result;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic_add_return\n"
+ "1: ldrex %0, [%3]\n"
+@@ -99,6 +100,7 @@
+ int result;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic_sub_return\n"
+ "1: ldrex %0, [%3]\n"
+@@ -121,6 +123,7 @@
+ unsigned long res;
+
+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg\n"
+@@ -138,6 +141,33 @@
+ return oldval;
+ }
+
++static inline int __atomic_add_unless(atomic_t *v, int a, int u)
++{
++ int oldval, newval;
++ unsigned long tmp;
++
++ smp_mb();
++ prefetchw(&v->counter);
++
++ __asm__ __volatile__ ("@ atomic_add_unless\n"
++"1: ldrex %0, [%4]\n"
++" teq %0, %5\n"
++" beq 2f\n"
++" add %1, %0, %6\n"
++" strex %2, %1, [%4]\n"
++" teq %2, #0\n"
++" bne 1b\n"
++"2:"
++ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (u), "r" (a)
++ : "cc");
++
++ if (oldval != u)
++ smp_mb();
++
++ return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+
+ #ifdef CONFIG_SMP
+@@ -186,10 +216,6 @@
+ return ret;
+ }
+
+-#endif /* __LINUX_ARM_ARCH__ */
+-
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+-
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+@@ -200,6 +226,10 @@
+ return c;
+ }
+
++#endif /* __LINUX_ARM_ARCH__ */
++
++#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++
+ #define atomic_inc(v) atomic_add(1, v)
+ #define atomic_dec(v) atomic_sub(1, v)
+
+@@ -299,6 +329,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_add_return\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -340,6 +371,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_sub_return\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -364,6 +396,7 @@
+ unsigned long res;
+
+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg\n"
+@@ -388,6 +421,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ __asm__ __volatile__("@ atomic64_xchg\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -409,6 +443,7 @@
+ unsigned long tmp;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+@@ -436,6 +471,7 @@
+ int ret = 1;
+
+ smp_mb();
++ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_add_unless\n"
+ "1: ldrexd %0, %H0, [%4]\n"
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/cmpxchg.h linux-3.14.40/arch/arm/include/asm/cmpxchg.h
+--- linux-3.14.40.orig/arch/arm/include/asm/cmpxchg.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/cmpxchg.h 2015-05-01 14:57:57.611427001 -0500
+@@ -2,6 +2,7 @@
+ #define __ASM_ARM_CMPXCHG_H
+
+ #include <linux/irqflags.h>
++#include <linux/prefetch.h>
+ #include <asm/barrier.h>
+
+ #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+@@ -35,6 +36,7 @@
+ #endif
+
+ smp_mb();
++ prefetchw((const void *)ptr);
+
+ switch (size) {
+ #if __LINUX_ARM_ARCH__ >= 6
+@@ -138,6 +140,8 @@
+ {
+ unsigned long oldval, res;
+
++ prefetchw((const void *)ptr);
++
+ switch (size) {
+ #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
+ case 1:
+@@ -230,6 +234,8 @@
+ unsigned long long oldval;
+ unsigned long res;
+
++ prefetchw(ptr);
++
+ __asm__ __volatile__(
+ "1: ldrexd %1, %H1, [%3]\n"
+ " teq %1, %4\n"
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/ftrace.h linux-3.14.40/arch/arm/include/asm/ftrace.h
+--- linux-3.14.40.orig/arch/arm/include/asm/ftrace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/ftrace.h 2015-05-01 14:57:57.639427001 -0500
+@@ -52,15 +52,7 @@
+
+ #endif
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 ((unsigned long)return_address(1))
+-#define CALLER_ADDR2 ((unsigned long)return_address(2))
+-#define CALLER_ADDR3 ((unsigned long)return_address(3))
+-#define CALLER_ADDR4 ((unsigned long)return_address(4))
+-#define CALLER_ADDR5 ((unsigned long)return_address(5))
+-#define CALLER_ADDR6 ((unsigned long)return_address(6))
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* ifndef __ASSEMBLY__ */
+
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/futex.h linux-3.14.40/arch/arm/include/asm/futex.h
+--- linux-3.14.40.orig/arch/arm/include/asm/futex.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/futex.h 2015-05-01 14:57:57.643427001 -0500
+@@ -23,6 +23,7 @@
+
+ #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+ smp_mb(); \
++ prefetchw(uaddr); \
+ __asm__ __volatile__( \
+ "1: ldrex %1, [%3]\n" \
+ " " insn "\n" \
+@@ -46,6 +47,8 @@
+ return -EFAULT;
+
+ smp_mb();
++ /* Prefetching cannot fault */
++ prefetchw(uaddr);
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrex %1, [%4]\n"
+ " teq %1, %2\n"
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/glue-cache.h linux-3.14.40/arch/arm/include/asm/glue-cache.h
+--- linux-3.14.40.orig/arch/arm/include/asm/glue-cache.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/glue-cache.h 2015-05-01 14:57:57.659427001 -0500
+@@ -102,19 +102,19 @@
+ #endif
+
+ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+-# ifdef _CACHE
++//# ifdef _CACHE
+ # define MULTI_CACHE 1
+-# else
+-# define _CACHE v6
+-# endif
++//# else
++//# define _CACHE v6
++//# endif
+ #endif
+
+ #if defined(CONFIG_CPU_V7)
+-# ifdef _CACHE
++//# ifdef _CACHE
+ # define MULTI_CACHE 1
+-# else
+-# define _CACHE v7
+-# endif
++//# else
++//# define _CACHE v7
++//# endif
+ #endif
+
+ #if defined(CONFIG_CPU_V7M)
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/hardware/cache-l2x0.h linux-3.14.40/arch/arm/include/asm/hardware/cache-l2x0.h
+--- linux-3.14.40.orig/arch/arm/include/asm/hardware/cache-l2x0.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/hardware/cache-l2x0.h 2015-05-01 14:57:57.659427001 -0500
+@@ -26,8 +26,8 @@
+ #define L2X0_CACHE_TYPE 0x004
+ #define L2X0_CTRL 0x100
+ #define L2X0_AUX_CTRL 0x104
+-#define L2X0_TAG_LATENCY_CTRL 0x108
+-#define L2X0_DATA_LATENCY_CTRL 0x10C
++#define L310_TAG_LATENCY_CTRL 0x108
++#define L310_DATA_LATENCY_CTRL 0x10C
+ #define L2X0_EVENT_CNT_CTRL 0x200
+ #define L2X0_EVENT_CNT1_CFG 0x204
+ #define L2X0_EVENT_CNT0_CFG 0x208
+@@ -54,53 +54,93 @@
+ #define L2X0_LOCKDOWN_WAY_D_BASE 0x900
+ #define L2X0_LOCKDOWN_WAY_I_BASE 0x904
+ #define L2X0_LOCKDOWN_STRIDE 0x08
+-#define L2X0_ADDR_FILTER_START 0xC00
+-#define L2X0_ADDR_FILTER_END 0xC04
++#define L310_ADDR_FILTER_START 0xC00
++#define L310_ADDR_FILTER_END 0xC04
+ #define L2X0_TEST_OPERATION 0xF00
+ #define L2X0_LINE_DATA 0xF10
+ #define L2X0_LINE_TAG 0xF30
+ #define L2X0_DEBUG_CTRL 0xF40
+-#define L2X0_PREFETCH_CTRL 0xF60
+-#define L2X0_POWER_CTRL 0xF80
+-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
+-#define L2X0_STNDBY_MODE_EN (1 << 0)
++#define L310_PREFETCH_CTRL 0xF60
++#define L310_POWER_CTRL 0xF80
++#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
++#define L310_STNDBY_MODE_EN (1 << 0)
+
+ /* Registers shifts and masks */
+ #define L2X0_CACHE_ID_PART_MASK (0xf << 6)
+ #define L2X0_CACHE_ID_PART_L210 (1 << 6)
++#define L2X0_CACHE_ID_PART_L220 (2 << 6)
+ #define L2X0_CACHE_ID_PART_L310 (3 << 6)
+ #define L2X0_CACHE_ID_RTL_MASK 0x3f
+-#define L2X0_CACHE_ID_RTL_R0P0 0x0
+-#define L2X0_CACHE_ID_RTL_R1P0 0x2
+-#define L2X0_CACHE_ID_RTL_R2P0 0x4
+-#define L2X0_CACHE_ID_RTL_R3P0 0x5
+-#define L2X0_CACHE_ID_RTL_R3P1 0x6
+-#define L2X0_CACHE_ID_RTL_R3P2 0x8
+-
+-#define L2X0_AUX_CTRL_MASK 0xc0000fff
++#define L210_CACHE_ID_RTL_R0P2_02 0x00
++#define L210_CACHE_ID_RTL_R0P1 0x01
++#define L210_CACHE_ID_RTL_R0P2_01 0x02
++#define L210_CACHE_ID_RTL_R0P3 0x03
++#define L210_CACHE_ID_RTL_R0P4 0x0b
++#define L210_CACHE_ID_RTL_R0P5 0x0f
++#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
++#define L310_CACHE_ID_RTL_R0P0 0x00
++#define L310_CACHE_ID_RTL_R1P0 0x02
++#define L310_CACHE_ID_RTL_R2P0 0x04
++#define L310_CACHE_ID_RTL_R3P0 0x05
++#define L310_CACHE_ID_RTL_R3P1 0x06
++#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
++#define L310_CACHE_ID_RTL_R3P2 0x08
++#define L310_CACHE_ID_RTL_R3P3 0x09
++
++/* L2C auxiliary control register - bits common to L2C-210/220/310 */
++#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
++#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
++#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
++#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
++#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
++#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
++/* L2C-210/220 common bits */
+ #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
+-#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
++#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
+ #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
+-#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
++#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
+ #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
+-#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
++#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
+ #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
+-#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
+-#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
+-#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
+-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
+-#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
+-#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
+-#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
+-#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
+-#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
+-#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+-
+-#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
+-#define L2X0_LATENCY_CTRL_RD_SHIFT 4
+-#define L2X0_LATENCY_CTRL_WR_SHIFT 8
+-
+-#define L2X0_ADDR_FILTER_EN 1
++#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
++#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
++#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
++/* L2C-210 specific bits */
++#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
++#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
++#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
++/* L2C-220 specific bits */
++#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
++#define L220_AUX_CTRL_FWA_SHIFT 23
++#define L220_AUX_CTRL_FWA_MASK (3 << 23)
++#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
++#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
++/* L2C-310 specific bits */
++#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
++#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
++#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
++#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
++#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
++#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
++#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
++#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
++#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
++#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
++#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
++
++#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
++#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
++#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
++
++#define L310_ADDR_FILTER_EN 1
++
++#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
++#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
++#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
++#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
++#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
++#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
++#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
+
+ #define L2X0_CTRL_EN 1
+
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/outercache.h linux-3.14.40/arch/arm/include/asm/outercache.h
+--- linux-3.14.40.orig/arch/arm/include/asm/outercache.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/outercache.h 2015-05-01 14:57:57.663427001 -0500
+@@ -21,6 +21,7 @@
+ #ifndef __ASM_OUTERCACHE_H
+ #define __ASM_OUTERCACHE_H
+
++#include <linux/bug.h>
+ #include <linux/types.h>
+
+ struct outer_cache_fns {
+@@ -28,53 +29,84 @@
+ void (*clean_range)(unsigned long, unsigned long);
+ void (*flush_range)(unsigned long, unsigned long);
+ void (*flush_all)(void);
+- void (*inv_all)(void);
+ void (*disable)(void);
+ #ifdef CONFIG_OUTER_CACHE_SYNC
+ void (*sync)(void);
+ #endif
+- void (*set_debug)(unsigned long);
+ void (*resume)(void);
++
++ /* This is an ARM L2C thing */
++ void (*write_sec)(unsigned long, unsigned);
+ };
+
+ extern struct outer_cache_fns outer_cache;
+
+ #ifdef CONFIG_OUTER_CACHE
+-
++/**
++ * outer_inv_range - invalidate range of outer cache lines
++ * @start: starting physical address, inclusive
++ * @end: end physical address, exclusive
++ */
+ static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
+ {
+ if (outer_cache.inv_range)
+ outer_cache.inv_range(start, end);
+ }
++
++/**
++ * outer_clean_range - clean dirty outer cache lines
++ * @start: starting physical address, inclusive
++ * @end: end physical address, exclusive
++ */
+ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
+ {
+ if (outer_cache.clean_range)
+ outer_cache.clean_range(start, end);
+ }
++
++/**
++ * outer_flush_range - clean and invalidate outer cache lines
++ * @start: starting physical address, inclusive
++ * @end: end physical address, exclusive
++ */
+ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
+ {
+ if (outer_cache.flush_range)
+ outer_cache.flush_range(start, end);
+ }
+
++/**
++ * outer_flush_all - clean and invalidate all cache lines in the outer cache
++ *
++ * Note: depending on implementation, this may not be atomic - it must
++ * only be called with interrupts disabled and no other active outer
++ * cache masters.
++ *
++ * It is intended that this function is only used by implementations
++ * needing to override the outer_cache.disable() method due to security.
++ * (Some implementations perform this as a clean followed by an invalidate.)
++ */
+ static inline void outer_flush_all(void)
+ {
+ if (outer_cache.flush_all)
+ outer_cache.flush_all();
+ }
+
+-static inline void outer_inv_all(void)
+-{
+- if (outer_cache.inv_all)
+- outer_cache.inv_all();
+-}
+-
+-static inline void outer_disable(void)
+-{
+- if (outer_cache.disable)
+- outer_cache.disable();
+-}
+-
++/**
++ * outer_disable - clean, invalidate and disable the outer cache
++ *
++ * Disable the outer cache, ensuring that any data contained in the outer
++ * cache is pushed out to lower levels of system memory. The note and
++ * conditions above concerning outer_flush_all() applies here.
++ */
++extern void outer_disable(void);
++
++/**
++ * outer_resume - restore the cache configuration and re-enable outer cache
++ *
++ * Restore any configuration that the cache had when previously enabled,
++ * and re-enable the outer cache.
++ */
+ static inline void outer_resume(void)
+ {
+ if (outer_cache.resume)
+@@ -90,13 +122,18 @@
+ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
+ { }
+ static inline void outer_flush_all(void) { }
+-static inline void outer_inv_all(void) { }
+ static inline void outer_disable(void) { }
+ static inline void outer_resume(void) { }
+
+ #endif
+
+ #ifdef CONFIG_OUTER_CACHE_SYNC
++/**
++ * outer_sync - perform a sync point for outer cache
++ *
++ * Ensure that all outer cache operations are complete and any store
++ * buffers are drained.
++ */
+ static inline void outer_sync(void)
+ {
+ if (outer_cache.sync)
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/pmu.h linux-3.14.40/arch/arm/include/asm/pmu.h
+--- linux-3.14.40.orig/arch/arm/include/asm/pmu.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/pmu.h 2015-05-01 14:57:57.663427001 -0500
+@@ -62,9 +62,19 @@
+ raw_spinlock_t pmu_lock;
+ };
+
++struct cpupmu_regs {
++ u32 pmc;
++ u32 pmcntenset;
++ u32 pmuseren;
++ u32 pmintenset;
++ u32 pmxevttype[8];
++ u32 pmxevtcnt[8];
++};
++
+ struct arm_pmu {
+ struct pmu pmu;
+ cpumask_t active_irqs;
++ cpumask_t valid_cpus;
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct perf_event *event);
+@@ -81,6 +91,8 @@
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
+ int (*map_event)(struct perf_event *event);
++ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
++ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/psci.h linux-3.14.40/arch/arm/include/asm/psci.h
+--- linux-3.14.40.orig/arch/arm/include/asm/psci.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/psci.h 2015-05-01 14:57:57.663427001 -0500
+@@ -16,6 +16,10 @@
+
+ #define PSCI_POWER_STATE_TYPE_STANDBY 0
+ #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
++#define PSCI_POWER_STATE_AFFINITY_LEVEL0 0
++#define PSCI_POWER_STATE_AFFINITY_LEVEL1 1
++#define PSCI_POWER_STATE_AFFINITY_LEVEL2 2
++#define PSCI_POWER_STATE_AFFINITY_LEVEL3 3
+
+ struct psci_power_state {
+ u16 id;
+@@ -42,4 +46,12 @@
+ static inline bool psci_smp_available(void) { return false; }
+ #endif
+
++#ifdef CONFIG_ARM_PSCI
++extern int psci_probe(void);
++#else
++static inline int psci_probe(void)
++{
++ return -ENODEV;
++}
++#endif
+ #endif /* __ASM_ARM_PSCI_H */
+diff -Nur linux-3.14.40.orig/arch/arm/include/asm/topology.h linux-3.14.40/arch/arm/include/asm/topology.h
+--- linux-3.14.40.orig/arch/arm/include/asm/topology.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/include/asm/topology.h 2015-05-01 14:57:57.663427001 -0500
+@@ -26,11 +26,14 @@
+ void init_cpu_topology(void);
+ void store_cpu_topology(unsigned int cpuid);
+ const struct cpumask *cpu_coregroup_mask(int cpu);
++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+ #else
+
+ static inline void init_cpu_topology(void) { }
+ static inline void store_cpu_topology(unsigned int cpuid) { }
++static inline int cluster_to_logical_mask(unsigned int socket_id,
++ cpumask_t *cluster_mask) { return -EINVAL; }
+
+ #endif
+
+diff -Nur linux-3.14.40.orig/arch/arm/Kconfig linux-3.14.40/arch/arm/Kconfig
+--- linux-3.14.40.orig/arch/arm/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/Kconfig 2015-05-01 14:57:57.691427001 -0500
+@@ -1216,19 +1216,6 @@
+ register of the Cortex-A9 which reduces the linefill issuing
+ capabilities of the processor.
+
+-config PL310_ERRATA_588369
+- bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
+- depends on CACHE_L2X0
+- help
+- The PL310 L2 cache controller implements three types of Clean &
+- Invalidate maintenance operations: by Physical Address
+- (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
+- They are architecturally defined to behave as the execution of a
+- clean operation followed immediately by an invalidate operation,
+- both performing to the same memory location. This functionality
+- is not correctly implemented in PL310 as clean lines are not
+- invalidated as a result of these operations.
+-
+ config ARM_ERRATA_643719
+ bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+ depends on CPU_V7 && SMP
+@@ -1251,17 +1238,6 @@
+ tables. The workaround changes the TLB flushing routines to invalidate
+ entries regardless of the ASID.
+
+-config PL310_ERRATA_727915
+- bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
+- depends on CACHE_L2X0
+- help
+- PL310 implements the Clean & Invalidate by Way L2 cache maintenance
+- operation (offset 0x7FC). This operation runs in background so that
+- PL310 can handle normal accesses while it is in progress. Under very
+- rare circumstances, due to this erratum, write data can be lost when
+- PL310 treats a cacheable write transaction during a Clean &
+- Invalidate by Way operation.
+-
+ config ARM_ERRATA_743622
+ bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
+ depends on CPU_V7
+@@ -1287,21 +1263,6 @@
+ operation is received by a CPU before the ICIALLUIS has completed,
+ potentially leading to corrupted entries in the cache or TLB.
+
+-config PL310_ERRATA_753970
+- bool "PL310 errata: cache sync operation may be faulty"
+- depends on CACHE_PL310
+- help
+- This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+-
+- Under some condition the effect of cache sync operation on
+- the store buffer still remains when the operation completes.
+- This means that the store buffer is always asked to drain and
+- this prevents it from merging any further writes. The workaround
+- is to replace the normal offset of cache sync operation (0x730)
+- by another offset targeting an unmapped PL310 register 0x740.
+- This has the same effect as the cache sync operation: store buffer
+- drain and waiting for all buffers empty.
+-
+ config ARM_ERRATA_754322
+ bool "ARM errata: possible faulty MMU translations following an ASID switch"
+ depends on CPU_V7
+@@ -1350,18 +1311,6 @@
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
+-config PL310_ERRATA_769419
+- bool "PL310 errata: no automatic Store Buffer drain"
+- depends on CACHE_L2X0
+- help
+- On revisions of the PL310 prior to r3p2, the Store Buffer does
+- not automatically drain. This can cause normal, non-cacheable
+- writes to be retained when the memory system is idle, leading
+- to suboptimal I/O performance for drivers using coherent DMA.
+- This option adds a write barrier to the cpu_idle loop so that,
+- on systems with an outer cache, the store buffer is drained
+- explicitly.
+-
+ config ARM_ERRATA_775420
+ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
+ depends on CPU_V7
+@@ -1391,6 +1340,29 @@
+ loop buffer may deliver incorrect instructions. This
+ workaround disables the loop buffer to avoid the erratum.
+
++config ARM_ERRATA_794072
++ bool "ARM errata: A short loop including a DMB instruction might cause a denial of service"
++ depends on CPU_V7 && SMP
++ help
++ This option enables the workaround for the 794072 Cortex-A9
++ (all revisions). A processor which continuously executes a short
++ loop containing a DMB instruction might prevent a CP15 operation
++ broadcast by another processor making further progress, causing
++ a denial of service. This erratum can be worked around by setting
++ bit[4] of the undocumented Diagnostic Control Register to 1.
++
++config ARM_ERRATA_761320
++ bool "Full cache line writes to the same memory region from at least two processors might deadlock processor"
++ depends on CPU_V7 && SMP
++ help
++ This option enables the workaround for the 761320 Cortex-A9 (r0..r3).
++ Under very rare circumstances, full cache line writes
++ from (at least) 2 processors on cache lines in hazard with
++ other requests may cause arbitration issues in the SCU,
++ leading to processor deadlock. This erratum can be
++ worked around by setting bit[21] of the undocumented
++ Diagnostic Control Register to 1.
++
+ endmenu
+
+ source "arch/arm/common/Kconfig"
+@@ -1835,6 +1807,7 @@
+ range 11 64 if ARCH_SHMOBILE_LEGACY
+ default "12" if SOC_AM33XX
+ default "9" if SA1111 || ARCH_EFM32
++ default "14" if ARCH_MXC
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/perf_event.c linux-3.14.40/arch/arm/kernel/perf_event.c
+--- linux-3.14.40.orig/arch/arm/kernel/perf_event.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/perf_event.c 2015-05-01 14:57:57.719427001 -0500
+@@ -12,6 +12,7 @@
+ */
+ #define pr_fmt(fmt) "hw perfevents: " fmt
+
++#include <linux/cpumask.h>
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+@@ -86,6 +87,9 @@
+ return armpmu_map_cache_event(cache_map, config);
+ case PERF_TYPE_RAW:
+ return armpmu_map_raw_event(raw_event_mask, config);
++ default:
++ if (event->attr.type >= PERF_TYPE_MAX)
++ return armpmu_map_raw_event(raw_event_mask, config);
+ }
+
+ return -ENOENT;
+@@ -159,6 +163,8 @@
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return;
+ /*
+ * ARM pmu always has to update the counter, so ignore
+ * PERF_EF_UPDATE, see comments in armpmu_start().
+@@ -175,6 +181,8 @@
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return;
+ /*
+ * ARM pmu always has to reprogram the period, so ignore
+ * PERF_EF_RELOAD, see the comment below.
+@@ -202,6 +210,9 @@
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return;
++
+ armpmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+@@ -218,6 +229,10 @@
+ int idx;
+ int err = 0;
+
++ /* An event following a process won't be stopped earlier */
++ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
++ return 0;
++
+ perf_pmu_disable(event->pmu);
+
+ /* If we don't have a space for the counter then finish early. */
+@@ -419,6 +434,10 @@
+ int err = 0;
+ atomic_t *active_events = &armpmu->active_events;
+
++ if (event->cpu != -1 &&
++ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
++ return -ENOENT;
++
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/perf_event_cpu.c linux-3.14.40/arch/arm/kernel/perf_event_cpu.c
+--- linux-3.14.40.orig/arch/arm/kernel/perf_event_cpu.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/perf_event_cpu.c 2015-05-01 14:57:57.723427001 -0500
+@@ -19,6 +19,7 @@
+ #define pr_fmt(fmt) "CPU PMU: " fmt
+
+ #include <linux/bitmap.h>
++#include <linux/cpu_pm.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
+ #include <linux/of.h>
+@@ -31,33 +32,36 @@
+ #include <asm/pmu.h>
+
+ /* Set at runtime when we know what CPU type we are. */
+-static struct arm_pmu *cpu_pmu;
++static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
+
+ static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+ static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
++static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
++
+ /*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+ const char *perf_pmu_name(void)
+ {
+- if (!cpu_pmu)
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
++ if (!pmu)
+ return NULL;
+
+- return cpu_pmu->name;
++ return pmu->name;
+ }
+ EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+ int perf_num_counters(void)
+ {
+- int max_events = 0;
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+
+- if (cpu_pmu != NULL)
+- max_events = cpu_pmu->num_events;
++ if (!pmu)
++ return 0;
+
+- return max_events;
++ return pmu->num_events;
+ }
+ EXPORT_SYMBOL_GPL(perf_num_counters);
+
+@@ -75,11 +79,13 @@
+ {
+ int i, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
++ int cpu = -1;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+ for (i = 0; i < irqs; ++i) {
+- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
++ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
++ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+@@ -91,6 +97,7 @@
+ {
+ int i, err, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
++ int cpu = -1;
+
+ if (!pmu_device)
+ return -ENODEV;
+@@ -103,6 +110,7 @@
+
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
++ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+@@ -112,7 +120,7 @@
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
++ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+@@ -127,7 +135,7 @@
+ return err;
+ }
+
+- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
++ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+ }
+
+ return 0;
+@@ -136,7 +144,7 @@
+ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ int cpu;
+- for_each_possible_cpu(cpu) {
++ for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
+ struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ events->events = per_cpu(hw_events, cpu);
+ events->used_mask = per_cpu(used_mask, cpu);
+@@ -149,7 +157,7 @@
+
+ /* Ensure the PMU has sane values out of reset. */
+ if (cpu_pmu->reset)
+- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
++ on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
+ }
+
+ /*
+@@ -161,21 +169,46 @@
+ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+ {
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
++
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+- if (cpu_pmu && cpu_pmu->reset)
+- cpu_pmu->reset(cpu_pmu);
++ if (pmu && pmu->reset)
++ pmu->reset(pmu);
+ else
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+ }
+
++static int cpu_pmu_pm_notify(struct notifier_block *b,
++ unsigned long action, void *hcpu)
++{
++ int cpu = smp_processor_id();
++ struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
++ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
++
++ if (!pmu)
++ return NOTIFY_DONE;
++
++ if (action == CPU_PM_ENTER && pmu->save_regs) {
++ pmu->save_regs(pmu, pmuregs);
++ } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
++ pmu->restore_regs(pmu, pmuregs);
++ }
++
++ return NOTIFY_OK;
++}
++
+ static struct notifier_block cpu_pmu_hotplug_notifier = {
+ .notifier_call = cpu_pmu_notify,
+ };
+
++static struct notifier_block cpu_pmu_pm_notifier = {
++ .notifier_call = cpu_pmu_pm_notify,
++};
++
+ /*
+ * PMU platform driver and devicetree bindings.
+ */
+@@ -247,6 +280,9 @@
+ }
+ }
+
++	/* assume the PMU supports all CPUs in this case */
++ cpumask_setall(&pmu->valid_cpus);
++
+ put_cpu();
+ return ret;
+ }
+@@ -254,15 +290,10 @@
+ static int cpu_pmu_device_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *of_id;
+- const int (*init_fn)(struct arm_pmu *);
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+- int ret = -ENODEV;
+-
+- if (cpu_pmu) {
+- pr_info("attempt to register multiple PMU devices!");
+- return -ENOSPC;
+- }
++ int ret = 0;
++ int cpu;
+
+ pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+ if (!pmu) {
+@@ -271,8 +302,28 @@
+ }
+
+ if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
+- init_fn = of_id->data;
+- ret = init_fn(pmu);
++ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
++ struct device_node *ncluster;
++ int cluster = -1;
++ cpumask_t sibling_mask;
++
++ ncluster = of_parse_phandle(node, "cluster", 0);
++ if (ncluster) {
++ int len;
++ const u32 *hwid;
++ hwid = of_get_property(ncluster, "reg", &len);
++ if (hwid && len == 4)
++ cluster = be32_to_cpup(hwid);
++ }
++ /* set sibling mask to all cpu mask if socket is not specified */
++ if (cluster == -1 ||
++ cluster_to_logical_mask(cluster, &sibling_mask))
++ cpumask_setall(&sibling_mask);
++
++ smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
++
++ /* now set the valid_cpus after init */
++ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
+ } else {
+ ret = probe_current_pmu(pmu);
+ }
+@@ -282,10 +333,12 @@
+ goto out_free;
+ }
+
+- cpu_pmu = pmu;
+- cpu_pmu->plat_device = pdev;
+- cpu_pmu_init(cpu_pmu);
+- ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
++ for_each_cpu_mask(cpu, pmu->valid_cpus)
++ per_cpu(cpu_pmu, cpu) = pmu;
++
++ pmu->plat_device = pdev;
++ cpu_pmu_init(pmu);
++ ret = armpmu_register(pmu, -1);
+
+ if (!ret)
+ return 0;
+@@ -314,9 +367,17 @@
+ if (err)
+ return err;
+
++ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
++ if (err) {
++ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
++ return err;
++ }
++
+ err = platform_driver_register(&cpu_pmu_driver);
+- if (err)
++ if (err) {
++ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
++ }
+
+ return err;
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/perf_event_v7.c linux-3.14.40/arch/arm/kernel/perf_event_v7.c
+--- linux-3.14.40.orig/arch/arm/kernel/perf_event_v7.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/perf_event_v7.c 2015-05-01 14:57:57.727427001 -0500
+@@ -950,6 +950,51 @@
+ }
+ #endif
+
++static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
++ struct cpupmu_regs *regs)
++{
++ unsigned int cnt;
++ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
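++	/* If the PMU is disabled (PMNC.E clear) there is nothing else to save. */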
++ if (!(regs->pmc & ARMV7_PMNC_E))
++ return;
++
++ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
++ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
++ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
++ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
++ for (cnt = ARMV7_IDX_COUNTER0;
++ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
++ armv7_pmnc_select_counter(cnt);
++ asm volatile("mrc p15, 0, %0, c9, c13, 1"
++ : "=r"(regs->pmxevttype[cnt]));
++ asm volatile("mrc p15, 0, %0, c9, c13, 2"
++ : "=r"(regs->pmxevtcnt[cnt]));
++ }
++ return;
++}
++
++static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
++ struct cpupmu_regs *regs)
++{
++ unsigned int cnt;
++ if (!(regs->pmc & ARMV7_PMNC_E))
++ return;
++
++ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
++ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
++ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
++ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
++ for (cnt = ARMV7_IDX_COUNTER0;
++ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
++ armv7_pmnc_select_counter(cnt);
++ asm volatile("mcr p15, 0, %0, c9, c13, 1"
++ : : "r"(regs->pmxevttype[cnt]));
++ asm volatile("mcr p15, 0, %0, c9, c13, 2"
++ : : "r"(regs->pmxevtcnt[cnt]));
++ }
++ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
++}
++
+ static void armv7pmu_enable_event(struct perf_event *event)
+ {
+ unsigned long flags;
+@@ -1223,6 +1268,8 @@
+ cpu_pmu->start = armv7pmu_start;
+ cpu_pmu->stop = armv7pmu_stop;
+ cpu_pmu->reset = armv7pmu_reset;
++ cpu_pmu->save_regs = armv7pmu_save_regs;
++ cpu_pmu->restore_regs = armv7pmu_restore_regs;
+ cpu_pmu->max_period = (1LLU << 32) - 1;
+ };
+
+@@ -1240,7 +1287,7 @@
+ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A8";
++ cpu_pmu->name = "ARMv7_Cortex_A8";
+ cpu_pmu->map_event = armv7_a8_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
+@@ -1249,7 +1296,7 @@
+ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A9";
++ cpu_pmu->name = "ARMv7_Cortex_A9";
+ cpu_pmu->map_event = armv7_a9_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
+@@ -1258,7 +1305,7 @@
+ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A5";
++ cpu_pmu->name = "ARMv7_Cortex_A5";
+ cpu_pmu->map_event = armv7_a5_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
+@@ -1267,7 +1314,7 @@
+ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A15";
++ cpu_pmu->name = "ARMv7_Cortex_A15";
+ cpu_pmu->map_event = armv7_a15_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+@@ -1277,7 +1324,7 @@
+ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
+ {
+ armv7pmu_init(cpu_pmu);
+- cpu_pmu->name = "ARMv7 Cortex-A7";
++ cpu_pmu->name = "ARMv7_Cortex_A7";
+ cpu_pmu->map_event = armv7_a7_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/process.c linux-3.14.40/arch/arm/kernel/process.c
+--- linux-3.14.40.orig/arch/arm/kernel/process.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/process.c 2015-05-01 14:57:57.727427001 -0500
+@@ -172,8 +172,10 @@
+ */
+ void arch_cpu_idle(void)
+ {
++ idle_notifier_call_chain(IDLE_START);
+ if (cpuidle_idle_call())
+ default_idle();
++ idle_notifier_call_chain(IDLE_END);
+ }
+
+ /*
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/psci.c linux-3.14.40/arch/arm/kernel/psci.c
+--- linux-3.14.40.orig/arch/arm/kernel/psci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/psci.c 2015-05-01 14:57:57.747427001 -0500
+@@ -42,6 +42,7 @@
+ #define PSCI_RET_EOPNOTSUPP -1
+ #define PSCI_RET_EINVAL -2
+ #define PSCI_RET_EPERM -3
++#define PSCI_RET_EALREADYON -4
+
+ static int psci_to_linux_errno(int errno)
+ {
+@@ -54,6 +55,8 @@
+ return -EINVAL;
+ case PSCI_RET_EPERM:
+ return -EPERM;
++ case PSCI_RET_EALREADYON:
++ return -EAGAIN;
+ };
+
+ return -EINVAL;
+@@ -153,7 +156,7 @@
+ return psci_to_linux_errno(err);
+ }
+
+-static const struct of_device_id psci_of_match[] __initconst = {
++static const struct of_device_id psci_of_match[] = {
+ { .compatible = "arm,psci", },
+ {},
+ };
+@@ -208,3 +211,16 @@
+ of_node_put(np);
+ return;
+ }
++
++int psci_probe(void)
++{
++ struct device_node *np;
++ int ret = -ENODEV;
++
++ np = of_find_matching_node(NULL, psci_of_match);
++ if (np)
++ ret = 0;
++
++ of_node_put(np);
++ return ret;
++}
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/setup.c linux-3.14.40/arch/arm/kernel/setup.c
+--- linux-3.14.40.orig/arch/arm/kernel/setup.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/setup.c 2015-05-01 14:57:57.747427001 -0500
+@@ -273,6 +273,19 @@
+ int aliasing_icache;
+ unsigned int id_reg, num_sets, line_size;
+
++#ifdef CONFIG_BIG_LITTLE
++ /*
++ * We expect a combination of Cortex-A15 and Cortex-A7 cores.
++ * A7 = VIPT aliasing I-cache
++ * A15 = PIPT (non-aliasing) I-cache
++ * To cater for this discrepancy, let's assume aliasing I-cache
++ * all the time. This means unneeded extra work on the A15 but
++ * only ptrace is affected which is not performance critical.
++ */
++ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
++ return 1;
++#endif
++
+ /* PIPT caches never alias. */
+ if (icache_is_pipt())
+ return 0;
+diff -Nur linux-3.14.40.orig/arch/arm/kernel/topology.c linux-3.14.40/arch/arm/kernel/topology.c
+--- linux-3.14.40.orig/arch/arm/kernel/topology.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/kernel/topology.c 2015-05-01 14:57:57.755427001 -0500
+@@ -267,6 +267,33 @@
+ }
+
+ /*
++ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
++ * @socket_id: cluster HW identifier
++ * @cluster_mask: the cpumask location to be initialized, modified by the
++ * function only if return value == 0
++ *
++ * Return:
++ *
++ * 0 on success
++ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
++ */
++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
++{
++ int cpu;
++
++ if (!cluster_mask)
++ return -EINVAL;
++
++ for_each_online_cpu(cpu)
++ if (socket_id == topology_physical_package_id(cpu)) {
++ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+diff -Nur linux-3.14.40.orig/arch/arm/lib/bitops.h linux-3.14.40/arch/arm/lib/bitops.h
+--- linux-3.14.40.orig/arch/arm/lib/bitops.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/lib/bitops.h 2015-05-01 14:57:57.763427001 -0500
+@@ -37,6 +37,11 @@
+ add r1, r1, r0, lsl #2 @ Get word offset
+ mov r3, r2, lsl r3 @ create mask
+ smp_dmb
++#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
++ .arch_extension mp
++ ALT_SMP(W(pldw) [r1])
++ ALT_UP(W(nop))
++#endif
+ 1: ldrex r2, [r1]
+ ands r0, r2, r3 @ save old value of bit
+ \instr r2, r2, r3 @ toggle bit
+diff -Nur linux-3.14.40.orig/arch/arm/mach-berlin/berlin.c linux-3.14.40/arch/arm/mach-berlin/berlin.c
+--- linux-3.14.40.orig/arch/arm/mach-berlin/berlin.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-berlin/berlin.c 2015-05-01 14:57:57.771427001 -0500
+@@ -24,7 +24,7 @@
+ * with DT probing for L2CCs, berlin_init_machine can be removed.
+ * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
+ */
+- l2x0_of_init(0x70c00000, 0xfeffffff);
++ l2x0_of_init(0x30c00000, 0xfeffffff);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-cns3xxx/core.c linux-3.14.40/arch/arm/mach-cns3xxx/core.c
+--- linux-3.14.40.orig/arch/arm/mach-cns3xxx/core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-cns3xxx/core.c 2015-05-01 14:57:57.771427001 -0500
+@@ -240,9 +240,9 @@
+ *
+ * 1 cycle of latency for setup, read and write accesses
+ */
+- val = readl(base + L2X0_TAG_LATENCY_CTRL);
++ val = readl(base + L310_TAG_LATENCY_CTRL);
+ val &= 0xfffff888;
+- writel(val, base + L2X0_TAG_LATENCY_CTRL);
++ writel(val, base + L310_TAG_LATENCY_CTRL);
+
+ /*
+ * Data RAM Control register
+@@ -253,12 +253,12 @@
+ *
+ * 1 cycle of latency for setup, read and write accesses
+ */
+- val = readl(base + L2X0_DATA_LATENCY_CTRL);
++ val = readl(base + L310_DATA_LATENCY_CTRL);
+ val &= 0xfffff888;
+- writel(val, base + L2X0_DATA_LATENCY_CTRL);
++ writel(val, base + L310_DATA_LATENCY_CTRL);
+
+ /* 32 KiB, 8-way, parity disable */
+- l2x0_init(base, 0x00540000, 0xfe000fff);
++ l2x0_init(base, 0x00500000, 0xfe0f0fff);
+ }
+
+ #endif /* CONFIG_CACHE_L2X0 */
+diff -Nur linux-3.14.40.orig/arch/arm/mach-exynos/common.c linux-3.14.40/arch/arm/mach-exynos/common.c
+--- linux-3.14.40.orig/arch/arm/mach-exynos/common.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-exynos/common.c 2015-05-01 14:57:57.771427001 -0500
+@@ -45,9 +45,6 @@
+ #include "common.h"
+ #include "regs-pmu.h"
+
+-#define L2_AUX_VAL 0x7C470001
+-#define L2_AUX_MASK 0xC200ffff
+-
+ static const char name_exynos4210[] = "EXYNOS4210";
+ static const char name_exynos4212[] = "EXYNOS4212";
+ static const char name_exynos4412[] = "EXYNOS4412";
+@@ -400,7 +397,7 @@
+ {
+ int ret;
+
+- ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
++ ret = l2x0_of_init(0x3c400001, 0xc20fffff);
+ if (ret)
+ return ret;
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-highbank/highbank.c linux-3.14.40/arch/arm/mach-highbank/highbank.c
+--- linux-3.14.40.orig/arch/arm/mach-highbank/highbank.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-highbank/highbank.c 2015-05-01 14:57:57.787427001 -0500
+@@ -20,7 +20,7 @@
+ #include <linux/input.h>
+ #include <linux/io.h>
+ #include <linux/irqchip.h>
+-#include <linux/mailbox.h>
++#include <linux/pl320-ipc.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+@@ -51,11 +51,13 @@
+ }
+
+
+-static void highbank_l2x0_disable(void)
++static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
+ {
+- outer_flush_all();
+- /* Disable PL310 L2 Cache controller */
+- highbank_smc1(0x102, 0x0);
++ if (reg == L2X0_CTRL)
++ highbank_smc1(0x102, val);
++ else
++ WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
++ reg);
+ }
+
+ static void __init highbank_init_irq(void)
+@@ -66,11 +68,9 @@
+ highbank_scu_map_io();
+
+ /* Enable PL310 L2 Cache controller */
+- if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+- of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
+- highbank_smc1(0x102, 0x1);
+- l2x0_of_init(0, ~0UL);
+- outer_cache.disable = highbank_l2x0_disable;
++ if (IS_ENABLED(CONFIG_CACHE_L2X0)) {
++ outer_cache.write_sec = highbank_l2c310_write_sec;
++ l2x0_of_init(0, ~0);
+ }
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/anatop.c linux-3.14.40/arch/arm/mach-imx/anatop.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/anatop.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/anatop.c 2015-05-01 14:57:57.795427001 -0500
+@@ -9,6 +9,7 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+@@ -35,6 +36,10 @@
+ #define BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B 0x80000
+ #define BM_ANADIG_USB_CHRG_DETECT_EN_B 0x100000
+
++#define ANADIG_REG_TARG_MASK 0x1f
++#define ANADIG_REG1_TARG_SHIFT 9 /* VDDPU */
++#define ANADIG_REG2_TARG_SHIFT 18 /* VDDSOC */
++
+ static struct regmap *anatop;
+
+ static void imx_anatop_enable_weak2p5(bool enable)
+@@ -78,6 +83,28 @@
+ BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B);
+ }
+
++void imx_anatop_pu_enable(bool enable)
++{
++ u32 val;
++
++ regmap_read(anatop, ANADIG_REG_CORE, &val);
++ val &= ANADIG_REG_TARG_MASK << ANADIG_REG2_TARG_SHIFT;
++ /*
++	 * set the PU regulator only in LDO_BYPASS mode (known by VDDSOC reg 0x1f),
++ * else handled by anatop regulator driver.
++ */
++ if (((val >> (ANADIG_REG2_TARG_SHIFT)) & ANADIG_REG_TARG_MASK)
++ == ANADIG_REG_TARG_MASK) {
++ if (enable) {
++ regmap_write(anatop, ANADIG_REG_CORE + REG_SET,
++ ANADIG_REG_TARG_MASK << ANADIG_REG1_TARG_SHIFT);
++			udelay(70); /* bypass needs 70us to be stable */
++ } else {
++ regmap_write(anatop, ANADIG_REG_CORE + REG_CLR,
++ ANADIG_REG_TARG_MASK << ANADIG_REG1_TARG_SHIFT);
++ }
++ }
++}
+ void __init imx_init_revision_from_anatop(void)
+ {
+ struct device_node *np;
+@@ -104,6 +131,15 @@
+ case 2:
+ revision = IMX_CHIP_REVISION_1_2;
+ break;
++ case 3:
++ revision = IMX_CHIP_REVISION_1_3;
++ break;
++ case 4:
++ revision = IMX_CHIP_REVISION_1_4;
++ break;
++ case 5:
++ revision = IMX_CHIP_REVISION_1_5;
++ break;
+ default:
+ revision = IMX_CHIP_REVISION_UNKNOWN;
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/busfreq_ddr3.c linux-3.14.40/arch/arm/mach-imx/busfreq_ddr3.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/busfreq_ddr3.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/busfreq_ddr3.c 2015-05-01 14:57:57.795427001 -0500
+@@ -0,0 +1,471 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file busfreq_ddr3.c
++ *
++ * @brief iMX6 DDR3 frequency change specific file.
++ *
++ * @ingroup PM
++ */
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/irqchip/arm-gic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++
++#include "hardware.h"
++
++/* DDR settings */
++static unsigned long (*iram_ddr_settings)[2];
++static unsigned long (*normal_mmdc_settings)[2];
++static unsigned long (*iram_iomux_settings)[2];
++static void __iomem *mmdc_base;
++static void __iomem *iomux_base;
++static void __iomem *ccm_base;
++static void __iomem *l2_base;
++static void __iomem *gic_dist_base;
++static u32 *irqs_used;
++
++static void *ddr_freq_change_iram_base;
++static int ddr_settings_size;
++static int iomux_settings_size;
++static volatile unsigned int cpus_in_wfe;
++static volatile bool wait_for_ddr_freq_update;
++static int curr_ddr_rate;
++
++void (*mx6_change_ddr_freq)(u32 freq, void *ddr_settings,
++ bool dll_mode, void *iomux_offsets) = NULL;
++
++extern unsigned int ddr_med_rate;
++extern unsigned int ddr_normal_rate;
++extern int low_bus_freq_mode;
++extern int audio_bus_freq_mode;
++extern void mx6_ddr3_freq_change(u32 freq, void *ddr_settings,
++ bool dll_mode, void *iomux_offsets);
++
++#define MIN_DLL_ON_FREQ 333000000
++#define MAX_DLL_OFF_FREQ 125000000
++#define DDR_FREQ_CHANGE_SIZE 0x2000
++
++unsigned long ddr3_dll_mx6q[][2] = {
++ {0x0c, 0x0},
++ {0x10, 0x0},
++ {0x1C, 0x04088032},
++ {0x1C, 0x0408803a},
++ {0x1C, 0x08408030},
++ {0x1C, 0x08408038},
++ {0x818, 0x0},
++};
++
++unsigned long ddr3_calibration[][2] = {
++ {0x83c, 0x0},
++ {0x840, 0x0},
++ {0x483c, 0x0},
++ {0x4840, 0x0},
++ {0x848, 0x0},
++ {0x4848, 0x0},
++ {0x850, 0x0},
++ {0x4850, 0x0},
++};
++
++unsigned long ddr3_dll_mx6dl[][2] = {
++ {0x0c, 0x0},
++ {0x10, 0x0},
++ {0x1C, 0x04008032},
++ {0x1C, 0x0400803a},
++ {0x1C, 0x07208030},
++ {0x1C, 0x07208038},
++ {0x818, 0x0},
++};
++
++unsigned long iomux_offsets_mx6q[][2] = {
++ {0x5A8, 0x0},
++ {0x5B0, 0x0},
++ {0x524, 0x0},
++ {0x51C, 0x0},
++ {0x518, 0x0},
++ {0x50C, 0x0},
++ {0x5B8, 0x0},
++ {0x5C0, 0x0},
++};
++
++unsigned long iomux_offsets_mx6dl[][2] = {
++ {0x4BC, 0x0},
++ {0x4C0, 0x0},
++ {0x4C4, 0x0},
++ {0x4C8, 0x0},
++ {0x4CC, 0x0},
++ {0x4D0, 0x0},
++ {0x4D4, 0x0},
++ {0x4D8, 0x0},
++};
++
++unsigned long ddr3_400[][2] = {
++ {0x83c, 0x42490249},
++ {0x840, 0x02470247},
++ {0x483c, 0x42570257},
++ {0x4840, 0x02400240},
++ {0x848, 0x4039363C},
++ {0x4848, 0x3A39333F},
++ {0x850, 0x38414441},
++ {0x4850, 0x472D4833}
++};
++
++int can_change_ddr_freq(void)
++{
++ return 1;
++}
++
++/*
++ * Each active core, apart from the one changing
++ * the DDR frequency, will execute this function.
++ * The rest of the cores have to remain in WFE
++ * state until the frequency is changed.
++ */
++irqreturn_t wait_in_wfe_irq(int irq, void *dev_id)
++{
++ u32 me = smp_processor_id();
++
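++	/*
++	 * Each byte of the 32-bit cpus_in_wfe word acts as a per-CPU flag,
++	 * so a core can mark itself without needing atomic operations.
++	 */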
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0xff;
++
++ while (wait_for_ddr_freq_update)
++ wfe();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0;
++
++ return IRQ_HANDLED;
++}
++
++/* change the DDR frequency. */
++int update_ddr_freq(int ddr_rate)
++{
++ int i, j;
++ unsigned int reg;
++ bool dll_off = false;
++ unsigned int online_cpus = 0;
++ int cpu = 0;
++ int me;
++
++ if (!can_change_ddr_freq())
++ return -1;
++
++ if (ddr_rate == curr_ddr_rate)
++ return 0;
++
++ pr_debug("Bus freq set to %d start...\n", ddr_rate);
++
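++	/* In low or audio bus frequency mode the DDR DLL is already off. */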
++ if (low_bus_freq_mode || audio_bus_freq_mode)
++ dll_off = true;
++
++ iram_ddr_settings[0][0] = ddr_settings_size;
++ iram_iomux_settings[0][0] = iomux_settings_size;
++ if (ddr_rate == ddr_med_rate && cpu_is_imx6q()) {
++ for (i = 0; i < ARRAY_SIZE(ddr3_dll_mx6q); i++) {
++ iram_ddr_settings[i + 1][0] =
++ normal_mmdc_settings[i][0];
++ iram_ddr_settings[i + 1][1] =
++ normal_mmdc_settings[i][1];
++ }
++ for (j = 0, i = ARRAY_SIZE(ddr3_dll_mx6q);
++ i < iram_ddr_settings[0][0]; j++, i++) {
++ iram_ddr_settings[i + 1][0] =
++ ddr3_400[j][0];
++ iram_ddr_settings[i + 1][1] =
++ ddr3_400[j][1];
++ }
++ } else if (ddr_rate == ddr_normal_rate) {
++ for (i = 0; i < iram_ddr_settings[0][0]; i++) {
++ iram_ddr_settings[i + 1][0] =
++ normal_mmdc_settings[i][0];
++ iram_ddr_settings[i + 1][1] =
++ normal_mmdc_settings[i][1];
++ }
++ }
++
++ /* ensure that all Cores are in WFE. */
++ local_irq_disable();
++
++ me = smp_processor_id();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0xff;
++ wait_for_ddr_freq_update = true;
++ for_each_online_cpu(cpu) {
++ *((char *)(&online_cpus) + (u8)cpu) = 0xff;
++ if (cpu != me) {
++ /* set the interrupt to be pending in the GIC. */
++ reg = 1 << (irqs_used[cpu] % 32);
++ writel_relaxed(reg, gic_dist_base + GIC_DIST_PENDING_SET
++ + (irqs_used[cpu] / 32) * 4);
++ }
++ }
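++	/* Wait until every other online core has parked itself in WFE. */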
++ while (cpus_in_wfe != online_cpus)
++ udelay(5);
++
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /* Now we can change the DDR frequency. */
++ mx6_change_ddr_freq(ddr_rate, iram_ddr_settings,
++ dll_off, iram_iomux_settings);
++
++ curr_ddr_rate = ddr_rate;
++
++	/* DDR frequency change is done. */
++ wait_for_ddr_freq_update = false;
++
++ /* wake up all the cores. */
++ sev();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0;
++
++ local_irq_enable();
++
++ pr_debug("Bus freq set to %d done!\n", ddr_rate);
++
++ return 0;
++}
++
++int init_mmdc_ddr3_settings(struct platform_device *busfreq_pdev)
++{
++ struct device *dev = &busfreq_pdev->dev;
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ int i, err;
++ u32 cpu;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-mmdc-combine");
++ if (!node) {
++ pr_err("failed to find imx6q-mmdc device tree data!\n");
++ return -EINVAL;
++ }
++ mmdc_base = of_iomap(node, 0);
++ WARN(!mmdc_base, "unable to map mmdc registers\n");
++
++ node = NULL;
++ if (cpu_is_imx6q())
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc");
++ if (cpu_is_imx6dl())
++ node = of_find_compatible_node(NULL, NULL,
++ "fsl,imx6dl-iomuxc");
++ if (!node) {
++ pr_err("failed to find imx6q-iomux device tree data!\n");
++ return -EINVAL;
++ }
++ iomux_base = of_iomap(node, 0);
++ WARN(!iomux_base, "unable to map iomux registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
++ if (!node) {
++ pr_err("failed to find imx6q-ccm device tree data!\n");
++ return -EINVAL;
++ }
++ ccm_base = of_iomap(node, 0);
++	WARN(!ccm_base, "unable to map ccm registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
++ if (!node) {
++ pr_err("failed to find imx6q-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ l2_base = of_iomap(node, 0);
++	WARN(!l2_base, "unable to map pl310 registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
++ if (!node) {
++ pr_err("failed to find imx6q-a9-gic device tree data!\n");
++ return -EINVAL;
++ }
++ gic_dist_base = of_iomap(node, 0);
++ WARN(!gic_dist_base, "unable to map gic dist registers\n");
++
++ if (cpu_is_imx6q())
++ ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q) +
++ ARRAY_SIZE(ddr3_calibration);
++ if (cpu_is_imx6dl())
++ ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl) +
++ ARRAY_SIZE(ddr3_calibration);
++
++ normal_mmdc_settings = kmalloc((ddr_settings_size * 8), GFP_KERNEL);
++ if (cpu_is_imx6q()) {
++ memcpy(normal_mmdc_settings, ddr3_dll_mx6q,
++ sizeof(ddr3_dll_mx6q));
++ memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6q)),
++ ddr3_calibration, sizeof(ddr3_calibration));
++ }
++ if (cpu_is_imx6dl()) {
++ memcpy(normal_mmdc_settings, ddr3_dll_mx6dl,
++ sizeof(ddr3_dll_mx6dl));
++ memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6dl)),
++ ddr3_calibration, sizeof(ddr3_calibration));
++ }
++ /* store the original DDR settings at boot. */
++ for (i = 0; i < ddr_settings_size; i++) {
++ /*
++		 * Writes via the command mode register cannot be read back,
++		 * hence they are hardcoded in the initial static array.
++		 * This may require modification on a per-customer basis.
++ */
++ if (normal_mmdc_settings[i][0] != 0x1C)
++ normal_mmdc_settings[i][1] =
++ readl_relaxed(mmdc_base
++ + normal_mmdc_settings[i][0]);
++ }
++
++ irqs_used = devm_kzalloc(dev, sizeof(u32) * num_present_cpus(),
++ GFP_KERNEL);
++
++ for_each_online_cpu(cpu) {
++ int irq;
++
++ /*
++ * set up a reserved interrupt to get all
++ * the active cores into a WFE state
++ * before changing the DDR frequency.
++ */
++ irq = platform_get_irq(busfreq_pdev, cpu);
++ err = request_irq(irq, wait_in_wfe_irq,
++ IRQF_PERCPU, "mmdc_1", NULL);
++ if (err) {
++ dev_err(dev,
++ "Busfreq:request_irq failed %d, err = %d\n",
++ irq, err);
++ return err;
++ }
++ err = irq_set_affinity(irq, cpumask_of(cpu));
++ if (err) {
++ dev_err(dev,
++ "Busfreq: Cannot set irq affinity irq=%d,\n",
++ irq);
++ return err;
++ }
++ irqs_used[cpu] = irq;
++ }
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ dev_err(dev, "%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ dev_err(dev, "failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ dev_err(dev, "iram pool unavailable!\n");
++ return -EINVAL;
++ }
++
++ iomux_settings_size = ARRAY_SIZE(iomux_offsets_mx6q);
++ iram_iomux_settings = gen_pool_alloc(iram_pool,
++ (iomux_settings_size * 8) + 8);
++ if (!iram_iomux_settings) {
++ dev_err(dev, "unable to alloc iram for IOMUX settings!\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Allocate extra space to store the number of entries in the
++	 * ddr_settings plus 4 extra register entries that need
++ * to be passed to the frequency change code.
++ * sizeof(iram_ddr_settings) = sizeof(ddr_settings) +
++ * entries in ddr_settings + 16.
++	 * The last 4 entries store the addresses of the registers:
++ * CCM_BASE_ADDR
++ * MMDC_BASE_ADDR
++ * IOMUX_BASE_ADDR
++ * L2X0_BASE_ADDR
++ */
++ iram_ddr_settings = gen_pool_alloc(iram_pool,
++ (ddr_settings_size * 8) + 8 + 32);
++ if (!iram_ddr_settings) {
++ dev_err(dev, "unable to alloc iram for ddr settings!\n");
++ return -ENOMEM;
++ }
++ i = ddr_settings_size + 1;
++ iram_ddr_settings[i][0] = (unsigned long)mmdc_base;
++ iram_ddr_settings[i+1][0] = (unsigned long)ccm_base;
++ iram_ddr_settings[i+2][0] = (unsigned long)iomux_base;
++ iram_ddr_settings[i+3][0] = (unsigned long)l2_base;
++
++ if (cpu_is_imx6q()) {
++ /* store the IOMUX settings at boot. */
++ for (i = 0; i < iomux_settings_size; i++) {
++ iomux_offsets_mx6q[i][1] =
++ readl_relaxed(iomux_base +
++ iomux_offsets_mx6q[i][0]);
++ iram_iomux_settings[i+1][0] = iomux_offsets_mx6q[i][0];
++ iram_iomux_settings[i+1][1] = iomux_offsets_mx6q[i][1];
++ }
++ }
++
++ if (cpu_is_imx6dl()) {
++ for (i = 0; i < iomux_settings_size; i++) {
++ iomux_offsets_mx6dl[i][1] =
++ readl_relaxed(iomux_base +
++ iomux_offsets_mx6dl[i][0]);
++ iram_iomux_settings[i+1][0] = iomux_offsets_mx6dl[i][0];
++ iram_iomux_settings[i+1][1] = iomux_offsets_mx6dl[i][1];
++ }
++ }
++
++ ddr_freq_change_iram_base = gen_pool_alloc(iram_pool,
++ DDR_FREQ_CHANGE_SIZE);
++ if (!ddr_freq_change_iram_base) {
++ dev_err(dev, "Cannot alloc iram for ddr freq change code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)ddr_freq_change_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
++ DDR_FREQ_CHANGE_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ mx6_change_ddr_freq = (void *)fncpy(ddr_freq_change_iram_base,
++ &mx6_ddr3_freq_change, DDR_FREQ_CHANGE_SIZE);
++
++ curr_ddr_rate = ddr_normal_rate;
++
++ return 0;
++}
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/busfreq-imx6.c linux-3.14.40/arch/arm/mach-imx/busfreq-imx6.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/busfreq-imx6.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/busfreq-imx6.c 2015-05-01 14:57:57.799427001 -0500
+@@ -0,0 +1,952 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file busfreq-imx6.c
++ *
++ * @brief A common API for the Freescale Semiconductor iMX6 Busfreq API
++ *
++ * The APIs are for setting bus frequency to different values based on the
++ * highest frequency requested.
++ *
++ * @ingroup PM
++ */
++
++#include <asm/cacheflush.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/reboot.h>
++#include <linux/regulator/consumer.h>
++#include <linux/sched.h>
++#include <linux/suspend.h>
++#include "hardware.h"
++
++#define LPAPM_CLK 24000000
++#define DDR3_AUDIO_CLK 50000000
++#define LPDDR2_AUDIO_CLK 100000000
++
++int high_bus_freq_mode;
++int med_bus_freq_mode;
++int audio_bus_freq_mode;
++int low_bus_freq_mode;
++int ultra_low_bus_freq_mode;
++unsigned int ddr_med_rate;
++unsigned int ddr_normal_rate;
++
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++static int bus_freq_scaling_initialized;
++static struct device *busfreq_dev;
++static int busfreq_suspended;
++static u32 org_arm_rate;
++static int bus_freq_scaling_is_active;
++static int high_bus_count, med_bus_count, audio_bus_count, low_bus_count;
++static unsigned int ddr_low_rate;
++
++extern int init_mmdc_lpddr2_settings(struct platform_device *dev);
++extern int init_mmdc_ddr3_settings(struct platform_device *dev);
++extern int update_ddr_freq(int ddr_rate);
++extern int update_lpddr2_freq(int ddr_rate);
++
++DEFINE_MUTEX(bus_freq_mutex);
++static DEFINE_SPINLOCK(freq_lock);
++
++static struct clk *pll2_400;
++static struct clk *periph_clk;
++static struct clk *periph_pre_clk;
++static struct clk *periph_clk2_sel;
++static struct clk *periph_clk2;
++static struct clk *osc_clk;
++static struct clk *cpu_clk;
++static struct clk *pll3;
++static struct clk *pll2;
++static struct clk *pll2_200;
++static struct clk *pll1_sys;
++static struct clk *periph2_clk;
++static struct clk *ocram_clk;
++static struct clk *ahb_clk;
++static struct clk *pll1_sw_clk;
++static struct clk *periph2_pre_clk;
++static struct clk *periph2_clk2_sel;
++static struct clk *periph2_clk2;
++static struct clk *step_clk;
++static struct clk *axi_sel_clk;
++static struct clk *pll3_pfd1_540m;
++
++static u32 pll2_org_rate;
++static struct delayed_work low_bus_freq_handler;
++static struct delayed_work bus_freq_daemon;
++
++static void enter_lpm_imx6sl(void)
++{
++ unsigned long flags;
++
++ if (high_bus_freq_mode) {
++ pll2_org_rate = clk_get_rate(pll2);
++ /* Set periph_clk to be sourced from OSC_CLK */
++ clk_set_parent(periph_clk2_sel, osc_clk);
++ clk_set_parent(periph_clk, periph_clk2);
++ /* Ensure AHB/AXI clks are at 24MHz. */
++ clk_set_rate(ahb_clk, LPAPM_CLK);
++ clk_set_rate(ocram_clk, LPAPM_CLK);
++ }
++ if (audio_bus_count) {
++		/* Set AHB to 8MHz to lower power. */
++ clk_set_rate(ahb_clk, LPAPM_CLK / 3);
++
++ /* Set up DDR to 100MHz. */
++ spin_lock_irqsave(&freq_lock, flags);
++ update_lpddr2_freq(LPDDR2_AUDIO_CLK);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /* Fix the clock tree in kernel */
++ clk_set_rate(pll2, pll2_org_rate);
++ clk_set_parent(periph2_pre_clk, pll2_200);
++ clk_set_parent(periph2_clk, periph2_pre_clk);
++
++ if (low_bus_freq_mode || ultra_low_bus_freq_mode) {
++ /*
++			 * Switch ARM to run off PLL2_PFD2_400MHz
++ * since DDR is anyway at 100MHz.
++ */
++ clk_set_parent(step_clk, pll2_400);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ /*
++ * Ensure that the clock will be
++ * at original speed.
++ */
++ clk_set_rate(cpu_clk, org_arm_rate);
++ }
++ low_bus_freq_mode = 0;
++ ultra_low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 1;
++ } else {
++ u32 arm_div, pll1_rate;
++ org_arm_rate = clk_get_rate(cpu_clk);
++ if (low_bus_freq_mode && low_bus_count == 0) {
++ /*
++ * We are already in DDR @ 24MHz state, but
++ * no one but ARM needs the DDR. In this case,
++ * we can lower the DDR freq to 1MHz when ARM
++ * enters WFI in this state. Keep track of this state.
++ */
++ ultra_low_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++ } else {
++ if (!ultra_low_bus_freq_mode && !low_bus_freq_mode) {
++ /*
++ * Set DDR to 24MHz.
++ * Since we are going to bypass PLL2,
++ * we need to move ARM clk off PLL2_PFD2
++ * to PLL1. Make sure the PLL1 is running
++ * at the lowest possible freq.
++ */
++ clk_set_rate(pll1_sys,
++ clk_round_rate(pll1_sys, org_arm_rate));
++ pll1_rate = clk_get_rate(pll1_sys);
++ arm_div = pll1_rate / org_arm_rate + 1;
++ /*
++ * Ensure ARM CLK is lower before
++ * changing the parent.
++ */
++ clk_set_rate(cpu_clk, org_arm_rate / arm_div);
++ /* Now set the ARM clk parent to PLL1_SYS. */
++ clk_set_parent(pll1_sw_clk, pll1_sys);
++
++ /*
++ * Set STEP_CLK back to OSC to save power and
++				 * also to maintain the parent. The WFI iram code
++ * will switch step_clk to osc, but the clock API
++ * is not aware of the change and when a new request
++ * to change the step_clk parent to pll2_pfd2_400M
++ * is requested sometime later, the change is ignored.
++ */
++ clk_set_parent(step_clk, osc_clk);
++ /* Now set DDR to 24MHz. */
++ spin_lock_irqsave(&freq_lock, flags);
++ update_lpddr2_freq(LPAPM_CLK);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /*
++ * Fix the clock tree in kernel.
++ * Make sure PLL2 rate is updated as it gets
++ * bypassed in the DDR freq change code.
++ */
++ clk_set_rate(pll2, LPAPM_CLK);
++ clk_set_parent(periph2_clk2_sel, pll2);
++ clk_set_parent(periph2_clk, periph2_clk2_sel);
++
++ }
++ if (low_bus_count == 0) {
++ ultra_low_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ } else {
++ ultra_low_bus_freq_mode = 0;
++ low_bus_freq_mode = 1;
++ }
++ audio_bus_freq_mode = 0;
++ }
++ }
++}
++
++static void exit_lpm_imx6sl(void)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&freq_lock, flags);
++ /* Change DDR freq in IRAM. */
++ update_lpddr2_freq(ddr_normal_rate);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /*
++ * Fix the clock tree in kernel.
++ * Make sure PLL2 rate is updated as it gets
++ * un-bypassed in the DDR freq change code.
++ */
++ clk_set_rate(pll2, pll2_org_rate);
++ clk_set_parent(periph2_pre_clk, pll2_400);
++ clk_set_parent(periph2_clk, periph2_pre_clk);
++
++ /* Ensure that periph_clk is sourced from PLL2_400. */
++ clk_set_parent(periph_pre_clk, pll2_400);
++ /*
++	 * Before switching the periph_clk, ensure that the
++ * AHB/AXI will not be too fast.
++ */
++ clk_set_rate(ahb_clk, LPAPM_CLK / 3);
++ clk_set_rate(ocram_clk, LPAPM_CLK / 2);
++ clk_set_parent(periph_clk, periph_pre_clk);
++
++ if (low_bus_freq_mode || ultra_low_bus_freq_mode) {
++ /* Move ARM from PLL1_SW_CLK to PLL2_400. */
++ clk_set_parent(step_clk, pll2_400);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ clk_set_rate(cpu_clk, org_arm_rate);
++ ultra_low_bus_freq_mode = 0;
++ }
++}
++
++int reduce_bus_freq(void)
++{
++ int ret = 0;
++ clk_prepare_enable(pll3);
++ if (cpu_is_imx6sl())
++ enter_lpm_imx6sl();
++ else {
++ if (cpu_is_imx6dl() && (clk_get_parent(axi_sel_clk)
++ != periph_clk))
++ /* Set axi to periph_clk */
++ clk_set_parent(axi_sel_clk, periph_clk);
++
++ if (audio_bus_count) {
++ /* Need to ensure that PLL2_PFD_400M is kept ON. */
++ clk_prepare_enable(pll2_400);
++ update_ddr_freq(DDR3_AUDIO_CLK);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, pll2_200);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ audio_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ } else {
++ update_ddr_freq(LPAPM_CLK);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, osc_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ /* Set periph_clk parent to OSC via periph_clk2_sel */
++ ret = clk_set_parent(periph_clk, periph_clk2);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ if (audio_bus_freq_mode)
++ clk_disable_unprepare(pll2_400);
++ low_bus_freq_mode = 1;
++ audio_bus_freq_mode = 0;
++ }
++ }
++ clk_disable_unprepare(pll3);
++
++ med_bus_freq_mode = 0;
++ high_bus_freq_mode = 0;
++
++ if (audio_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to audio mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++ if (low_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to low mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++
++ return ret;
++}
++
++static void reduce_bus_freq_handler(struct work_struct *work)
++{
++ mutex_lock(&bus_freq_mutex);
++
++ reduce_bus_freq();
++
++ mutex_unlock(&bus_freq_mutex);
++}
++
++/*
++ * Set the DDR, AHB to 24MHz.
++ * This mode will be activated only when none of the modules that
++ * need a higher DDR or AHB frequency are active.
++ */
++int set_low_bus_freq(void)
++{
++ if (busfreq_suspended)
++ return 0;
++
++ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
++ return 0;
++
++ /*
++	 * Check to see if we need to go from
++ * low bus freq mode to audio bus freq mode.
++ * If so, the change needs to be done immediately.
++ */
++ if (audio_bus_count && (low_bus_freq_mode || ultra_low_bus_freq_mode))
++ reduce_bus_freq();
++ else
++ /*
++ * Don't lower the frequency immediately. Instead
++		 * schedule delayed work and drop the freq if
++ * the conditions still remain the same.
++ */
++ schedule_delayed_work(&low_bus_freq_handler,
++ usecs_to_jiffies(3000000));
++ return 0;
++}
++
++/*
++ * Set the DDR to either 528MHz or 400MHz for iMX6qd
++ * or 400MHz for iMX6dl.
++ */
++int set_high_bus_freq(int high_bus_freq)
++{
++ int ret = 0;
++ struct clk *periph_clk_parent;
++
++ if (bus_freq_scaling_initialized && bus_freq_scaling_is_active)
++ cancel_delayed_work_sync(&low_bus_freq_handler);
++
++ if (busfreq_suspended)
++ return 0;
++
++ if (cpu_is_imx6q())
++ periph_clk_parent = pll2;
++ else
++ periph_clk_parent = pll2_400;
++
++ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
++ return 0;
++
++ if (high_bus_freq_mode)
++ return 0;
++
++ /* medium bus freq is only supported for MX6DQ */
++ if (med_bus_freq_mode && !high_bus_freq)
++ return 0;
++
++ clk_prepare_enable(pll3);
++ if (cpu_is_imx6sl())
++ exit_lpm_imx6sl();
++ else {
++ if (high_bus_freq) {
++ update_ddr_freq(ddr_normal_rate);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, periph_clk_parent);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ if (cpu_is_imx6dl() && (clk_get_parent(axi_sel_clk)
++ != pll3_pfd1_540m))
++ /* Set axi to pll3_pfd1_540m */
++ clk_set_parent(axi_sel_clk, pll3_pfd1_540m);
++ } else {
++ update_ddr_freq(ddr_med_rate);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, pll2_400);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ }
++ if (audio_bus_freq_mode)
++ clk_disable_unprepare(pll2_400);
++ }
++
++ high_bus_freq_mode = 1;
++ med_bus_freq_mode = 0;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++
++ clk_disable_unprepare(pll3);
++
++ if (high_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to high mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++ if (med_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to med mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++
++ return 0;
++}
++#endif
++
++void request_bus_freq(enum bus_freq_mode mode)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ mutex_lock(&bus_freq_mutex);
++
++ if (mode == BUS_FREQ_HIGH)
++ high_bus_count++;
++ else if (mode == BUS_FREQ_MED)
++ med_bus_count++;
++ else if (mode == BUS_FREQ_AUDIO)
++ audio_bus_count++;
++ else if (mode == BUS_FREQ_LOW)
++ low_bus_count++;
++
++ if (busfreq_suspended || !bus_freq_scaling_initialized ||
++ !bus_freq_scaling_is_active) {
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
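++	/* A new bus frequency request cancels any pending deferred drop. */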
++ cancel_delayed_work_sync(&low_bus_freq_handler);
++
++ if (cpu_is_imx6dl()) {
++ /* No support for medium setpoint on MX6DL. */
++ if (mode == BUS_FREQ_MED) {
++ high_bus_count++;
++ mode = BUS_FREQ_HIGH;
++ }
++ }
++
++ if ((mode == BUS_FREQ_HIGH) && (!high_bus_freq_mode)) {
++ set_high_bus_freq(1);
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ if ((mode == BUS_FREQ_MED) && (!high_bus_freq_mode) &&
++ (!med_bus_freq_mode)) {
++ set_high_bus_freq(0);
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((mode == BUS_FREQ_AUDIO) && (!high_bus_freq_mode) &&
++ (!med_bus_freq_mode) && (!audio_bus_freq_mode)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ mutex_unlock(&bus_freq_mutex);
++#endif
++ return;
++}
++EXPORT_SYMBOL(request_bus_freq);
++
++void release_bus_freq(enum bus_freq_mode mode)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ mutex_lock(&bus_freq_mutex);
++
++ if (mode == BUS_FREQ_HIGH) {
++ if (high_bus_count == 0) {
++ dev_err(busfreq_dev, "high bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ high_bus_count--;
++ } else if (mode == BUS_FREQ_MED) {
++ if (med_bus_count == 0) {
++ dev_err(busfreq_dev, "med bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ med_bus_count--;
++ } else if (mode == BUS_FREQ_AUDIO) {
++ if (audio_bus_count == 0) {
++ dev_err(busfreq_dev, "audio bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ audio_bus_count--;
++ } else if (mode == BUS_FREQ_LOW) {
++ if (low_bus_count == 0) {
++ dev_err(busfreq_dev, "low bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ low_bus_count--;
++ }
++
++ if (busfreq_suspended || !bus_freq_scaling_initialized ||
++ !bus_freq_scaling_is_active) {
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ if (cpu_is_imx6dl()) {
++ /* No support for medium setpoint on MX6DL. */
++ if (mode == BUS_FREQ_MED) {
++ high_bus_count--;
++ mode = BUS_FREQ_HIGH;
++ }
++ }
++
++ if ((!audio_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count != 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0) &&
++ (low_bus_count != 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((!ultra_low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0) &&
++ (low_bus_count == 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ mutex_unlock(&bus_freq_mutex);
++#endif
++ return;
++}
++EXPORT_SYMBOL(release_bus_freq);
++
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++static void bus_freq_daemon_handler(struct work_struct *work)
++{
++ mutex_lock(&bus_freq_mutex);
++ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0))
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++}
++
++static ssize_t bus_freq_scaling_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (bus_freq_scaling_is_active)
++ return sprintf(buf, "Bus frequency scaling is enabled\n");
++ else
++ return sprintf(buf, "Bus frequency scaling is disabled\n");
++}
++
++static ssize_t bus_freq_scaling_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t size)
++{
++ if (strncmp(buf, "1", 1) == 0) {
++ bus_freq_scaling_is_active = 1;
++ set_high_bus_freq(1);
++ /*
++ * We set bus freq to highest at the beginning,
++		 * so we use this daemon thread to make sure the system
++ * can enter low bus mode if
++ * there is no high bus request pending
++ */
++ schedule_delayed_work(&bus_freq_daemon,
++ usecs_to_jiffies(5000000));
++ } else if (strncmp(buf, "0", 1) == 0) {
++ if (bus_freq_scaling_is_active)
++ set_high_bus_freq(1);
++ bus_freq_scaling_is_active = 0;
++ }
++ return size;
++}
++
++static int bus_freq_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ mutex_lock(&bus_freq_mutex);
++
++ if (event == PM_SUSPEND_PREPARE) {
++ high_bus_count++;
++ set_high_bus_freq(1);
++ busfreq_suspended = 1;
++ } else if (event == PM_POST_SUSPEND) {
++ busfreq_suspended = 0;
++ high_bus_count--;
++ schedule_delayed_work(&bus_freq_daemon,
++ usecs_to_jiffies(5000000));
++ }
++
++ mutex_unlock(&bus_freq_mutex);
++
++ return NOTIFY_OK;
++}
++
++static int busfreq_reboot_notifier_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ /* System is rebooting. Set the system into high_bus_freq_mode. */
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ return 0;
++}
++
++static struct notifier_block imx_bus_freq_pm_notifier = {
++ .notifier_call = bus_freq_pm_notify,
++};
++
++static struct notifier_block imx_busfreq_reboot_notifier = {
++ .notifier_call = busfreq_reboot_notifier_event,
++};
++
++
++static DEVICE_ATTR(enable, 0644, bus_freq_scaling_enable_show,
++ bus_freq_scaling_enable_store);
++#endif
++
++/*!
++ * This is the probe routine for the bus frequency driver.
++ *
++ * @param pdev The platform device structure
++ *
++ * @return The function returns 0 on success
++ *
++ */
++
++static int busfreq_probe(struct platform_device *pdev)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ u32 err;
++
++ busfreq_dev = &pdev->dev;
++
++ pll2_400 = devm_clk_get(&pdev->dev, "pll2_pfd2_396m");
++ if (IS_ERR(pll2_400)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_pfd2_396m\n",
++ __func__);
++ return PTR_ERR(pll2_400);
++ }
++
++ pll2_200 = devm_clk_get(&pdev->dev, "pll2_198m");
++ if (IS_ERR(pll2_200)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_198m\n",
++ __func__);
++ return PTR_ERR(pll2_200);
++ }
++
++ pll2 = devm_clk_get(&pdev->dev, "pll2_bus");
++ if (IS_ERR(pll2)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_bus\n",
++ __func__);
++ return PTR_ERR(pll2);
++ }
++
++ cpu_clk = devm_clk_get(&pdev->dev, "arm");
++ if (IS_ERR(cpu_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get cpu_clk\n",
++ __func__);
++ return PTR_ERR(cpu_clk);
++ }
++
++ pll3 = devm_clk_get(&pdev->dev, "pll3_usb_otg");
++ if (IS_ERR(pll3)) {
++ dev_err(busfreq_dev, "%s: failed to get pll3_usb_otg\n",
++ __func__);
++ return PTR_ERR(pll3);
++ }
++
++ periph_clk = devm_clk_get(&pdev->dev, "periph");
++ if (IS_ERR(periph_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph\n",
++ __func__);
++ return PTR_ERR(periph_clk);
++ }
++
++ periph_pre_clk = devm_clk_get(&pdev->dev, "periph_pre");
++ if (IS_ERR(periph_pre_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_pre\n",
++ __func__);
++ return PTR_ERR(periph_pre_clk);
++ }
++
++ periph_clk2 = devm_clk_get(&pdev->dev, "periph_clk2");
++ if (IS_ERR(periph_clk2)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_clk2\n",
++ __func__);
++ return PTR_ERR(periph_clk2);
++ }
++
++ periph_clk2_sel = devm_clk_get(&pdev->dev, "periph_clk2_sel");
++ if (IS_ERR(periph_clk2_sel)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_clk2_sel\n",
++ __func__);
++ return PTR_ERR(periph_clk2_sel);
++ }
++
++ osc_clk = devm_clk_get(&pdev->dev, "osc");
++ if (IS_ERR(osc_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get osc_clk\n",
++ __func__);
++ return PTR_ERR(osc_clk);
++ }
++
++ if (cpu_is_imx6dl()) {
++ axi_sel_clk = devm_clk_get(&pdev->dev, "axi_sel");
++ if (IS_ERR(axi_sel_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get axi_sel_clk\n",
++ __func__);
++ return PTR_ERR(axi_sel_clk);
++ }
++
++ pll3_pfd1_540m = devm_clk_get(&pdev->dev, "pll3_pfd1_540m");
++ if (IS_ERR(pll3_pfd1_540m)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get pll3_pfd1_540m\n", __func__);
++ return PTR_ERR(pll3_pfd1_540m);
++ }
++ }
++
++ if (cpu_is_imx6sl()) {
++ pll1_sys = devm_clk_get(&pdev->dev, "pll1_sys");
++ if (IS_ERR(pll1_sys)) {
++ dev_err(busfreq_dev, "%s: failed to get pll1_sys\n",
++ __func__);
++ return PTR_ERR(pll1_sys);
++ }
++
++ ahb_clk = devm_clk_get(&pdev->dev, "ahb");
++ if (IS_ERR(ahb_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get ahb_clk\n",
++ __func__);
++ return PTR_ERR(ahb_clk);
++ }
++
++ ocram_clk = devm_clk_get(&pdev->dev, "ocram");
++ if (IS_ERR(ocram_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get ocram_clk\n",
++ __func__);
++ return PTR_ERR(ocram_clk);
++ }
++
++ pll1_sw_clk = devm_clk_get(&pdev->dev, "pll1_sw");
++ if (IS_ERR(pll1_sw_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get pll1_sw_clk\n",
++ __func__);
++ return PTR_ERR(pll1_sw_clk);
++ }
++
++ periph2_clk = devm_clk_get(&pdev->dev, "periph2");
++ if (IS_ERR(periph2_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph2\n",
++ __func__);
++ return PTR_ERR(periph2_clk);
++ }
++
++ periph2_pre_clk = devm_clk_get(&pdev->dev, "periph2_pre");
++ if (IS_ERR(periph2_pre_clk)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_pre_clk\n",
++ __func__);
++ return PTR_ERR(periph2_pre_clk);
++ }
++
++ periph2_clk2 = devm_clk_get(&pdev->dev, "periph2_clk2");
++ if (IS_ERR(periph2_clk2)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_clk2\n",
++ __func__);
++ return PTR_ERR(periph2_clk2);
++ }
++
++ periph2_clk2_sel = devm_clk_get(&pdev->dev, "periph2_clk2_sel");
++ if (IS_ERR(periph2_clk2_sel)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_clk2_sel\n",
++ __func__);
++ return PTR_ERR(periph2_clk2_sel);
++ }
++
++ step_clk = devm_clk_get(&pdev->dev, "step");
++ if (IS_ERR(step_clk)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get step_clk\n",
++ __func__);
++			return PTR_ERR(step_clk);
++ }
++
++ }
++
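++	/* Expose the bus frequency scaling "enable" attribute via sysfs. */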
++ err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
++ if (err) {
++ dev_err(busfreq_dev,
++			"Unable to register sysdev entry for BUSFREQ\n");
++ return err;
++ }
++
++ if (of_property_read_u32(pdev->dev.of_node, "fsl,max_ddr_freq",
++ &ddr_normal_rate)) {
++ dev_err(busfreq_dev, "max_ddr_freq entry missing\n");
++ return -EINVAL;
++ }
++#endif
++
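++	/* Start out in high bus frequency mode; the other modes are requested at runtime. */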
++ high_bus_freq_mode = 1;
++ med_bus_freq_mode = 0;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++ ultra_low_bus_freq_mode = 0;
++
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ bus_freq_scaling_is_active = 1;
++ bus_freq_scaling_initialized = 1;
++
++ ddr_low_rate = LPAPM_CLK;
++ if (cpu_is_imx6q()) {
++ if (of_property_read_u32(pdev->dev.of_node, "fsl,med_ddr_freq",
++ &ddr_med_rate)) {
++ dev_info(busfreq_dev,
++ "DDR medium rate not supported.\n");
++ ddr_med_rate = ddr_normal_rate;
++ }
++ }
++
++ INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
++ INIT_DELAYED_WORK(&bus_freq_daemon, bus_freq_daemon_handler);
++ register_pm_notifier(&imx_bus_freq_pm_notifier);
++ register_reboot_notifier(&imx_busfreq_reboot_notifier);
++
++ if (cpu_is_imx6sl())
++ err = init_mmdc_lpddr2_settings(pdev);
++ else
++ err = init_mmdc_ddr3_settings(pdev);
++ if (err) {
++ dev_err(busfreq_dev, "Busfreq init of MMDC failed\n");
++ return err;
++ }
++#endif
++ return 0;
++}
++
++static const struct of_device_id imx6_busfreq_ids[] = {
++ { .compatible = "fsl,imx6_busfreq", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver busfreq_driver = {
++ .driver = {
++ .name = "imx6_busfreq",
++ .owner = THIS_MODULE,
++ .of_match_table = imx6_busfreq_ids,
++ },
++ .probe = busfreq_probe,
++};
++
++/*!
++ * Initialise the busfreq_driver.
++ *
++ * @return 0 on success, or -ENODEV if the platform driver fails to register.
++ */
++
++static int __init busfreq_init(void)
++{
++ if (platform_driver_register(&busfreq_driver) != 0)
++ return -ENODEV;
++
++ printk(KERN_INFO "Bus freq driver module loaded\n");
++
++ return 0;
++}
++
++static void __exit busfreq_cleanup(void)
++{
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ sysfs_remove_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
++
++ bus_freq_scaling_initialized = 0;
++#endif
++ /* Unregister the device structure */
++ platform_driver_unregister(&busfreq_driver);
++}
++
++module_init(busfreq_init);
++module_exit(busfreq_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("BusFreq driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/busfreq_lpddr2.c linux-3.14.40/arch/arm/mach-imx/busfreq_lpddr2.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/busfreq_lpddr2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/busfreq_lpddr2.c 2015-05-01 14:57:57.799427001 -0500
+@@ -0,0 +1,183 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file busfreq_lpddr2.c
++ *
++ * @brief iMX6 LPDDR2 frequency change specific file.
++ *
++ * @ingroup PM
++ */
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/irqchip/arm-gic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++
++#include "hardware.h"
++
++/* DDR settings */
++static void __iomem *mmdc_base;
++static void __iomem *anatop_base;
++static void __iomem *ccm_base;
++static void __iomem *l2_base;
++static struct device *busfreq_dev;
++static void *ddr_freq_change_iram_base;
++static int curr_ddr_rate;
++
++unsigned long reg_addrs[4];
++
++void (*mx6_change_lpddr2_freq)(u32 ddr_freq, int bus_freq_mode,
++ void *iram_addr) = NULL;
++
++extern unsigned int ddr_normal_rate;
++extern int low_bus_freq_mode;
++extern int ultra_low_bus_freq_mode;
++extern void mx6_lpddr2_freq_change(u32 freq, int bus_freq_mode,
++ void *iram_addr);
++
++
++#define LPDDR2_FREQ_CHANGE_SIZE 0x1000
++
++
++/* change the DDR frequency. */
++int update_lpddr2_freq(int ddr_rate)
++{
++ if (ddr_rate == curr_ddr_rate)
++ return 0;
++
++ dev_dbg(busfreq_dev, "\nBus freq set to %d start...\n", ddr_rate);
++
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /* Now change DDR frequency. */
++ mx6_change_lpddr2_freq(ddr_rate,
++ (low_bus_freq_mode | ultra_low_bus_freq_mode),
++ reg_addrs);
++
++ curr_ddr_rate = ddr_rate;
++
++ dev_dbg(busfreq_dev, "\nBus freq set to %d done...\n", ddr_rate);
++
++ return 0;
++}
++
++int init_mmdc_lpddr2_settings(struct platform_device *busfreq_pdev)
++{
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ busfreq_dev = &busfreq_pdev->dev;
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-mmdc");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-mmdc device tree data!\n");
++ return -EINVAL;
++ }
++ mmdc_base = of_iomap(node, 0);
++ WARN(!mmdc_base, "unable to map mmdc registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-ccm");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-ccm device tree data!\n");
++ return -EINVAL;
++ }
++ ccm_base = of_iomap(node, 0);
++ WARN(!ccm_base, "unable to map ccm registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ l2_base = of_iomap(node, 0);
++ WARN(!l2_base, "unable to map PL310 registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
++ if (!node) {
++		printk(KERN_ERR "failed to find imx6sl-anatop device tree data!\n");
++ return -EINVAL;
++ }
++ anatop_base = of_iomap(node, 0);
++ WARN(!anatop_base, "unable to map anatop registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ dev_err(busfreq_dev, "%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ dev_err(busfreq_dev, "failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ dev_err(busfreq_dev, "iram pool unavailable!\n");
++ return -EINVAL;
++ }
++
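++	/* Register bases handed to the IRAM frequency-change routine. */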
++ reg_addrs[0] = (unsigned long)anatop_base;
++ reg_addrs[1] = (unsigned long)ccm_base;
++ reg_addrs[2] = (unsigned long)mmdc_base;
++ reg_addrs[3] = (unsigned long)l2_base;
++
++ ddr_freq_change_iram_base = (void *)gen_pool_alloc(iram_pool,
++ LPDDR2_FREQ_CHANGE_SIZE);
++ if (!ddr_freq_change_iram_base) {
++ dev_err(busfreq_dev,
++ "Cannot alloc iram for ddr freq change code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)ddr_freq_change_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
++ LPDDR2_FREQ_CHANGE_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ mx6_change_lpddr2_freq = (void *)fncpy(ddr_freq_change_iram_base,
++ &mx6_lpddr2_freq_change, LPDDR2_FREQ_CHANGE_SIZE);
++
++ curr_ddr_rate = ddr_normal_rate;
++
++ return 0;
++}
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/clk.h linux-3.14.40/arch/arm/mach-imx/clk.h
+--- linux-3.14.40.orig/arch/arm/mach-imx/clk.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/clk.h 2015-05-01 14:57:57.799427001 -0500
+@@ -23,7 +23,8 @@
+ };
+
+ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+- const char *parent_name, void __iomem *base, u32 div_mask);
++ const char *parent_name, void __iomem *base,
++ u32 div_mask, bool always_on);
+
+ struct clk *clk_register_gate2(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/clk-imx6q.c linux-3.14.40/arch/arm/mach-imx/clk-imx6q.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/clk-imx6q.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/clk-imx6q.c 2015-05-01 14:57:57.807427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2011-2013 Freescale Semiconductor, Inc.
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -24,6 +24,8 @@
+ #include "common.h"
+ #include "hardware.h"
+
++#define CCM_CCGR_OFFSET(index) (index * 2)
++
+ static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+ static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+ static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+@@ -39,6 +41,8 @@
+ static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
+ static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+ static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
++static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
++static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+ static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+ static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+ static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+@@ -72,6 +76,10 @@
+ "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
+ "pcie_ref", "sata_ref",
+ };
++static const char *pll_av_sels[] = { "osc", "lvds1_in", "lvds2_in", "dummy", };
++static void __iomem *anatop_base;
++static void __iomem *ccm_base;
++
+
+ enum mx6q_clks {
+ dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
+@@ -88,11 +96,11 @@
+ periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
+ asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
+ gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
+- ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
+- ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
+- ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
+- usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
+- emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
++ ldb_di0_podf_unused, ldb_di1_podf_unused, ipu1_di0_pre, ipu1_di1_pre,
++ ipu2_di0_pre, ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf,
++ ssi2_pred, ssi2_podf, ssi3_pred, ssi3_podf, uart_serial_podf,
++ usdhc1_podf, usdhc2_podf, usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf,
++ emi_podf, emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
+ mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
+ can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
+ esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
+@@ -107,7 +115,10 @@
+ sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref, usbphy1_gate,
+ usbphy2_gate, pll4_post_div, pll5_post_div, pll5_video_div, eim_slow,
+ spdif, cko2_sel, cko2_podf, cko2, cko, vdoa, pll4_audio_div,
+- lvds1_sel, lvds2_sel, lvds1_gate, lvds2_gate, clk_max
++ lvds1_sel, lvds2_sel, lvds1_gate, lvds2_gate, gpt_3m, video_27m,
++ ldb_di0_div_7, ldb_di1_div_7, ldb_di0_div_sel, ldb_di1_div_sel,
++ caam_mem, caam_aclk, caam_ipg, epit1, epit2, tzasc2, lvds1_in, lvds1_out,
++ pll4_sel, lvds2_in, lvds2_out, anaclk1, anaclk2, clk_max
+ };
+
+ static struct clk *clk[clk_max];
+@@ -140,20 +151,131 @@
+ { /* sentinel */ }
+ };
+
++static void init_ldb_clks(enum mx6q_clks new_parent)
++{
++ u32 reg;
++
++ /*
++ * Need to follow a strict procedure when changing the LDB
++ * clock, else we can introduce a glitch. Things to keep in
++ * mind:
++ * 1. The current and new parent clocks must be disabled.
++	 * 2. The default parent of the ldb_di0/ldb_di1 clocks is mmdc_ch1, which has
++ * no CG bit.
++ * 3. In the RTL implementation of the LDB_DI_CLK_SEL mux
++ * the top four options are in one mux and the PLL3 option along
++	 *    with another option is in the second mux. There is a third mux
++ * used to decide between the first and second mux.
++ * The code below switches the parent to the bottom mux first
++ * and then manipulates the top mux. This ensures that no glitch
++ * will enter the divider.
++ *
++ * Need to disable MMDC_CH1 clock manually as there is no CG bit
++ * for this clock. The only way to disable this clock is to move
++	 * it to pll3_sw_clk and then to disable pll3_sw_clk.
++ * Make sure periph2_clk2_sel is set to pll3_sw_clk
++ */
++ reg = readl_relaxed(ccm_base + 0x18);
++ reg &= ~(1 << 20);
++ writel_relaxed(reg, ccm_base + 0x18);
++
++ /*
++ * Set MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg |= 1 << 16;
++ writel_relaxed(reg, ccm_base + 0x4);
++
++ /*
++ * Set the periph2_clk_sel to the top mux so that
++ * mmdc_ch1 is from pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg |= 1 << 26;
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Disable pll3_sw_clk by selecting the bypass clock source.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg |= 1 << 0;
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 111b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg |= ((7 << 9) | (7 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 100b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg &= ~((7 << 9) | (7 << 12));
++ reg |= ((4 << 9) | (4 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Perform the LDB parent clock switch.
++ */
++ clk_set_parent(clk[ldb_di0_sel], clk[new_parent]);
++ clk_set_parent(clk[ldb_di1_sel], clk[new_parent]);
++
++ /*
++ * Unbypass pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg &= ~(1 << 0);
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the periph2_clk_sel back to the bottom mux so that
++ * mmdc_ch1 is from its original parent.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg &= ~(1 << 26);
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Clear MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg &= ~(1 << 16);
++ writel_relaxed(reg, ccm_base + 0x4);
++
++}
++
+ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ {
+ struct device_node *np;
+ void __iomem *base;
+ int i, irq;
+ int ret;
++ u32 reg;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[ckil] = imx_obtain_fixed_clock("ckil", 0);
+ clk[ckih] = imx_obtain_fixed_clock("ckih1", 0);
+ clk[osc] = imx_obtain_fixed_clock("osc", 0);
++ /* Clock source from external clock via ANACLK1/2 PADs */
++ clk[anaclk1] = imx_obtain_fixed_clock("anaclk1", 0);
++ clk[anaclk2] = imx_obtain_fixed_clock("anaclk2", 0);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+- base = of_iomap(np, 0);
++ anatop_base = base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
+@@ -165,13 +287,18 @@
+ }
+
+ /* type name parent_name base div_mask */
+- clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f);
+- clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1);
+- clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3);
+- clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f);
+- clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f);
+- clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3);
+- clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host","osc", base + 0x20, 0x3);
++ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f, false);
++ clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1, false);
++ clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3, false);
++ clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "pll4_sel", base + 0x70, 0x7f, false);
++ clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f, false);
++ clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3, false);
++ clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3, false);
++
++ /* name reg shift width parent_names num_parents */
++ clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
++ clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
++ clk[pll4_sel] = imx_clk_mux("pll4_sel", base + 0x70, 14, 2, pll_av_sels, ARRAY_SIZE(pll_av_sels));
+
+ /*
+ * Bit 20 is the reserved and read-only bit, we do this only for:
+@@ -191,6 +318,11 @@
+
+ clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
+ clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
++	/* NOTICE: the lvds1/2 in/out gates are used to select the clock direction */
++ clk[lvds1_in] = imx_clk_gate("lvds1_in", "anaclk1", base + 0x160, 12);
++ clk[lvds2_in] = imx_clk_gate("lvds2_in", "anaclk2", base + 0x160, 13);
++ clk[lvds1_out] = imx_clk_gate("lvds1_out", "lvds1_sel", base + 0x160, 10);
++ clk[lvds2_out] = imx_clk_gate("lvds2_out", "lvds2_sel", base + 0x160, 11);
+
+ clk[sata_ref_100m] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20);
+ clk[pcie_ref_125m] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19);
+@@ -199,18 +331,6 @@
+ base + 0xe0, 0, 2, 0, clk_enet_ref_table,
+ &imx_ccm_lock);
+
+- clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+- clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
+-
+- /*
+- * lvds1_gate and lvds2_gate are pseudo-gates. Both can be
+- * independently configured as clock inputs or outputs. We treat
+- * the "output_enable" bit as a gate, even though it's really just
+- * enabling clock output.
+- */
+- clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10);
+- clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11);
+-
+ /* name parent_name reg idx */
+ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+@@ -226,6 +346,8 @@
+ clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2);
++ clk[gpt_3m] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
++ clk[video_27m] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
+
+ clk[pll4_post_div] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clk[pll4_audio_div] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+@@ -233,7 +355,7 @@
+ clk[pll5_video_div] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+ np = ccm_node;
+- base = of_iomap(np, 0);
++ ccm_base = base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ imx6q_pm_set_ccm_base(base);
+@@ -258,14 +380,16 @@
+ clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+ clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+- clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+- clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels));
+- clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels));
+- clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels));
+- clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels));
++ clk[ldb_di0_div_sel] = imx_clk_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT);
++ clk[ldb_di1_div_sel] = imx_clk_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di0_pre_sel] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di1_pre_sel] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di0_pre_sel] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di1_pre_sel] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di0_sel] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
++ clk[ipu1_di1_sel] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di0_sel] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
++ clk[ipu2_di1_sel] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
+ clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
+ clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
+ clk[ssi1_sel] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+@@ -307,9 +431,9 @@
+ clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
+ clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+- clk[ldb_di0_podf] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
++ clk[ldb_di0_div_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+- clk[ldb_di1_podf] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0);
++ clk[ldb_di1_div_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
+ clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
+ clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
+ clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
+@@ -344,6 +468,9 @@
+ /* name parent_name reg shift */
+ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
+ clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6);
++ clk[caam_mem] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8);
++ clk[caam_aclk] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
++ clk[caam_ipg] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
+ clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
+ clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
+ clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
+@@ -354,6 +481,8 @@
+ clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
+ clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8);
+ clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
++ clk[epit1] = imx_clk_gate2("epit1", "ipg", base + 0x6c, 12);
++ clk[epit2] = imx_clk_gate2("epit2", "ipg", base + 0x6c, 14);
+ clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16);
+ clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
+ clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
+@@ -373,15 +502,16 @@
+ clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
+ clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12);
+ clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14);
++ clk[tzasc2] = imx_clk_gate2("tzasc2", "mmdc_ch0_axi_podf", base + 0x70, 24);
+ clk[vdoa] = imx_clk_gate2("vdoa", "vdo_axi", base + 0x70, 26);
+ clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0);
+ clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2);
+ clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
+ clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
+ clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
+- clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+- clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
++ clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
++ clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_div_sel", base + 0x74, 14);
+ clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
+ if (cpu_is_imx6dl())
+ /*
+@@ -413,6 +543,9 @@
+ clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);
+ clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20);
+ clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22);
++ clk[ssi1] = imx_clk_gate2("ssi1", "ssi1_podf", base + 0x7c, 18);
++ clk[ssi2] = imx_clk_gate2("ssi2", "ssi2_podf", base + 0x7c, 20);
++ clk[ssi3] = imx_clk_gate2("ssi3", "ssi3_podf", base + 0x7c, 22);
+ clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24);
+ clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26);
+ clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+@@ -431,25 +564,79 @@
+ pr_err("i.MX6q clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
++ /* Initialize clock gate status */
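++	/* Most gates are cleared here; only the clocks needed early in boot are left enabled. */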
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(1) |
++ 3 << CCM_CCGR_OFFSET(0), base + 0x68);
++ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0)
++ writel_relaxed(3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ else
++ writel_relaxed(3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(12) |
++ 3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10) |
++ 3 << CCM_CCGR_OFFSET(9) |
++ 3 << CCM_CCGR_OFFSET(8), base + 0x70);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(14) |
++ 1 << CCM_CCGR_OFFSET(13) |
++ 3 << CCM_CCGR_OFFSET(12) |
++ 1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x74);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(7) |
++ 3 << CCM_CCGR_OFFSET(6) |
++ 3 << CCM_CCGR_OFFSET(4), base + 0x78);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(0), base + 0x7c);
++ writel_relaxed(0, base + 0x80);
++
++ /* Make sure PFDs are disabled at boot. */
++ reg = readl_relaxed(anatop_base + 0x100);
++ /* Cannot disable pll2_pfd2_396M, as it is the MMDC clock in iMX6DL */
++ if (cpu_is_imx6dl())
++ reg |= 0x80008080;
++ else
++ reg |= 0x80808080;
++ writel_relaxed(reg, anatop_base + 0x100);
++
++ /* Disable PLL3 PFDs. */
++ reg = readl_relaxed(anatop_base + 0xF0);
++ reg |= 0x80808080;
++ writel_relaxed(reg, anatop_base + 0xF0);
++
++	/* Make sure PLL5 (the video PLL) is disabled */
++ reg = readl_relaxed(anatop_base + 0xA0);
++ reg &= ~(1 << 13);
++ writel_relaxed(reg, anatop_base + 0xA0);
++
+ clk_data.clks = clk;
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
++ clk_register_clkdev(clk[gpt_3m], "gpt_3m", "imx-gpt.0");
+ clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
+ clk_register_clkdev(clk[ahb], "ahb", NULL);
+ clk_register_clkdev(clk[cko1], "cko1", NULL);
+ clk_register_clkdev(clk[arm], NULL, "cpu0");
+- clk_register_clkdev(clk[pll4_post_div], "pll4_post_div", NULL);
+- clk_register_clkdev(clk[pll4_audio], "pll4_audio", NULL);
++ clk_register_clkdev(clk[pll4_audio_div], "pll4_audio_div", NULL);
++ clk_register_clkdev(clk[pll4_sel], "pll4_sel", NULL);
++ clk_register_clkdev(clk[lvds2_in], "lvds2_in", NULL);
++ clk_register_clkdev(clk[esai], "esai", NULL);
+
+- if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) ||
+- cpu_is_imx6dl()) {
+- clk_set_parent(clk[ldb_di0_sel], clk[pll5_video_div]);
+- clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]);
++ if (cpu_is_imx6dl()) {
++ clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
+ }
+
++ clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
++ clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
++ clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
++ clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
++
+ /*
+ * The gpmi needs 100MHz frequency in the EDO/Sync mode,
+ * We can not get the 100MHz from the pll2_pfd0_352m.
+@@ -457,6 +644,19 @@
+ */
+ clk_set_parent(clk[enfc_sel], clk[pll2_pfd2_396m]);
+
++ /* Set the parent clks of PCIe lvds1 and pcie_axi to be sata ref, axi */
++ if (clk_set_parent(clk[lvds1_sel], clk[sata_ref]))
++ pr_err("Failed to set PCIe bus parent clk.\n");
++ if (clk_set_parent(clk[pcie_axi_sel], clk[axi]))
++ pr_err("Failed to set PCIe parent clk.\n");
++
++	/* GPU clock initialization */
++ clk_set_parent(clk[gpu3d_shader_sel], clk[pll2_pfd1_594m]);
++ clk_set_rate(clk[gpu3d_shader], 594000000);
++ clk_set_parent(clk[gpu3d_core_sel], clk[mmdc_ch0_axi]);
++ clk_set_rate(clk[gpu3d_core], 528000000);
++ clk_set_parent(clk[gpu2d_core_sel], clk[pll3_usb_otg]);
++
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clk[clks_init_on[i]]);
+
+@@ -465,6 +665,25 @@
+ clk_prepare_enable(clk[usbphy2_gate]);
+ }
+
++ /* ipu clock initialization */
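++	/*
++	 * Re-parent the LDB DI clocks to pll2_pfd0_352m using the
++	 * glitch-free switch implemented in init_ldb_clks() above.
++	 */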
++ init_ldb_clks(pll2_pfd0_352m);
++ clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
++ clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
++ clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
++ clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
++ clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
++ if (cpu_is_imx6dl()) {
++ clk_set_rate(clk[pll3_pfd1_540m], 540000000);
++ clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
++ clk_set_parent(clk[axi_sel], clk[pll3_pfd1_540m]);
++ } else if (cpu_is_imx6q()) {
++ clk_set_parent(clk[ipu1_sel], clk[mmdc_ch0_axi]);
++ clk_set_parent(clk[ipu2_sel], clk[mmdc_ch0_axi]);
++ }
++
+ /*
+ * Let's initially set up CLKO with OSC24M, since this configuration
+ * is widely used by imx6q board designs to clock audio codec.
+@@ -482,6 +701,18 @@
+ if (IS_ENABLED(CONFIG_PCI_IMX6))
+ clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+
++ /* Audio clocks */
++ clk_set_parent(clk[ssi1_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[ssi2_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[ssi3_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[esai_sel], clk[pll4_audio_div]);
++ clk_set_parent(clk[spdif_sel], clk[pll3_pfd3_454m]);
++ clk_set_parent(clk[asrc_sel], clk[pll3_usb_otg]);
++ clk_set_rate(clk[asrc_sel], 7500000);
++
++ /* Set pll4_audio to a value that can derive 5K-88.2KHz and 8K-96KHz */
++ clk_set_rate(clk[pll4_audio_div], 541900800);
++
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/clk-imx6sl.c linux-3.14.40/arch/arm/mach-imx/clk-imx6sl.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/clk-imx6sl.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/clk-imx6sl.c 2015-05-01 14:57:57.819427001 -0500
+@@ -7,9 +7,29 @@
+ *
+ */
+
++#define CCM_CCDR_OFFSET 0x4
++#define ANATOP_PLL_USB1 0x10
++#define ANATOP_PLL_USB2 0x20
++#define ANATOP_PLL_ENET 0xE0
++#define ANATOP_PLL_BYPASS_OFFSET (1 << 16)
++#define ANATOP_PLL_ENABLE_OFFSET (1 << 13)
++#define ANATOP_PLL_POWER_OFFSET (1 << 12)
++#define ANATOP_PFD_480n_OFFSET 0xf0
++#define ANATOP_PFD_528n_OFFSET 0x100
++#define PFD0_CLKGATE (1 << 7)
++#define PFD1_CLK_GATE (1 << 15)
++#define PFD2_CLK_GATE (1 << 23)
++#define PFD3_CLK_GATE (1 << 31)
++#define CCDR_CH0_HS_BYP 17
++#define OSC_RATE 24000000
++
++#define CCM_CCGR_OFFSET(index) (index * 2)
++
+ #include <linux/clk.h>
+ #include <linux/clkdev.h>
+ #include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+@@ -18,6 +38,7 @@
+ #include "clk.h"
+ #include "common.h"
+
++static bool uart_from_osc;
+ static const char const *step_sels[] = { "osc", "pll2_pfd2", };
+ static const char const *pll1_sw_sels[] = { "pll1_sys", "step", };
+ static const char const *ocram_alt_sels[] = { "pll2_pfd2", "pll3_pfd1", };
+@@ -25,8 +46,8 @@
+ static const char const *pre_periph_sels[] = { "pll2_bus", "pll2_pfd2", "pll2_pfd0", "pll2_198m", };
+ static const char const *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", };
+ static const char const *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
+-static const char const *periph_sels[] = { "pre_periph_sel", "periph_clk2_podf", };
+-static const char const *periph2_sels[] = { "pre_periph2_sel", "periph2_clk2_podf", };
++static const char const *periph_sels[] = { "pre_periph_sel", "periph_clk2", };
++static const char const *periph2_sels[] = { "pre_periph2_sel", "periph2_clk2", };
+ static const char const *csi_lcdif_sels[] = { "mmdc", "pll2_pfd2", "pll3_120m", "pll3_pfd1", };
+ static const char const *usdhc_sels[] = { "pll2_pfd2", "pll2_pfd0", };
+ static const char const *ssi_sels[] = { "pll3_pfd2", "pll3_pfd3", "pll4_audio_div", "dummy", };
+@@ -38,7 +59,7 @@
+ static const char const *epdc_pix_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0", "pll2_pfd1", "pll3_pfd1", };
+ static const char const *audio_sels[] = { "pll4_audio_div", "pll3_pfd2", "pll3_pfd3", "pll3_usb_otg", };
+ static const char const *ecspi_sels[] = { "pll3_60m", "osc", };
+-static const char const *uart_sels[] = { "pll3_80m", "osc", };
++static const char const *uart_sels[] = { "pll3_80m", "uart_osc_4M", };
+
+ static struct clk_div_table clk_enet_ref_table[] = {
+ { .val = 0, .div = 20, },
+@@ -65,6 +86,80 @@
+
+ static struct clk *clks[IMX6SL_CLK_END];
+ static struct clk_onecell_data clk_data;
++static u32 cur_arm_podf;
++static u32 pll1_org_rate;
++
++extern int low_bus_freq_mode;
++extern int audio_bus_freq_mode;
++
++/*
++ * On MX6SL, the ARM:IPG clock ratio must be kept within 12:5 while the
++ * clocks to the ARM are gated as the SoC enters WAIT mode. This is
++ * necessary to avoid a WAIT-mode issue (an early interrupt waking up
++ * the ARM).
++ * This function sets the ARM clk to the maximum value within the 12:5 limit.
++ */
++void imx6sl_set_wait_clk(bool enter)
++{
++ u32 parent_rate;
++
++ if (enter) {
++ u32 wait_podf;
++ u32 new_parent_rate = OSC_RATE;
++ u32 ipg_rate = clk_get_rate(clks[IMX6SL_CLK_IPG]);
++ u32 max_arm_wait_clk = (12 * ipg_rate) / 5;
++ parent_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SW]);
++ cur_arm_podf = parent_rate / clk_get_rate(clks[IMX6SL_CLK_ARM]);
++ if (low_bus_freq_mode) {
++ /*
++ * IPG clk is at 12MHz at this point, we can only run
++ * ARM at a max of 28.8MHz. So we need to set ARM
++ * to run from the 24MHz OSC, as there is no way to
++ * get 28.8MHz when ARM is sourced from PLL1.
++ */
++ clk_set_parent(clks[IMX6SL_CLK_STEP],
++ clks[IMX6SL_CLK_OSC]);
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW],
++ clks[IMX6SL_CLK_STEP]);
++ } else if (audio_bus_freq_mode) {
++ /*
++ * In this mode ARM is from PLL2_PFD2 (396MHz),
++ * but IPG is at 12MHz. Need to switch ARM to run
++ * from the bypassed PLL1 clocks so that we can run
++ * ARM at 24MHz.
++ */
++ pll1_org_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SYS]);
++ /* Ensure PLL1 is at 24MHz. */
++ clk_set_rate(clks[IMX6SL_CLK_PLL1_SYS], OSC_RATE);
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW], clks[IMX6SL_CLK_PLL1_SYS]);
++ } else
++ new_parent_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SW]);
++ wait_podf = (new_parent_rate + max_arm_wait_clk - 1) /
++ max_arm_wait_clk;
++
++ clk_set_rate(clks[IMX6SL_CLK_ARM], new_parent_rate / wait_podf);
++ } else {
++ if (low_bus_freq_mode)
++ /* Move ARM back to PLL1. */
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW],
++ clks[IMX6SL_CLK_PLL1_SYS]);
++ else if (audio_bus_freq_mode) {
++ /* Move ARM back to PLL2_PFD2 via STEP_CLK. */
++ clk_set_parent(clks[IMX6SL_CLK_PLL1_SW], clks[IMX6SL_CLK_STEP]);
++ clk_set_rate(clks[IMX6SL_CLK_PLL1_SYS], pll1_org_rate);
++ }
++ parent_rate = clk_get_rate(clks[IMX6SL_CLK_PLL1_SW]);
++ clk_set_rate(clks[IMX6SL_CLK_ARM], parent_rate / cur_arm_podf);
++ }
++}
++
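++/*
++ * Booting with "uart_at_4M" on the kernel command line moves the UART
++ * clock parent from pll3_80m to the 4MHz clock derived from the 24MHz OSC.
++ */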
++static int __init setup_uart_clk(char *uart_rate)
++{
++ uart_from_osc = true;
++ return 1;
++}
++
++__setup("uart_at_4M", setup_uart_clk);
+
+ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ {
+@@ -72,6 +167,8 @@
+ void __iomem *base;
+ int irq;
+ int i;
++ int ret;
++ u32 reg;
+
+ clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+ clks[IMX6SL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0);
+@@ -82,13 +179,18 @@
+ WARN_ON(!base);
+
+ /* type name parent base div_mask */
+- clks[IMX6SL_CLK_PLL1_SYS] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f);
+- clks[IMX6SL_CLK_PLL2_BUS] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1);
+- clks[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3);
+- clks[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f);
+- clks[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f);
+- clks[IMX6SL_CLK_PLL6_ENET] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3);
+- clks[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3);
++ clks[IMX6SL_CLK_PLL1_SYS] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f, true);
++ clks[IMX6SL_CLK_PLL2_BUS] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1, true);
++ clks[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3, false);
++ clks[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f, false);
++ clks[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f, false);
++ clks[IMX6SL_CLK_PLL6_ENET] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3, false);
++ clks[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3, false);
++
++ /* Ensure the AHB clk is at 132MHz. */
++ ret = clk_set_rate(clks[IMX6SL_CLK_AHB], 132000000);
++ if (ret)
++ pr_warn("%s: failed to set AHB clock rate %d\n", __func__, ret);
+
+ /*
+ * usbphy1 and usbphy2 are implemented as dummy gates using reserve
+@@ -118,11 +220,36 @@
+ clks[IMX6SL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", base + 0xf0, 2);
+ clks[IMX6SL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", base + 0xf0, 3);
+
+- /* name parent_name mult div */
+- clks[IMX6SL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2);
+- clks[IMX6SL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
+- clks[IMX6SL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+- clks[IMX6SL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
++ /* name parent_name mult div */
++ clks[IMX6SL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2);
++ clks[IMX6SL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
++ clks[IMX6SL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
++ clks[IMX6SL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
++ clks[IMX6SL_CLK_UART_OSC_4M] = imx_clk_fixed_factor("uart_osc_4M", "osc", 1, 6);
++
++ /* Ensure all PFDs but PLL2_PFD2 are disabled. */
++ reg = readl_relaxed(base + ANATOP_PFD_480n_OFFSET);
++ reg |= (PFD0_CLKGATE | PFD1_CLK_GATE | PFD2_CLK_GATE | PFD3_CLK_GATE);
++ writel_relaxed(reg, base + ANATOP_PFD_480n_OFFSET);
++ reg = readl_relaxed(base + ANATOP_PFD_528n_OFFSET);
++ reg |= (PFD0_CLKGATE | PFD1_CLK_GATE);
++ writel_relaxed(reg, base + ANATOP_PFD_528n_OFFSET);
++
++ /* Ensure Unused PLLs are disabled. */
++ reg = readl_relaxed(base + ANATOP_PLL_USB1);
++ reg |= ANATOP_PLL_BYPASS_OFFSET;
++ reg &= ~(ANATOP_PLL_ENABLE_OFFSET | ANATOP_PLL_POWER_OFFSET);
++ writel_relaxed(reg, base + ANATOP_PLL_USB1);
++
++ reg = readl_relaxed(base + ANATOP_PLL_USB2);
++ reg |= ANATOP_PLL_BYPASS_OFFSET;
++ reg &= ~(ANATOP_PLL_ENABLE_OFFSET | ANATOP_PLL_POWER_OFFSET);
++ writel_relaxed(reg, base + ANATOP_PLL_USB2);
++
++ reg = readl_relaxed(base + ANATOP_PLL_ENET);
++ reg |= (ANATOP_PLL_BYPASS_OFFSET | ANATOP_PLL_POWER_OFFSET);
++ reg &= ~ANATOP_PLL_ENABLE_OFFSET;
++ writel_relaxed(reg, base + ANATOP_PLL_ENET);
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+@@ -158,7 +285,7 @@
+ clks[IMX6SL_CLK_EPDC_PIX_SEL] = imx_clk_mux("epdc_pix_sel", base + 0x38, 15, 3, epdc_pix_sels, ARRAY_SIZE(epdc_pix_sels));
+ clks[IMX6SL_CLK_SPDIF0_SEL] = imx_clk_mux("spdif0_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clks[IMX6SL_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
+- clks[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
++ clks[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux_flags("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clks[IMX6SL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+
+@@ -168,8 +295,8 @@
+
+ /* name parent_name reg shift width */
+ clks[IMX6SL_CLK_OCRAM_PODF] = imx_clk_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3);
+- clks[IMX6SL_CLK_PERIPH_CLK2_PODF] = imx_clk_divider("periph_clk2_podf", "periph_clk2_sel", base + 0x14, 27, 3);
+- clks[IMX6SL_CLK_PERIPH2_CLK2_PODF] = imx_clk_divider("periph2_clk2_podf", "periph2_clk2_sel", base + 0x14, 0, 3);
++ clks[IMX6SL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
++ clks[IMX6SL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6SL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clks[IMX6SL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
+ clks[IMX6SL_CLK_LCDIF_AXI_PODF] = imx_clk_divider("lcdif_axi_podf", "lcdif_axi_sel", base + 0x3c, 16, 3);
+@@ -251,6 +378,25 @@
+ pr_err("i.MX6SL clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+
++ /* Initialize clock gate status */
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(1) |
++ 3 << CCM_CCGR_OFFSET(0), base + 0x68);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(10), base + 0x6c);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10) |
++ 3 << CCM_CCGR_OFFSET(9) |
++ 3 << CCM_CCGR_OFFSET(8), base + 0x70);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(14) |
++ 3 << CCM_CCGR_OFFSET(13) |
++ 3 << CCM_CCGR_OFFSET(12) |
++ 3 << CCM_CCGR_OFFSET(11) |
++ 3 << CCM_CCGR_OFFSET(10), base + 0x74);
++ writel_relaxed(3 << CCM_CCGR_OFFSET(7) |
++ 3 << CCM_CCGR_OFFSET(4), base + 0x78);
++ writel_relaxed(1 << CCM_CCGR_OFFSET(0), base + 0x7c);
++ writel_relaxed(0, base + 0x80);
++
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+@@ -258,17 +404,58 @@
+ clk_register_clkdev(clks[IMX6SL_CLK_GPT], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clks[IMX6SL_CLK_GPT_SERIAL], "per", "imx-gpt.0");
+
++ /*
++	 * Make sure the ARM clk is enabled so that its usecount stays correct
++	 * and its parent PLLs are enabled/disabled as expected.
++ */
++ ret = clk_prepare_enable(clks[IMX6SL_CLK_ARM]);
++ if (ret)
++ pr_warn("%s: failed to enable ARM core clock %d\n",
++ __func__, ret);
++
++ /*
++	 * Make sure the MMDC clk is enabled so that its usecount stays correct
++	 * and its parent PLLs are enabled/disabled as expected.
++ */
++ ret = clk_prepare_enable(clks[IMX6SL_CLK_MMDC_ROOT]);
++ if (ret)
++ pr_warn("%s: failed to enable MMDC clock %d\n",
++ __func__, ret);
++
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+ clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]);
+ clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]);
+ }
+
++ clk_set_parent(clks[IMX6SL_CLK_GPU2D_OVG_SEL],
++ clks[IMX6SL_CLK_PLL2_BUS]);
++ clk_set_parent(clks[IMX6SL_CLK_GPU2D_SEL], clks[IMX6SL_CLK_PLL2_BUS]);
++
+ /* Audio-related clocks configuration */
+ clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
+
++ /* set extern_audio to be sourced from PLL4/audio PLL */
++ clk_set_parent(clks[IMX6SL_CLK_EXTERN_AUDIO_SEL], clks[IMX6SL_CLK_PLL4_AUDIO_DIV]);
++ /* set extern_audio to 24MHz */
++ clk_set_rate(clks[IMX6SL_CLK_PLL4_AUDIO], 24000000);
++ clk_set_rate(clks[IMX6SL_CLK_EXTERN_AUDIO], 24000000);
++
++ /* set SSI2 parent to PLL4 */
++ clk_set_parent(clks[IMX6SL_CLK_SSI2_SEL], clks[IMX6SL_CLK_PLL4_AUDIO_DIV]);
++ clk_set_rate(clks[IMX6SL_CLK_SSI2], 24000000);
++
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
++ /* Ensure that CH0 handshake is bypassed. */
++ reg = readl_relaxed(base + CCM_CCDR_OFFSET);
++ reg |= 1 << CCDR_CH0_HS_BYP;
++ writel_relaxed(reg, base + CCM_CCDR_OFFSET);
++
++ /* Set the UART parent if needed. */
++ if (uart_from_osc)
++ ret = clk_set_parent(clks[IMX6SL_CLK_UART_SEL], clks[IMX6SL_CLK_UART_OSC_4M]);
++
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/clk-pfd.c linux-3.14.40/arch/arm/mach-imx/clk-pfd.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/clk-pfd.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/clk-pfd.c 2015-05-01 14:57:57.819427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -17,6 +17,8 @@
+ #include <linux/err.h>
+ #include "clk.h"
+
++#define BYPASS_RATE 24000000
++
+ /**
+ * struct clk_pfd - IMX PFD clock
+ * @clk_hw: clock source
+@@ -62,9 +64,14 @@
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;
+
+- tmp *= 18;
+- do_div(tmp, frac);
+-
++ /*
++ * If the parent PLL is in bypass state, the PFDs
++ * are also in bypass state.
++ */
++ if (tmp != BYPASS_RATE) {
++ tmp *= 18;
++ do_div(tmp, frac);
++ }
+ return tmp;
+ }
+
+@@ -74,17 +81,22 @@
+ u64 tmp = *prate;
+ u8 frac;
+
+- tmp = tmp * 18 + rate / 2;
+- do_div(tmp, rate);
+- frac = tmp;
+- if (frac < 12)
+- frac = 12;
+- else if (frac > 35)
+- frac = 35;
+- tmp = *prate;
+- tmp *= 18;
+- do_div(tmp, frac);
+-
++ /*
++ * If the parent PLL is in bypass state, the PFDs
++ * are also in bypass state.
++ */
++ if (tmp != BYPASS_RATE) {
++ tmp = tmp * 18 + rate / 2;
++ do_div(tmp, rate);
++ frac = tmp;
++ if (frac < 12)
++ frac = 12;
++ else if (frac > 35)
++ frac = 35;
++ tmp = *prate;
++ tmp *= 18;
++ do_div(tmp, frac);
++ }
+ return tmp;
+ }
+
+@@ -95,6 +107,9 @@
+ u64 tmp = parent_rate;
+ u8 frac;
+
++ if (tmp == BYPASS_RATE)
++ return 0;
++
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/clk-pllv3.c linux-3.14.40/arch/arm/mach-imx/clk-pllv3.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/clk-pllv3.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/clk-pllv3.c 2015-05-01 14:57:57.819427001 -0500
+@@ -26,12 +26,15 @@
+ #define BM_PLL_ENABLE (0x1 << 13)
+ #define BM_PLL_BYPASS (0x1 << 16)
+ #define BM_PLL_LOCK (0x1 << 31)
++#define BYPASS_RATE 24000000
++#define BYPASS_MASK 0x10000
+
+ /**
+ * struct clk_pllv3 - IMX PLL clock version 3
+ * @clk_hw: clock source
+ * @base: base address of PLL registers
+ * @powerup_set: set POWER bit to power up the PLL
++ * @always_on : Leave the PLL powered up all the time.
+ * @div_mask: mask of divider bits
+ *
+ * IMX PLL clock version 3, found on i.MX6 series. Divider for pllv3
+@@ -41,7 +44,9 @@
+ struct clk_hw hw;
+ void __iomem *base;
+ bool powerup_set;
++ bool always_on;
+ u32 div_mask;
++ u32 rate_req;
+ };
+
+ #define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
+@@ -61,54 +66,53 @@
+ break;
+ if (time_after(jiffies, timeout))
+ break;
+- usleep_range(50, 500);
++ udelay(100);
+ } while (1);
+
+ return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT;
+ }
+
+-static int clk_pllv3_prepare(struct clk_hw *hw)
++static int clk_pllv3_power_up_down(struct clk_hw *hw, bool enable)
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+- u32 val;
+- int ret;
+-
+- val = readl_relaxed(pll->base);
+- if (pll->powerup_set)
+- val |= BM_PLL_POWER;
+- else
+- val &= ~BM_PLL_POWER;
+- writel_relaxed(val, pll->base);
+-
+- ret = clk_pllv3_wait_lock(pll);
+- if (ret)
+- return ret;
++ u32 val, ret = 0;
+
+- val = readl_relaxed(pll->base);
+- val &= ~BM_PLL_BYPASS;
+- writel_relaxed(val, pll->base);
+-
+- return 0;
+-}
++ if (enable) {
++ val = readl_relaxed(pll->base);
++ val &= ~BM_PLL_BYPASS;
++ if (pll->powerup_set)
++ val |= BM_PLL_POWER;
++ else
++ val &= ~BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++
++ ret = clk_pllv3_wait_lock(pll);
++ } else {
++ val = readl_relaxed(pll->base);
++ val |= BM_PLL_BYPASS;
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++ }
+
+-static void clk_pllv3_unprepare(struct clk_hw *hw)
+-{
+- struct clk_pllv3 *pll = to_clk_pllv3(hw);
+- u32 val;
++ if (!ret) {
++ val = readl_relaxed(pll->base);
++ val &= ~BM_PLL_BYPASS;
++ writel_relaxed(val, pll->base);
++ }
+
+- val = readl_relaxed(pll->base);
+- val |= BM_PLL_BYPASS;
+- if (pll->powerup_set)
+- val &= ~BM_PLL_POWER;
+- else
+- val |= BM_PLL_POWER;
+- writel_relaxed(val, pll->base);
++ return ret;
+ }
+
+ static int clk_pllv3_enable(struct clk_hw *hw)
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
++
++ if (pll->rate_req != BYPASS_RATE)
++ clk_pllv3_power_up_down(hw, true);
+
+ val = readl_relaxed(pll->base);
+ val |= BM_PLL_ENABLE;
+@@ -123,8 +127,12 @@
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+- val &= ~BM_PLL_ENABLE;
++ if (!pll->always_on)
++ val &= ~BM_PLL_ENABLE;
+ writel_relaxed(val, pll->base);
++
++ if (pll->rate_req != BYPASS_RATE)
++ clk_pllv3_power_up_down(hw, false);
+ }
+
+ static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
+@@ -132,8 +140,15 @@
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
++ u32 bypass = readl_relaxed(pll->base) & BYPASS_MASK;
++ u32 rate;
++
++ if (pll->rate_req == BYPASS_RATE && bypass)
++ rate = BYPASS_RATE;
++ else
++ rate = (div == 1) ? parent_rate * 22 : parent_rate * 20;
+
+- return (div == 1) ? parent_rate * 22 : parent_rate * 20;
++ return rate;
+ }
+
+ static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
+@@ -141,6 +156,10 @@
+ {
+ unsigned long parent_rate = *prate;
+
++ /* If the PLL is bypassed, its rate is 24MHz. */
++ if (rate == BYPASS_RATE)
++ return BYPASS_RATE;
++
+ return (rate >= parent_rate * 22) ? parent_rate * 22 :
+ parent_rate * 20;
+ }
+@@ -151,6 +170,22 @@
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val, div;
+
++ pll->rate_req = rate;
++ val = readl_relaxed(pll->base);
++
++ /* If the PLL is bypassed, its rate is 24MHz. */
++ if (rate == BYPASS_RATE) {
++ /* Set the bypass bit. */
++ val |= BM_PLL_BYPASS;
++ /* Power down the PLL. */
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++
++ return 0;
++ }
+ if (rate == parent_rate * 22)
+ div = 1;
+ else if (rate == parent_rate * 20)
+@@ -167,8 +202,6 @@
+ }
+
+ static const struct clk_ops clk_pllv3_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_recalc_rate,
+@@ -181,6 +214,10 @@
+ {
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
++ u32 bypass = readl_relaxed(pll->base) & BYPASS_MASK;
++
++ if (pll->rate_req == BYPASS_RATE && bypass)
++ return BYPASS_RATE;
+
+ return parent_rate * div / 2;
+ }
+@@ -193,6 +230,9 @@
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 div;
+
++ if (rate == BYPASS_RATE)
++ return BYPASS_RATE;
++
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+@@ -210,9 +250,26 @@
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 val, div;
+
+- if (rate < min_rate || rate > max_rate)
++ if (rate != BYPASS_RATE && (rate < min_rate || rate > max_rate))
+ return -EINVAL;
+
++ pll->rate_req = rate;
++ val = readl_relaxed(pll->base);
++
++ if (rate == BYPASS_RATE) {
++ /*
++ * Set the PLL in bypass mode if rate requested is
++ * BYPASS_RATE.
++ */
++ val |= BM_PLL_BYPASS;
++ /* Power down the PLL. */
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++ return 0;
++ }
+ div = rate * 2 / parent_rate;
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+@@ -223,8 +280,6 @@
+ }
+
+ static const struct clk_ops clk_pllv3_sys_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_sys_recalc_rate,
+@@ -239,6 +294,10 @@
+ u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+ u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
++ u32 bypass = readl_relaxed(pll->base) & BYPASS_MASK;
++
++ if (pll->rate_req == BYPASS_RATE && bypass)
++ return BYPASS_RATE;
+
+ return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+ }
+@@ -253,6 +312,9 @@
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
+
++ if (rate == BYPASS_RATE)
++ return BYPASS_RATE;
++
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+@@ -273,13 +335,36 @@
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+- u32 val, div;
++ u32 val, newval, div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
++ int ret;
+
+- if (rate < min_rate || rate > max_rate)
++ if (rate != BYPASS_RATE && (rate < min_rate || rate > max_rate))
+ return -EINVAL;
+
++ pll->rate_req = rate;
++ val = readl_relaxed(pll->base);
++
++ if (rate == BYPASS_RATE) {
++ /*
++ * Set the PLL in bypass mode if rate requested is
++ * BYPASS_RATE.
++ */
++ /* Bypass the PLL */
++ val |= BM_PLL_BYPASS;
++ /* Power down the PLL. */
++ if (pll->powerup_set)
++ val &= ~BM_PLL_POWER;
++ else
++ val |= BM_PLL_POWER;
++ writel_relaxed(val, pll->base);
++ return 0;
++ }
++ /* Else clear the bypass bit. */
++ val &= ~BM_PLL_BYPASS;
++ writel_relaxed(val, pll->base);
++
+ div = rate / parent_rate;
+ temp64 = (u64) (rate - div * parent_rate);
+ temp64 *= mfd;
+@@ -287,18 +372,30 @@
+ mfn = temp64;
+
+ val = readl_relaxed(pll->base);
+- val &= ~pll->div_mask;
+- val |= div;
+- writel_relaxed(val, pll->base);
++
++ /* set the PLL into bypass mode */
++ newval = val | BM_PLL_BYPASS;
++ writel_relaxed(newval, pll->base);
++
++ /* configure the new frequency */
++ newval &= ~pll->div_mask;
++ newval |= div;
++ writel_relaxed(newval, pll->base);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
++ writel(mfd, pll->base + PLL_DENOM_OFFSET);
+
+- return clk_pllv3_wait_lock(pll);
++ ret = clk_pllv3_wait_lock(pll);
++ if (ret == 0 && val & BM_PLL_POWER) {
++ /* only if it locked can we switch back to the PLL */
++ newval &= ~BM_PLL_BYPASS;
++ newval |= val & BM_PLL_BYPASS;
++ writel(newval, pll->base);
++ }
++
++ return ret;
+ }
+
+ static const struct clk_ops clk_pllv3_av_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_av_recalc_rate,
+@@ -313,8 +410,6 @@
+ }
+
+ static const struct clk_ops clk_pllv3_enet_ops = {
+- .prepare = clk_pllv3_prepare,
+- .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_enet_recalc_rate,
+@@ -322,7 +417,7 @@
+
+ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+ const char *parent_name, void __iomem *base,
+- u32 div_mask)
++ u32 div_mask, bool always_on)
+ {
+ struct clk_pllv3 *pll;
+ const struct clk_ops *ops;
+@@ -352,6 +447,7 @@
+ }
+ pll->base = base;
+ pll->div_mask = div_mask;
++ pll->always_on = always_on;
+
+ init.name = name;
+ init.ops = ops;
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/common.h linux-3.14.40/arch/arm/mach-imx/common.h
+--- linux-3.14.40.orig/arch/arm/mach-imx/common.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/common.h 2015-05-01 14:57:57.823427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+ /*
+@@ -116,7 +116,6 @@
+ void imx_set_cpu_jump(int cpu, void *jump_addr);
+ u32 imx_get_cpu_arg(int cpu);
+ void imx_set_cpu_arg(int cpu, u32 arg);
+-void v7_cpu_resume(void);
+ #ifdef CONFIG_SMP
+ void v7_secondary_startup(void);
+ void imx_scu_map_io(void);
+@@ -129,7 +128,7 @@
+ #endif
+ void imx_src_init(void);
+ void imx_gpc_init(void);
+-void imx_gpc_pre_suspend(void);
++void imx_gpc_pre_suspend(bool arm_power_off);
+ void imx_gpc_post_resume(void);
+ void imx_gpc_mask_all(void);
+ void imx_gpc_restore_all(void);
+@@ -138,14 +137,28 @@
+ void imx_anatop_init(void);
+ void imx_anatop_pre_suspend(void);
+ void imx_anatop_post_resume(void);
++void imx_anatop_pu_enable(bool enable);
+ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
+-void imx6q_set_chicken_bit(void);
++void imx6q_set_cache_lpm_in_wait(bool enable);
++void imx6sl_set_wait_clk(bool enter);
++void imx6_enet_mac_init(const char *compatible);
+
+ void imx_cpu_die(unsigned int cpu);
+ int imx_cpu_kill(unsigned int cpu);
+
++#ifdef CONFIG_SUSPEND
++void v7_cpu_resume(void);
++void imx6_suspend(void __iomem *ocram_vbase);
++#else
++static inline void v7_cpu_resume(void) {}
++static inline void imx6_suspend(void __iomem *ocram_vbase) {}
++#endif
++
+ void imx6q_pm_init(void);
++void imx6dl_pm_init(void);
++void imx6sl_pm_init(void);
+ void imx6q_pm_set_ccm_base(void __iomem *base);
++
+ #ifdef CONFIG_PM
+ void imx5_pm_init(void);
+ #else
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/cpuidle.h linux-3.14.40/arch/arm/mach-imx/cpuidle.h
+--- linux-3.14.40.orig/arch/arm/mach-imx/cpuidle.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/cpuidle.h 2015-05-01 14:57:57.827427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -13,6 +13,7 @@
+ #ifdef CONFIG_CPU_IDLE
+ extern int imx5_cpuidle_init(void);
+ extern int imx6q_cpuidle_init(void);
++extern int imx6sl_cpuidle_init(void);
+ #else
+ static inline int imx5_cpuidle_init(void)
+ {
+@@ -22,4 +23,8 @@
+ {
+ return 0;
+ }
++static inline int imx6sl_cpuidle_init(void)
++{
++ return 0;
++}
+ #endif
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/cpuidle-imx6q.c linux-3.14.40/arch/arm/mach-imx/cpuidle-imx6q.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/cpuidle-imx6q.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/cpuidle-imx6q.c 2015-05-01 14:57:57.827427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -68,8 +68,8 @@
+ /* Need to enable SCU standby for entering WAIT modes */
+ imx_scu_standby_enable();
+
+- /* Set chicken bit to get a reliable WAIT mode support */
+- imx6q_set_chicken_bit();
++ /* Set cache lpm bit for reliable WAIT mode support */
++ imx6q_set_cache_lpm_in_wait(true);
+
+ return cpuidle_register(&imx6q_cpuidle_driver, NULL);
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/cpuidle-imx6sl.c linux-3.14.40/arch/arm/mach-imx/cpuidle-imx6sl.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/cpuidle-imx6sl.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/cpuidle-imx6sl.c 2015-05-01 14:57:57.831427001 -0500
+@@ -0,0 +1,149 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/cpuidle.h>
++#include <linux/genalloc.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <asm/cpuidle.h>
++#include <asm/fncpy.h>
++#include <asm/mach/map.h>
++#include <asm/proc-fns.h>
++#include <asm/tlb.h>
++
++#include "common.h"
++#include "cpuidle.h"
++
++extern u32 audio_bus_freq_mode;
++extern u32 ultra_low_bus_freq_mode;
++extern unsigned long reg_addrs[];
++extern void imx6sl_low_power_wfi(void);
++
++static void __iomem *iomux_base;
++static void *wfi_iram_base;
++
++void (*imx6sl_wfi_in_iram_fn)(void *wfi_iram_base,
++ void *iomux_addr, void *regs_addr, u32 audio_mode) = NULL;
++
++#define WFI_IN_IRAM_SIZE 0x1000
++
++static int imx6sl_enter_wait(struct cpuidle_device *dev,
++ struct cpuidle_driver *drv, int index)
++{
++ imx6q_set_lpm(WAIT_UNCLOCKED);
++#ifdef CONFIG_ARM_IMX6_CPUFREQ
++ if (ultra_low_bus_freq_mode || audio_bus_freq_mode) {
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /*
++ * Run WFI code from IRAM.
++ * Drop the DDR freq to 1MHz and AHB to 3MHz.
++ * Also float DDR IO pads.
++ */
++ imx6sl_wfi_in_iram_fn(wfi_iram_base, iomux_base, reg_addrs, audio_bus_freq_mode);
++ }
++ else
++#endif
++ {
++ imx6sl_set_wait_clk(true);
++ cpu_do_idle();
++ imx6sl_set_wait_clk(false);
++ }
++ imx6q_set_lpm(WAIT_CLOCKED);
++
++ return index;
++}
++
++static struct cpuidle_driver imx6sl_cpuidle_driver = {
++ .name = "imx6sl_cpuidle",
++ .owner = THIS_MODULE,
++ .states = {
++ /* WFI */
++ ARM_CPUIDLE_WFI_STATE,
++ /* WAIT */
++ {
++ .exit_latency = 50,
++ .target_residency = 75,
++ .flags = CPUIDLE_FLAG_TIME_VALID |
++ CPUIDLE_FLAG_TIMER_STOP,
++ .enter = imx6sl_enter_wait,
++ .name = "WAIT",
++ .desc = "Clock off",
++ },
++ },
++ .state_count = 2,
++ .safe_state_index = 0,
++};
++
++int __init imx6sl_cpuidle_init(void)
++{
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-iomuxc");
++ if (!node) {
++ pr_err("failed to find imx6sl-iomuxc device tree data!\n");
++ return -EINVAL;
++ }
++ iomux_base = of_iomap(node, 0);
++ WARN(!iomux_base, "unable to map iomux registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ pr_err("%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ pr_err("failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ pr_err("iram pool unavailable!\n");
++ return -EINVAL;
++ }
++ /*
++ * Allocate IRAM memory for the WFI code that ARM
++ * executes in ultra_low_power_mode.
++ */
++ wfi_iram_base = (void *)gen_pool_alloc(iram_pool,
++ WFI_IN_IRAM_SIZE);
++ if (!wfi_iram_base) {
++ pr_err("Cannot alloc iram for wfi code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)wfi_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ wfi_iram_base = __arm_ioremap(iram_paddr,
++ WFI_IN_IRAM_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ if (!wfi_iram_base)
++ pr_err("wfi_ram_base NOT remapped\n");
++
++ imx6sl_wfi_in_iram_fn = (void *)fncpy(wfi_iram_base,
++ &imx6sl_low_power_wfi, WFI_IN_IRAM_SIZE);
++
++ return cpuidle_register(&imx6sl_cpuidle_driver, NULL);
++}
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/ddr3_freq_imx6.S linux-3.14.40/arch/arm/mach-imx/ddr3_freq_imx6.S
+--- linux-3.14.40.orig/arch/arm/mach-imx/ddr3_freq_imx6.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/ddr3_freq_imx6.S 2015-05-01 14:57:57.831427001 -0500
+@@ -0,0 +1,893 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++
++#define MMDC0_MDPDC 0x4
++#define MMDC0_MDCF0 0x0c
++#define MMDC0_MDCF1 0x10
++#define MMDC0_MDMISC 0x18
++#define MMDC0_MDSCR 0x1c
++#define MMDC0_MAPSR 0x404
++#define MMDC0_MADPCR0 0x410
++#define MMDC0_MPZQHWCTRL 0x800
++#define MMDC1_MPZQHWCTRL 0x4800
++#define MMDC0_MPODTCTRL 0x818
++#define MMDC1_MPODTCTRL 0x4818
++#define MMDC0_MPDGCTRL0 0x83c
++#define MMDC1_MPDGCTRL0 0x483c
++#define MMDC0_MPMUR0 0x8b8
++#define MMDC1_MPMUR0 0x48b8
++
++#define CCM_CBCDR 0x14
++#define CCM_CBCMR 0x18
++#define CCM_CSCMR1 0x1c
++#define CCM_CDHIPR 0x48
++
++#define L2_CACHE_SYNC 0x730
++
++ .align 3
++
++ .macro switch_to_528MHz
++
++ /* check if periph_clk_sel is already set */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq set_ahb_podf_before_switch
++
++ /* change periph_clk to be sourced from pll3_clk. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 20)
++ str r0, [r6, #CCM_CBCDR]
++
++ /*
++ * set the AHB dividers before the switch,
++ * don't change AXI clock divider,
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #0xd00
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update528:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update528
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch3:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch3
++
++ b switch_pre_periph_clk_528
++
++set_ahb_podf_before_switch:
++ /*
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #0xd00
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update528_1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update528_1
++
++switch_pre_periph_clk_528:
++
++ /* now switch pre_periph_clk to PLL2_528MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0xc << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch4:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch4
++
++ .endm
++
++ .macro switch_to_400MHz
++
++ /* check if periph_clk_sel is already set. */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq set_ahb_podf_before_switch1
++
++ /* change periph_clk to be sourced from pll3_clk. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch5:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch5
++
++ b switch_pre_periph_clk_400
++
++set_ahb_podf_before_switch1:
++ /*
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x9 << 8)
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update400_1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update400_1
++
++switch_pre_periph_clk_400:
++
++ /* now switch pre_periph_clk to PFD_400MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0xc << 16)
++ orr r0, r0, #(0x4 << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch6:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch6
++
++ /*
++ * change AHB divider so that we are at 400/3=133MHz.
++ * don't change AXI clock divider.
++ * set the MMDC_DIV=1, AXI_DIV=2, AHB_DIV=3,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x9 << 8)
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update400_2:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update400_2
++
++ .endm
++
++ .macro switch_to_50MHz
++
++ /* check if periph_clk_sel is already set. */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq switch_pre_periph_clk_50
++
++ /*
++ * set the periph_clk to be sourced from PLL2_PFD_200M.
++ * First change periph_clk to be sourced from pll3_clk;
++ * ensure PLL3 is the source and set the divider to 1.
++ */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0x3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch_50:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch_50
++
++switch_pre_periph_clk_50:
++
++ /* now switch pre_periph_clk to PFD_200MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ orr r0, r0, #(0xc << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /*
++ * set the MMDC_DIV=4, AXI_DIV = 4, AHB_DIV=8,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x18 << 16)
++ orr r0, r0, #(0x3 << 16)
++
++ /*
++ * if changing AHB divider remember to change
++ * the IPGPER divider too below.
++ */
++ orr r0, r0, #0x1d00
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update_50:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update_50
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch2:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch2
++
++ .endm
++
++ .macro switch_to_24MHz
++ /*
++ * Change the freq now by setting DDR to 24MHz.
++ * Source it from periph_clk2 and ensure the
++ * periph_clk2 is sourced from 24MHz and the
++ * divider is 1.
++ */
++
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0x3 << 12)
++ orr r0, r0, #(1 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to 24MHz. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch1
++
++ /* change all the dividers to 1. */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(1 << 8)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* Wait for the divider to change. */
++wait_div_update:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update
++
++ .endm
++
++/*
++ * mx6_ddr3_freq_change
++ *
++ * Idle the processor (e.g., wait for interrupt).
++ * Make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ */
++ENTRY(mx6_ddr3_freq_change)
++
++ stmfd sp!, {r4-r12}
++
++ /*
++ * r5 -> mmdc_base
++ * r6 -> ccm_base
++ * r7 -> iomux_base
++ * r12 -> l2_base
++ */
++ mov r4, r0
++ mov r8, r1
++ mov r9, r2
++ mov r11, r3
++
++ /*
++ * Get the addresses of the registers.
++ * They are the last few entries in the
++ * ddr_settings parameter.
++ * The first entry contains the count,
++ * and each entry is 2 words.
++ */
++ ldr r0, [r1]
++ add r0, r0, #1
++ lsl r0, r0, #3
++ add r1, r0, r1
++ /* mmdc_base. */
++ ldr r5, [r1]
++ add r1, #8
++ /* ccm_base */
++ ldr r6, [r1]
++ add r1, #8
++ /*iomux_base */
++ ldr r7, [r1]
++ add r1, #8
++ /*l2_base */
++ ldr r12, [r1]
++
++ddr_freq_change:
++ /*
++ * make sure no TLB miss will occur when
++ * the DDR is in self refresh. invalidate
++ * TLB single entry to ensure that the
++ * address is not already in the TLB.
++ */
++
++ adr r10, ddr_freq_change
++
++ ldr r2, [r6]
++ ldr r2, [r5]
++ ldr r2, [r7]
++ ldr r2, [r8]
++ ldr r2, [r10]
++ ldr r2, [r11]
++ ldr r2, [r12]
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Make sure the L2 buffers are drained.
++ * Sync operation on L2 drains the buffers.
++ */
++ mov r1, #0x0
++ str r1, [r12, #L2_CACHE_SYNC]
++#endif
++
++ /* disable automatic power saving. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* disable MMDC power down timer. */
++ ldr r0, [r5, #MMDC0_MDPDC]
++ bic r0, r0, #(0xff << 8)
++ str r0, [r5, #MMDC0_MDPDC]
++
++ /* delay for a while */
++ ldr r1, =4
++delay1:
++ ldr r2, =0
++cont1:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont1
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay1
++
++ /* set CON_REG */
++ ldr r0, =0x8000
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_set_1:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ bne poll_conreq_set_1
++
++ ldr r0, =0x00008010
++ str r0, [r5, #MMDC0_MDSCR]
++ ldr r0, =0x00008018
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /*
++ * if requested frequency is greater than
++ * 300MHz go to DLL on mode.
++ */
++ ldr r1, =300000000
++ cmp r4, r1
++ bge dll_on_mode
++
++dll_off_mode:
++
++ /* if DLL is currently on, turn it off. */
++ cmp r9, #1
++ beq continue_dll_off_1
++
++ ldr r0, =0x00018031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00018039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r1, =10
++delay1a:
++ ldr r2, =0
++cont1a:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont1a
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay1a
++
++continue_dll_off_1:
++ /* set DVFS - enter self refresh mode */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* de-assert con_req */
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++
++poll_dvfs_set_1:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ bne poll_dvfs_set_1
++
++ ldr r1, =24000000
++ cmp r4, r1
++ beq switch_freq_24
++
++ switch_to_50MHz
++ b continue_dll_off_2
++
++switch_freq_24:
++ switch_to_24MHz
++
++continue_dll_off_2:
++
++ /* set SBS - block ddr accesses */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ orr r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ /* clear DVFS - exit from self refresh mode */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++poll_dvfs_clear_1:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq poll_dvfs_clear_1
++
++ /* if DLL was previously on, continue DLL off routine. */
++ cmp r9, #1
++ beq continue_dll_off_3
++
++ ldr r0, =0x00018031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00018039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x08208030
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x08208038
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00088032
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x0008803A
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay_1:
++ ldr r2, =0
++cont_1:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont_1
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay_1
++
++ ldr r0, [r5, #MMDC0_MDCF0]
++ bic r0, r0, #0xf
++ orr r0, r0, #0x3
++ str r0, [r5, #MMDC0_MDCF0]
++
++ ldr r0, [r5, #MMDC0_MDCF1]
++ bic r0, r0, #0x7
++ orr r0, r0, #0x4
++ str r0, [r5, #MMDC0_MDCF1]
++
++ ldr r0, =0x00091680
++ str r0, [r5, #MMDC0_MDMISC]
++
++ /* enable dqs pull down in the IOMUX. */
++ ldr r1, [r11]
++ add r11, r11, #8
++ ldr r2, =0x3028
++update_iomux:
++ ldr r0, [r11, #0x0]
++ ldr r3, [r7, r0]
++ bic r3, r3, r2
++ orr r3, r3, #(0x3 << 12)
++ orr r3, r3, #0x28
++ str r3, [r7, r0]
++ add r11, r11, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_iomux
++
++ /* ODT disabled. */
++ ldr r0, =0x0
++ ldr r2, =MMDC0_MPODTCTRL
++ str r0, [r5, r2]
++ ldr r2, =MMDC1_MPODTCTRL
++ str r0, [r5, r2]
++
++ /* DQS gating disabled. */
++ ldr r2, =MMDC0_MPDGCTRL0
++ ldr r0, [r5, r2]
++ orr r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ ldr r2, =MMDC1_MPDGCTRL0
++ ldr r0, [r5, r2]
++ orr r0, r0, #(0x1 << 29)
++ str r0, [r5, r2]
++
++ /* MMDC0_MAPSR adopt power down enable. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* frc_msr + mu bypass */
++ ldr r0, =0x00000060
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++ ldr r0, =0x00000460
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++ ldr r0, =0x00000c60
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++continue_dll_off_3:
++ /* clear SBS - unblock accesses to DDR. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ bic r0, r0, #(0x1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_clear_1:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ beq poll_conreq_clear_1
++
++ b done
++
++dll_on_mode:
++ /* assert DVFS - enter self refresh mode. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* de-assert CON_REQ. */
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* poll DVFS ack. */
++poll_dvfs_set_2:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ bne poll_dvfs_set_2
++
++ ldr r1, =528000000
++ cmp r4, r1
++ beq switch_freq_528
++
++ switch_to_400MHz
++
++ b continue_dll_on
++
++switch_freq_528:
++ switch_to_528MHz
++
++continue_dll_on:
++
++ /* set SBS step-by-step mode. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ orr r0, r0, #( 1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ /* clear DVFS - exit self refresh mode. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++poll_dvfs_clear_2:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq poll_dvfs_clear_2
++
++ /* if DLL is currently off, turn it back on. */
++ cmp r9, #0
++ beq update_calibration_only
++
++ ldr r0, =0xa5390003
++ str r0, [r5, #MMDC0_MPZQHWCTRL]
++ ldr r2, =MMDC1_MPZQHWCTRL
++ str r0, [r5, r2]
++
++ /* enable DQS gating. */
++ ldr r2, =MMDC0_MPDGCTRL0
++ ldr r0, [r5, r2]
++ bic r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ ldr r2, =MMDC1_MPDGCTRL0
++ ldr r0, [r5, r2]
++ bic r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ /* force measure. */
++ ldr r0, =0x00000800
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay5:
++ ldr r2, =0
++cont5:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont5
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay5
++
++ /* disable dqs pull down in the IOMUX. */
++ ldr r1, [r11]
++ add r11, r11, #8
++update_iomux1:
++ ldr r0, [r11, #0x0]
++ ldr r3, [r11, #0x4]
++ str r3, [r7, r0]
++ add r11, r11, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_iomux1
++
++ /* config MMDC timings to 528MHz. */
++ ldr r9, [r8]
++ add r8, r8, #8
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ /* update MISC register: WALAT, RALAT */
++ ldr r0, =0x00081740
++ str r0, [r5, #MMDC0_MDMISC]
++
++ /* configure ddr devices to dll on, odt. */
++ ldr r0, =0x00028031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00028039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay7:
++ ldr r2, =0
++cont7:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont7
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay7
++
++ /* reset dll. */
++ ldr r0, =0x09208030
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x09208038
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =100
++delay8:
++ ldr r2, =0
++cont8:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, =0x00428031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00428039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ /* issue a zq command. */
++ ldr r0, =0x04008040
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x04008048
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* MMDC ODT enable. */
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r2, =0x4818
++ str r0, [r5, r2]
++
++ /* delay for a while. */
++ ldr r1, =40
++delay15:
++ ldr r2, =0
++cont15:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont15
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay15
++
++ /* MMDC0_MAPSR adopt power down enable. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* enable MMDC power down timer. */
++ ldr r0, [r5, #MMDC0_MDPDC]
++ orr r0, r0, #(0x55 << 8)
++ str r0, [r5, #MMDC0_MDPDC]
++
++ b update_calibration
++
++update_calibration_only:
++ ldr r1, [r8]
++ sub r1, r1, #7
++ add r8, r8, #64
++ b update_calib
++
++update_calibration:
++ /* write the new calibration values. */
++ mov r1, r9
++ sub r1, r1, #7
++
++update_calib:
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_calib
++
++ /* perform a force measurement. */
++ ldr r0, =0x800
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++ /* clear SBS - unblock DDR accesses. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ bic r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_clear_2:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ beq poll_conreq_clear_2
++
++done:
++ /* restore registers */
++
++ ldmfd sp!, {r4-r12}
++ mov pc, lr
++
++ .type mx6_do_ddr3_freq_change, #object
++ENTRY(mx6_do_ddr_freq_change)
++ .word mx6_ddr3_freq_change
++ .size mx6_ddr3_freq_change, . - mx6_ddr3_freq_change
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/gpc.c linux-3.14.40/arch/arm/mach-imx/gpc.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/gpc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/gpc.c 2015-05-01 14:57:57.831427001 -0500
+@@ -10,30 +10,69 @@
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
++#include <linux/clk.h>
++#include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/irq.h>
++#include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/platform_device.h>
+ #include <linux/irqchip/arm-gic.h>
++#include <linux/regulator/consumer.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
+ #include "common.h"
++#include "hardware.h"
+
+ #define GPC_IMR1 0x008
+ #define GPC_PGC_CPU_PDN 0x2a0
++#define GPC_PGC_GPU_PDN 0x260
++#define GPC_PGC_GPU_PUPSCR 0x264
++#define GPC_PGC_GPU_PDNSCR 0x268
++#define GPC_PGC_GPU_SW_SHIFT 0
++#define GPC_PGC_GPU_SW_MASK 0x3f
++#define GPC_PGC_GPU_SW2ISO_SHIFT 8
++#define GPC_PGC_GPU_SW2ISO_MASK 0x3f
++#define GPC_PGC_CPU_PUPSCR 0x2a4
++#define GPC_PGC_CPU_PDNSCR 0x2a8
++#define GPC_PGC_CPU_SW_SHIFT 0
++#define GPC_PGC_CPU_SW_MASK 0x3f
++#define GPC_PGC_CPU_SW2ISO_SHIFT 8
++#define GPC_PGC_CPU_SW2ISO_MASK 0x3f
++#define GPC_CNTR 0x0
++#define GPC_CNTR_PU_UP_REQ_SHIFT 0x1
++#define GPC_CNTR_PU_DOWN_REQ_SHIFT 0x0
+
+ #define IMR_NUM 4
+
+ static void __iomem *gpc_base;
+ static u32 gpc_wake_irqs[IMR_NUM];
+ static u32 gpc_saved_imrs[IMR_NUM];
++static struct clk *gpu3d_clk, *gpu3d_shader_clk, *gpu2d_clk, *gpu2d_axi_clk;
++static struct clk *openvg_axi_clk, *vpu_clk, *ipg_clk;
++static struct device *gpc_dev;
++struct regulator *pu_reg;
++struct notifier_block nb;
++static struct regulator_dev *pu_dummy_regulator_rdev;
++static struct regulator_init_data pu_dummy_initdata = {
++ .constraints = {
++ .max_uV = 1450000, /* align with real max of anatop */
++ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
++ REGULATOR_CHANGE_VOLTAGE,
++ },
++};
++static int pu_dummy_enable;
+
+-void imx_gpc_pre_suspend(void)
++void imx_gpc_pre_suspend(bool arm_power_off)
+ {
+ void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+ int i;
+
+- /* Tell GPC to power off ARM core when suspend */
+- writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);
++ if (arm_power_off)
++ /* Tell GPC to power off ARM core when suspend */
++ writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_PDN);
+
+ for (i = 0; i < IMR_NUM; i++) {
+ gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
+@@ -120,10 +159,119 @@
+ writel_relaxed(val, reg);
+ }
+
++static void imx_pu_clk(bool enable)
++{
++ if (enable) {
++ if (cpu_is_imx6sl()) {
++ clk_prepare_enable(gpu2d_clk);
++ clk_prepare_enable(openvg_axi_clk);
++ } else {
++ clk_prepare_enable(vpu_clk);
++ clk_prepare_enable(gpu3d_clk);
++ clk_prepare_enable(gpu3d_shader_clk);
++ clk_prepare_enable(gpu2d_clk);
++ clk_prepare_enable(gpu2d_axi_clk);
++ clk_prepare_enable(openvg_axi_clk);
++ }
++ } else {
++ if (cpu_is_imx6sl()) {
++ clk_disable_unprepare(gpu2d_clk);
++ clk_disable_unprepare(openvg_axi_clk);
++ } else {
++ clk_disable_unprepare(openvg_axi_clk);
++ clk_disable_unprepare(gpu2d_axi_clk);
++ clk_disable_unprepare(gpu2d_clk);
++ clk_disable_unprepare(gpu3d_shader_clk);
++ clk_disable_unprepare(gpu3d_clk);
++ clk_disable_unprepare(vpu_clk);
++ }
++ }
++}
++
++static void imx_gpc_pu_enable(bool enable)
++{
++ u32 rate, delay_us;
++ u32 gpu_pupscr_sw2iso, gpu_pdnscr_iso2sw;
++ u32 gpu_pupscr_sw, gpu_pdnscr_iso;
++
++ /* get ipg clk rate for PGC delay */
++ rate = clk_get_rate(ipg_clk);
++
++ if (enable) {
++ imx_anatop_pu_enable(true);
++ /*
++ * need to add necessary delay between powering up PU LDO and
++ * disabling PU isolation in PGC, the counter of PU isolation
++ * is based on ipg clk.
++ */
++ gpu_pupscr_sw2iso = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PUPSCR) >> GPC_PGC_GPU_SW2ISO_SHIFT)
++ & GPC_PGC_GPU_SW2ISO_MASK;
++ gpu_pupscr_sw = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PUPSCR) >> GPC_PGC_GPU_SW_SHIFT)
++ & GPC_PGC_GPU_SW_MASK;
++ delay_us = (gpu_pupscr_sw2iso + gpu_pupscr_sw) * 1000000
++ / rate + 1;
++ udelay(delay_us);
++
++ imx_pu_clk(true);
++ writel_relaxed(1, gpc_base + GPC_PGC_GPU_PDN);
++ writel_relaxed(1 << GPC_CNTR_PU_UP_REQ_SHIFT,
++ gpc_base + GPC_CNTR);
++ while (readl_relaxed(gpc_base + GPC_CNTR) &
++ (1 << GPC_CNTR_PU_UP_REQ_SHIFT))
++ ;
++ imx_pu_clk(false);
++ } else {
++ writel_relaxed(1, gpc_base + GPC_PGC_GPU_PDN);
++ writel_relaxed(1 << GPC_CNTR_PU_DOWN_REQ_SHIFT,
++ gpc_base + GPC_CNTR);
++ while (readl_relaxed(gpc_base + GPC_CNTR) &
++ (1 << GPC_CNTR_PU_DOWN_REQ_SHIFT))
++ ;
++ /*
++ * need to add necessary delay between enabling PU isolation
++ * in PGC and powering down PU LDO, the counter of PU isolation
++ * is based on ipg clk.
++ */
++ gpu_pdnscr_iso2sw = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PDNSCR) >> GPC_PGC_GPU_SW2ISO_SHIFT)
++ & GPC_PGC_GPU_SW2ISO_MASK;
++ gpu_pdnscr_iso = (readl_relaxed(gpc_base +
++ GPC_PGC_GPU_PDNSCR) >> GPC_PGC_GPU_SW_SHIFT)
++ & GPC_PGC_GPU_SW_MASK;
++ delay_us = (gpu_pdnscr_iso2sw + gpu_pdnscr_iso) * 1000000
++ / rate + 1;
++ udelay(delay_us);
++ imx_anatop_pu_enable(false);
++ }
++}
++
++static int imx_gpc_regulator_notify(struct notifier_block *nb,
++ unsigned long event,
++ void *ignored)
++{
++ switch (event) {
++ case REGULATOR_EVENT_PRE_DISABLE:
++ imx_gpc_pu_enable(false);
++ break;
++ case REGULATOR_EVENT_ENABLE:
++ imx_gpc_pu_enable(true);
++ break;
++ default:
++ break;
++ }
++
++ return NOTIFY_OK;
++}
++
+ void __init imx_gpc_init(void)
+ {
+ struct device_node *np;
+ int i;
++ u32 val;
++ u32 cpu_pupscr_sw2iso, cpu_pupscr_sw;
++ u32 cpu_pdnscr_iso2sw, cpu_pdnscr_iso;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
+ gpc_base = of_iomap(np, 0);
+@@ -137,4 +285,190 @@
+ gic_arch_extn.irq_mask = imx_gpc_irq_mask;
+ gic_arch_extn.irq_unmask = imx_gpc_irq_unmask;
+ gic_arch_extn.irq_set_wake = imx_gpc_irq_set_wake;
++
++ /*
++ * If there are CPU isolation timing settings in dts,
++ * update them according to dts, otherwise, keep them
++ * with default value in registers.
++ */
++ cpu_pupscr_sw2iso = cpu_pupscr_sw =
++ cpu_pdnscr_iso2sw = cpu_pdnscr_iso = 0;
++
++ /* Read CPU isolation setting for GPC */
++ of_property_read_u32(np, "fsl,cpu_pupscr_sw2iso", &cpu_pupscr_sw2iso);
++ of_property_read_u32(np, "fsl,cpu_pupscr_sw", &cpu_pupscr_sw);
++ of_property_read_u32(np, "fsl,cpu_pdnscr_iso2sw", &cpu_pdnscr_iso2sw);
++ of_property_read_u32(np, "fsl,cpu_pdnscr_iso", &cpu_pdnscr_iso);
++
++ /* Update CPU PUPSCR timing if it is defined in dts */
++ val = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);
++ if (cpu_pupscr_sw2iso)
++ val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
++ if (cpu_pupscr_sw)
++ val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
++ val |= cpu_pupscr_sw2iso << GPC_PGC_CPU_SW2ISO_SHIFT;
++ val |= cpu_pupscr_sw << GPC_PGC_CPU_SW_SHIFT;
++ writel_relaxed(val, gpc_base + GPC_PGC_CPU_PUPSCR);
++
++ /* Update CPU PDNSCR timing if it is defined in dts */
++ val = readl_relaxed(gpc_base + GPC_PGC_CPU_PDNSCR);
++ if (cpu_pdnscr_iso2sw)
++ val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
++ if (cpu_pdnscr_iso)
++ val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
++ val |= cpu_pdnscr_iso2sw << GPC_PGC_CPU_SW2ISO_SHIFT;
++ val |= cpu_pdnscr_iso << GPC_PGC_CPU_SW_SHIFT;
++ writel_relaxed(val, gpc_base + GPC_PGC_CPU_PDNSCR);
++}
++
++static int imx_pureg_set_voltage(struct regulator_dev *reg, int min_uV,
++ int max_uV, unsigned *selector)
++{
++ return 0;
++}
++
++static int imx_pureg_enable(struct regulator_dev *rdev)
++{
++ pu_dummy_enable = 1;
++
++ return 0;
++}
++
++static int imx_pureg_disable(struct regulator_dev *rdev)
++{
++ pu_dummy_enable = 0;
++
++ return 0;
+ }
++
++static int imx_pureg_is_enable(struct regulator_dev *rdev)
++{
++ return pu_dummy_enable;
++}
++
++static int imx_pureg_list_voltage(struct regulator_dev *rdev,
++ unsigned int selector)
++{
++ return 0;
++}
++
++static struct regulator_ops pu_dummy_ops = {
++ .set_voltage = imx_pureg_set_voltage,
++ .enable = imx_pureg_enable,
++ .disable = imx_pureg_disable,
++ .is_enabled = imx_pureg_is_enable,
++ .list_voltage = imx_pureg_list_voltage,
++};
++
++static struct regulator_desc pu_dummy_desc = {
++ .name = "pureg-dummy",
++ .id = -1,
++ .type = REGULATOR_VOLTAGE,
++ .owner = THIS_MODULE,
++ .ops = &pu_dummy_ops,
++};
++
++static int pu_dummy_probe(struct platform_device *pdev)
++{
++ struct regulator_config config = { };
++ int ret;
++
++ config.dev = &pdev->dev;
++ config.init_data = &pu_dummy_initdata;
++ config.of_node = pdev->dev.of_node;
++
++ pu_dummy_regulator_rdev = regulator_register(&pu_dummy_desc, &config);
++ if (IS_ERR(pu_dummy_regulator_rdev)) {
++ ret = PTR_ERR(pu_dummy_regulator_rdev);
++ dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static const struct of_device_id imx_pudummy_ids[] = {
++ { .compatible = "fsl,imx6-dummy-pureg" },
++};
++MODULE_DEVICE_TABLE(of, imx_pudummy_ids);
++
++static struct platform_driver pu_dummy_driver = {
++ .probe = pu_dummy_probe,
++ .driver = {
++ .name = "pu-dummy",
++ .owner = THIS_MODULE,
++ .of_match_table = imx_pudummy_ids,
++ },
++};
++
++static int imx_gpc_probe(struct platform_device *pdev)
++{
++ int ret;
++
++ gpc_dev = &pdev->dev;
++
++ pu_reg = devm_regulator_get(gpc_dev, "pu");
++ if (IS_ERR(pu_reg)) {
++ ret = PTR_ERR(pu_reg);
++ dev_info(gpc_dev, "pu regulator not ready.\n");
++ return ret;
++ }
++ nb.notifier_call = &imx_gpc_regulator_notify;
++
++ /* Get gpu&vpu clk for power up PU by GPC */
++ if (cpu_is_imx6sl()) {
++ gpu2d_clk = devm_clk_get(gpc_dev, "gpu2d_podf");
++ openvg_axi_clk = devm_clk_get(gpc_dev, "gpu2d_ovg");
++ ipg_clk = devm_clk_get(gpc_dev, "ipg");
++ if (IS_ERR(gpu2d_clk) || IS_ERR(openvg_axi_clk)
++ || IS_ERR(ipg_clk)) {
++ dev_err(gpc_dev, "failed to get clk!\n");
++ return -ENOENT;
++ }
++ } else {
++ gpu3d_clk = devm_clk_get(gpc_dev, "gpu3d_core");
++ gpu3d_shader_clk = devm_clk_get(gpc_dev, "gpu3d_shader");
++ gpu2d_clk = devm_clk_get(gpc_dev, "gpu2d_core");
++ gpu2d_axi_clk = devm_clk_get(gpc_dev, "gpu2d_axi");
++ openvg_axi_clk = devm_clk_get(gpc_dev, "openvg_axi");
++ vpu_clk = devm_clk_get(gpc_dev, "vpu_axi");
++ ipg_clk = devm_clk_get(gpc_dev, "ipg");
++ if (IS_ERR(gpu3d_clk) || IS_ERR(gpu3d_shader_clk)
++ || IS_ERR(gpu2d_clk) || IS_ERR(gpu2d_axi_clk)
++ || IS_ERR(openvg_axi_clk) || IS_ERR(vpu_clk)
++ || IS_ERR(ipg_clk)) {
++ dev_err(gpc_dev, "failed to get clk!\n");
++ return -ENOENT;
++ }
++ }
++
++ ret = regulator_register_notifier(pu_reg, &nb);
++ if (ret) {
++ dev_err(gpc_dev,
++ "regulator notifier request failed\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static const struct of_device_id imx_gpc_ids[] = {
++ { .compatible = "fsl,imx6q-gpc" },
++};
++MODULE_DEVICE_TABLE(of, imx_gpc_ids);
++
++static struct platform_driver imx_gpc_platdrv = {
++ .driver = {
++ .name = "imx-gpc",
++ .owner = THIS_MODULE,
++ .of_match_table = imx_gpc_ids,
++ },
++ .probe = imx_gpc_probe,
++};
++module_platform_driver(imx_gpc_platdrv);
++
++module_platform_driver(pu_dummy_driver);
++
++MODULE_AUTHOR("Anson Huang <b20788@freescale.com>");
++MODULE_DESCRIPTION("Freescale i.MX GPC driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/hardware.h linux-3.14.40/arch/arm/mach-imx/hardware.h
+--- linux-3.14.40.orig/arch/arm/mach-imx/hardware.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/hardware.h 2015-05-01 14:57:57.831427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2007, 2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -20,7 +20,9 @@
+ #ifndef __ASM_ARCH_MXC_HARDWARE_H__
+ #define __ASM_ARCH_MXC_HARDWARE_H__
+
++#ifndef __ASSEMBLY__
+ #include <asm/io.h>
++#endif
+ #include <asm/sizes.h>
+
+ #define addr_in_module(addr, mod) \
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/headsmp.S linux-3.14.40/arch/arm/mach-imx/headsmp.S
+--- linux-3.14.40.orig/arch/arm/mach-imx/headsmp.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/headsmp.S 2015-05-01 14:57:57.831427001 -0500
+@@ -12,8 +12,6 @@
+
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+-#include <asm/asm-offsets.h>
+-#include <asm/hardware/cache-l2x0.h>
+
+ .section ".text.head", "ax"
+
+@@ -35,37 +33,3 @@
+ b secondary_startup
+ ENDPROC(v7_secondary_startup)
+ #endif
+-
+-#ifdef CONFIG_ARM_CPU_SUSPEND
+-/*
+- * The following code must assume it is running from physical address
+- * where absolute virtual addresses to the data section have to be
+- * turned into relative ones.
+- */
+-
+-#ifdef CONFIG_CACHE_L2X0
+- .macro pl310_resume
+- adr r0, l2x0_saved_regs_offset
+- ldr r2, [r0]
+- add r2, r2, r0
+- ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
+- ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
+- str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
+- mov r1, #0x1
+- str r1, [r0, #L2X0_CTRL] @ re-enable L2
+- .endm
+-
+-l2x0_saved_regs_offset:
+- .word l2x0_saved_regs - .
+-
+-#else
+- .macro pl310_resume
+- .endm
+-#endif
+-
+-ENTRY(v7_cpu_resume)
+- bl v7_invalidate_l1
+- pl310_resume
+- b cpu_resume
+-ENDPROC(v7_cpu_resume)
+-#endif
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/imx6sl_wfi.S linux-3.14.40/arch/arm/mach-imx/imx6sl_wfi.S
+--- linux-3.14.40.orig/arch/arm/mach-imx/imx6sl_wfi.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/imx6sl_wfi.S 2015-05-01 14:57:57.831427001 -0500
+@@ -0,0 +1,639 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++#define IRAM_WAIT_SIZE (1 << 11)
++
++ .macro sl_ddr_io_save
++
++ ldr r4, [r1, #0x30c] /* DRAM_DQM0 */
++ ldr r5, [r1, #0x310] /* DRAM_DQM1 */
++ ldr r6, [r1, #0x314] /* DRAM_DQM2 */
++ ldr r7, [r1, #0x318] /* DRAM_DQM3 */
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x5c4] /* GPR_B0DS */
++ ldr r5, [r1, #0x5cc] /* GPR_B1DS */
++ ldr r6, [r1, #0x5d4] /* GPR_B2DS */
++ ldr r7, [r1, #0x5d8] /* GPR_B3DS */
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x300] /* DRAM_CAS */
++ ldr r5, [r1, #0x31c] /* DRAM_RAS */
++ ldr r6, [r1, #0x338] /* DRAM_SDCLK_0 */
++ ldr r7, [r1, #0x5ac] /* GPR_ADDS*/
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x5b0] /* DDRMODE_CTL */
++ ldr r5, [r1, #0x5c0] /* DDRMODE */
++ ldr r6, [r1, #0x33c] /* DRAM_SODT0*/
++ ldr r7, [r1, #0x340] /* DRAM_SODT1*/
++ stmfd r9!, {r4-r7}
++
++ ldr r4, [r1, #0x330] /* DRAM_SDCKE0 */
++ ldr r5, [r1, #0x334] /* DRAM_SDCKE1 */
++ ldr r6, [r1, #0x320] /* DRAM_RESET */
++ stmfd r9!, {r4-r6}
++
++ .endm
++
++ .macro sl_ddr_io_restore
++
++ /*
++ * r9 points to IRAM stack.
++ * r1 points to IOMUX base address.
++ * r8 points to MMDC base address.
++ */
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x30c] /* DRAM_DQM0 */
++ str r5, [r1, #0x310] /* DRAM_DQM1 */
++ str r6, [r1, #0x314] /* DRAM_DQM2 */
++ str r7, [r1, #0x318] /* DRAM_DQM3 */
++
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x5c4] /* GPR_B0DS */
++ str r5, [r1, #0x5cc] /* GPR_B1DS */
++ str r6, [r1, #0x5d4] /* GPR_B2DS */
++ str r7, [r1, #0x5d8] /* GPR_B3DS */
++
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x300] /* DRAM_CAS */
++ str r5, [r1, #0x31c] /* DRAM_RAS */
++ str r6, [r1, #0x338] /* DRAM_SDCLK_0 */
++ str r7, [r1, #0x5ac] /* GPR_ADDS*/
++
++ ldmea r9!, {r4-r7}
++ str r4, [r1, #0x5b0] /* DDRMODE_CTL */
++ str r5, [r1, #0x5c0] /* DDRMODE */
++ str r6, [r1, #0x33c] /* DRAM_SODT0*/
++ str r7, [r1, #0x340] /* DRAM_SODT1*/
++
++ ldmea r9!, {r4-r6}
++ str r4, [r1, #0x330] /* DRAM_SDCKE0 */
++ str r5, [r1, #0x334] /* DRAM_SDCKE1 */
++ str r6, [r1, #0x320] /* DRAM_RESET */
++
++ /*
++ * Need to reset the FIFO to avoid MMDC lockup
++ * caused by floating/changing the
++ * configuration of many DDR IO pads.
++ */
++ ldr r7, =0x83c
++ ldr r6, [r8, r7]
++ orr r6, r6, #0x80000000
++ str r6, [r8, r7]
++fifo_reset1_wait:
++ ldr r6, [r8, r7]
++ and r6, r6, #0x80000000
++ cmp r6, #0
++ bne fifo_reset1_wait
++
++ /* reset FIFO a second time */
++ ldr r6, [r8, r7]
++ orr r6, r6, #0x80000000
++ str r6, [r8, r7]
++fifo_reset2_wait:
++ ldr r6, [r8, r7]
++ and r6, r6, #0x80000000
++ cmp r6, #0
++ bne fifo_reset2_wait
++
++ .endm
++
++ .macro sl_ddr_io_set_lpm
++
++ mov r4, #0
++ str r4, [r1, #0x30c] /* DRAM_DQM0 */
++ str r4, [r1, #0x310] /* DRAM_DQM1 */
++ str r4, [r1, #0x314] /* DRAM_DQM2 */
++ str r4, [r1, #0x318] /* DRAM_DQM3 */
++
++ str r4, [r1, #0x5c4] /* GPR_B0DS */
++ str r4, [r1, #0x5cc] /* GPR_B1DS */
++ str r4, [r1, #0x5d4] /* GPR_B2DS */
++ str r4, [r1, #0x5d8] /* GPR_B3DS */
++
++ str r4, [r1, #0x300] /* DRAM_CAS */
++ str r4, [r1, #0x31c] /* DRAM_RAS */
++ str r4, [r1, #0x338] /* DRAM_SDCLK_0 */
++ str r4, [r1, #0x5ac] /* GPR_ADDS*/
++
++ str r4, [r1, #0x5b0] /* DDRMODE_CTL */
++ str r4, [r1, #0x5c0] /* DDRMODE */
++ str r4, [r1, #0x33c] /* DRAM_SODT0*/
++ str r4, [r1, #0x340] /* DRAM_SODT1*/
++
++ mov r4, #0x80000
++ str r4, [r1, #0x320] /* DRAM_RESET */
++ mov r4, #0x1000
++ str r4, [r1, #0x330] /* DRAM_SDCKE0 */
++ str r4, [r1, #0x334] /* DRAM_SDCKE1 */
++
++ .endm
++
++/*
++ * imx6sl_low_power_wfi
++ *
++ * Idle the processor (e.g., wait for interrupt).
++ * Make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ * r0: WFI IRAMcode base address.
++ * r1: IOMUX base address
++ * r2: Base address of CCM, ANATOP and MMDC
++ * r3: 1 if in audio_bus_freq_mode
++ */
++ .align 3
++ENTRY(imx6sl_low_power_wfi)
++
++ push {r4-r11}
++
++mx6sl_lpm_wfi:
++ /* Store audio_bus_freq_mode */
++ mov r11, r3
++
++ mov r4,r2
++ /* Get the IRAM data storage address. */
++ mov r10, r0
++ mov r9, r0 /* get suspend_iram_base */
++ add r9, r9, #IRAM_WAIT_SIZE
++
++ /* Anatop Base address in r3. */
++ ldr r3, [r4]
++ /* CCM Base Address in r2 */
++ ldr r2, [r4, #0x4]
++ /* MMDC Base Address in r8 */
++ ldr r8, [r4, #0x8]
++ /* L2 Base Address in r7 */
++ ldr r7, [r4, #0xC]
++
++ ldr r6, [r8]
++ ldr r6, [r3]
++ ldr r6, [r2]
++ ldr r6, [r1]
++
++ /* Store the original ARM PODF. */
++ ldr r0, [r2, #0x10]
++
++ /* Drain all the L1 buffers. */
++ dsb
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Need to make sure the buffers in L2 are drained.
++ * Performing a sync operation does this.
++ */
++ mov r6, #0x0
++ str r6, [r7, #0x730]
++#endif
++
++ /*
++ * The second dsb might be needed to keep cache sync (device write)
++ * ordering with the memory accesses before it.
++ */
++ dsb
++ isb
++
++ /* Save the DDR IO state. */
++ sl_ddr_io_save
++
++ /* Disable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* Make the DDR explicitly enter self-refresh. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_set_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ bne poll_dvfs_set_1
++
++ /* set SBS step-by-step mode */
++ ldr r6, [r8, #0x410]
++ orr r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ cmp r11, #1
++ beq audio_mode
++ /*
++ * Now set DDR rate to 1MHz.
++ * DDR is from bypassed PLL2 on periph2_clk2 path.
++ * Set the periph2_clk2_podf to divide by 8.
++ */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x07
++ str r6, [r2, #0x14]
++
++ /* Now set MMDC PODF to divide by 3. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, #0x10
++ str r6, [r2, #0x14]
++ b mmdc_podf
++
++audio_mode:
++ /* MMDC is from PLL2_200M.
++ * Set the mmdc_podf to div by 8.
++ */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x38
++ str r6, [r2, #0x14]
++
++ /* Loop till podf is accepted. */
++mmdc_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne mmdc_podf
++
++ /* Set the DDR IO in LPM state. */
++ sl_ddr_io_set_lpm
++
++ cmp r11, #1
++ beq do_audio_arm_clk
++
++ /*
++ * Check if none of the PLLs are
++ * locked, except PLL1 which will get
++ * bypassed below.
++ * We should not be here if PLL2 is not
++ * bypassed.
++ */
++ ldr r7, =1
++ /* USB1 PLL3 */
++ ldr r6, [r3, #0x10]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* USB2 PLL7 */
++ ldr r6, [r3, #0x20]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* Audio PLL4 */
++ ldr r6, [r3, #0x70]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* Video PLL5 */
++ ldr r6, [r3, #0xA0]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ /* ENET PLL8 */
++ ldr r6, [r3, #0xE0]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ beq no_analog_saving
++
++ b cont
++
++no_analog_saving:
++ ldr r7, =0
++
++cont:
++ /* Set the AHB to 3MHz. AXI to 3MHz. */
++ ldr r9, [r2, #0x14]
++ mov r6, r9
++ orr r6, r6, #0x1c00
++ orr r6, r6, #0x70000
++ str r6, [r2, #0x14]
++
++ /* Loop till podf is accepted. */
++ahb_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop
++
++ /*
++ * Now set ARM to 24MHz.
++ * Move ARM to be sourced from STEP_CLK
++ * after setting STEP_CLK to 24MHz.
++ */
++ ldr r6, [r2, #0xc]
++ bic r6, r6, #0x100
++ str r6, [r2, #0x0c]
++ /* Now PLL1_SW_CLK to step_clk. */
++ ldr r6, [r2, #0x0c]
++ orr r6, r6, #0x4
++ str r6, [r2, #0x0c]
++
++ /* Bypass PLL1 and power it down. */
++ ldr r6, =(1 << 16)
++ orr r6, r6, #0x1000
++ str r6, [r3, #0x04]
++
++ /*
++ * Set the ARM PODF to divide by 8.
++ * IPG is at 1.5MHz here, we need ARM to
++ * run at the 12:5 ratio (WAIT mode issue).
++ */
++ ldr r6, =0x7
++ str r6, [r2, #0x10]
++
++ /* Loop till podf is accepted. */
++podf_loop:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop
++
++ /*
++ * Check if we can save some
++ * power in the Analog section.
++ */
++ cmp r7, #0x1
++ bne do_wfi
++
++ /* Disable 1p1 brown out. */
++ ldr r6, [r3, #0x110]
++ bic r6, r6, #0x2
++ str r6, [r3, #0x110]
++
++ /* Enable the weak 2P5 */
++ ldr r6, [r3, #0x130]
++ orr r6, r6, #0x40000
++ str r6, [r3, #0x130]
++
++ /* Disable main 2p5. */
++ ldr r6, [r3, #0x130]
++ bic r6, r6, #0x1
++ str r6, [r3, #0x130]
++
++ /*
++ * Set the OSC bias current to -37.5%
++ * to drop the power on VDDHIGH.
++ */
++ ldr r6, [r3, #0x150]
++ orr r6, r6, #0xC000
++ str r6, [r3, #0x150]
++
++ /* Enable low power bandgap */
++ ldr r6, [r3, #0x260]
++ orr r6, r6, #0x20
++ str r6, [r3, #0x260]
++
++ /*
++ * Turn off the bias current
++ * from the regular bandgap.
++ */
++ ldr r6, [r3, #0x260]
++ orr r6, r6, #0x80
++ str r6, [r3, #0x260]
++
++ /*
++ * Clear the REFTOP_SELFBIASOFF,
++ * self-bias circuit of the band gap.
++ * Per RM, should be cleared when
++ * band gap is powered down.
++ */
++ ldr r6, [r3, #0x150]
++ bic r6, r6, #0x8
++ str r6, [r3, #0x150]
++
++ /* Power down the regular bandgap. */
++ ldr r6, [r3, #0x150]
++ orr r6, r6, #0x1
++ str r6, [r3, #0x150]
++
++ b do_wfi
++
++do_audio_arm_clk:
++ /*
++ * ARM is from PLL2_PFD2_400M here.
++ * Switch ARM to bypassed PLL1.
++ */
++ ldr r6, [r2, #0xC]
++ bic r6, r6, #0x4
++ str r6, [r2, #0xC]
++
++ /*
++ * Set the ARM_PODF to divide by 2
++ * as IPG is at 4MHz, we cannot run
++ * ARM_CLK above 9.6MHz when
++ * system enters WAIT mode.
++ */
++ ldr r6, =0x2
++ str r6, [r2, #0x10]
++
++ /* Loop till podf is accepted. */
++podf_loop_audio:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop_audio
++
++do_wfi:
++ /* Now do WFI. */
++ wfi
++
++ /* Set original ARM PODF back. */
++ str r0, [r2, #0x10]
++
++ /* Loop till podf is accepted. */
++podf_loop1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop1
++
++ cmp r11, #1
++ beq audio_arm_clk_restore
++
++ /*
++ * Check if powered down
++ * analog components.
++ */
++ cmp r7, #0x1
++ bne skip_analog_restore
++
++ /* Power up the regular bandgap. */
++ ldr r6, [r3, #0x150]
++ bic r6, r6, #0x1
++ str r6, [r3, #0x150]
++
++ /*
++ * Turn on the bias current
++ * from the regular bandgap.
++ */
++ ldr r6, [r3, #0x260]
++ bic r6, r6, #0x80
++ str r6, [r3, #0x260]
++
++ /* Disable the low power bandgap */
++ ldr r6, [r3, #0x260]
++ bic r6, r6, #0x20
++ str r6, [r3, #0x260]
++
++ /*
++ * Set the OSC bias current to max
++ * value for normal operation.
++ */
++ ldr r6, [r3, #0x150]
++ bic r6, r6, #0xC000
++ str r6, [r3, #0x150]
++
++ /* Enable main 2p5. */
++ ldr r6, [r3, #0x130]
++ orr r6, r6, #0x1
++ str r6, [r3, #0x130]
++
++ /* Ensure the 2P5 is up. */
++loop_2p5:
++ ldr r6, [r3, #0x130]
++ and r6, r6, #0x20000
++ cmp r6, #0x20000
++ bne loop_2p5
++
++ /* Disable the weak 2P5 */
++ ldr r6, [r3, #0x130]
++ bic r6, r6, #0x40000
++ str r6, [r3, #0x130]
++
++ /* Enable 1p1 brown out. */
++ ldr r6, [r3, #0x110]
++ orr r6, r6, #0x2
++ str r6, [r3, #0x110]
++
++skip_analog_restore:
++
++ /* Power up PLL1 and un-bypass it. */
++ ldr r6, =(1 << 12)
++ str r6, [r3, #0x08]
++
++ /* Wait for PLL1 to relock. */
++wait_for_pll_lock:
++ ldr r6, [r3, #0x0]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ bne wait_for_pll_lock
++
++ ldr r6, =(1 << 16)
++ str r6, [r3, #0x08]
++
++ /* Set PLL1_sw_clk back to PLL1. */
++ ldr r6, [r2, #0x0c]
++ bic r6, r6, #0x4
++ str r6, [r2, #0xc]
++
++ /* Restore AHB/AXI back. */
++ str r9, [r2, #0x14]
++
++ /* Loop till podf is accepted. */
++ahb_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne podf_loop1
++
++ b wfi_restore
++
++ audio_arm_clk_restore:
++ /* Move ARM back to PLL2_PFD2_400M */
++ ldr r6, [r2, #0xC]
++ orr r6, r6, #0x4
++ str r6, [r2, #0xC]
++
++wfi_restore:
++ /* get suspend_iram_base */
++ mov r9, r10
++ add r9, r9, #IRAM_WAIT_SIZE
++
++ /* Restore the DDR IO before exiting self-refresh. */
++ sl_ddr_io_restore
++
++ /*
++ * Set MMDC back to 24MHz.
++ * Set periph2_clk2_podf to divide by 1.
++ * Now set MMDC PODF to divide by 1.
++ */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x3f
++ str r6, [r2, #0x14]
++
++mmdc_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0x0
++ bne mmdc_podf1
++
++ /* clear DVFS - exit from self refresh mode */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_clear_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ beq poll_dvfs_clear_1
++
++ /*
++ * Add these nops so that the
++ * prefetcher will not try to get
++ * any instructions from DDR.
++ * The prefetch depth is about 23
++ * on A9, so adding 25 nops.
++ */
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ nop
++ nop
++ nop
++ nop
++ nop
++
++ /* Enable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* clear SBS - unblock DDR accesses */
++ ldr r6, [r8, #0x410]
++ bic r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++
++ pop {r4-r11}
++
++ /* Restore registers */
++ mov pc, lr
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/Kconfig linux-3.14.40/arch/arm/mach-imx/Kconfig
+--- linux-3.14.40.orig/arch/arm/mach-imx/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/Kconfig 2015-05-01 14:57:57.831427001 -0500
+@@ -1,5 +1,6 @@
+ config ARCH_MXC
+ bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
++ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select ARM_CPU_SUSPEND if PM
+ select ARM_PATCH_PHYS_VIRT
+@@ -13,6 +14,7 @@
+ select PINCTRL
+ select SOC_BUS
+ select SPARSE_IRQ
++ select SRAM
+ select USE_OF
+ help
+ Support for Freescale MXC/iMX-based family of processors
+@@ -63,7 +65,6 @@
+
+ config HAVE_IMX_SRC
+ def_bool y if SMP
+- select ARCH_HAS_RESET_CONTROLLER
+
+ config IMX_HAVE_IOMUX_V1
+ bool
+@@ -791,6 +792,8 @@
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select ARM_ERRATA_775420
++ select ARM_ERRATA_794072 if SMP
++ select ARM_ERRATA_761320 if SMP
+ select ARM_GIC
+ select CPU_V7
+ select HAVE_ARM_SCU if SMP
+@@ -803,11 +806,13 @@
+ select MFD_SYSCON
+ select MIGHT_HAVE_PCI
+ select PCI_DOMAINS if PCI
++ select ARCH_SUPPORTS_MSI
+ select PINCTRL_IMX6Q
+ select PL310_ERRATA_588369 if CACHE_PL310
+ select PL310_ERRATA_727915 if CACHE_PL310
+ select PL310_ERRATA_769419 if CACHE_PL310
+ select PM_OPP if PM
++ select ZONE_DMA
+
+ help
+ This enables support for Freescale i.MX6 Quad processor.
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/lpddr2_freq_imx6.S linux-3.14.40/arch/arm/mach-imx/lpddr2_freq_imx6.S
+--- linux-3.14.40.orig/arch/arm/mach-imx/lpddr2_freq_imx6.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/lpddr2_freq_imx6.S 2015-05-01 14:57:57.831427001 -0500
+@@ -0,0 +1,484 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++
++ .macro mx6sl_switch_to_24MHz
++
++ /*
++ * Set MMDC clock to be sourced from PLL3.
++ * Ensure first periph2_clk2 is sourced from PLL3.
++ * Set the PERIPH2_CLK2_PODF to divide by 2.
++ */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x7
++ orr r6, r6, #0x1
++ str r6, [r2, #0x14]
++
++ /* Select PLL3 to source MMDC. */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* Switch periph2_clk_sel to run from PLL3. */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch1
++
++ /*
++ * Need to clock gate the 528 PFDs before
++ * powering down PLL2.
++ * Only the PLL2_PFD2_400M should be ON
++ * at this time, so only clock gate that one.
++ */
++ ldr r6, [r3, #0x100]
++ orr r6, r6, #0x800000
++ str r6, [r3, #0x100]
++
++ /*
++ * Set PLL2 to bypass state. We should be here
++ * only if MMDC is not sourced from PLL2.
++ */
++ ldr r6, [r3, #0x30]
++ orr r6, r6, #0x10000
++ str r6, [r3, #0x30]
++
++ ldr r6, [r3, #0x30]
++ orr r6, r6, #0x1000
++ str r6, [r3, #0x30]
++
++ /* Ensure pre_periph2_clk_mux is set to pll2 */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x600000
++ str r6, [r2, #0x18]
++
++ /* Set MMDC clock to be sourced from the bypassed PLL2. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch2:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch2
++
++ /*
++ * Now move MMDC back to the periph2_clk2 source
++ * after selecting PLL2 as the option.
++ * Select PLL2 as the source.
++ */
++ ldr r6, [r2, #0x18]
++ orr r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* set periph2_clk2_podf to divide by 1. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x7
++ str r6, [r2, #0x14]
++
++ /* Now move periph2_clk to periph2_clk2 source */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch3:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch3
++
++ /* Now set the MMDC PODF back to 1.*/
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ str r6, [r2, #0x14]
++
++mmdc_podf0:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf0
++
++ .endm
++
++ .macro ddr_switch_400MHz
++
++ /* Set MMDC divider first, in case PLL3 is at 480MHz. */
++ ldr r6, [r3, #0x10]
++ and r6, r6, #0x10000
++ cmp r6, #0x10000
++ beq pll3_in_bypass
++
++ /* Set MMDC divider to divide by 2. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, #0x8
++ str r6, [r2, #0x14]
++
++mmdc_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf
++
++pll3_in_bypass:
++ /*
++ * Check if we are switching between
++ * 400MHz <-> 100MHz. If so, we should
++ * try to source MMDC from PLL2_200M.
++ */
++ cmp r1, #0
++ beq not_low_bus_freq
++
++ /* Ensure that MMDC is sourced from PLL2 mux first. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch4:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch4
++
++not_low_bus_freq:
++ /* Now ensure periph2_clk2_sel mux is set to PLL3 */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* Now switch MMDC to PLL3. */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch5:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch5
++
++ /*
++ * Check if PLL2 is already unlocked.
++ * If so do nothing with PLL2.
++ */
++ cmp r1, #0
++ beq pll2_already_on
++
++ /* Now power up PLL2 and unbypass it. */
++ ldr r6, [r3, #0x30]
++ bic r6, r6, #0x1000
++ str r6, [r3, #0x30]
++
++ /* Make sure PLL2 has locked.*/
++wait_for_pll_lock:
++ ldr r6, [r3, #0x30]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ bne wait_for_pll_lock
++
++ ldr r6, [r3, #0x30]
++ bic r6, r6, #0x10000
++ str r6, [r3, #0x30]
++
++ /*
++ * Need to enable the 528 PFDs after
++ * powering up PLL2.
++ * Only the PLL2_PFD2_400M should be ON
++ * as it feeds the MMDC. Rest should have
++ * been managed by clock code.
++ */
++ ldr r6, [r3, #0x100]
++ bic r6, r6, #0x800000
++ str r6, [r3, #0x100]
++
++pll2_already_on:
++ /*
++ * Now switch MMDC clk back to pll2_mux option.
++ * Ensure pre_periph2_clk2 is set to pll2_pfd_400M.
++ * If switching to audio DDR freq, set the
++ * pre_periph2_clk2 to PLL2_PFD_200M
++ */
++ ldr r6, =400000000
++ cmp r6, r0
++ bne use_pll2_pfd_200M
++
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x600000
++ orr r6, r6, #0x200000
++ str r6, [r2, #0x18]
++ ldr r6, =400000000
++ b cont2
++
++use_pll2_pfd_200M:
++ ldr r6, [r2, #0x18]
++ orr r6, r6, #0x600000
++ str r6, [r2, #0x18]
++ ldr r6, =200000000
++
++cont2:
++ ldr r4, [r2, #0x14]
++ bic r4, r4, #0x4000000
++ str r4, [r2, #0x14]
++
++periph2_clk_switch6:
++ ldr r4, [r2, #0x48]
++ cmp r4, #0
++ bne periph2_clk_switch6
++
++change_divider_only:
++ /*
++ * Calculate the MMDC divider
++ * based on the requested freq.
++ */
++ ldr r4, =0
++Loop2:
++ sub r6, r6, r0
++ cmp r6, r0
++ blt Div_Found
++ add r4, r4, #1
++ bgt Loop2
++
++ /* Shift divider into correct offset. */
++ lsl r4, r4, #3
++Div_Found:
++ /* Set the MMDC PODF. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, r4
++ str r6, [r2, #0x14]
++
++mmdc_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf1
++
++ .endm
++
++ .macro mmdc_clk_lower_100MHz
++
++ /*
++ * Prior to reducing the DDR frequency (at 528/400 MHz),
++ * read the Measure unit count bits (MU_UNIT_DEL_NUM)
++ */
++ ldr r5, =0x8B8
++ ldr r6, [r8, r5]
++ /* Original MU unit count */
++ mov r6, r6, LSR #16
++ ldr r4, =0x3FF
++ and r6, r6, r4
++ /* Original MU unit count * 2 */
++ mov r7, r6, LSL #1
++ /*
++ * Bypass the automatic measure unit when below 100 MHz
++ * by setting the Measure unit bypass enable bit (MU_BYP_EN)
++ */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x400
++ str r6, [r8, r5]
++ /*
++ * Double the measure count value read in step 1 and program it in the
++ * measurement bypass bits (MU_BYP_VAL) of the MMDC PHY Measure Unit
++ * Register for the reduced frequency operation below 100 MHz
++ */
++ ldr r6, [r8, r5]
++ ldr r4, =0x3FF
++ bic r6, r6, r4
++ orr r6, r6, r7
++ str r6, [r8, r5]
++ /* Now perform a Force Measurement. */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x800
++ str r6, [r8, r5]
++ /* Wait for FRC_MSR to clear. */
++force_measure:
++ ldr r6, [r8, r5]
++ and r6, r6, #0x800
++ cmp r6, #0x0
++ bne force_measure
++
++ .endm
++
++ .macro mmdc_clk_above_100MHz
++
++ /* Make sure that the PHY measurement unit is NOT in bypass mode */
++ ldr r5, =0x8B8
++ ldr r6, [r8, r5]
++ bic r6, r6, #0x400
++ str r6, [r8, r5]
++ /* Now perform a Force Measurement. */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x800
++ str r6, [r8, r5]
++ /* Wait for FRC_MSR to clear. */
++force_measure1:
++ ldr r6, [r8, r5]
++ and r6, r6, #0x800
++ cmp r6, #0x0
++ bne force_measure1
++ .endm
++
++/*
++ * mx6_lpddr2_freq_change
++ *
++ * Make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ * r0: DDR freq.
++ * r1: low_bus_freq_mode flag
++ * r2: Pointer to array containing addresses of registers.
++ */
++ .align 3
++ENTRY(mx6_lpddr2_freq_change)
++
++ push {r4-r10}
++
++ mov r4, r2
++ ldr r3, [r4] @ANATOP_BASE_ADDR
++ ldr r2, [r4, #0x4] @CCM_BASE_ADDR
++ ldr r8, [r4, #0x8] @MMDC_P0_BASE_ADDR
++ ldr r7, [r4, #0xC] @L2_BASE_ADDR
++
++lpddr2_freq_change:
++ adr r9, lpddr2_freq_change
++
++ /* Prime all TLB entries. */
++ ldr r6, [r9]
++ ldr r6, [r8]
++ ldr r6, [r3]
++ ldr r6, [r2]
++
++ /* Drain all the L1 buffers. */
++ dsb
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Need to make sure the buffers in L2 are drained.
++ * Performing a sync operation does this.
++ */
++ mov r6, #0x0
++ str r6, [r7, #0x730]
++#endif
++
++ /*
++ * The second dsb might be needed to keep cache sync (device write)
++ * ordering with the memory accesses before it.
++ */
++ dsb
++ isb
++
++ /* Disable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* MMDC0_MDPDC disable power down timer */
++ ldr r6, [r8, #0x4]
++ bic r6, r6, #0xff00
++ str r6, [r8, #0x4]
++
++ /* Delay for a while */
++ ldr r10, =10
++delay1:
++ ldr r7, =0
++cont1:
++ ldr r6, [r8, r7]
++ add r7, r7, #4
++ cmp r7, #16
++ bne cont1
++ sub r10, r10, #1
++ cmp r10, #0
++ bgt delay1
++
++ /* Make the DDR explicitly enter self-refresh. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_set_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ bne poll_dvfs_set_1
++
++ /* set SBS step-by-step mode */
++ ldr r6, [r8, #0x410]
++ orr r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ ldr r10, =100000000
++ cmp r0, r10
++ bgt set_ddr_mu_above_100
++ mmdc_clk_lower_100MHz
++
++set_ddr_mu_above_100:
++ ldr r10, =24000000
++ cmp r0, r10
++ beq set_to_24MHz
++
++ ddr_switch_400MHz
++
++ ldr r10,=100000000
++ cmp r0, r10
++ blt done
++ mmdc_clk_above_100MHz
++
++ b done
++
++set_to_24MHz:
++ mx6sl_switch_to_24MHz
++
++done:
++ /* clear DVFS - exit from self refresh mode */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_clear_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ beq poll_dvfs_clear_1
++
++ /* Enable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ ldr r10, =24000000
++ cmp r0, r10
++ beq skip_power_down
++
++ /* Enable MMDC power down timer. */
++ ldr r6, [r8, #0x4]
++ orr r6, r6, #0x5500
++ str r6, [r8, #0x4]
++
++skip_power_down:
++ /* clear SBS - unblock DDR accesses */
++ ldr r6, [r8, #0x410]
++ bic r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ pop {r4-r10}
++
++ /* Restore registers */
++ mov pc, lr
++
++ .type mx6_lpddr2_do_iram, #object
++ENTRY(mx6_lpddr2_do_iram)
++ .word mx6_lpddr2_freq_change
++ .size mx6_lpddr2_freq_change, . - mx6_lpddr2_freq_change
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/mach-imx6q.c linux-3.14.40/arch/arm/mach-imx/mach-imx6q.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/mach-imx6q.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/mach-imx6q.c 2015-05-01 14:57:57.831427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2011-2013 Freescale Semiconductor, Inc.
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -15,6 +15,7 @@
+ #include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
++#include <linux/gpio.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/irq.h>
+@@ -22,15 +23,19 @@
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/of_gpio.h>
+ #include <linux/of_platform.h>
+ #include <linux/pm_opp.h>
+ #include <linux/pci.h>
+ #include <linux/phy.h>
++#include <linux/pm_opp.h>
+ #include <linux/reboot.h>
+ #include <linux/regmap.h>
+ #include <linux/micrel_phy.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/of_net.h>
++#include <linux/fsl_otp.h>
+ #include <asm/mach/arch.h>
+ #include <asm/mach/map.h>
+ #include <asm/system_misc.h>
+@@ -194,6 +199,87 @@
+
+ }
+
++static void __init imx6q_csi_mux_init(void)
++{
++ /*
++ * MX6Q SabreSD board:
++ * IPU1 CSI0 connects to parallel interface.
++ * Set GPR1 bit 19 to 0x1.
++ *
++ * MX6DL SabreSD board:
++ * IPU1 CSI0 connects to parallel interface.
++ * Set GPR13 bit 0-2 to 0x4.
++ * IPU1 CSI1 connects to MIPI CSI2 virtual channel 1.
++ * Set GPR13 bit 3-5 to 0x1.
++ */
++ struct regmap *gpr;
++
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr)) {
++ if (of_machine_is_compatible("fsl,imx6q-sabresd") ||
++ of_machine_is_compatible("fsl,imx6q-sabreauto"))
++ regmap_update_bits(gpr, IOMUXC_GPR1, 1 << 19, 1 << 19);
++ else if (of_machine_is_compatible("fsl,imx6dl-sabresd") ||
++ of_machine_is_compatible("fsl,imx6dl-sabreauto"))
++ regmap_update_bits(gpr, IOMUXC_GPR13, 0x3F, 0x0C);
++ } else {
++ pr_err("%s(): failed to find fsl,imx6q-iomux-gpr regmap\n",
++ __func__);
++ }
++}
++
++#define OCOTP_MACn(n) (0x00000620 + (n) * 0x10)
++void __init imx6_enet_mac_init(const char *compatible)
++{
++ struct device_node *enet_np;
++ struct property *newmac;
++ u32 macaddr_low, macaddr_high;
++ u8 *macaddr;
++ int ret;
++
++ enet_np = of_find_compatible_node(NULL, NULL, compatible);
++ if (!enet_np)
++ return;
++
++ if (of_get_mac_address(enet_np))
++ goto put_enet_node;
++
++ ret = fsl_otp_readl(OCOTP_MACn(0), &macaddr_high);
++ ret = fsl_otp_readl(OCOTP_MACn(1), &macaddr_low);
++
++ newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL);
++ if (!newmac)
++ goto put_enet_node;
++
++ newmac->value = newmac + 1;
++ newmac->length = 6;
++ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
++ if (!newmac->name) {
++ kfree(newmac);
++ goto put_enet_node;
++ }
++
++ macaddr = newmac->value;
++ macaddr[5] = macaddr_high & 0xff;
++ macaddr[4] = (macaddr_high >> 8) & 0xff;
++ macaddr[3] = (macaddr_high >> 16) & 0xff;
++ macaddr[2] = (macaddr_high >> 24) & 0xff;
++ macaddr[1] = macaddr_low & 0xff;
++ macaddr[0] = (macaddr_low >> 8) & 0xff;
++
++ of_update_property(enet_np, newmac);
++
++put_enet_node:
++ of_node_put(enet_np);
++}
++
++static inline void imx6q_enet_init(void)
++{
++ imx6_enet_mac_init("fsl,imx6q-fec");
++ imx6q_enet_phy_init();
++ imx6q_1588_init();
++}
++
+ static void __init imx6q_init_machine(void)
+ {
+ struct device *parent;
+@@ -207,45 +293,56 @@
+ if (parent == NULL)
+ pr_warn("failed to initialize soc device\n");
+
+- imx6q_enet_phy_init();
+-
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
+
++ imx6q_enet_init();
+ imx_anatop_init();
+- imx6q_pm_init();
+- imx6q_1588_init();
++ cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
++ imx6q_csi_mux_init();
+ }
+
+ #define OCOTP_CFG3 0x440
+ #define OCOTP_CFG3_SPEED_SHIFT 16
+ #define OCOTP_CFG3_SPEED_1P2GHZ 0x3
++#define OCOTP_CFG3_SPEED_1GHZ 0x2
++#define OCOTP_CFG3_SPEED_850MHZ 0x1
++#define OCOTP_CFG3_SPEED_800MHZ 0x0
+
+-static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
++static void __init imx6q_opp_check_speed_grading(struct device *cpu_dev)
+ {
+- struct device_node *np;
+- void __iomem *base;
+ u32 val;
++ int ret;
+
+- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+- if (!np) {
+- pr_warn("failed to find ocotp node\n");
+- return;
++ ret = fsl_otp_readl(OCOTP_CFG3, &val);
++ if (ret) {
++ pr_warn("failed to read ocotp\n");
++ return;
+ }
+
+- base = of_iomap(np, 0);
+- if (!base) {
+- pr_warn("failed to map ocotp\n");
+- goto put_node;
+- }
++ /*
++ * SPEED_GRADING[1:0] defines the max speed of ARM:
++ * 2b'11: 1200000000Hz; -- i.MX6Q only.
++ * 2b'10: 1000000000Hz;
++ * 2b'01: 850000000Hz; -- i.MX6Q Only, exclusive with 1GHz.
++ * 2b'00: 800000000Hz;
++ * We need to set the max speed of ARM according to fuse map.
++ */
+
+- val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+- if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
+- if (dev_pm_opp_disable(cpu_dev, 1200000000))
+- pr_warn("failed to disable 1.2 GHz OPP\n");
+-
+-put_node:
+- of_node_put(np);
++ if (cpu_is_imx6q()) {
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_1P2GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 1200000000))
++ pr_warn("failed to disable 1.2 GHz OPP\n");
++ }
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_1GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 996000000))
++ pr_warn("failed to disable 1 GHz OPP\n");
++ if (cpu_is_imx6q()) {
++ if ((val & 0x3) < OCOTP_CFG3_SPEED_850MHZ ||
++ (val & 0x3) == OCOTP_CFG3_SPEED_1GHZ)
++ if (dev_pm_opp_disable(cpu_dev, 852000000))
++ pr_warn("failed to disable 850 MHz OPP\n");
++ }
+ }
+
+ static void __init imx6q_opp_init(void)
+@@ -268,29 +365,70 @@
+ goto put_node;
+ }
+
+- imx6q_opp_check_1p2ghz(cpu_dev);
++ imx6q_opp_check_speed_grading(cpu_dev);
+
+ put_node:
+ of_node_put(np);
+ }
+
++#define ESAI_AUDIO_MCLK 24576000
++
++static void __init imx6q_audio_lvds2_init(void)
++{
++ struct clk *pll4_sel, *lvds2_in, *pll4_audio_div, *esai;
++
++ pll4_audio_div = clk_get_sys(NULL, "pll4_audio_div");
++ pll4_sel = clk_get_sys(NULL, "pll4_sel");
++ lvds2_in = clk_get_sys(NULL, "lvds2_in");
++ esai = clk_get_sys(NULL, "esai");
++ if (IS_ERR(pll4_audio_div) || IS_ERR(pll4_sel) ||
++ IS_ERR(lvds2_in) || IS_ERR(esai))
++ return;
++
++ if (clk_get_rate(lvds2_in) != ESAI_AUDIO_MCLK)
++ return;
++
++ clk_set_parent(pll4_sel, lvds2_in);
++ clk_set_rate(pll4_audio_div, 786432000);
++ clk_set_rate(esai, ESAI_AUDIO_MCLK);
++}
++
+ static struct platform_device imx6q_cpufreq_pdev = {
+- .name = "imx6q-cpufreq",
++ .name = "imx6-cpufreq",
+ };
+
+ static void __init imx6q_init_late(void)
+ {
++ struct regmap *gpr;
++
++ /*
++ * Need to force IOMUXC irq pending to meet CCM low power mode
++ * restriction; this is recommended by the hardware team.
++ */
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr))
++ regmap_update_bits(gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_GINT_MASK,
++ IMX6Q_GPR1_GINT_ASSERT);
++
+ /*
+ * WAIT mode is broken on TO 1.0 and 1.1, so there is no point
+ * to run cpuidle on them.
+ */
+- if (imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
++ if ((cpu_is_imx6q() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
++ || (cpu_is_imx6dl() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0))
+ imx6q_cpuidle_init();
+
+- if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
++ if (IS_ENABLED(CONFIG_ARM_IMX6_CPUFREQ)) {
+ imx6q_opp_init();
+ platform_device_register(&imx6q_cpufreq_pdev);
+ }
++
++ if (of_machine_is_compatible("fsl,imx6q-sabreauto")
++ || of_machine_is_compatible("fsl,imx6dl-sabreauto")) {
++ imx6q_audio_lvds2_init();
++ }
+ }
+
+ static void __init imx6q_map_io(void)
+@@ -315,6 +453,12 @@
+ };
+
+ DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
++ /*
++ * i.MX6Q/DL maps system memory at 0x10000000 (offset 256MiB), and
++ * GPU has a limit on physical address that it accesses, which must
++ * be below 2GiB.
++ */
++ .dma_zone_size = (SZ_2G - SZ_256M),
+ .smp = smp_ops(imx_smp_ops),
+ .map_io = imx6q_map_io,
+ .init_irq = imx6q_init_irq,
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/mach-imx6sl.c linux-3.14.40/arch/arm/mach-imx/mach-imx6sl.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/mach-imx6sl.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/mach-imx6sl.c 2015-05-01 14:57:57.835427001 -0500
+@@ -17,8 +17,9 @@
+ #include <asm/mach/map.h>
+
+ #include "common.h"
++#include "cpuidle.h"
+
+-static void __init imx6sl_fec_init(void)
++static void __init imx6sl_fec_clk_init(void)
+ {
+ struct regmap *gpr;
+
+@@ -34,8 +35,17 @@
+ }
+ }
+
++static inline void imx6sl_fec_init(void)
++{
++ imx6sl_fec_clk_init();
++ imx6_enet_mac_init("fsl,imx6sl-fec");
++}
++
+ static void __init imx6sl_init_late(void)
+ {
++ /* Init CPUIDLE */
++ imx6sl_cpuidle_init();
++
+ /* imx6sl reuses imx6q cpufreq driver */
+ if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
+ platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
+@@ -55,8 +65,7 @@
+
+ imx6sl_fec_init();
+ imx_anatop_init();
+- /* Reuse imx6q pm code */
+- imx6q_pm_init();
++ imx6sl_pm_init();
+ }
+
+ static void __init imx6sl_init_irq(void)
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/mach-vf610.c linux-3.14.40/arch/arm/mach-imx/mach-vf610.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/mach-vf610.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/mach-vf610.c 2015-05-01 14:57:57.835427001 -0500
+@@ -22,7 +22,7 @@
+
+ static void __init vf610_init_irq(void)
+ {
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ irqchip_init();
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/Makefile linux-3.14.40/arch/arm/mach-imx/Makefile
+--- linux-3.14.40.orig/arch/arm/mach-imx/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/Makefile 2015-05-01 14:57:57.843427001 -0500
+@@ -30,6 +30,7 @@
+ ifeq ($(CONFIG_CPU_IDLE),y)
+ obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
+ obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
++obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o
+ endif
+
+ ifdef CONFIG_SND_IMX_SOC
+@@ -101,9 +102,18 @@
+ obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
+ obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
+
+-obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
+-# i.MX6SL reuses i.MX6Q code
+-obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
++AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
++obj-$(CONFIG_PM) += suspend-imx6.o pm-imx6.o headsmp.o
++
++obj-y += busfreq-imx6.o
++ifeq ($(CONFIG_ARM_IMX6_CPUFREQ),y)
++obj-$(CONFIG_SOC_IMX6Q) += ddr3_freq_imx6.o busfreq_ddr3.o
++obj-$(CONFIG_SOC_IMX6SL) += lpddr2_freq_imx6.o busfreq_lpddr2.o
++endif
++ifeq ($(CONFIG_CPU_IDLE), y)
++obj-$(CONFIG_SOC_IMX6SL) += imx6sl_wfi.o
++endif
++
+
+ # i.MX5 based machines
+ obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/mx6.h linux-3.14.40/arch/arm/mach-imx/mx6.h
+--- linux-3.14.40.orig/arch/arm/mach-imx/mx6.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/mx6.h 2015-05-01 14:57:57.843427001 -0500
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_ARCH_MXC_IOMAP_H__
++#define __ASM_ARCH_MXC_IOMAP_H__
++
++#define MX6Q_IO_P2V(x) IMX_IO_P2V(x)
++#define MX6Q_IO_ADDRESS(x) IOMEM(MX6Q_IO_P2V(x))
++
++#define MX6Q_L2_BASE_ADDR 0x00a02000
++#define MX6Q_L2_SIZE 0x1000
++#define MX6Q_IOMUXC_BASE_ADDR 0x020e0000
++#define MX6Q_IOMUXC_SIZE 0x4000
++#define MX6Q_SRC_BASE_ADDR 0x020d8000
++#define MX6Q_SRC_SIZE 0x4000
++#define MX6Q_CCM_BASE_ADDR 0x020c4000
++#define MX6Q_CCM_SIZE 0x4000
++#define MX6Q_ANATOP_BASE_ADDR 0x020c8000
++#define MX6Q_ANATOP_SIZE 0x1000
++#define MX6Q_GPC_BASE_ADDR 0x020dc000
++#define MX6Q_GPC_SIZE 0x4000
++#define MX6Q_MMDC_P0_BASE_ADDR 0x021b0000
++#define MX6Q_MMDC_P0_SIZE 0x4000
++#define MX6Q_MMDC_P1_BASE_ADDR 0x021b4000
++#define MX6Q_MMDC_P1_SIZE 0x4000
++
++#define MX6_SUSPEND_IRAM_SIZE 0x1000
++#endif
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/mxc.h linux-3.14.40/arch/arm/mach-imx/mxc.h
+--- linux-3.14.40.orig/arch/arm/mach-imx/mxc.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/mxc.h 2015-05-01 14:57:57.843427001 -0500
+@@ -42,6 +42,8 @@
+ #define IMX_CHIP_REVISION_1_1 0x11
+ #define IMX_CHIP_REVISION_1_2 0x12
+ #define IMX_CHIP_REVISION_1_3 0x13
++#define IMX_CHIP_REVISION_1_4 0x14
++#define IMX_CHIP_REVISION_1_5 0x15
+ #define IMX_CHIP_REVISION_2_0 0x20
+ #define IMX_CHIP_REVISION_2_1 0x21
+ #define IMX_CHIP_REVISION_2_2 0x22
+@@ -177,6 +179,7 @@
+ extern struct cpu_op *(*get_cpu_op)(int *op);
+ #endif
+
++#define cpu_is_imx6() (cpu_is_imx6q() || cpu_is_imx6dl() || cpu_is_imx6sl())
+ #define cpu_is_mx3() (cpu_is_mx31() || cpu_is_mx35())
+ #define cpu_is_mx2() (cpu_is_mx21() || cpu_is_mx27())
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/pm-imx6.c linux-3.14.40/arch/arm/mach-imx/pm-imx6.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/pm-imx6.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/pm-imx6.c 2015-05-01 14:57:57.843427001 -0500
+@@ -0,0 +1,580 @@
++/*
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
++ * Copyright 2011 Linaro Ltd.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/genalloc.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/regmap.h>
++#include <linux/suspend.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/proc-fns.h>
++#include <asm/suspend.h>
++#include <asm/tlb.h>
++
++#include "common.h"
++#include "hardware.h"
++
++#define CCR 0x0
++#define BM_CCR_WB_COUNT (0x7 << 16)
++#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
++#define BM_CCR_RBC_EN (0x1 << 27)
++
++#define CLPCR 0x54
++#define BP_CLPCR_LPM 0
++#define BM_CLPCR_LPM (0x3 << 0)
++#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
++#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
++#define BM_CLPCR_SBYOS (0x1 << 6)
++#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
++#define BM_CLPCR_VSTBY (0x1 << 8)
++#define BP_CLPCR_STBY_COUNT 9
++#define BM_CLPCR_STBY_COUNT (0x3 << 9)
++#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
++#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
++#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
++#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
++#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
++#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
++#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
++#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
++#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
++#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
++#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
++
++#define CGPR 0x64
++#define BM_CGPR_INT_MEM_CLK_LPM (0x1 << 17)
++
++#define MX6Q_SUSPEND_OCRAM_SIZE 0x1000
++#define MX6_MAX_MMDC_IO_NUM 33
++
++static void __iomem *ccm_base;
++static void __iomem *suspend_ocram_base;
++static void (*imx6_suspend_in_ocram_fn)(void __iomem *ocram_vbase);
++
++/*
++ * suspend ocram space layout:
++ * ======================== high address ======================
++ * .
++ * .
++ * .
++ * ^
++ * ^
++ * ^
++ * imx6_suspend code
++ * PM_INFO structure(imx6_cpu_pm_info)
++ * ======================== low address =======================
++ */
++
++struct imx6_pm_base {
++ phys_addr_t pbase;
++ void __iomem *vbase;
++};
++
++struct imx6_pm_socdata {
++ u32 cpu_type;
++ const char *mmdc_compat;
++ const char *src_compat;
++ const char *iomuxc_compat;
++ const char *gpc_compat;
++ const u32 mmdc_io_num;
++ const u32 *mmdc_io_offset;
++};
++
++static const u32 imx6q_mmdc_io_offset[] __initconst = {
++ 0x5ac, 0x5b4, 0x528, 0x520, /* DQM0 ~ DQM3 */
++ 0x514, 0x510, 0x5bc, 0x5c4, /* DQM4 ~ DQM7 */
++ 0x56c, 0x578, 0x588, 0x594, /* CAS, RAS, SDCLK_0, SDCLK_1 */
++ 0x5a8, 0x5b0, 0x524, 0x51c, /* SDQS0 ~ SDQS3 */
++ 0x518, 0x50c, 0x5b8, 0x5c0, /* SDQS4 ~ SDQS7 */
++ 0x784, 0x788, 0x794, 0x79c, /* GPR_B0DS ~ GPR_B3DS */
++ 0x7a0, 0x7a4, 0x7a8, 0x748, /* GPR_B4DS ~ GPR_B7DS */
++ 0x59c, 0x5a0, 0x750, 0x774, /* SODT0, SODT1, MODE_CTL, MODE */
++ 0x74c, /* GPR_ADDS */
++};
++
++static const struct imx6_pm_socdata imx6q_pm_data __initconst = {
++ .cpu_type = MXC_CPU_IMX6Q,
++ .mmdc_compat = "fsl,imx6q-mmdc",
++ .src_compat = "fsl,imx6q-src",
++ .iomuxc_compat = "fsl,imx6q-iomuxc",
++ .gpc_compat = "fsl,imx6q-gpc",
++ .mmdc_io_num = ARRAY_SIZE(imx6q_mmdc_io_offset),
++ .mmdc_io_offset = imx6q_mmdc_io_offset,
++};
++
++/*
++ * This structure is for passing necessary data for low level ocram
++ * suspend code (arch/arm/mach-imx/suspend-imx6.S). If this struct
++ * definition is changed, the offset definition in
++ * arch/arm/mach-imx/suspend-imx6.S must also be changed accordingly;
++ * otherwise, the suspend to ocram function will be broken!
++ */
++struct imx6_cpu_pm_info {
++ phys_addr_t pbase; /* The physical address of pm_info. */
++ phys_addr_t resume_addr; /* The physical resume address for asm code */
++ u32 cpu_type;
++ u32 pm_info_size; /* Size of pm_info. */
++ struct imx6_pm_base mmdc_base;
++ struct imx6_pm_base src_base;
++ struct imx6_pm_base iomuxc_base;
++ struct imx6_pm_base ccm_base;
++ struct imx6_pm_base gpc_base;
++ struct imx6_pm_base l2_base;
++ u32 mmdc_io_num; /* Number of MMDC IOs which need to be saved/restored. */
++ u32 mmdc_io_val[MX6_MAX_MMDC_IO_NUM][2]; /* To save offset and value */
++} __aligned(8);
++
++void imx6q_set_cache_lpm_in_wait(bool enable)
++{
++ if ((cpu_is_imx6q() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_1) ||
++ (cpu_is_imx6dl() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0)) {
++ u32 val;
++
++ val = readl_relaxed(ccm_base + CGPR);
++ if (enable)
++ val |= BM_CGPR_INT_MEM_CLK_LPM;
++ else
++ val &= ~BM_CGPR_INT_MEM_CLK_LPM;
++ writel_relaxed(val, ccm_base + CGPR);
++ }
++}
++
++static void imx6q_enable_rbc(bool enable)
++{
++ u32 val;
++
++ /*
++ * need to mask all interrupts in GPC before
++ * operating RBC configurations
++ */
++ imx_gpc_mask_all();
++
++ /* configure RBC enable bit */
++ val = readl_relaxed(ccm_base + CCR);
++ val &= ~BM_CCR_RBC_EN;
++ val |= enable ? BM_CCR_RBC_EN : 0;
++ writel_relaxed(val, ccm_base + CCR);
++
++ /* configure RBC count */
++ val = readl_relaxed(ccm_base + CCR);
++ val &= ~BM_CCR_RBC_BYPASS_COUNT;
++ val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
++ writel(val, ccm_base + CCR);
++
++ /*
++ * need to delay at least 2 cycles of CKIL(32K)
++ * due to hardware design requirement, which is
++ * ~61us; here we use 65us to be safe
++ */
++ udelay(65);
++
++ /* restore GPC interrupt mask settings */
++ imx_gpc_restore_all();
++}
++
++static void imx6q_enable_wb(bool enable)
++{
++ u32 val;
++
++ /* configure well bias enable bit */
++ val = readl_relaxed(ccm_base + CLPCR);
++ val &= ~BM_CLPCR_WB_PER_AT_LPM;
++ val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
++ writel_relaxed(val, ccm_base + CLPCR);
++
++ /* configure well bias count */
++ val = readl_relaxed(ccm_base + CCR);
++ val &= ~BM_CCR_WB_COUNT;
++ val |= enable ? BM_CCR_WB_COUNT : 0;
++ writel_relaxed(val, ccm_base + CCR);
++}
++
++int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
++{
++ struct irq_desc *iomuxc_irq_desc;
++ u32 val = readl_relaxed(ccm_base + CLPCR);
++
++ val &= ~BM_CLPCR_LPM;
++ switch (mode) {
++ case WAIT_CLOCKED:
++ break;
++ case WAIT_UNCLOCKED:
++ val |= 0x1 << BP_CLPCR_LPM;
++ val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
++ val &= ~BM_CLPCR_VSTBY;
++ val &= ~BM_CLPCR_SBYOS;
++ if (cpu_is_imx6sl())
++ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
++ else
++ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
++ break;
++ case STOP_POWER_ON:
++ val |= 0x2 << BP_CLPCR_LPM;
++ val &= ~BM_CLPCR_VSTBY;
++ val &= ~BM_CLPCR_SBYOS;
++ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
++ break;
++ case WAIT_UNCLOCKED_POWER_OFF:
++ val |= 0x1 << BP_CLPCR_LPM;
++ val &= ~BM_CLPCR_VSTBY;
++ val &= ~BM_CLPCR_SBYOS;
++ break;
++ case STOP_POWER_OFF:
++ val |= 0x2 << BP_CLPCR_LPM;
++ val |= 0x3 << BP_CLPCR_STBY_COUNT;
++ val |= BM_CLPCR_VSTBY;
++ val |= BM_CLPCR_SBYOS;
++ if (cpu_is_imx6sl()) {
++ val |= BM_CLPCR_BYPASS_PMIC_READY;
++ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
++ } else {
++ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
++ }
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /*
++ * ERR007265: CCM: When improper low-power sequence is used,
++ * the SoC enters low power mode before the ARM core executes WFI.
++ *
++ * Software workaround:
++ * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
++ * by setting IOMUX_GPR1_GINT.
++ * 2) Software should then unmask IRQ #32 in GPC before setting CCM
++ * Low-Power mode.
++ * 3) Software should mask IRQ #32 right after CCM Low-Power mode
++ * is set (set bits 0-1 of CCM_CLPCR).
++ */
++ iomuxc_irq_desc = irq_to_desc(32);
++ imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
++ writel_relaxed(val, ccm_base + CLPCR);
++ imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
++
++ return 0;
++}
++
++static int imx6q_suspend_finish(unsigned long val)
++{
++ if (!imx6_suspend_in_ocram_fn) {
++ cpu_do_idle();
++ } else {
++ /*
++ * call low level suspend function in ocram,
++ * as we need to float DDR IO.
++ */
++ local_flush_tlb_all();
++ imx6_suspend_in_ocram_fn(suspend_ocram_base);
++ }
++
++ return 0;
++}
++
++static int imx6q_pm_enter(suspend_state_t state)
++{
++ struct regmap *g;
++
++ /*
++ * L2 can exit by 'reset' or Inband beacon (from remote EP)
++ * toggling phy_powerdown has same effect as 'inband beacon'
++ * So, toggle bit18 of GPR1, used as a workaround of errata
++ * "PCIe PCIe does not support L2 Power Down"
++ */
++ if (IS_ENABLED(CONFIG_PCI_IMX6)) {
++ g = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (IS_ERR(g)) {
++ pr_err("failed to find fsl,imx6q-iomux-gpr regmap\n");
++ return PTR_ERR(g);
++ }
++ regmap_update_bits(g, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD,
++ IMX6Q_GPR1_PCIE_TEST_PD);
++ }
++
++ switch (state) {
++ case PM_SUSPEND_STANDBY:
++ imx6q_set_lpm(STOP_POWER_ON);
++ imx6q_set_cache_lpm_in_wait(true);
++ imx_gpc_pre_suspend(false);
++ if (cpu_is_imx6sl())
++ imx6sl_set_wait_clk(true);
++ /* Zzz ... */
++ cpu_do_idle();
++ if (cpu_is_imx6sl())
++ imx6sl_set_wait_clk(false);
++ imx_gpc_post_resume();
++ imx6q_set_lpm(WAIT_CLOCKED);
++ break;
++ case PM_SUSPEND_MEM:
++ imx6q_set_cache_lpm_in_wait(false);
++ imx6q_set_lpm(STOP_POWER_OFF);
++ imx6q_enable_wb(true);
++ /*
++ * For suspend into ocram, the asm code already takes care of
++ * RBC setting, so we do NOT need to do that here.
++ */
++ if (!imx6_suspend_in_ocram_fn)
++ imx6q_enable_rbc(true);
++ imx_gpc_pre_suspend(true);
++ imx_anatop_pre_suspend();
++ imx_set_cpu_jump(0, v7_cpu_resume);
++ /* Zzz ... */
++ cpu_suspend(0, imx6q_suspend_finish);
++ if (cpu_is_imx6q() || cpu_is_imx6dl())
++ imx_smp_prepare();
++ imx_anatop_post_resume();
++ imx_gpc_post_resume();
++ imx6q_enable_wb(false);
++ imx6q_set_lpm(WAIT_CLOCKED);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /*
++ * L2 can exit by 'reset' or Inband beacon (from remote EP)
++ * toggling phy_powerdown has same effect as 'inband beacon'
++ * So, toggle bit18 of GPR1, used as a workaround of errata
++ * "PCIe PCIe does not support L2 Power Down"
++ */
++ if (IS_ENABLED(CONFIG_PCI_IMX6)) {
++ regmap_update_bits(g, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD,
++ !IMX6Q_GPR1_PCIE_TEST_PD);
++ }
++
++ return 0;
++}
++
++static int imx6q_pm_valid(suspend_state_t state)
++{
++ return (state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM);
++}
++
++static const struct platform_suspend_ops imx6q_pm_ops = {
++ .enter = imx6q_pm_enter,
++ .valid = imx6q_pm_valid,
++};
++
++void __init imx6q_pm_set_ccm_base(void __iomem *base)
++{
++ ccm_base = base;
++}
++
++static int __init imx6_pm_get_base(struct imx6_pm_base *base,
++ const char *compat)
++{
++ struct device_node *node;
++ struct resource res;
++ int ret = 0;
++
++ node = of_find_compatible_node(NULL, NULL, compat);
++ if (!node) {
++ ret = -ENODEV;
++ goto out;
++ }
++
++ ret = of_address_to_resource(node, 0, &res);
++ if (ret)
++ goto put_node;
++
++ base->pbase = res.start;
++ base->vbase = ioremap(res.start, resource_size(&res));
++ if (!base->vbase)
++ ret = -ENOMEM;
++
++put_node:
++ of_node_put(node);
++out:
++ return ret;
++}
++
++static int __init imx6q_ocram_suspend_init(const struct imx6_pm_socdata
++ *socdata)
++{
++ phys_addr_t ocram_pbase;
++ struct device_node *node;
++ struct platform_device *pdev;
++ struct imx6_cpu_pm_info *pm_info;
++ struct gen_pool *ocram_pool;
++ unsigned long ocram_base;
++ int i, ret = 0;
++ const u32 *mmdc_offset_array;
++
++ if (!socdata) {
++ pr_warn("%s: invalid argument!\n", __func__);
++ return -EINVAL;
++ }
++
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ pr_warn("%s: failed to find ocram node!\n", __func__);
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(node);
++ if (!pdev) {
++ pr_warn("%s: failed to find ocram device!\n", __func__);
++ ret = -ENODEV;
++ goto put_node;
++ }
++
++ ocram_pool = dev_get_gen_pool(&pdev->dev);
++ if (!ocram_pool) {
++ pr_warn("%s: ocram pool unavailable!\n", __func__);
++ ret = -ENODEV;
++ goto put_node;
++ }
++
++ ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
++ if (!ocram_base) {
++ pr_warn("%s: unable to alloc ocram!\n", __func__);
++ ret = -ENOMEM;
++ goto put_node;
++ }
++
++ ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
++
++ suspend_ocram_base = __arm_ioremap_exec(ocram_pbase,
++ MX6Q_SUSPEND_OCRAM_SIZE, false);
++
++ pm_info = suspend_ocram_base;
++ pm_info->pbase = ocram_pbase;
++ pm_info->resume_addr = virt_to_phys(v7_cpu_resume);
++ pm_info->pm_info_size = sizeof(*pm_info);
++
++ /*
++ * ccm physical address is not used by asm code currently,
++ * so get ccm virtual address directly, as we already have
++ * it from ccm driver.
++ */
++ pm_info->ccm_base.vbase = ccm_base;
++
++ ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
++ if (ret) {
++ pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
++ goto put_node;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
++ if (ret) {
++ pr_warn("%s: failed to get src base %d!\n", __func__, ret);
++ goto src_map_failed;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->iomuxc_base, socdata->iomuxc_compat);
++ if (ret) {
++ pr_warn("%s: failed to get iomuxc base %d!\n", __func__, ret);
++ goto iomuxc_map_failed;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->gpc_base, socdata->gpc_compat);
++ if (ret) {
++ pr_warn("%s: failed to get gpc base %d!\n", __func__, ret);
++ goto gpc_map_failed;
++ }
++
++ ret = imx6_pm_get_base(&pm_info->l2_base, "arm,pl310-cache");
++ if (ret) {
++ pr_warn("%s: failed to get pl310-cache base %d!\n",
++ __func__, ret);
++ goto pl310_cache_map_failed;
++ }
++
++ pm_info->cpu_type = socdata->cpu_type;
++ pm_info->mmdc_io_num = socdata->mmdc_io_num;
++ mmdc_offset_array = socdata->mmdc_io_offset;
++
++ for (i = 0; i < pm_info->mmdc_io_num; i++) {
++ pm_info->mmdc_io_val[i][0] =
++ mmdc_offset_array[i];
++ pm_info->mmdc_io_val[i][1] =
++ readl_relaxed(pm_info->iomuxc_base.vbase +
++ mmdc_offset_array[i]);
++ }
++
++ imx6_suspend_in_ocram_fn = fncpy(
++ suspend_ocram_base + sizeof(*pm_info),
++ &imx6_suspend,
++ MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
++
++ goto put_node;
++
++pl310_cache_map_failed:
++ iounmap(&pm_info->gpc_base.vbase);
++gpc_map_failed:
++ iounmap(&pm_info->iomuxc_base.vbase);
++iomuxc_map_failed:
++ iounmap(&pm_info->src_base.vbase);
++src_map_failed:
++ iounmap(&pm_info->mmdc_base.vbase);
++put_node:
++ of_node_put(node);
++
++ return ret;
++}
++
++static void __init imx6_pm_common_init(const struct imx6_pm_socdata
++ *socdata)
++{
++ struct regmap *gpr;
++ int ret;
++
++ WARN_ON(!ccm_base);
++
++ ret = imx6q_ocram_suspend_init(socdata);
++ if (ret)
++ pr_warn("%s: failed to initialize ocram suspend %d!\n",
++ __func__, ret);
++
++ /*
++ * This is for SW workaround step #1 of ERR007265, see comments
++ * in imx6q_set_lpm for details of this errata.
++ * Force IOMUXC irq pending, so that the interrupt to GPC can be
++ * used to deassert dsm_request signal when the signal gets
++ * asserted unexpectedly.
++ */
++ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++ if (!IS_ERR(gpr))
++ regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT_MASK,
++ IMX6Q_GPR1_GINT_MASK);
++
++
++ suspend_set_ops(&imx6q_pm_ops);
++}
++
++void __init imx6q_pm_init(void)
++{
++ imx6_pm_common_init(&imx6q_pm_data);
++}
++
++void __init imx6dl_pm_init(void)
++{
++ imx6_pm_common_init(NULL);
++}
++
++void __init imx6sl_pm_init(void)
++{
++ imx6_pm_common_init(NULL);
++}
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/pm-imx6q.c linux-3.14.40/arch/arm/mach-imx/pm-imx6q.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/pm-imx6q.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/pm-imx6q.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,241 +0,0 @@
+-/*
+- * Copyright 2011-2013 Freescale Semiconductor, Inc.
+- * Copyright 2011 Linaro Ltd.
+- *
+- * The code contained herein is licensed under the GNU General Public
+- * License. You may obtain a copy of the GNU General Public License
+- * Version 2 or later at the following locations:
+- *
+- * http://www.opensource.org/licenses/gpl-license.html
+- * http://www.gnu.org/copyleft/gpl.html
+- */
+-
+-#include <linux/delay.h>
+-#include <linux/init.h>
+-#include <linux/io.h>
+-#include <linux/irq.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+-#include <linux/of.h>
+-#include <linux/of_address.h>
+-#include <linux/regmap.h>
+-#include <linux/suspend.h>
+-#include <asm/cacheflush.h>
+-#include <asm/proc-fns.h>
+-#include <asm/suspend.h>
+-#include <asm/hardware/cache-l2x0.h>
+-
+-#include "common.h"
+-#include "hardware.h"
+-
+-#define CCR 0x0
+-#define BM_CCR_WB_COUNT (0x7 << 16)
+-#define BM_CCR_RBC_BYPASS_COUNT (0x3f << 21)
+-#define BM_CCR_RBC_EN (0x1 << 27)
+-
+-#define CLPCR 0x54
+-#define BP_CLPCR_LPM 0
+-#define BM_CLPCR_LPM (0x3 << 0)
+-#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
+-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
+-#define BM_CLPCR_SBYOS (0x1 << 6)
+-#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
+-#define BM_CLPCR_VSTBY (0x1 << 8)
+-#define BP_CLPCR_STBY_COUNT 9
+-#define BM_CLPCR_STBY_COUNT (0x3 << 9)
+-#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
+-#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
+-#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
+-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
+-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
+-#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
+-#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
+-#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
+-#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
+-#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
+-#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
+-
+-#define CGPR 0x64
+-#define BM_CGPR_CHICKEN_BIT (0x1 << 17)
+-
+-static void __iomem *ccm_base;
+-
+-void imx6q_set_chicken_bit(void)
+-{
+- u32 val = readl_relaxed(ccm_base + CGPR);
+-
+- val |= BM_CGPR_CHICKEN_BIT;
+- writel_relaxed(val, ccm_base + CGPR);
+-}
+-
+-static void imx6q_enable_rbc(bool enable)
+-{
+- u32 val;
+-
+- /*
+- * need to mask all interrupts in GPC before
+- * operating RBC configurations
+- */
+- imx_gpc_mask_all();
+-
+- /* configure RBC enable bit */
+- val = readl_relaxed(ccm_base + CCR);
+- val &= ~BM_CCR_RBC_EN;
+- val |= enable ? BM_CCR_RBC_EN : 0;
+- writel_relaxed(val, ccm_base + CCR);
+-
+- /* configure RBC count */
+- val = readl_relaxed(ccm_base + CCR);
+- val &= ~BM_CCR_RBC_BYPASS_COUNT;
+- val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
+- writel(val, ccm_base + CCR);
+-
+- /*
+- * need to delay at least 2 cycles of CKIL(32K)
+- * due to hardware design requirement, which is
+- * ~61us, here we use 65us for safe
+- */
+- udelay(65);
+-
+- /* restore GPC interrupt mask settings */
+- imx_gpc_restore_all();
+-}
+-
+-static void imx6q_enable_wb(bool enable)
+-{
+- u32 val;
+-
+- /* configure well bias enable bit */
+- val = readl_relaxed(ccm_base + CLPCR);
+- val &= ~BM_CLPCR_WB_PER_AT_LPM;
+- val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
+- writel_relaxed(val, ccm_base + CLPCR);
+-
+- /* configure well bias count */
+- val = readl_relaxed(ccm_base + CCR);
+- val &= ~BM_CCR_WB_COUNT;
+- val |= enable ? BM_CCR_WB_COUNT : 0;
+- writel_relaxed(val, ccm_base + CCR);
+-}
+-
+-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+-{
+- struct irq_desc *iomuxc_irq_desc;
+- u32 val = readl_relaxed(ccm_base + CLPCR);
+-
+- val &= ~BM_CLPCR_LPM;
+- switch (mode) {
+- case WAIT_CLOCKED:
+- break;
+- case WAIT_UNCLOCKED:
+- val |= 0x1 << BP_CLPCR_LPM;
+- val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
+- break;
+- case STOP_POWER_ON:
+- val |= 0x2 << BP_CLPCR_LPM;
+- break;
+- case WAIT_UNCLOCKED_POWER_OFF:
+- val |= 0x1 << BP_CLPCR_LPM;
+- val &= ~BM_CLPCR_VSTBY;
+- val &= ~BM_CLPCR_SBYOS;
+- break;
+- case STOP_POWER_OFF:
+- val |= 0x2 << BP_CLPCR_LPM;
+- val |= 0x3 << BP_CLPCR_STBY_COUNT;
+- val |= BM_CLPCR_VSTBY;
+- val |= BM_CLPCR_SBYOS;
+- if (cpu_is_imx6sl()) {
+- val |= BM_CLPCR_BYPASS_PMIC_READY;
+- val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
+- } else {
+- val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
+- }
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- /*
+- * ERR007265: CCM: When improper low-power sequence is used,
+- * the SoC enters low power mode before the ARM core executes WFI.
+- *
+- * Software workaround:
+- * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
+- * by setting IOMUX_GPR1_GINT.
+- * 2) Software should then unmask IRQ #32 in GPC before setting CCM
+- * Low-Power mode.
+- * 3) Software should mask IRQ #32 right after CCM Low-Power mode
+- * is set (set bits 0-1 of CCM_CLPCR).
+- */
+- iomuxc_irq_desc = irq_to_desc(32);
+- imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
+- writel_relaxed(val, ccm_base + CLPCR);
+- imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
+-
+- return 0;
+-}
+-
+-static int imx6q_suspend_finish(unsigned long val)
+-{
+- cpu_do_idle();
+- return 0;
+-}
+-
+-static int imx6q_pm_enter(suspend_state_t state)
+-{
+- switch (state) {
+- case PM_SUSPEND_MEM:
+- imx6q_set_lpm(STOP_POWER_OFF);
+- imx6q_enable_wb(true);
+- imx6q_enable_rbc(true);
+- imx_gpc_pre_suspend();
+- imx_anatop_pre_suspend();
+- imx_set_cpu_jump(0, v7_cpu_resume);
+- /* Zzz ... */
+- cpu_suspend(0, imx6q_suspend_finish);
+- if (cpu_is_imx6q() || cpu_is_imx6dl())
+- imx_smp_prepare();
+- imx_anatop_post_resume();
+- imx_gpc_post_resume();
+- imx6q_enable_rbc(false);
+- imx6q_enable_wb(false);
+- imx6q_set_lpm(WAIT_CLOCKED);
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- return 0;
+-}
+-
+-static const struct platform_suspend_ops imx6q_pm_ops = {
+- .enter = imx6q_pm_enter,
+- .valid = suspend_valid_only_mem,
+-};
+-
+-void __init imx6q_pm_set_ccm_base(void __iomem *base)
+-{
+- ccm_base = base;
+-}
+-
+-void __init imx6q_pm_init(void)
+-{
+- struct regmap *gpr;
+-
+- WARN_ON(!ccm_base);
+-
+- /*
+- * This is for SW workaround step #1 of ERR007265, see comments
+- * in imx6q_set_lpm for details of this errata.
+- * Force IOMUXC irq pending, so that the interrupt to GPC can be
+- * used to deassert dsm_request signal when the signal gets
+- * asserted unexpectedly.
+- */
+- gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+- if (!IS_ERR(gpr))
+- regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
+- IMX6Q_GPR1_GINT);
+-
+-
+- suspend_set_ops(&imx6q_pm_ops);
+-}
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/suspend-imx6.S linux-3.14.40/arch/arm/mach-imx/suspend-imx6.S
+--- linux-3.14.40.orig/arch/arm/mach-imx/suspend-imx6.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-imx/suspend-imx6.S 2015-05-01 14:57:57.847427001 -0500
+@@ -0,0 +1,306 @@
++/*
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/linkage.h>
++#include <asm/asm-offsets.h>
++#include <asm/hardware/cache-l2x0.h>
++#include "hardware.h"
++
++/*
++ * ==================== low level suspend ====================
++ *
++ * Better to follow below rules to use ARM registers:
++ * r0: pm_info structure address;
++ * r1 ~ r4: for saving pm_info members;
++ * r5 ~ r10: free registers;
++ * r11: io base address.
++ *
++ * suspend ocram space layout:
++ * ======================== high address ======================
++ * .
++ * .
++ * .
++ * ^
++ * ^
++ * ^
++ * imx6_suspend code
++ * PM_INFO structure(imx6_cpu_pm_info)
++ * ======================== low address =======================
++ */
++
++/*
++ * Below offsets are based on struct imx6_cpu_pm_info
++ * which defined in arch/arm/mach-imx/pm-imx6q.c, this
++ * structure contains necessary pm info for low level
++ * suspend related code.
++ */
++#define PM_INFO_PBASE_OFFSET 0x0
++#define PM_INFO_RESUME_ADDR_OFFSET 0x4
++#define PM_INFO_CPU_TYPE_OFFSET 0x8
++#define PM_INFO_PM_INFO_SIZE_OFFSET 0xC
++#define PM_INFO_MX6Q_MMDC_P_OFFSET 0x10
++#define PM_INFO_MX6Q_MMDC_V_OFFSET 0x14
++#define PM_INFO_MX6Q_SRC_P_OFFSET 0x18
++#define PM_INFO_MX6Q_SRC_V_OFFSET 0x1C
++#define PM_INFO_MX6Q_IOMUXC_P_OFFSET 0x20
++#define PM_INFO_MX6Q_IOMUXC_V_OFFSET 0x24
++#define PM_INFO_MX6Q_CCM_P_OFFSET 0x28
++#define PM_INFO_MX6Q_CCM_V_OFFSET 0x2C
++#define PM_INFO_MX6Q_GPC_P_OFFSET 0x30
++#define PM_INFO_MX6Q_GPC_V_OFFSET 0x34
++#define PM_INFO_MX6Q_L2_P_OFFSET 0x38
++#define PM_INFO_MX6Q_L2_V_OFFSET 0x3C
++#define PM_INFO_MMDC_IO_NUM_OFFSET 0x40
++#define PM_INFO_MMDC_IO_VAL_OFFSET 0x44
++
++#define MX6Q_SRC_GPR1 0x20
++#define MX6Q_SRC_GPR2 0x24
++#define MX6Q_MMDC_MAPSR 0x404
++#define MX6Q_GPC_IMR1 0x08
++#define MX6Q_GPC_IMR2 0x0c
++#define MX6Q_GPC_IMR3 0x10
++#define MX6Q_GPC_IMR4 0x14
++#define MX6Q_CCM_CCR 0x0
++
++ .align 3
++
++ .macro sync_l2_cache
++
++ /* sync L2 cache to drain L2's buffers to DRAM. */
++#ifdef CONFIG_CACHE_L2X0
++ ldr r11, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
++ mov r6, #0x0
++ str r6, [r11, #L2X0_CACHE_SYNC]
++1:
++ ldr r6, [r11, #L2X0_CACHE_SYNC]
++ ands r6, r6, #0x1
++ bne 1b
++#endif
++
++ .endm
++
++ .macro resume_mmdc
++
++ /* restore MMDC IO */
++ cmp r5, #0x0
++ ldreq r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
++ ldrne r11, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]
++
++ ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
++ ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
++ add r7, r7, r0
++1:
++ ldr r8, [r7], #0x4
++ ldr r9, [r7], #0x4
++ str r9, [r11, r8]
++ subs r6, r6, #0x1
++ bne 1b
++
++ cmp r5, #0x0
++ ldreq r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
++ ldrne r11, [r0, #PM_INFO_MX6Q_MMDC_P_OFFSET]
++
++ /* let DDR out of self-refresh */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ bic r7, r7, #(1 << 21)
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++2:
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ ands r7, r7, #(1 << 25)
++ bne 2b
++
++ /* enable DDR auto power saving */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ bic r7, r7, #0x1
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++
++ .endm
++
++ENTRY(imx6_suspend)
++ ldr r1, [r0, #PM_INFO_PBASE_OFFSET]
++ ldr r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
++ ldr r3, [r0, #PM_INFO_CPU_TYPE_OFFSET]
++ ldr r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]
++
++ /*
++ * compute the resume address in IRAM
++ * so it can be set in the SRC register.
++ */
++ ldr r6, =imx6_suspend
++ ldr r7, =resume
++ sub r7, r7, r6
++ add r8, r1, r4
++ add r9, r8, r7
++
++ /*
++ * make sure the TLB contains the addresses we want,
++ * as we will access them after the MMDC IO is floated.
++ */
++
++ ldr r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
++ ldr r6, [r11, #0x0]
++ ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
++ ldr r6, [r11, #0x0]
++ ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
++ ldr r6, [r11, #0x0]
++
++ /* use r11 to store the IO address */
++ ldr r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
++ /* store physical resume addr and pm_info address. */
++ str r9, [r11, #MX6Q_SRC_GPR1]
++ str r1, [r11, #MX6Q_SRC_GPR2]
++
++ /* need to sync L2 cache before DSM. */
++ sync_l2_cache
++
++ ldr r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
++ /*
++ * put DDR explicitly into self-refresh and
++ * disable automatic power savings.
++ */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ orr r7, r7, #0x1
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++
++ /* make the DDR explicitly enter self-refresh. */
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ orr r7, r7, #(1 << 21)
++ str r7, [r11, #MX6Q_MMDC_MAPSR]
++
++poll_dvfs_set:
++ ldr r7, [r11, #MX6Q_MMDC_MAPSR]
++ ands r7, r7, #(1 << 25)
++ beq poll_dvfs_set
++
++ ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
++ ldr r6, =0x0
++ ldr r7, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
++ ldr r8, =PM_INFO_MMDC_IO_VAL_OFFSET
++ add r8, r8, r0
++set_mmdc_io_lpm:
++ ldr r9, [r8], #0x8
++ str r6, [r11, r9]
++ subs r7, r7, #0x1
++ bne set_mmdc_io_lpm
++
++ /*
++ * mask all GPC interrupts before
++ * enabling the RBC counters to
++ * avoid the counter starting too
++ * early if an interrupt is already
++ * pending.
++ */
++ ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
++ ldr r6, [r11, #MX6Q_GPC_IMR1]
++ ldr r7, [r11, #MX6Q_GPC_IMR2]
++ ldr r8, [r11, #MX6Q_GPC_IMR3]
++ ldr r9, [r11, #MX6Q_GPC_IMR4]
++
++ ldr r10, =0xffffffff
++ str r10, [r11, #MX6Q_GPC_IMR1]
++ str r10, [r11, #MX6Q_GPC_IMR2]
++ str r10, [r11, #MX6Q_GPC_IMR3]
++ str r10, [r11, #MX6Q_GPC_IMR4]
++
++ /*
++ * enable the RBC bypass counter here
++ * to hold off the interrupts. RBC counter
++ * = 32 (1ms), Minimum RBC delay should be
++ * 400us for the analog LDOs to power down.
++ */
++ ldr r11, [r0, #PM_INFO_MX6Q_CCM_V_OFFSET]
++ ldr r10, [r11, #MX6Q_CCM_CCR]
++ bic r10, r10, #(0x3f << 21)
++ orr r10, r10, #(0x20 << 21)
++ str r10, [r11, #MX6Q_CCM_CCR]
++
++ /* enable the counter. */
++ ldr r10, [r11, #MX6Q_CCM_CCR]
++ orr r10, r10, #(0x1 << 27)
++ str r10, [r11, #MX6Q_CCM_CCR]
++
++ /* unmask all the GPC interrupts. */
++ ldr r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
++ str r6, [r11, #MX6Q_GPC_IMR1]
++ str r7, [r11, #MX6Q_GPC_IMR2]
++ str r8, [r11, #MX6Q_GPC_IMR3]
++ str r9, [r11, #MX6Q_GPC_IMR4]
++
++ /*
++ * now delay for a short while (3usec)
++ * ARM is at 1GHz at this point
++ * so a short loop should be enough.
++ * this delay is required to ensure that
++ * the RBC counter can start counting in
++ * case an interrupt is already pending
++ * or in case an interrupt arrives just
++ * as ARM is about to assert DSM_request.
++ */
++ ldr r6, =2000
++rbc_loop:
++ subs r6, r6, #0x1
++ bne rbc_loop
++
++ /* Zzz, enter stop mode */
++ wfi
++ nop
++ nop
++ nop
++ nop
++
++ /*
++ * Reaching here means there is a pending
++ * wakeup source and the system should auto
++ * resume, so we need to restore MMDC IO first.
++ */
++ mov r5, #0x0
++ resume_mmdc
++
++ /* return to suspend finish */
++ mov pc, lr
++
++resume:
++ /* invalidate L1 I-cache first */
++ mov r6, #0x0
++ mcr p15, 0, r6, c7, c5, 0
++ mcr p15, 0, r6, c7, c5, 6
++ /* enable the Icache and branch prediction */
++ mov r6, #0x1800
++ mcr p15, 0, r6, c1, c0, 0
++ isb
++
++ /* get physical resume address from pm_info. */
++ ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
++ /* clear core0's entry and parameter */
++ ldr r11, [r0, #PM_INFO_MX6Q_SRC_P_OFFSET]
++ mov r7, #0x0
++ str r7, [r11, #MX6Q_SRC_GPR1]
++ str r7, [r11, #MX6Q_SRC_GPR2]
++
++ mov r5, #0x1
++ resume_mmdc
++
++ mov pc, lr
++ENDPROC(imx6_suspend)
++
++/*
++ * The following code must assume it is running from physical address
++ * where absolute virtual addresses to the data section have to be
++ * turned into relative ones.
++ */
++
++ENTRY(v7_cpu_resume)
++ bl v7_invalidate_l1
++#ifdef CONFIG_CACHE_L2X0
++ bl l2c310_early_resume
++#endif
++ b cpu_resume
++ENDPROC(v7_cpu_resume)
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/system.c linux-3.14.40/arch/arm/mach-imx/system.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/system.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/system.c 2015-05-01 14:57:57.847427001 -0500
+@@ -34,6 +34,7 @@
+
+ static void __iomem *wdog_base;
+ static struct clk *wdog_clk;
++static u32 wdog_source = 1; /* use WDOG1 default */
+
+ /*
+ * Reset the system. It is called by machine_restart().
+@@ -47,6 +48,15 @@
+
+ if (cpu_is_mx1())
+ wcr_enable = (1 << 0);
++ /*
++ * Some i.MX6 boards use WDOG2 to reset external pmic in bypass mode,
++ * so do WDOG2 reset here. Do not set SRS, since we will
++ * trigger external POR later. Use WDOG1 to reset in ldo-enable
++ * mode. You can set it by "fsl,wdog-reset" in dts.
++ */
++ else if (wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
++ cpu_is_imx6sl()))
++ wcr_enable = 0x14;
+ else
+ wcr_enable = (1 << 2);
+
+@@ -90,12 +100,29 @@
+
+ void __init mxc_arch_reset_init_dt(void)
+ {
+- struct device_node *np;
++ struct device_node *np = NULL;
++
++ if (cpu_is_imx6q() || cpu_is_imx6dl())
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
++ else if (cpu_is_imx6sl())
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpc");
++
++ if (np)
++ of_property_read_u32(np, "fsl,wdog-reset", &wdog_source);
++ pr_info("Use WDOG%d as reset source\n", wdog_source);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx21-wdt");
+ wdog_base = of_iomap(np, 0);
+ WARN_ON(!wdog_base);
+
++ /* Some i.MX6 boards use WDOG2 to reset board in ldo-bypass mode */
++ if (wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
++ cpu_is_imx6sl())) {
++ np = of_find_compatible_node(np, NULL, "fsl,imx21-wdt");
++ wdog_base = of_iomap(np, 0);
++ WARN_ON(!wdog_base);
++ }
++
+ wdog_clk = of_clk_get(np, 0);
+ if (IS_ERR(wdog_clk)) {
+ pr_warn("%s: failed to get wdog clock\n", __func__);
+@@ -124,7 +151,7 @@
+ }
+
+ /* Configure the L2 PREFETCH and POWER registers */
+- val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
++ val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
+ val |= 0x70800000;
+ /*
+ * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
+@@ -137,14 +164,12 @@
+ */
+ if (cpu_is_imx6q())
+ val &= ~(1 << 30 | 1 << 23);
+- writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
+- val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
+- writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
++ writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
+
+ iounmap(l2x0_base);
+ of_node_put(np);
+
+ out:
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ }
+ #endif
+diff -Nur linux-3.14.40.orig/arch/arm/mach-imx/time.c linux-3.14.40/arch/arm/mach-imx/time.c
+--- linux-3.14.40.orig/arch/arm/mach-imx/time.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-imx/time.c 2015-05-01 14:57:57.847427001 -0500
+@@ -60,7 +60,11 @@
+ #define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
+ #define V2_TCTL_CLK_IPG (1 << 6)
+ #define V2_TCTL_CLK_PER (2 << 6)
++#define V2_TCTL_CLK_OSC_DIV8 (5 << 6)
++#define V2_TCTL_CLK_OSC (7 << 6)
++#define V2_TCTL_24MEN (1 << 10)
+ #define V2_TCTL_FRR (1 << 9)
++#define V2_TPRER_PRE24M 12
+ #define V2_IR 0x0c
+ #define V2_TSTAT 0x08
+ #define V2_TSTAT_OF1 (1 << 0)
+@@ -277,11 +281,20 @@
+
+ void __init mxc_timer_init(void __iomem *base, int irq)
+ {
+- uint32_t tctl_val;
++ uint32_t tctl_val, tprer_val;
+ struct clk *timer_clk;
+ struct clk *timer_ipg_clk;
+
+- timer_clk = clk_get_sys("imx-gpt.0", "per");
++ /*
++	 * the GPT clock is sourced from the 24M OSC on imx6q > TO1.0
++	 * and on imx6dl; other SoCs use the per clock.
++ */
++ if ((cpu_is_imx6q() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_0)
++ || cpu_is_imx6dl())
++ timer_clk = clk_get_sys("imx-gpt.0", "gpt_3m");
++ else
++ timer_clk = clk_get_sys("imx-gpt.0", "per");
++
+ if (IS_ERR(timer_clk)) {
+ pr_err("i.MX timer: unable to get clk\n");
+ return;
+@@ -302,10 +315,24 @@
+ __raw_writel(0, timer_base + MXC_TCTL);
+ __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
+
+- if (timer_is_v2())
+- tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+- else
++ if (timer_is_v2()) {
++ if ((cpu_is_imx6q() && imx_get_soc_revision() >
++ IMX_CHIP_REVISION_1_0) || cpu_is_imx6dl()) {
++ tctl_val = V2_TCTL_CLK_OSC_DIV8 | V2_TCTL_FRR |
++ V2_TCTL_WAITEN | MXC_TCTL_TEN;
++ if (cpu_is_imx6dl()) {
++ /* 24 / 8 = 3 MHz */
++ tprer_val = 7 << V2_TPRER_PRE24M;
++ __raw_writel(tprer_val, timer_base + MXC_TPRER);
++ tctl_val |= V2_TCTL_24MEN;
++ }
++ } else {
++ tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR |
++ V2_TCTL_WAITEN | MXC_TCTL_TEN;
++ }
++ } else {
+ tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
++ }
+
+ __raw_writel(tctl_val, timer_base + MXC_TCTL);
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-nomadik/cpu-8815.c linux-3.14.40/arch/arm/mach-nomadik/cpu-8815.c
+--- linux-3.14.40.orig/arch/arm/mach-nomadik/cpu-8815.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-nomadik/cpu-8815.c 2015-05-01 14:57:57.855427001 -0500
+@@ -147,7 +147,7 @@
+ {
+ #ifdef CONFIG_CACHE_L2X0
+ /* At full speed latency must be >=2, so 0x249 in low bits */
+- l2x0_of_init(0x00730249, 0xfe000fff);
++ l2x0_of_init(0x00700249, 0xfe0fefff);
+ #endif
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mach-omap2/common.h linux-3.14.40/arch/arm/mach-omap2/common.h
+--- linux-3.14.40.orig/arch/arm/mach-omap2/common.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-omap2/common.h 2015-05-01 14:57:57.871427001 -0500
+@@ -91,6 +91,7 @@
+ extern void omap3_secure_sync32k_timer_init(void);
+ extern void omap3_gptimer_timer_init(void);
+ extern void omap4_local_timer_init(void);
++int omap_l2_cache_init(void);
+ extern void omap5_realtime_timer_init(void);
+
+ void omap2420_init_early(void);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-omap2/io.c linux-3.14.40/arch/arm/mach-omap2/io.c
+--- linux-3.14.40.orig/arch/arm/mach-omap2/io.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-omap2/io.c 2015-05-01 14:57:57.879427001 -0500
+@@ -608,6 +608,7 @@
+ am43xx_clockdomains_init();
+ am43xx_hwmod_init();
+ omap_hwmod_init_postsetup();
++ omap_l2_cache_init();
+ omap_clk_soc_init = am43xx_dt_clk_init;
+ }
+
+@@ -639,6 +640,7 @@
+ omap44xx_clockdomains_init();
+ omap44xx_hwmod_init();
+ omap_hwmod_init_postsetup();
++ omap_l2_cache_init();
+ omap_clk_soc_init = omap4xxx_dt_clk_init;
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-omap2/Kconfig linux-3.14.40/arch/arm/mach-omap2/Kconfig
+--- linux-3.14.40.orig/arch/arm/mach-omap2/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-omap2/Kconfig 2015-05-01 14:57:57.895427001 -0500
+@@ -78,6 +78,7 @@
+ select MULTI_IRQ_HANDLER
+ select ARM_GIC
+ select MACH_OMAP_GENERIC
++ select MIGHT_HAVE_CACHE_L2X0
+
+ config SOC_DRA7XX
+ bool "TI DRA7XX"
+diff -Nur linux-3.14.40.orig/arch/arm/mach-omap2/omap4-common.c linux-3.14.40/arch/arm/mach-omap2/omap4-common.c
+--- linux-3.14.40.orig/arch/arm/mach-omap2/omap4-common.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-omap2/omap4-common.c 2015-05-01 14:57:57.907427001 -0500
+@@ -166,75 +166,57 @@
+ return l2cache_base;
+ }
+
+-static void omap4_l2x0_disable(void)
++static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
+ {
+- outer_flush_all();
+- /* Disable PL310 L2 Cache controller */
+- omap_smc1(0x102, 0x0);
+-}
++ unsigned smc_op;
+
+-static void omap4_l2x0_set_debug(unsigned long val)
+-{
+- /* Program PL310 L2 Cache controller debug register */
+- omap_smc1(0x100, val);
++ switch (reg) {
++ case L2X0_CTRL:
++ smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
++ break;
++
++ case L2X0_AUX_CTRL:
++ smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
++ break;
++
++ case L2X0_DEBUG_CTRL:
++ smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
++ break;
++
++ case L310_PREFETCH_CTRL:
++ smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
++ break;
++
++ default:
++ WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
++ return;
++ }
++
++ omap_smc1(smc_op, val);
+ }
+
+-static int __init omap_l2_cache_init(void)
++int __init omap_l2_cache_init(void)
+ {
+- u32 aux_ctrl = 0;
+-
+- /*
+- * To avoid code running on other OMAPs in
+- * multi-omap builds
+- */
+- if (!cpu_is_omap44xx())
+- return -ENODEV;
++ u32 aux_ctrl;
+
+ /* Static mapping, never released */
+ l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
+ if (WARN_ON(!l2cache_base))
+ return -ENOMEM;
+
+- /*
+- * 16-way associativity, parity disabled
+- * Way size - 32KB (es1.0)
+- * Way size - 64KB (es2.0 +)
+- */
+- aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
+- (0x1 << 25) |
+- (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
+- (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
+-
+- if (omap_rev() == OMAP4430_REV_ES1_0) {
+- aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
+- } else {
+- aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
+- (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
+- (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
+- (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
+- (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
+- }
+- if (omap_rev() != OMAP4430_REV_ES1_0)
+- omap_smc1(0x109, aux_ctrl);
+-
+- /* Enable PL310 L2 Cache controller */
+- omap_smc1(0x102, 0x1);
++ /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
++ aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
++ L310_AUX_CTRL_DATA_PREFETCH |
++ L310_AUX_CTRL_INSTR_PREFETCH;
+
++ outer_cache.write_sec = omap4_l2c310_write_sec;
+ if (of_have_populated_dt())
+- l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
++ l2x0_of_init(aux_ctrl, 0xcf9fffff);
+ else
+- l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
+-
+- /*
+- * Override default outer_cache.disable with a OMAP4
+- * specific one
+- */
+- outer_cache.disable = omap4_l2x0_disable;
+- outer_cache.set_debug = omap4_l2x0_set_debug;
++ l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
+
+ return 0;
+ }
+-omap_early_initcall(omap_l2_cache_init);
+ #endif
+
+ void __iomem *omap4_get_sar_ram_base(void)
+diff -Nur linux-3.14.40.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c linux-3.14.40/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+--- linux-3.14.40.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2015-05-01 14:57:57.915427001 -0500
+@@ -187,19 +187,15 @@
+ * in every restore MPUSS OFF path.
+ */
+ #ifdef CONFIG_CACHE_L2X0
+-static void save_l2x0_context(void)
++static void __init save_l2x0_context(void)
+ {
+- u32 val;
+- void __iomem *l2x0_base = omap4_get_l2cache_base();
+- if (l2x0_base) {
+- val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
+- __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
+- val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
+- __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+- }
++ __raw_writel(l2x0_saved_regs.aux_ctrl,
++ sar_base + L2X0_AUXCTRL_OFFSET);
++ __raw_writel(l2x0_saved_regs.prefetch_ctrl,
++ sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+ }
+ #else
+-static void save_l2x0_context(void)
++static void __init save_l2x0_context(void)
+ {}
+ #endif
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-prima2/l2x0.c linux-3.14.40/arch/arm/mach-prima2/l2x0.c
+--- linux-3.14.40.orig/arch/arm/mach-prima2/l2x0.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-prima2/l2x0.c 2015-05-01 14:57:57.931427001 -0500
+@@ -8,43 +8,10 @@
+
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/of.h>
+ #include <asm/hardware/cache-l2x0.h>
+
+-struct l2x0_aux
+-{
+- u32 val;
+- u32 mask;
+-};
+-
+-static struct l2x0_aux prima2_l2x0_aux __initconst = {
+- .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT,
+- .mask = 0,
+-};
+-
+-static struct l2x0_aux marco_l2x0_aux __initconst = {
+- .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
+- (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT),
+- .mask = L2X0_AUX_CTRL_MASK,
+-};
+-
+-static struct of_device_id sirf_l2x0_ids[] __initconst = {
+- { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, },
+- { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, },
+- {},
+-};
+-
+ static int __init sirfsoc_l2x0_init(void)
+ {
+- struct device_node *np;
+- const struct l2x0_aux *aux;
+-
+- np = of_find_matching_node(NULL, sirf_l2x0_ids);
+- if (np) {
+- aux = of_match_node(sirf_l2x0_ids, np)->data;
+- return l2x0_of_init(aux->val, aux->mask);
+- }
+-
+- return 0;
++ return l2x0_of_init(0, ~0);
+ }
+ early_initcall(sirfsoc_l2x0_init);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-prima2/pm.c linux-3.14.40/arch/arm/mach-prima2/pm.c
+--- linux-3.14.40.orig/arch/arm/mach-prima2/pm.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-prima2/pm.c 2015-05-01 14:57:57.935427001 -0500
+@@ -71,7 +71,6 @@
+ case PM_SUSPEND_MEM:
+ sirfsoc_pre_suspend_power_off();
+
+- outer_flush_all();
+ outer_disable();
+ /* go zzz */
+ cpu_suspend(0, sirfsoc_finish_suspend);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-realview/realview_eb.c linux-3.14.40/arch/arm/mach-realview/realview_eb.c
+--- linux-3.14.40.orig/arch/arm/mach-realview/realview_eb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-realview/realview_eb.c 2015-05-01 14:57:57.955427001 -0500
+@@ -442,8 +442,13 @@
+ realview_eb11mp_fixup();
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
+- * Bits: .... ...0 0111 1001 0000 .... .... .... */
++ /*
++ * The PL220 needs to be manually configured as the hardware
++ * doesn't report the correct sizes.
++ * 1MB (128KB/way), 8-way associativity, event monitor and
++ * parity enabled, ignore share bit, no force write allocate
++ * Bits: .... ...0 0111 1001 0000 .... .... ....
++ */
+ l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
+ #endif
+ platform_device_register(&pmu_device);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-realview/realview_pb1176.c linux-3.14.40/arch/arm/mach-realview/realview_pb1176.c
+--- linux-3.14.40.orig/arch/arm/mach-realview/realview_pb1176.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-realview/realview_pb1176.c 2015-05-01 14:57:57.963427001 -0500
+@@ -355,7 +355,13 @@
+ int i;
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */
++ /*
++ * The PL220 needs to be manually configured as the hardware
++ * doesn't report the correct sizes.
++ * 128kB (16kB/way), 8-way associativity, event monitor and
++ * parity enabled, ignore share bit, no force write allocate
++ * Bits: .... ...0 0111 0011 0000 .... .... ....
++ */
+ l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
+ #endif
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-realview/realview_pb11mp.c linux-3.14.40/arch/arm/mach-realview/realview_pb11mp.c
+--- linux-3.14.40.orig/arch/arm/mach-realview/realview_pb11mp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-realview/realview_pb11mp.c 2015-05-01 14:57:57.963427001 -0500
+@@ -337,8 +337,13 @@
+ int i;
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
+- * Bits: .... ...0 0111 1001 0000 .... .... .... */
++ /*
++ * The PL220 needs to be manually configured as the hardware
++ * doesn't report the correct sizes.
++ * 1MB (128KB/way), 8-way associativity, event monitor and
++ * parity enabled, ignore share bit, no force write allocate
++ * Bits: .... ...0 0111 1001 0000 .... .... ....
++ */
+ l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
+ #endif
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-realview/realview_pbx.c linux-3.14.40/arch/arm/mach-realview/realview_pbx.c
+--- linux-3.14.40.orig/arch/arm/mach-realview/realview_pbx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-realview/realview_pbx.c 2015-05-01 14:57:57.963427001 -0500
+@@ -370,8 +370,8 @@
+ __io_address(REALVIEW_PBX_TILE_L220_BASE);
+
+ /* set RAM latencies to 1 cycle for eASIC */
+- writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+- writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
++ writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
++ writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
+
+ /* 16KB way size, 8-way associativity, parity disabled
+ * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
+diff -Nur linux-3.14.40.orig/arch/arm/mach-rockchip/rockchip.c linux-3.14.40/arch/arm/mach-rockchip/rockchip.c
+--- linux-3.14.40.orig/arch/arm/mach-rockchip/rockchip.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-rockchip/rockchip.c 2015-05-01 14:57:57.963427001 -0500
+@@ -25,7 +25,7 @@
+
+ static void __init rockchip_dt_init(void)
+ {
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-shmobile/board-armadillo800eva.c linux-3.14.40/arch/arm/mach-shmobile/board-armadillo800eva.c
+--- linux-3.14.40.orig/arch/arm/mach-shmobile/board-armadillo800eva.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-shmobile/board-armadillo800eva.c 2015-05-01 14:57:57.975427001 -0500
+@@ -1270,8 +1270,8 @@
+
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 32K*8way */
+- l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
++ /* Shared attribute override enable, 32K*8way */
++ l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
+ #endif
+
+ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
+diff -Nur linux-3.14.40.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c linux-3.14.40/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
+--- linux-3.14.40.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2015-05-01 14:57:57.983427001 -0500
+@@ -164,8 +164,8 @@
+ r8a7740_meram_workaround();
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 32K*8way */
+- l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
++ /* Shared attribute override enable, 32K*8way */
++ l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
+ #endif
+
+ r8a7740_add_standard_devices_dt();
+diff -Nur linux-3.14.40.orig/arch/arm/mach-shmobile/board-kzm9g.c linux-3.14.40/arch/arm/mach-shmobile/board-kzm9g.c
+--- linux-3.14.40.orig/arch/arm/mach-shmobile/board-kzm9g.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-shmobile/board-kzm9g.c 2015-05-01 14:57:57.983427001 -0500
+@@ -878,8 +878,8 @@
+ gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 64K*8way */
+- l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
++ /* Shared attribute override enable, 64K*8way */
++ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
+ #endif
+
+ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
+diff -Nur linux-3.14.40.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c linux-3.14.40/arch/arm/mach-shmobile/board-kzm9g-reference.c
+--- linux-3.14.40.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-shmobile/board-kzm9g-reference.c 2015-05-01 14:57:57.995427001 -0500
+@@ -36,8 +36,8 @@
+ sh73a0_add_standard_devices_dt();
+
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 64K*8way */
+- l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
++ /* Shared attribute override enable, 64K*8way */
++ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
+ #endif
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-shmobile/setup-r8a7778.c linux-3.14.40/arch/arm/mach-shmobile/setup-r8a7778.c
+--- linux-3.14.40.orig/arch/arm/mach-shmobile/setup-r8a7778.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-shmobile/setup-r8a7778.c 2015-05-01 14:57:57.999427001 -0500
+@@ -298,10 +298,10 @@
+ void __iomem *base = ioremap_nocache(0xf0100000, 0x1000);
+ if (base) {
+ /*
+- * Early BRESP enable, Shared attribute override enable, 64K*16way
++ * Shared attribute override enable, 64K*16way
+ * don't call iounmap(base)
+ */
+- l2x0_init(base, 0x40470000, 0x82000fff);
++ l2x0_init(base, 0x00400000, 0xc20f0fff);
+ }
+ #endif
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-shmobile/setup-r8a7779.c linux-3.14.40/arch/arm/mach-shmobile/setup-r8a7779.c
+--- linux-3.14.40.orig/arch/arm/mach-shmobile/setup-r8a7779.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-shmobile/setup-r8a7779.c 2015-05-01 14:57:57.999427001 -0500
+@@ -700,8 +700,8 @@
+ void __init r8a7779_add_standard_devices(void)
+ {
+ #ifdef CONFIG_CACHE_L2X0
+- /* Early BRESP enable, Shared attribute override enable, 64K*16way */
+- l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
++ /* Shared attribute override enable, 64K*16way */
++ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
+ #endif
+ r8a7779_pm_init();
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-socfpga/socfpga.c linux-3.14.40/arch/arm/mach-socfpga/socfpga.c
+--- linux-3.14.40.orig/arch/arm/mach-socfpga/socfpga.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-socfpga/socfpga.c 2015-05-01 14:57:57.999427001 -0500
+@@ -104,7 +104,7 @@
+
+ static void __init socfpga_cyclone5_init(void)
+ {
+- l2x0_of_init(0, ~0UL);
++ l2x0_of_init(0, ~0);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ socfpga_init_clocks();
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mach-spear/platsmp.c linux-3.14.40/arch/arm/mach-spear/platsmp.c
+--- linux-3.14.40.orig/arch/arm/mach-spear/platsmp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-spear/platsmp.c 2015-05-01 14:57:58.031427001 -0500
+@@ -20,6 +20,18 @@
+ #include <mach/spear.h>
+ #include "generic.h"
+
++/*
++ * Write pen_release in a way that is guaranteed to be visible to all
++ * observers, irrespective of whether they're taking part in coherency
++ * or not. This is necessary for the hotplug code to work reliably.
++ */
++static void write_pen_release(int val)
++{
++ pen_release = val;
++ smp_wmb();
++ sync_cache_w(&pen_release);
++}
++
+ static DEFINE_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+@@ -30,8 +42,7 @@
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+- pen_release = -1;
+- smp_wmb();
++ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+@@ -58,9 +69,7 @@
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+- pen_release = cpu;
+- flush_cache_all();
+- outer_flush_all();
++ write_pen_release(cpu);
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+diff -Nur linux-3.14.40.orig/arch/arm/mach-spear/spear13xx.c linux-3.14.40/arch/arm/mach-spear/spear13xx.c
+--- linux-3.14.40.orig/arch/arm/mach-spear/spear13xx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-spear/spear13xx.c 2015-05-01 14:57:58.035427001 -0500
+@@ -38,15 +38,15 @@
+ if (!IS_ENABLED(CONFIG_CACHE_L2X0))
+ return;
+
+- writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
++ writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL);
+
+ /*
+ * Program following latencies in order to make
+ * SPEAr1340 work at 600 MHz
+ */
+- writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
+- writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
+- l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
++ writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL);
++ writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL);
++ l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff);
+ }
+
+ /*
+diff -Nur linux-3.14.40.orig/arch/arm/mach-sti/board-dt.c linux-3.14.40/arch/arm/mach-sti/board-dt.c
+--- linux-3.14.40.orig/arch/arm/mach-sti/board-dt.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-sti/board-dt.c 2015-05-01 14:57:58.035427001 -0500
+@@ -16,15 +16,9 @@
+
+ void __init stih41x_l2x0_init(void)
+ {
+- u32 way_size = 0x4;
+- u32 aux_ctrl;
+- /* may be this can be encoded in macros like BIT*() */
+- aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
+- (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
+- (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
+- (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
+-
+- l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
++ l2x0_of_init(L2C_AUX_CTRL_SHARED_OVERRIDE |
++ L310_AUX_CTRL_DATA_PREFETCH |
++ L310_AUX_CTRL_INSTR_PREFETCH, 0xc00f0fff);
+ }
+
+ static void __init stih41x_machine_init(void)
+diff -Nur linux-3.14.40.orig/arch/arm/mach-tegra/pm.h linux-3.14.40/arch/arm/mach-tegra/pm.h
+--- linux-3.14.40.orig/arch/arm/mach-tegra/pm.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-tegra/pm.h 2015-05-01 14:57:58.035427001 -0500
+@@ -35,8 +35,6 @@
+ void tegra30_lp1_iram_hook(void);
+ void tegra30_sleep_core_init(void);
+
+-extern unsigned long l2x0_saved_regs_addr;
+-
+ void tegra_clear_cpu_in_lp2(void);
+ bool tegra_set_cpu_in_lp2(void);
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-tegra/reset-handler.S linux-3.14.40/arch/arm/mach-tegra/reset-handler.S
+--- linux-3.14.40.orig/arch/arm/mach-tegra/reset-handler.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-tegra/reset-handler.S 2015-05-01 14:57:58.035427001 -0500
+@@ -19,7 +19,6 @@
+
+ #include <asm/cache.h>
+ #include <asm/asm-offsets.h>
+-#include <asm/hardware/cache-l2x0.h>
+
+ #include "flowctrl.h"
+ #include "fuse.h"
+@@ -79,8 +78,10 @@
+ str r1, [r0]
+ #endif
+
++#ifdef CONFIG_CACHE_L2X0
+ /* L2 cache resume & re-enable */
+- l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr
++ bl l2c310_early_resume
++#endif
+ end_ca9_scu_l2_resume:
+ mov32 r9, 0xc0f
+ cmp r8, r9
+@@ -90,12 +91,6 @@
+ ENDPROC(tegra_resume)
+ #endif
+
+-#ifdef CONFIG_CACHE_L2X0
+- .globl l2x0_saved_regs_addr
+-l2x0_saved_regs_addr:
+- .long 0
+-#endif
+-
+ .align L1_CACHE_SHIFT
+ ENTRY(__tegra_cpu_reset_handler_start)
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-tegra/sleep.h linux-3.14.40/arch/arm/mach-tegra/sleep.h
+--- linux-3.14.40.orig/arch/arm/mach-tegra/sleep.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-tegra/sleep.h 2015-05-01 14:57:58.035427001 -0500
+@@ -120,37 +120,6 @@
+ mov \tmp1, \tmp1, lsr #8
+ .endm
+
+-/* Macro to resume & re-enable L2 cache */
+-#ifndef L2X0_CTRL_EN
+-#define L2X0_CTRL_EN 1
+-#endif
+-
+-#ifdef CONFIG_CACHE_L2X0
+-.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
+- W(adr) \tmp1, \phys_l2x0_saved_regs
+- ldr \tmp1, [\tmp1]
+- ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE]
+- ldr \tmp3, [\tmp2, #L2X0_CTRL]
+- tst \tmp3, #L2X0_CTRL_EN
+- bne exit_l2_resume
+- ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY]
+- str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY]
+- str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL]
+- str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL]
+- str \tmp3, [\tmp2, #L2X0_POWER_CTRL]
+- ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL]
+- str \tmp3, [\tmp2, #L2X0_AUX_CTRL]
+- mov \tmp3, #L2X0_CTRL_EN
+- str \tmp3, [\tmp2, #L2X0_CTRL]
+-exit_l2_resume:
+-.endm
+-#else /* CONFIG_CACHE_L2X0 */
+-.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
+-.endm
+-#endif /* CONFIG_CACHE_L2X0 */
+ #else
+ void tegra_pen_lock(void);
+ void tegra_pen_unlock(void);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-tegra/tegra.c linux-3.14.40/arch/arm/mach-tegra/tegra.c
+--- linux-3.14.40.orig/arch/arm/mach-tegra/tegra.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-tegra/tegra.c 2015-05-01 14:57:58.035427001 -0500
+@@ -73,27 +73,7 @@
+ static void __init tegra_init_cache(void)
+ {
+ #ifdef CONFIG_CACHE_L2X0
+- static const struct of_device_id pl310_ids[] __initconst = {
+- { .compatible = "arm,pl310-cache", },
+- {}
+- };
+-
+- struct device_node *np;
+- int ret;
+- void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+- u32 aux_ctrl, cache_type;
+-
+- np = of_find_matching_node(NULL, pl310_ids);
+- if (!np)
+- return;
+-
+- cache_type = readl(p + L2X0_CACHE_TYPE);
+- aux_ctrl = (cache_type & 0x700) << (17-8);
+- aux_ctrl |= 0x7C400001;
+-
+- ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
+- if (!ret)
+- l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
++ l2x0_of_init(0x3c400001, 0xc20fc3fe);
+ #endif
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-ux500/board-mop500-audio.c linux-3.14.40/arch/arm/mach-ux500/board-mop500-audio.c
+--- linux-3.14.40.orig/arch/arm/mach-ux500/board-mop500-audio.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-ux500/board-mop500-audio.c 2015-05-01 14:57:58.035427001 -0500
+@@ -9,7 +9,6 @@
+ #include <linux/gpio.h>
+ #include <linux/platform_data/dma-ste-dma40.h>
+
+-#include "irqs.h"
+ #include <linux/platform_data/asoc-ux500-msp.h>
+
+ #include "ste-dma40-db8500.h"
+diff -Nur linux-3.14.40.orig/arch/arm/mach-ux500/cache-l2x0.c linux-3.14.40/arch/arm/mach-ux500/cache-l2x0.c
+--- linux-3.14.40.orig/arch/arm/mach-ux500/cache-l2x0.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-ux500/cache-l2x0.c 2015-05-01 14:57:58.035427001 -0500
+@@ -35,10 +35,16 @@
+ return 0;
+ }
+
+-static int __init ux500_l2x0_init(void)
++static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
+ {
+- u32 aux_val = 0x3e000000;
++ /*
++ * We can't write to secure registers as we are in non-secure
++ * mode, until we have some SMI service available.
++ */
++}
+
++static int __init ux500_l2x0_init(void)
++{
+ if (cpu_is_u8500_family() || cpu_is_ux540_family())
+ l2x0_base = __io_address(U8500_L2CC_BASE);
+ else
+@@ -48,28 +54,12 @@
+ /* Unlock before init */
+ ux500_l2x0_unlock();
+
+- /* DBx540's L2 has 128KB way size */
+- if (cpu_is_ux540_family())
+- /* 128KB way size */
+- aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
+- else
+- /* 64KB way size */
+- aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
++ outer_cache.write_sec = ux500_l2c310_write_sec;
+
+- /* 64KB way size, 8 way associativity, force WA */
+ if (of_have_populated_dt())
+- l2x0_of_init(aux_val, 0xc0000fff);
++ l2x0_of_init(0, ~0);
+ else
+- l2x0_init(l2x0_base, aux_val, 0xc0000fff);
+-
+- /*
+- * We can't disable l2 as we are in non secure mode, currently
+- * this seems be called only during kexec path. So let's
+- * override outer.disable with nasty assignment until we have
+- * some SMI service available.
+- */
+- outer_cache.disable = NULL;
+- outer_cache.set_debug = NULL;
++ l2x0_init(l2x0_base, 0, ~0);
+
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mach-ux500/cpu-db8500.c linux-3.14.40/arch/arm/mach-ux500/cpu-db8500.c
+--- linux-3.14.40.orig/arch/arm/mach-ux500/cpu-db8500.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-ux500/cpu-db8500.c 2015-05-01 14:57:58.035427001 -0500
+@@ -27,7 +27,6 @@
+ #include <asm/mach/map.h>
+
+ #include "setup.h"
+-#include "irqs.h"
+
+ #include "board-mop500-regulators.h"
+ #include "board-mop500.h"
+@@ -35,14 +34,11 @@
+ #include "id.h"
+
+ struct ab8500_platform_data ab8500_platdata = {
+- .irq_base = MOP500_AB8500_IRQ_BASE,
+ .regulator = &ab8500_regulator_plat_data,
+ };
+
+ struct prcmu_pdata db8500_prcmu_pdata = {
+ .ab_platdata = &ab8500_platdata,
+- .ab_irq = IRQ_DB8500_AB8500,
+- .irq_base = IRQ_PRCMU_BASE,
+ .version_offset = DB8500_PRCMU_FW_VERSION_OFFSET,
+ .legacy_offset = DB8500_PRCMU_LEGACY_OFFSET,
+ };
+diff -Nur linux-3.14.40.orig/arch/arm/mach-ux500/irqs-board-mop500.h linux-3.14.40/arch/arm/mach-ux500/irqs-board-mop500.h
+--- linux-3.14.40.orig/arch/arm/mach-ux500/irqs-board-mop500.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-ux500/irqs-board-mop500.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,55 +0,0 @@
+-/*
+- * Copyright (C) ST-Ericsson SA 2010
+- *
+- * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+- * License terms: GNU General Public License (GPL) version 2
+- */
+-
+-#ifndef __MACH_IRQS_BOARD_MOP500_H
+-#define __MACH_IRQS_BOARD_MOP500_H
+-
+-/* Number of AB8500 irqs is taken from header file */
+-#include <linux/mfd/abx500/ab8500.h>
+-
+-#define MOP500_AB8500_IRQ_BASE IRQ_BOARD_START
+-#define MOP500_AB8500_IRQ_END (MOP500_AB8500_IRQ_BASE \
+- + AB8500_MAX_NR_IRQS)
+-
+-/* TC35892 */
+-#define TC35892_NR_INTERNAL_IRQS 8
+-#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x))
+-#define TC35892_NR_GPIOS 24
+-#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS)
+-
+-#define MOP500_EGPIO_NR_IRQS TC35892_NR_IRQS
+-
+-#define MOP500_EGPIO_IRQ_BASE MOP500_AB8500_IRQ_END
+-#define MOP500_EGPIO_IRQ_END (MOP500_EGPIO_IRQ_BASE \
+- + MOP500_EGPIO_NR_IRQS)
+-/* STMPE1601 irqs */
+-#define STMPE_NR_INTERNAL_IRQS 9
+-#define STMPE_INT_GPIO(x) (STMPE_NR_INTERNAL_IRQS + (x))
+-#define STMPE_NR_GPIOS 24
+-#define STMPE_NR_IRQS STMPE_INT_GPIO(STMPE_NR_GPIOS)
+-
+-#define MOP500_STMPE1601_IRQBASE MOP500_EGPIO_IRQ_END
+-#define MOP500_STMPE1601_IRQ(x) (MOP500_STMPE1601_IRQBASE + (x))
+-
+-#define MOP500_STMPE1601_IRQ_END \
+- MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
+-
+-#define MOP500_NR_IRQS MOP500_STMPE1601_IRQ_END
+-
+-#define MOP500_IRQ_END MOP500_NR_IRQS
+-
+-/*
+- * We may have several boards, but only one will run at a
+- * time, so the one with most IRQs will bump this ahead,
+- * but the IRQ_BOARD_START remains the same for either board.
+- */
+-#if MOP500_IRQ_END > IRQ_BOARD_END
+-#undef IRQ_BOARD_END
+-#define IRQ_BOARD_END MOP500_IRQ_END
+-#endif
+-
+-#endif
+diff -Nur linux-3.14.40.orig/arch/arm/mach-ux500/irqs-db8500.h linux-3.14.40/arch/arm/mach-ux500/irqs-db8500.h
+--- linux-3.14.40.orig/arch/arm/mach-ux500/irqs-db8500.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-ux500/irqs-db8500.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,125 +0,0 @@
+-/*
+- * Copyright (C) ST-Ericsson SA 2010
+- *
+- * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+- * License terms: GNU General Public License (GPL) version 2
+- */
+-
+-#ifndef __MACH_IRQS_DB8500_H
+-#define __MACH_IRQS_DB8500_H
+-
+-#define IRQ_DB8500_MTU0 (IRQ_SHPI_START + 4)
+-#define IRQ_DB8500_SPI2 (IRQ_SHPI_START + 6)
+-#define IRQ_DB8500_PMU (IRQ_SHPI_START + 7)
+-#define IRQ_DB8500_SPI0 (IRQ_SHPI_START + 8)
+-#define IRQ_DB8500_RTT (IRQ_SHPI_START + 9)
+-#define IRQ_DB8500_PKA (IRQ_SHPI_START + 10)
+-#define IRQ_DB8500_UART0 (IRQ_SHPI_START + 11)
+-#define IRQ_DB8500_I2C3 (IRQ_SHPI_START + 12)
+-#define IRQ_DB8500_L2CC (IRQ_SHPI_START + 13)
+-#define IRQ_DB8500_SSP0 (IRQ_SHPI_START + 14)
+-#define IRQ_DB8500_CRYP1 (IRQ_SHPI_START + 15)
+-#define IRQ_DB8500_MSP1_RX (IRQ_SHPI_START + 16)
+-#define IRQ_DB8500_MTU1 (IRQ_SHPI_START + 17)
+-#define IRQ_DB8500_RTC (IRQ_SHPI_START + 18)
+-#define IRQ_DB8500_UART1 (IRQ_SHPI_START + 19)
+-#define IRQ_DB8500_USB_WAKEUP (IRQ_SHPI_START + 20)
+-#define IRQ_DB8500_I2C0 (IRQ_SHPI_START + 21)
+-#define IRQ_DB8500_I2C1 (IRQ_SHPI_START + 22)
+-#define IRQ_DB8500_USBOTG (IRQ_SHPI_START + 23)
+-#define IRQ_DB8500_DMA_SECURE (IRQ_SHPI_START + 24)
+-#define IRQ_DB8500_DMA (IRQ_SHPI_START + 25)
+-#define IRQ_DB8500_UART2 (IRQ_SHPI_START + 26)
+-#define IRQ_DB8500_ICN_PMU1 (IRQ_SHPI_START + 27)
+-#define IRQ_DB8500_ICN_PMU2 (IRQ_SHPI_START + 28)
+-#define IRQ_DB8500_HSIR_EXCEP (IRQ_SHPI_START + 29)
+-#define IRQ_DB8500_MSP0 (IRQ_SHPI_START + 31)
+-#define IRQ_DB8500_HSIR_CH0_OVRRUN (IRQ_SHPI_START + 32)
+-#define IRQ_DB8500_HSIR_CH1_OVRRUN (IRQ_SHPI_START + 33)
+-#define IRQ_DB8500_HSIR_CH2_OVRRUN (IRQ_SHPI_START + 34)
+-#define IRQ_DB8500_HSIR_CH3_OVRRUN (IRQ_SHPI_START + 35)
+-#define IRQ_DB8500_HSIR_CH4_OVRRUN (IRQ_SHPI_START + 36)
+-#define IRQ_DB8500_HSIR_CH5_OVRRUN (IRQ_SHPI_START + 37)
+-#define IRQ_DB8500_HSIR_CH6_OVRRUN (IRQ_SHPI_START + 38)
+-#define IRQ_DB8500_HSIR_CH7_OVRRUN (IRQ_SHPI_START + 39)
+-#define IRQ_DB8500_AB8500 (IRQ_SHPI_START + 40)
+-#define IRQ_DB8500_SDMMC2 (IRQ_SHPI_START + 41)
+-#define IRQ_DB8500_SIA (IRQ_SHPI_START + 42)
+-#define IRQ_DB8500_SIA2 (IRQ_SHPI_START + 43)
+-#define IRQ_DB8500_SVA (IRQ_SHPI_START + 44)
+-#define IRQ_DB8500_SVA2 (IRQ_SHPI_START + 45)
+-#define IRQ_DB8500_PRCMU0 (IRQ_SHPI_START + 46)
+-#define IRQ_DB8500_PRCMU1 (IRQ_SHPI_START + 47)
+-#define IRQ_DB8500_DISP (IRQ_SHPI_START + 48)
+-#define IRQ_DB8500_SPI3 (IRQ_SHPI_START + 49)
+-#define IRQ_DB8500_SDMMC1 (IRQ_SHPI_START + 50)
+-#define IRQ_DB8500_I2C4 (IRQ_SHPI_START + 51)
+-#define IRQ_DB8500_SSP1 (IRQ_SHPI_START + 52)
+-#define IRQ_DB8500_SKE (IRQ_SHPI_START + 53)
+-#define IRQ_DB8500_KB (IRQ_SHPI_START + 54)
+-#define IRQ_DB8500_I2C2 (IRQ_SHPI_START + 55)
+-#define IRQ_DB8500_B2R2 (IRQ_SHPI_START + 56)
+-#define IRQ_DB8500_CRYP0 (IRQ_SHPI_START + 57)
+-#define IRQ_DB8500_SDMMC3 (IRQ_SHPI_START + 59)
+-#define IRQ_DB8500_SDMMC0 (IRQ_SHPI_START + 60)
+-#define IRQ_DB8500_HSEM (IRQ_SHPI_START + 61)
+-#define IRQ_DB8500_MSP1 (IRQ_SHPI_START + 62)
+-#define IRQ_DB8500_SBAG (IRQ_SHPI_START + 63)
+-#define IRQ_DB8500_SPI1 (IRQ_SHPI_START + 96)
+-#define IRQ_DB8500_SRPTIMER (IRQ_SHPI_START + 97)
+-#define IRQ_DB8500_MSP2 (IRQ_SHPI_START + 98)
+-#define IRQ_DB8500_SDMMC4 (IRQ_SHPI_START + 99)
+-#define IRQ_DB8500_SDMMC5 (IRQ_SHPI_START + 100)
+-#define IRQ_DB8500_HSIRD0 (IRQ_SHPI_START + 104)
+-#define IRQ_DB8500_HSIRD1 (IRQ_SHPI_START + 105)
+-#define IRQ_DB8500_HSITD0 (IRQ_SHPI_START + 106)
+-#define IRQ_DB8500_HSITD1 (IRQ_SHPI_START + 107)
+-#define IRQ_DB8500_CTI0 (IRQ_SHPI_START + 108)
+-#define IRQ_DB8500_CTI1 (IRQ_SHPI_START + 109)
+-#define IRQ_DB8500_ICN_ERR (IRQ_SHPI_START + 110)
+-#define IRQ_DB8500_MALI_PPMMU (IRQ_SHPI_START + 112)
+-#define IRQ_DB8500_MALI_PP (IRQ_SHPI_START + 113)
+-#define IRQ_DB8500_MALI_GPMMU (IRQ_SHPI_START + 114)
+-#define IRQ_DB8500_MALI_GP (IRQ_SHPI_START + 115)
+-#define IRQ_DB8500_MALI (IRQ_SHPI_START + 116)
+-#define IRQ_DB8500_PRCMU_SEM (IRQ_SHPI_START + 118)
+-#define IRQ_DB8500_GPIO0 (IRQ_SHPI_START + 119)
+-#define IRQ_DB8500_GPIO1 (IRQ_SHPI_START + 120)
+-#define IRQ_DB8500_GPIO2 (IRQ_SHPI_START + 121)
+-#define IRQ_DB8500_GPIO3 (IRQ_SHPI_START + 122)
+-#define IRQ_DB8500_GPIO4 (IRQ_SHPI_START + 123)
+-#define IRQ_DB8500_GPIO5 (IRQ_SHPI_START + 124)
+-#define IRQ_DB8500_GPIO6 (IRQ_SHPI_START + 125)
+-#define IRQ_DB8500_GPIO7 (IRQ_SHPI_START + 126)
+-#define IRQ_DB8500_GPIO8 (IRQ_SHPI_START + 127)
+-
+-#define IRQ_CA_WAKE_REQ_ED (IRQ_SHPI_START + 71)
+-#define IRQ_AC_READ_NOTIFICATION_0_ED (IRQ_SHPI_START + 66)
+-#define IRQ_AC_READ_NOTIFICATION_1_ED (IRQ_SHPI_START + 64)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_0_ED (IRQ_SHPI_START + 67)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_1_ED (IRQ_SHPI_START + 65)
+-
+-#define IRQ_CA_WAKE_REQ_V1 (IRQ_SHPI_START + 83)
+-#define IRQ_AC_READ_NOTIFICATION_0_V1 (IRQ_SHPI_START + 78)
+-#define IRQ_AC_READ_NOTIFICATION_1_V1 (IRQ_SHPI_START + 76)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_0_V1 (IRQ_SHPI_START + 79)
+-#define IRQ_CA_MSG_PEND_NOTIFICATION_1_V1 (IRQ_SHPI_START + 77)
+-
+-#ifdef CONFIG_UX500_SOC_DB8500
+-
+-/* Virtual interrupts corresponding to the PRCMU wakeups. */
+-#define IRQ_PRCMU_BASE IRQ_SOC_START
+-#define IRQ_PRCMU_END (IRQ_PRCMU_BASE + 23)
+-
+-/*
+- * We may have several SoCs, but only one will run at a
+- * time, so the one with most IRQs will bump this ahead,
+- * but the IRQ_SOC_START remains the same for either SoC.
+- */
+-#if IRQ_SOC_END < IRQ_PRCMU_END
+-#undef IRQ_SOC_END
+-#define IRQ_SOC_END IRQ_PRCMU_END
+-#endif
+-
+-#endif /* CONFIG_UX500_SOC_DB8500 */
+-#endif
+diff -Nur linux-3.14.40.orig/arch/arm/mach-ux500/irqs.h linux-3.14.40/arch/arm/mach-ux500/irqs.h
+--- linux-3.14.40.orig/arch/arm/mach-ux500/irqs.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-ux500/irqs.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,49 +0,0 @@
+-/*
+- * Copyright (C) 2008 STMicroelectronics
+- * Copyright (C) 2009 ST-Ericsson.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
+-#ifndef ASM_ARCH_IRQS_H
+-#define ASM_ARCH_IRQS_H
+-
+-#define IRQ_LOCALTIMER 29
+-#define IRQ_LOCALWDOG 30
+-
+-/* Shared Peripheral Interrupt (SHPI) */
+-#define IRQ_SHPI_START 32
+-
+-/*
+- * MTU0 preserved for now until plat-nomadik is taught not to use it. Don't
+- * add any other IRQs here, use the irqs-dbx500.h files.
+- */
+-#define IRQ_MTU0 (IRQ_SHPI_START + 4)
+-
+-#define DBX500_NR_INTERNAL_IRQS 166
+-
+-/* After chip-specific IRQ numbers we have the GPIO ones */
+-#define NOMADIK_NR_GPIO 288
+-#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS)
+-#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS)
+-#define IRQ_GPIO_END NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
+-
+-#define IRQ_SOC_START IRQ_GPIO_END
+-/* This will be overridden by SoC-specific irq headers */
+-#define IRQ_SOC_END IRQ_SOC_START
+-
+-#include "irqs-db8500.h"
+-
+-#define IRQ_BOARD_START IRQ_SOC_END
+-/* This will be overridden by board-specific irq headers */
+-#define IRQ_BOARD_END IRQ_BOARD_START
+-
+-#ifdef CONFIG_MACH_MOP500
+-#include "irqs-board-mop500.h"
+-#endif
+-
+-#define UX500_NR_IRQS IRQ_BOARD_END
+-
+-#endif /* ASM_ARCH_IRQS_H */
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/ct-ca9x4.c linux-3.14.40/arch/arm/mach-vexpress/ct-ca9x4.c
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/ct-ca9x4.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/ct-ca9x4.c 2015-05-01 14:57:58.039427001 -0500
+@@ -45,6 +45,23 @@
+ iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
+ }
+
++static void __init ca9x4_l2_init(void)
++{
++#ifdef CONFIG_CACHE_L2X0
++ void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
++
++ if (l2x0_base) {
++ /* set RAM latencies to 1 cycle for this core tile. */
++ writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
++ writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
++
++ l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
++ } else {
++ pr_err("L2C: unable to map L2 cache controller\n");
++ }
++#endif
++}
++
+ #ifdef CONFIG_HAVE_ARM_TWD
+ static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);
+
+@@ -63,6 +80,7 @@
+ gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
+ ioremap(A9_MPCORE_GIC_CPU, SZ_256));
+ ca9x4_twd_init();
++ ca9x4_l2_init();
+ }
+
+ static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
+@@ -141,16 +159,6 @@
+ {
+ int i;
+
+-#ifdef CONFIG_CACHE_L2X0
+- void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
+-
+- /* set RAM latencies to 1 cycle for this core tile. */
+- writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+- writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+-
+- l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
+-#endif
+-
+ for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
+ amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/dcscb.c linux-3.14.40/arch/arm/mach-vexpress/dcscb.c
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/dcscb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/dcscb.c 2015-05-01 14:57:58.039427001 -0500
+@@ -23,6 +23,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <asm/cp15.h>
++#include <asm/psci.h>
+
+
+ #define RST_HOLD0 0x0
+@@ -193,6 +194,12 @@
+ unsigned int cfg;
+ int ret;
+
++ ret = psci_probe();
++ if (!ret) {
++ pr_debug("psci found. Aborting native init\n");
++ return -ENODEV;
++ }
++
+ if (!cci_probed())
+ return -ENODEV;
+
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/Kconfig linux-3.14.40/arch/arm/mach-vexpress/Kconfig
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/Kconfig 2015-05-01 14:57:58.039427001 -0500
+@@ -55,6 +55,7 @@
+
+ config ARCH_VEXPRESS_CA9X4
+ bool "Versatile Express Cortex-A9x4 tile"
++ select ARM_ERRATA_643719
+
+ config ARCH_VEXPRESS_DCSCB
+ bool "Dual Cluster System Control Block (DCSCB) support"
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/Makefile linux-3.14.40/arch/arm/mach-vexpress/Makefile
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/Makefile 2015-05-01 14:57:58.039427001 -0500
+@@ -8,8 +8,15 @@
+ obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+ obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
+ CFLAGS_dcscb.o += -march=armv7-a
++CFLAGS_REMOVE_dcscb.o = -pg
+ obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
++CFLAGS_REMOVE_spc.o = -pg
+ obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
+ CFLAGS_tc2_pm.o += -march=armv7-a
++CFLAGS_REMOVE_tc2_pm.o = -pg
++ifeq ($(CONFIG_ARCH_VEXPRESS_TC2_PM),y)
++obj-$(CONFIG_ARM_PSCI) += tc2_pm_psci.o
++CFLAGS_REMOVE_tc2_pm_psci.o = -pg
++endif
+ obj-$(CONFIG_SMP) += platsmp.o
+ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/spc.c linux-3.14.40/arch/arm/mach-vexpress/spc.c
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/spc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/spc.c 2015-05-01 14:57:58.039427001 -0500
+@@ -392,7 +392,7 @@
+ * +--------------------------+
+ * | 31 20 | 19 0 |
+ * +--------------------------+
+- * | u_volt | freq(kHz) |
++ * | m_volt | freq(kHz) |
+ * +--------------------------+
+ */
+ #define MULT_FACTOR 20
+@@ -414,7 +414,7 @@
+ ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
+ if (!ret) {
+ opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
+- opps->u_volt = data >> VOLT_SHIFT;
++ opps->u_volt = (data >> VOLT_SHIFT) * 1000;
+ } else {
+ break;
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/tc2_pm.c linux-3.14.40/arch/arm/mach-vexpress/tc2_pm.c
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/tc2_pm.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/tc2_pm.c 2015-05-01 14:57:58.039427001 -0500
+@@ -27,6 +27,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/cputype.h>
+ #include <asm/cp15.h>
++#include <asm/psci.h>
+
+ #include <linux/arm-cci.h>
+
+@@ -329,6 +330,12 @@
+ u32 a15_cluster_id, a7_cluster_id, sys_info;
+ struct device_node *np;
+
++ ret = psci_probe();
++ if (!ret) {
++ pr_debug("psci found. Aborting native init\n");
++ return -ENODEV;
++ }
++
+ /*
+ * The power management-related features are hidden behind
+ * SCC registers. We need to extract runtime information like
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/tc2_pm_psci.c linux-3.14.40/arch/arm/mach-vexpress/tc2_pm_psci.c
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/tc2_pm_psci.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mach-vexpress/tc2_pm_psci.c 2015-05-01 14:57:58.039427001 -0500
+@@ -0,0 +1,173 @@
++/*
++ * arch/arm/mach-vexpress/tc2_pm_psci.c - TC2 PSCI support
++ *
++ * Created by: Achin Gupta, December 2012
++ * Copyright: (C) 2012 ARM Limited
++ *
++ * Some portions of this file were originally written by Nicolas Pitre
++ * Copyright: (C) 2012 Linaro Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/of.h>
++#include <linux/spinlock.h>
++#include <linux/errno.h>
++
++#include <asm/mcpm.h>
++#include <asm/proc-fns.h>
++#include <asm/cacheflush.h>
++#include <asm/psci.h>
++#include <asm/atomic.h>
++#include <asm/cputype.h>
++#include <asm/cp15.h>
++
++#include <mach/motherboard.h>
++
++#include <linux/vexpress.h>
++
++/*
++ * Platform specific state id understood by the firmware and used to
++ * program the power controller
++ */
++#define PSCI_POWER_STATE_ID 0
++
++#define TC2_CLUSTERS 2
++#define TC2_MAX_CPUS_PER_CLUSTER 3
++
++static atomic_t tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
++
++static int tc2_pm_psci_power_up(unsigned int cpu, unsigned int cluster)
++{
++ unsigned int mpidr = (cluster << 8) | cpu;
++ int ret = 0;
++
++ BUG_ON(!psci_ops.cpu_on);
++
++ switch (atomic_inc_return(&tc2_pm_use_count[cpu][cluster])) {
++ case 1:
++ /*
++ * This is a request to power up a cpu that linux thinks has
++ * been powered down. Retries are needed if the firmware has
++	 * not yet processed the power down request.
++ */
++ do
++ ret = psci_ops.cpu_on(mpidr,
++ virt_to_phys(mcpm_entry_point));
++ while (ret == -EAGAIN);
++
++ return ret;
++ case 2:
++ /* This power up request has overtaken a power down request */
++ return ret;
++ default:
++ /* Any other value is a bug */
++ BUG();
++ }
++}
++
++static void tc2_pm_psci_power_down(void)
++{
++ struct psci_power_state power_state;
++ unsigned int mpidr, cpu, cluster;
++
++ mpidr = read_cpuid_mpidr();
++ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
++
++ BUG_ON(!psci_ops.cpu_off);
++
++ switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
++ case 1:
++ /*
++ * Overtaken by a power up. Flush caches, exit coherency,
++ * return & fake a reset
++ */
++ set_cr(get_cr() & ~CR_C);
++
++ flush_cache_louis();
++
++ asm volatile ("clrex");
++ set_auxcr(get_auxcr() & ~(1 << 6));
++
++ return;
++ case 0:
++ /* A normal request to possibly power down the cluster */
++ power_state.id = PSCI_POWER_STATE_ID;
++ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
++ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
++
++ psci_ops.cpu_off(power_state);
++
++ /* On success this function never returns */
++ default:
++ /* Any other value is a bug */
++ BUG();
++ }
++}
++
++static void tc2_pm_psci_suspend(u64 unused)
++{
++ struct psci_power_state power_state;
++
++ BUG_ON(!psci_ops.cpu_suspend);
++
++ /* On TC2 always attempt to power down the cluster */
++ power_state.id = PSCI_POWER_STATE_ID;
++ power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
++ power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;
++
++ psci_ops.cpu_suspend(power_state, virt_to_phys(mcpm_entry_point));
++
++ /* On success this function never returns */
++ BUG();
++}
++
++static const struct mcpm_platform_ops tc2_pm_power_ops = {
++ .power_up = tc2_pm_psci_power_up,
++ .power_down = tc2_pm_psci_power_down,
++ .suspend = tc2_pm_psci_suspend,
++};
++
++static void __init tc2_pm_usage_count_init(void)
++{
++ unsigned int mpidr, cpu, cluster;
++
++ mpidr = read_cpuid_mpidr();
++ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
++
++ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
++ BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
++
++ atomic_set(&tc2_pm_use_count[cpu][cluster], 1);
++}
++
++static int __init tc2_pm_psci_init(void)
++{
++ int ret;
++
++ ret = psci_probe();
++ if (ret) {
++ pr_debug("psci not found. Aborting psci init\n");
++ return -ENODEV;
++ }
++
++ if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
++ return -ENODEV;
++
++ tc2_pm_usage_count_init();
++
++ ret = mcpm_platform_register(&tc2_pm_power_ops);
++ if (!ret)
++ ret = mcpm_sync_init(NULL);
++ if (!ret)
++ pr_info("TC2 power management using PSCI initialized\n");
++ return ret;
++}
++
++early_initcall(tc2_pm_psci_init);
+diff -Nur linux-3.14.40.orig/arch/arm/mach-vexpress/v2m.c linux-3.14.40/arch/arm/mach-vexpress/v2m.c
+--- linux-3.14.40.orig/arch/arm/mach-vexpress/v2m.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-vexpress/v2m.c 2015-05-01 14:57:58.039427001 -0500
+@@ -7,6 +7,7 @@
+ #include <linux/io.h>
+ #include <linux/smp.h>
+ #include <linux/init.h>
++#include <linux/memblock.h>
+ #include <linux/of_address.h>
+ #include <linux/of_fdt.h>
+ #include <linux/of_irq.h>
+@@ -369,6 +370,31 @@
+ .init_machine = v2m_init,
+ MACHINE_END
+
++static void __init v2m_dt_hdlcd_init(void)
++{
++ struct device_node *node;
++ int len, na, ns;
++ const __be32 *prop;
++ phys_addr_t fb_base, fb_size;
++
++ node = of_find_compatible_node(NULL, NULL, "arm,hdlcd");
++ if (!node)
++ return;
++
++ na = of_n_addr_cells(node);
++ ns = of_n_size_cells(node);
++
++ prop = of_get_property(node, "framebuffer", &len);
++ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
++ return;
++
++ fb_base = of_read_number(prop, na);
++ fb_size = of_read_number(prop + na, ns);
++
++ if (WARN_ON(memblock_remove(fb_base, fb_size)))
++ return;
++};
++
+ static struct map_desc v2m_rs1_io_desc __initdata = {
+ .virtual = V2M_PERIPH,
+ .pfn = __phys_to_pfn(0x1c000000),
+@@ -421,6 +447,8 @@
+ }
+
+ versatile_sched_clock_init(vexpress_get_24mhz_clock_base(), 24000000);
++
++ v2m_dt_hdlcd_init();
+ }
+
+ static const struct of_device_id v2m_dt_bus_match[] __initconst = {
+diff -Nur linux-3.14.40.orig/arch/arm/mach-zynq/common.c linux-3.14.40/arch/arm/mach-zynq/common.c
+--- linux-3.14.40.orig/arch/arm/mach-zynq/common.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mach-zynq/common.c 2015-05-01 14:57:58.039427001 -0500
+@@ -67,7 +67,7 @@
+ /*
+ * 64KB way size, 8-way associativity, parity disabled
+ */
+- l2x0_of_init(0x02060000, 0xF0F0FFFF);
++ l2x0_of_init(0x02000000, 0xf0ffffff);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+
+diff -Nur linux-3.14.40.orig/arch/arm/mm/cache-feroceon-l2.c linux-3.14.40/arch/arm/mm/cache-feroceon-l2.c
+--- linux-3.14.40.orig/arch/arm/mm/cache-feroceon-l2.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/cache-feroceon-l2.c 2015-05-01 14:57:58.043427001 -0500
+@@ -343,7 +343,6 @@
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
+- outer_cache.inv_all = l2_inv_all;
+
+ enable_l2();
+
+diff -Nur linux-3.14.40.orig/arch/arm/mm/cache-l2x0.c linux-3.14.40/arch/arm/mm/cache-l2x0.c
+--- linux-3.14.40.orig/arch/arm/mm/cache-l2x0.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/cache-l2x0.c 2015-05-01 14:57:58.043427001 -0500
+@@ -16,18 +16,33 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
++#include <linux/cpu.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
++#include <linux/smp.h>
+ #include <linux/spinlock.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+
+ #include <asm/cacheflush.h>
++#include <asm/cp15.h>
++#include <asm/cputype.h>
+ #include <asm/hardware/cache-l2x0.h>
+ #include "cache-tauros3.h"
+ #include "cache-aurora-l2.h"
+
++struct l2c_init_data {
++ const char *type;
++ unsigned way_size_0;
++ unsigned num_lock;
++ void (*of_parse)(const struct device_node *, u32 *, u32 *);
++ void (*enable)(void __iomem *, u32, unsigned);
++ void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
++ void (*save)(void __iomem *);
++ struct outer_cache_fns outer_cache;
++};
++
+ #define CACHE_LINE_SIZE 32
+
+ static void __iomem *l2x0_base;
+@@ -36,96 +51,116 @@
+ static u32 l2x0_size;
+ static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
+
+-/* Aurora don't have the cache ID register available, so we have to
+- * pass it though the device tree */
+-static u32 cache_id_part_number_from_dt;
+-
+ struct l2x0_regs l2x0_saved_regs;
+
+-struct l2x0_of_data {
+- void (*setup)(const struct device_node *, u32 *, u32 *);
+- void (*save)(void);
+- struct outer_cache_fns outer_cache;
+-};
+-
+-static bool of_init = false;
+-
+-static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
++/*
++ * Common code for all cache controllers.
++ */
++static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
+ {
+ /* wait for cache operation by line or way to complete */
+ while (readl_relaxed(reg) & mask)
+ cpu_relax();
+ }
+
+-#ifdef CONFIG_CACHE_PL310
+-static inline void cache_wait(void __iomem *reg, unsigned long mask)
++/*
++ * By default, we write directly to secure registers. Platforms must
++ * override this if they are running non-secure.
++ */
++static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
+ {
+- /* cache operations by line are atomic on PL310 */
++ if (val == readl_relaxed(base + reg))
++ return;
++ if (outer_cache.write_sec)
++ outer_cache.write_sec(val, reg);
++ else
++ writel_relaxed(val, base + reg);
+ }
+-#else
+-#define cache_wait cache_wait_way
+-#endif
+
+-static inline void cache_sync(void)
++/*
++ * This should only be called when we have a requirement that the
++ * register be written due to a work-around, as platforms running
++ * in non-secure mode may not be able to access this register.
++ */
++static inline void l2c_set_debug(void __iomem *base, unsigned long val)
+ {
+- void __iomem *base = l2x0_base;
+-
+- writel_relaxed(0, base + sync_reg_offset);
+- cache_wait(base + L2X0_CACHE_SYNC, 1);
++ l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
+ }
+
+-static inline void l2x0_clean_line(unsigned long addr)
++static void __l2c_op_way(void __iomem *reg)
+ {
+- void __iomem *base = l2x0_base;
+- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(l2x0_way_mask, reg);
++ l2c_wait_mask(reg, l2x0_way_mask);
+ }
+
+-static inline void l2x0_inv_line(unsigned long addr)
++static inline void l2c_unlock(void __iomem *base, unsigned num)
+ {
+- void __iomem *base = l2x0_base;
+- cache_wait(base + L2X0_INV_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_INV_LINE_PA);
++ unsigned i;
++
++ for (i = 0; i < num; i++) {
++ writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
++ i * L2X0_LOCKDOWN_STRIDE);
++ writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
++ i * L2X0_LOCKDOWN_STRIDE);
++ }
+ }
+
+-#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+-static inline void debug_writel(unsigned long val)
++/*
++ * Enable the L2 cache controller. This function must only be
++ * called when the cache controller is known to be disabled.
++ */
++static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
+ {
+- if (outer_cache.set_debug)
+- outer_cache.set_debug(val);
++ unsigned long flags;
++
++ l2c_write_sec(aux, base, L2X0_AUX_CTRL);
++
++ l2c_unlock(base, num_lock);
++
++ local_irq_save(flags);
++ __l2c_op_way(base + L2X0_INV_WAY);
++ writel_relaxed(0, base + sync_reg_offset);
++ l2c_wait_mask(base + sync_reg_offset, 1);
++ local_irq_restore(flags);
++
++ l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
+ }
+
+-static void pl310_set_debug(unsigned long val)
++static void l2c_disable(void)
+ {
+- writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
++ void __iomem *base = l2x0_base;
++
++ outer_cache.flush_all();
++ l2c_write_sec(0, base, L2X0_CTRL);
++ dsb(st);
+ }
+-#else
+-/* Optimised out for non-errata case */
+-static inline void debug_writel(unsigned long val)
++
++#ifdef CONFIG_CACHE_PL310
++static inline void cache_wait(void __iomem *reg, unsigned long mask)
+ {
++ /* cache operations by line are atomic on PL310 */
+ }
+-
+-#define pl310_set_debug NULL
++#else
++#define cache_wait l2c_wait_mask
+ #endif
+
+-#ifdef CONFIG_PL310_ERRATA_588369
+-static inline void l2x0_flush_line(unsigned long addr)
++static inline void cache_sync(void)
+ {
+ void __iomem *base = l2x0_base;
+
+- /* Clean by PA followed by Invalidate by PA */
+- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
+- cache_wait(base + L2X0_INV_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_INV_LINE_PA);
++ writel_relaxed(0, base + sync_reg_offset);
++ cache_wait(base + L2X0_CACHE_SYNC, 1);
+ }
+-#else
+
+-static inline void l2x0_flush_line(unsigned long addr)
++#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
++static inline void debug_writel(unsigned long val)
++{
++ l2c_set_debug(l2x0_base, val);
++}
++#else
++/* Optimised out for non-errata case */
++static inline void debug_writel(unsigned long val)
+ {
+- void __iomem *base = l2x0_base;
+- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+- writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
+ }
+ #endif
+
+@@ -141,8 +176,7 @@
+ static void __l2x0_flush_all(void)
+ {
+ debug_writel(0x03);
+- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+- cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
++ __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
+ cache_sync();
+ debug_writel(0x00);
+ }
+@@ -157,274 +191,882 @@
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_clean_all(void)
++static void l2x0_disable(void)
+ {
+ unsigned long flags;
+
+- /* clean all ways */
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+- cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+- cache_sync();
++ __l2x0_flush_all();
++ l2c_write_sec(0, l2x0_base, L2X0_CTRL);
++ dsb(st);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_inv_all(void)
++static void l2c_save(void __iomem *base)
+ {
+- unsigned long flags;
++ l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++}
+
+- /* invalidate all ways */
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
+- /* Invalidating when L2 is enabled is a nono */
+- BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
+- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+- cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+- cache_sync();
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++/*
++ * L2C-210 specific code.
++ *
++ * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
++ * ensure that no background operation is running. The way operations
++ * are all background tasks.
++ *
++ * While a background operation is in progress, any new operation is
++ * ignored (unspecified whether this causes an error.) Thankfully, not
++ * used on SMP.
++ *
++ * Never has a sync register other than L2X0_CACHE_SYNC, but
++ * we use sync_reg_offset here so we can share some of this with L2C-310.
++ */
++static void __l2c210_cache_sync(void __iomem *base)
++{
++ writel_relaxed(0, base + sync_reg_offset);
+ }
+
+-static void l2x0_inv_range(unsigned long start, unsigned long end)
++static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
++ unsigned long end)
++{
++ while (start < end) {
++ writel_relaxed(start, reg);
++ start += CACHE_LINE_SIZE;
++ }
++}
++
++static void l2c210_inv_range(unsigned long start, unsigned long end)
+ {
+ void __iomem *base = l2x0_base;
+- unsigned long flags;
+
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+- debug_writel(0x03);
+- l2x0_flush_line(start);
+- debug_writel(0x00);
++ writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ end &= ~(CACHE_LINE_SIZE - 1);
+- debug_writel(0x03);
+- l2x0_flush_line(end);
+- debug_writel(0x00);
++ writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
+ }
+
++ __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_clean_range(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++
++ start &= ~(CACHE_LINE_SIZE - 1);
++ __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_flush_range(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++
++ start &= ~(CACHE_LINE_SIZE - 1);
++ __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_flush_all(void)
++{
++ void __iomem *base = l2x0_base;
++
++ BUG_ON(!irqs_disabled());
++
++ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c210_sync(void)
++{
++ __l2c210_cache_sync(l2x0_base);
++}
++
++static void l2c210_resume(void)
++{
++ void __iomem *base = l2x0_base;
++
++ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
++ l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
++}
++
++static const struct l2c_init_data l2c210_data __initconst = {
++ .type = "L2C-210",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .enable = l2c_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c210_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++/*
++ * L2C-220 specific code.
++ *
++ * All operations are background operations: they have to be waited for.
++ * Conflicting requests generate a slave error (which will cause an
++ * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
++ * sync register here.
++ *
++ * However, we can re-use the l2c210_resume call.
++ */
++static inline void __l2c220_cache_sync(void __iomem *base)
++{
++ writel_relaxed(0, base + L2X0_CACHE_SYNC);
++ l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
++}
++
++static void l2c220_op_way(void __iomem *base, unsigned reg)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ __l2c_op_way(base + reg);
++ __l2c220_cache_sync(base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
++ unsigned long end, unsigned long flags)
++{
++ raw_spinlock_t *lock = &l2x0_lock;
++
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+- l2x0_inv_line(start);
++ l2c_wait_mask(reg, 1);
++ writel_relaxed(start, reg);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ }
+ }
+- cache_wait(base + L2X0_INV_LINE_PA, 1);
+- cache_sync();
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++
++ return flags;
+ }
+
+-static void l2x0_clean_range(unsigned long start, unsigned long end)
++static void l2c220_inv_range(unsigned long start, unsigned long end)
+ {
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
+- if ((end - start) >= l2x0_size) {
+- l2x0_clean_all();
+- return;
+- }
+-
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+- start &= ~(CACHE_LINE_SIZE - 1);
+- while (start < end) {
+- unsigned long blk_end = start + min(end - start, 4096UL);
+-
+- while (start < blk_end) {
+- l2x0_clean_line(start);
++ if ((start | end) & (CACHE_LINE_SIZE - 1)) {
++ if (start & (CACHE_LINE_SIZE - 1)) {
++ start &= ~(CACHE_LINE_SIZE - 1);
++ writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+- if (blk_end < end) {
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
++ if (end & (CACHE_LINE_SIZE - 1)) {
++ end &= ~(CACHE_LINE_SIZE - 1);
++ l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
++ writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
+ }
+ }
+- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+- cache_sync();
++
++ flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
++ start, end, flags);
++ l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
++ __l2c220_cache_sync(base);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_flush_range(unsigned long start, unsigned long end)
++static void l2c220_clean_range(unsigned long start, unsigned long end)
+ {
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
++ start &= ~(CACHE_LINE_SIZE - 1);
+ if ((end - start) >= l2x0_size) {
+- l2x0_flush_all();
++ l2c220_op_way(base, L2X0_CLEAN_WAY);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
++ start, end, flags);
++ l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
++ __l2c220_cache_sync(base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static void l2c220_flush_range(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++ unsigned long flags;
++
+ start &= ~(CACHE_LINE_SIZE - 1);
++ if ((end - start) >= l2x0_size) {
++ l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
++ return;
++ }
++
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
++ start, end, flags);
++ l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
++ __l2c220_cache_sync(base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static void l2c220_flush_all(void)
++{
++ l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
++}
++
++static void l2c220_sync(void)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ __l2c220_cache_sync(l2x0_base);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++}
++
++static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
++{
++ /*
++ * Always enable non-secure access to the lockdown registers -
++ * we write to them as part of the L2C enable sequence so they
++ * need to be accessible.
++ */
++ aux |= L220_AUX_CTRL_NS_LOCKDOWN;
++
++ l2c_enable(base, aux, num_lock);
++}
++
++static const struct l2c_init_data l2c220_data = {
++ .type = "L2C-220",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .enable = l2c220_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c220_inv_range,
++ .clean_range = l2c220_clean_range,
++ .flush_range = l2c220_flush_range,
++ .flush_all = l2c220_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c220_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++/*
++ * L2C-310 specific code.
++ *
++ * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
++ * and the way operations are all background tasks. However, issuing an
++ * operation while a background operation is in progress results in a
++ * SLVERR response. We can reuse:
++ *
++ * __l2c210_cache_sync (using sync_reg_offset)
++ * l2c210_sync
++ * l2c210_inv_range (if 588369 is not applicable)
++ * l2c210_clean_range
++ * l2c210_flush_range (if 588369 is not applicable)
++ * l2c210_flush_all (if 727915 is not applicable)
++ *
++ * Errata:
++ * 588369: PL310 R0P0->R1P0, fixed R2P0.
++ * Affects: all clean+invalidate operations
++ * clean and invalidate skips the invalidate step, so we need to issue
++ * separate operations. We also require the above debug workaround
++ * enclosing this code fragment on affected parts. On unaffected parts,
++ * we must not use this workaround without the debug register writes
++ * to avoid exposing a problem similar to 727915.
++ *
++ * 727915: PL310 R2P0->R3P0, fixed R3P1.
++ * Affects: clean+invalidate by way
++ * clean and invalidate by way runs in the background, and a store can
++ * hit the line between the clean operation and invalidate operation,
++ * resulting in the store being lost.
++ *
++ * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
++ * Affects: 8x64-bit (double fill) line fetches
++ * double fill line fetches can fail to cause dirty data to be evicted
++ * from the cache before the new data overwrites the second line.
++ *
++ * 753970: PL310 R3P0, fixed R3P1.
++ * Affects: sync
++ * prevents merging writes after the sync operation, until another L2C
++ * operation is performed (or a number of other conditions.)
++ *
++ * 769419: PL310 R0P0->R3P1, fixed R3P2.
++ * Affects: store buffer
++ * store buffer is not automatically drained.
++ */
++static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
++{
++ void __iomem *base = l2x0_base;
++
++ if ((start | end) & (CACHE_LINE_SIZE - 1)) {
++ unsigned long flags;
++
++ /* Erratum 588369 for both clean+invalidate operations */
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
++ l2c_set_debug(base, 0x03);
++
++ if (start & (CACHE_LINE_SIZE - 1)) {
++ start &= ~(CACHE_LINE_SIZE - 1);
++ writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(start, base + L2X0_INV_LINE_PA);
++ start += CACHE_LINE_SIZE;
++ }
++
++ if (end & (CACHE_LINE_SIZE - 1)) {
++ end &= ~(CACHE_LINE_SIZE - 1);
++ writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(end, base + L2X0_INV_LINE_PA);
++ }
++
++ l2c_set_debug(base, 0x00);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ }
++
++ __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
++ __l2c210_cache_sync(base);
++}
++
++static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
++{
++ raw_spinlock_t *lock = &l2x0_lock;
++ unsigned long flags;
++ void __iomem *base = l2x0_base;
++
++ raw_spin_lock_irqsave(lock, flags);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+- debug_writel(0x03);
++ l2c_set_debug(base, 0x03);
+ while (start < blk_end) {
+- l2x0_flush_line(start);
++ writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
++ writel_relaxed(start, base + L2X0_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+- debug_writel(0x00);
++ l2c_set_debug(base, 0x00);
+
+ if (blk_end < end) {
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- raw_spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ }
+ }
+- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+- cache_sync();
+- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
++ __l2c210_cache_sync(base);
+ }
+
+-static void l2x0_disable(void)
++static void l2c310_flush_all_erratum(void)
+ {
++ void __iomem *base = l2x0_base;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
+- __l2x0_flush_all();
+- writel_relaxed(0, l2x0_base + L2X0_CTRL);
+- dsb(st);
++ l2c_set_debug(base, 0x03);
++ __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
++ l2c_set_debug(base, 0x00);
++ __l2c210_cache_sync(base);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+
+-static void l2x0_unlock(u32 cache_id)
++static void __init l2c310_save(void __iomem *base)
+ {
+- int lockregs;
+- int i;
++ unsigned revision;
+
+- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+- case L2X0_CACHE_ID_PART_L310:
+- lockregs = 8;
+- break;
+- case AURORA_CACHE_ID:
+- lockregs = 4;
++ l2c_save(base);
++
++ l2x0_saved_regs.tag_latency = readl_relaxed(base +
++ L310_TAG_LATENCY_CTRL);
++ l2x0_saved_regs.data_latency = readl_relaxed(base +
++ L310_DATA_LATENCY_CTRL);
++ l2x0_saved_regs.filter_end = readl_relaxed(base +
++ L310_ADDR_FILTER_END);
++ l2x0_saved_regs.filter_start = readl_relaxed(base +
++ L310_ADDR_FILTER_START);
++
++ revision = readl_relaxed(base + L2X0_CACHE_ID) &
++ L2X0_CACHE_ID_RTL_MASK;
++
++ /* From r2p0, there is Prefetch offset/control register */
++ if (revision >= L310_CACHE_ID_RTL_R2P0)
++ l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
++ L310_PREFETCH_CTRL);
++
++ /* From r3p0, there is Power control register */
++ if (revision >= L310_CACHE_ID_RTL_R3P0)
++ l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
++ L310_POWER_CTRL);
++}
++
++static void l2c310_resume(void)
++{
++ void __iomem *base = l2x0_base;
++
++ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
++ unsigned revision;
++
++ /* restore pl310 setup */
++ writel_relaxed(l2x0_saved_regs.tag_latency,
++ base + L310_TAG_LATENCY_CTRL);
++ writel_relaxed(l2x0_saved_regs.data_latency,
++ base + L310_DATA_LATENCY_CTRL);
++ writel_relaxed(l2x0_saved_regs.filter_end,
++ base + L310_ADDR_FILTER_END);
++ writel_relaxed(l2x0_saved_regs.filter_start,
++ base + L310_ADDR_FILTER_START);
++
++ revision = readl_relaxed(base + L2X0_CACHE_ID) &
++ L2X0_CACHE_ID_RTL_MASK;
++
++ if (revision >= L310_CACHE_ID_RTL_R2P0)
++ l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
++ L310_PREFETCH_CTRL);
++ if (revision >= L310_CACHE_ID_RTL_R3P0)
++ l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
++ L310_POWER_CTRL);
++
++ l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
++
++ /* Re-enable full-line-of-zeros for Cortex-A9 */
++ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
++ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
++ }
++}
++
++static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
++{
++ switch (act & ~CPU_TASKS_FROZEN) {
++ case CPU_STARTING:
++ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+ break;
+- default:
+- /* L210 and unknown types */
+- lockregs = 1;
++ case CPU_DYING:
++ set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+ break;
+ }
++ return NOTIFY_OK;
++}
+
+- for (i = 0; i < lockregs; i++) {
+- writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+- i * L2X0_LOCKDOWN_STRIDE);
+- writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+- i * L2X0_LOCKDOWN_STRIDE);
++static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
++{
++ unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
++ bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
++
++ if (rev >= L310_CACHE_ID_RTL_R2P0) {
++ if (cortex_a9) {
++ aux |= L310_AUX_CTRL_EARLY_BRESP;
++ pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
++ } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
++ pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
++ aux &= ~L310_AUX_CTRL_EARLY_BRESP;
++ }
++ }
++
++ if (cortex_a9) {
++ u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
++ u32 acr = get_auxcr();
++
++ pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
++
++ if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
++ pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
++
++ if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
++ pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
++
++ if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
++ aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
++ pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
++ }
++ } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
++ pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
++ aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
++ }
++
++ if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
++ u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
++
++ pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
++ aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
++ aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
++ 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
++ }
++
++ /* r3p0 or later has power control register */
++ if (rev >= L310_CACHE_ID_RTL_R3P0) {
++ u32 power_ctrl;
++
++ l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
++ base, L310_POWER_CTRL);
++ power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
++ pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
++ power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
++ power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
++ }
++
++ /*
++ * Always enable non-secure access to the lockdown registers -
++ * we write to them as part of the L2C enable sequence so they
++ * need to be accessible.
++ */
++ aux |= L310_AUX_CTRL_NS_LOCKDOWN;
++
++ l2c_enable(base, aux, num_lock);
++
++ if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
++ set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
++ cpu_notifier(l2c310_cpu_enable_flz, 0);
+ }
+ }
+
+-void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
++static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
++ struct outer_cache_fns *fns)
+ {
+- u32 aux;
+- u32 cache_id;
+- u32 way_size = 0;
+- int ways;
+- int way_size_shift = L2X0_WAY_SIZE_SHIFT;
+- const char *type;
++ unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
++ const char *errata[8];
++ unsigned n = 0;
+
+- l2x0_base = base;
+- if (cache_id_part_number_from_dt)
+- cache_id = cache_id_part_number_from_dt;
+- else
+- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+- aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
++ revision < L310_CACHE_ID_RTL_R2P0 &&
++ /* For bcm compatibility */
++ fns->inv_range == l2c210_inv_range) {
++ fns->inv_range = l2c310_inv_range_erratum;
++ fns->flush_range = l2c310_flush_range_erratum;
++ errata[n++] = "588369";
++ }
++
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
++ revision >= L310_CACHE_ID_RTL_R2P0 &&
++ revision < L310_CACHE_ID_RTL_R3P1) {
++ fns->flush_all = l2c310_flush_all_erratum;
++ errata[n++] = "727915";
++ }
++
++ if (revision >= L310_CACHE_ID_RTL_R3P0 &&
++ revision < L310_CACHE_ID_RTL_R3P2) {
++ u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
++		/* Bit 23 may not be required here, but i.MX6 sets it as well */
++ if (val & (BIT(30) | BIT(23))) {
++ val &= ~(BIT(30) | BIT(23));
++ l2c_write_sec(val, base, L310_PREFETCH_CTRL);
++ errata[n++] = "752271";
++ }
++ }
++
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
++ revision == L310_CACHE_ID_RTL_R3P0) {
++ sync_reg_offset = L2X0_DUMMY_REG;
++ errata[n++] = "753970";
++ }
++
++ if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
++ errata[n++] = "769419";
++
++ if (n) {
++ unsigned i;
++
++ pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
++ for (i = 0; i < n; i++)
++ pr_cont(" %s", errata[i]);
++ pr_cont(" enabled\n");
++ }
++}
++
++static void l2c310_disable(void)
++{
++ /*
++ * If full-line-of-zeros is enabled, we must first disable it in the
++ * Cortex-A9 auxiliary control register before disabling the L2 cache.
++ */
++ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
++ set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+
++ l2c_disable();
++}
++
++static const struct l2c_init_data l2c310_init_fns __initconst = {
++ .type = "L2C-310",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .enable = l2c310_enable,
++ .fixup = l2c310_fixup,
++ .save = l2c310_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c310_disable,
++ .sync = l2c210_sync,
++ .resume = l2c310_resume,
++ },
++};
++
++static void __init __l2c_init(const struct l2c_init_data *data,
++ u32 aux_val, u32 aux_mask, u32 cache_id)
++{
++ struct outer_cache_fns fns;
++ unsigned way_size_bits, ways;
++ u32 aux, old_aux;
++
++ /*
++ * Sanity check the aux values. aux_mask is the bits we preserve
++ * from reading the hardware register, and aux_val is the bits we
++ * set.
++ */
++ if (aux_val & aux_mask)
++ pr_alert("L2C: platform provided aux values permit register corruption.\n");
++
++ old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+ aux &= aux_mask;
+ aux |= aux_val;
+
++ if (old_aux != aux)
++ pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
++ old_aux, aux);
++
+ /* Determine the number of ways */
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L310:
++ if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
++ pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
+ if (aux & (1 << 16))
+ ways = 16;
+ else
+ ways = 8;
+- type = "L310";
+-#ifdef CONFIG_PL310_ERRATA_753970
+- /* Unmapped register. */
+- sync_reg_offset = L2X0_DUMMY_REG;
+-#endif
+- if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
+- outer_cache.set_debug = pl310_set_debug;
+ break;
++
+ case L2X0_CACHE_ID_PART_L210:
++ case L2X0_CACHE_ID_PART_L220:
+ ways = (aux >> 13) & 0xf;
+- type = "L210";
+ break;
+
+ case AURORA_CACHE_ID:
+- sync_reg_offset = AURORA_SYNC_REG;
+ ways = (aux >> 13) & 0xf;
+ ways = 2 << ((ways + 1) >> 2);
+- way_size_shift = AURORA_WAY_SIZE_SHIFT;
+- type = "Aurora";
+ break;
++
+ default:
+ /* Assume unknown chips have 8 ways */
+ ways = 8;
+- type = "L2x0 series";
+ break;
+ }
+
+ l2x0_way_mask = (1 << ways) - 1;
+
+ /*
+- * L2 cache Size = Way size * Number of ways
++ * way_size_0 is the size that a way_size value of zero would be
++ * given the calculation: way_size = way_size_0 << way_size_bits.
++ * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
++ * then way_size_0 would be 8k.
++ *
++ * L2 cache size = number of ways * way size.
++ */
++ way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
++ L2C_AUX_CTRL_WAY_SIZE_SHIFT;
++ l2x0_size = ways * (data->way_size_0 << way_size_bits);
++
++ fns = data->outer_cache;
++ fns.write_sec = outer_cache.write_sec;
++ if (data->fixup)
++ data->fixup(l2x0_base, cache_id, &fns);
++
++ /*
++ * Check if l2x0 controller is already enabled. If we are booting
++ * in non-secure mode accessing the below registers will fault.
+ */
+- way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+- way_size = 1 << (way_size + way_size_shift);
++ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
++ data->enable(l2x0_base, aux, data->num_lock);
+
+- l2x0_size = ways * way_size * SZ_1K;
++ outer_cache = fns;
+
+ /*
+- * Check if l2x0 controller is already enabled.
+- * If you are booting from non-secure mode
+- * accessing the below registers will fault.
++ * It is strange to save the register state before initialisation,
++ * but hey, this is what the DT implementations decided to do.
+ */
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- /* Make sure that I&D is not locked down when starting */
+- l2x0_unlock(cache_id);
++ if (data->save)
++ data->save(l2x0_base);
++
++ /* Re-read it in case some bits are reserved. */
++ aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++
++ pr_info("%s cache controller enabled, %d ways, %d kB\n",
++ data->type, ways, l2x0_size >> 10);
++ pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
++ data->type, cache_id, aux);
++}
+
+- /* l2x0 controller is disabled */
+- writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
++void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
++{
++ const struct l2c_init_data *data;
++ u32 cache_id;
+
+- l2x0_inv_all();
++ l2x0_base = base;
+
+- /* enable L2X0 */
+- writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
++ cache_id = readl_relaxed(base + L2X0_CACHE_ID);
++
++ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
++ default:
++ case L2X0_CACHE_ID_PART_L210:
++ data = &l2c210_data;
++ break;
++
++ case L2X0_CACHE_ID_PART_L220:
++ data = &l2c220_data;
++ break;
++
++ case L2X0_CACHE_ID_PART_L310:
++ data = &l2c310_init_fns;
++ break;
+ }
+
+- /* Re-read it in case some bits are reserved. */
+- aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++ __l2c_init(data, aux_val, aux_mask, cache_id);
++}
++
++#ifdef CONFIG_OF
++static int l2_wt_override;
++
++/* Aurora doesn't have the cache ID register available, so we have to
++ * pass it through the device tree */
++static u32 cache_id_part_number_from_dt;
++
++static void __init l2x0_of_parse(const struct device_node *np,
++ u32 *aux_val, u32 *aux_mask)
++{
++ u32 data[2] = { 0, 0 };
++ u32 tag = 0;
++ u32 dirty = 0;
++ u32 val = 0, mask = 0;
++
++ of_property_read_u32(np, "arm,tag-latency", &tag);
++ if (tag) {
++ mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
++ val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
++ }
++
++ of_property_read_u32_array(np, "arm,data-latency",
++ data, ARRAY_SIZE(data));
++ if (data[0] && data[1]) {
++ mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
++ L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
++ val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
++ ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
++ }
++
++ of_property_read_u32(np, "arm,dirty-latency", &dirty);
++ if (dirty) {
++ mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
++ val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
++ }
+
+- /* Save the value for resuming. */
+- l2x0_saved_regs.aux_ctrl = aux;
++ *aux_val &= ~mask;
++ *aux_val |= val;
++ *aux_mask &= ~mask;
++}
++
++static const struct l2c_init_data of_l2c210_data __initconst = {
++ .type = "L2C-210",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .of_parse = l2x0_of_parse,
++ .enable = l2c_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c210_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++static const struct l2c_init_data of_l2c220_data __initconst = {
++ .type = "L2C-220",
++ .way_size_0 = SZ_8K,
++ .num_lock = 1,
++ .of_parse = l2x0_of_parse,
++ .enable = l2c220_enable,
++ .save = l2c_save,
++ .outer_cache = {
++ .inv_range = l2c220_inv_range,
++ .clean_range = l2c220_clean_range,
++ .flush_range = l2c220_flush_range,
++ .flush_all = l2c220_flush_all,
++ .disable = l2c_disable,
++ .sync = l2c220_sync,
++ .resume = l2c210_resume,
++ },
++};
++
++static void __init l2c310_of_parse(const struct device_node *np,
++ u32 *aux_val, u32 *aux_mask)
++{
++ u32 data[3] = { 0, 0, 0 };
++ u32 tag[3] = { 0, 0, 0 };
++ u32 filter[2] = { 0, 0 };
++
++ of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
++ if (tag[0] && tag[1] && tag[2])
++ writel_relaxed(
++ L310_LATENCY_CTRL_RD(tag[0] - 1) |
++ L310_LATENCY_CTRL_WR(tag[1] - 1) |
++ L310_LATENCY_CTRL_SETUP(tag[2] - 1),
++ l2x0_base + L310_TAG_LATENCY_CTRL);
++
++ of_property_read_u32_array(np, "arm,data-latency",
++ data, ARRAY_SIZE(data));
++ if (data[0] && data[1] && data[2])
++ writel_relaxed(
++ L310_LATENCY_CTRL_RD(data[0] - 1) |
++ L310_LATENCY_CTRL_WR(data[1] - 1) |
++ L310_LATENCY_CTRL_SETUP(data[2] - 1),
++ l2x0_base + L310_DATA_LATENCY_CTRL);
+
+- if (!of_init) {
+- outer_cache.inv_range = l2x0_inv_range;
+- outer_cache.clean_range = l2x0_clean_range;
+- outer_cache.flush_range = l2x0_flush_range;
+- outer_cache.sync = l2x0_cache_sync;
+- outer_cache.flush_all = l2x0_flush_all;
+- outer_cache.inv_all = l2x0_inv_all;
+- outer_cache.disable = l2x0_disable;
+- }
+-
+- pr_info("%s cache controller enabled\n", type);
+- pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
+- ways, cache_id, aux, l2x0_size >> 10);
++ of_property_read_u32_array(np, "arm,filter-ranges",
++ filter, ARRAY_SIZE(filter));
++ if (filter[1]) {
++ writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
++ l2x0_base + L310_ADDR_FILTER_END);
++ writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
++ l2x0_base + L310_ADDR_FILTER_START);
++ }
+ }
+
+-#ifdef CONFIG_OF
+-static int l2_wt_override;
++static const struct l2c_init_data of_l2c310_data __initconst = {
++ .type = "L2C-310",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .of_parse = l2c310_of_parse,
++ .enable = l2c310_enable,
++ .fixup = l2c310_fixup,
++ .save = l2c310_save,
++ .outer_cache = {
++ .inv_range = l2c210_inv_range,
++ .clean_range = l2c210_clean_range,
++ .flush_range = l2c210_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c310_disable,
++ .sync = l2c210_sync,
++ .resume = l2c310_resume,
++ },
++};
+
+ /*
+ * Note that the end addresses passed to Linux primitives are
+@@ -524,6 +1166,100 @@
+ }
+ }
+
++static void aurora_save(void __iomem *base)
++{
++ l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
++ l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
++}
++
++static void aurora_resume(void)
++{
++ void __iomem *base = l2x0_base;
++
++ if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
++ writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
++ writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
++ }
++}
++
++/*
++ * For Aurora cache in no outer mode, enable broadcasting of cache
++ * commands to L2 via the CP15 coprocessor.
++ */
++static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
++ unsigned num_lock)
++{
++ u32 u;
++
++ asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
++ u |= AURORA_CTRL_FW; /* Set the FW bit */
++ asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
++
++ isb();
++
++ l2c_enable(base, aux, num_lock);
++}
++
++static void __init aurora_fixup(void __iomem *base, u32 cache_id,
++ struct outer_cache_fns *fns)
++{
++ sync_reg_offset = AURORA_SYNC_REG;
++}
++
++static void __init aurora_of_parse(const struct device_node *np,
++ u32 *aux_val, u32 *aux_mask)
++{
++ u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
++ u32 mask = AURORA_ACR_REPLACEMENT_MASK;
++
++ of_property_read_u32(np, "cache-id-part",
++ &cache_id_part_number_from_dt);
++
++ /* Determine and save the write policy */
++ l2_wt_override = of_property_read_bool(np, "wt-override");
++
++ if (l2_wt_override) {
++ val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
++ mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
++ }
++
++ *aux_val &= ~mask;
++ *aux_val |= val;
++ *aux_mask &= ~mask;
++}
++
++static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
++ .type = "Aurora",
++ .way_size_0 = SZ_4K,
++ .num_lock = 4,
++ .of_parse = aurora_of_parse,
++ .enable = l2c_enable,
++ .fixup = aurora_fixup,
++ .save = aurora_save,
++ .outer_cache = {
++ .inv_range = aurora_inv_range,
++ .clean_range = aurora_clean_range,
++ .flush_range = aurora_flush_range,
++ .flush_all = l2x0_flush_all,
++ .disable = l2x0_disable,
++ .sync = l2x0_cache_sync,
++ .resume = aurora_resume,
++ },
++};
++
++static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
++ .type = "Aurora",
++ .way_size_0 = SZ_4K,
++ .num_lock = 4,
++ .of_parse = aurora_of_parse,
++ .enable = aurora_enable_no_outer,
++ .fixup = aurora_fixup,
++ .save = aurora_save,
++ .outer_cache = {
++ .resume = aurora_resume,
++ },
++};
++
+ /*
+ * For certain Broadcom SoCs, depending on the address range, different offsets
+ * need to be added to the address before passing it to L2 for
+@@ -588,16 +1324,16 @@
+
+ /* normal case, no cross section between start and end */
+ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+- l2x0_inv_range(new_start, new_end);
++ l2c210_inv_range(new_start, new_end);
+ return;
+ }
+
+ /* They cross sections, so it can only be a cross from section
+ * 2 to section 3
+ */
+- l2x0_inv_range(new_start,
++ l2c210_inv_range(new_start,
+ bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+- l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
++ l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+ new_end);
+ }
+
+@@ -610,26 +1346,21 @@
+ if (unlikely(end <= start))
+ return;
+
+- if ((end - start) >= l2x0_size) {
+- l2x0_clean_all();
+- return;
+- }
+-
+ new_start = bcm_l2_phys_addr(start);
+ new_end = bcm_l2_phys_addr(end);
+
+ /* normal case, no cross section between start and end */
+ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+- l2x0_clean_range(new_start, new_end);
++ l2c210_clean_range(new_start, new_end);
+ return;
+ }
+
+ /* They cross sections, so it can only be a cross from section
+ * 2 to section 3
+ */
+- l2x0_clean_range(new_start,
++ l2c210_clean_range(new_start,
+ bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+- l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
++ l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+ new_end);
+ }
+
+@@ -643,7 +1374,7 @@
+ return;
+
+ if ((end - start) >= l2x0_size) {
+- l2x0_flush_all();
++ outer_cache.flush_all();
+ return;
+ }
+
+@@ -652,283 +1383,67 @@
+
+ /* normal case, no cross section between start and end */
+ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+- l2x0_flush_range(new_start, new_end);
++ l2c210_flush_range(new_start, new_end);
+ return;
+ }
+
+ /* They cross sections, so it can only be a cross from section
+ * 2 to section 3
+ */
+- l2x0_flush_range(new_start,
++ l2c210_flush_range(new_start,
+ bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+- l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
++ l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+ new_end);
+ }
+
+-static void __init l2x0_of_setup(const struct device_node *np,
+- u32 *aux_val, u32 *aux_mask)
+-{
+- u32 data[2] = { 0, 0 };
+- u32 tag = 0;
+- u32 dirty = 0;
+- u32 val = 0, mask = 0;
+-
+- of_property_read_u32(np, "arm,tag-latency", &tag);
+- if (tag) {
+- mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+- val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+- }
+-
+- of_property_read_u32_array(np, "arm,data-latency",
+- data, ARRAY_SIZE(data));
+- if (data[0] && data[1]) {
+- mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+- L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+- val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+- ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+- }
+-
+- of_property_read_u32(np, "arm,dirty-latency", &dirty);
+- if (dirty) {
+- mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+- val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+- }
+-
+- *aux_val &= ~mask;
+- *aux_val |= val;
+- *aux_mask &= ~mask;
+-}
+-
+-static void __init pl310_of_setup(const struct device_node *np,
+- u32 *aux_val, u32 *aux_mask)
+-{
+- u32 data[3] = { 0, 0, 0 };
+- u32 tag[3] = { 0, 0, 0 };
+- u32 filter[2] = { 0, 0 };
+-
+- of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+- if (tag[0] && tag[1] && tag[2])
+- writel_relaxed(
+- ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+- ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+- ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+- l2x0_base + L2X0_TAG_LATENCY_CTRL);
+-
+- of_property_read_u32_array(np, "arm,data-latency",
+- data, ARRAY_SIZE(data));
+- if (data[0] && data[1] && data[2])
+- writel_relaxed(
+- ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+- ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+- ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+- l2x0_base + L2X0_DATA_LATENCY_CTRL);
+-
+- of_property_read_u32_array(np, "arm,filter-ranges",
+- filter, ARRAY_SIZE(filter));
+- if (filter[1]) {
+- writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+- l2x0_base + L2X0_ADDR_FILTER_END);
+- writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+- l2x0_base + L2X0_ADDR_FILTER_START);
+- }
+-}
+-
+-static void __init pl310_save(void)
+-{
+- u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+- L2X0_CACHE_ID_RTL_MASK;
+-
+- l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+- L2X0_TAG_LATENCY_CTRL);
+- l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+- L2X0_DATA_LATENCY_CTRL);
+- l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+- L2X0_ADDR_FILTER_END);
+- l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+- L2X0_ADDR_FILTER_START);
+-
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+- /*
+- * From r2p0, there is Prefetch offset/control register
+- */
+- l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+- L2X0_PREFETCH_CTRL);
+- /*
+- * From r3p0, there is Power control register
+- */
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+- l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+- L2X0_POWER_CTRL);
+- }
+-}
++/* Broadcom's L2C-310 is based on ARM's R3P2 or later, and requires no fixups */
++static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
++ .type = "BCM-L2C-310",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .of_parse = l2c310_of_parse,
++ .enable = l2c310_enable,
++ .save = l2c310_save,
++ .outer_cache = {
++ .inv_range = bcm_inv_range,
++ .clean_range = bcm_clean_range,
++ .flush_range = bcm_flush_range,
++ .flush_all = l2c210_flush_all,
++ .disable = l2c310_disable,
++ .sync = l2c210_sync,
++ .resume = l2c310_resume,
++ },
++};
+
+-static void aurora_save(void)
++static void __init tauros3_save(void __iomem *base)
+ {
+- l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
+- l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+-}
++ l2c_save(base);
+
+-static void __init tauros3_save(void)
+-{
+ l2x0_saved_regs.aux2_ctrl =
+- readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
++ readl_relaxed(base + TAUROS3_AUX2_CTRL);
+ l2x0_saved_regs.prefetch_ctrl =
+- readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
+-}
+-
+-static void l2x0_resume(void)
+-{
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- /* restore aux ctrl and enable l2 */
+- l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+-
+- writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+- L2X0_AUX_CTRL);
+-
+- l2x0_inv_all();
+-
+- writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
+- }
+-}
+-
+-static void pl310_resume(void)
+-{
+- u32 l2x0_revision;
+-
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- /* restore pl310 setup */
+- writel_relaxed(l2x0_saved_regs.tag_latency,
+- l2x0_base + L2X0_TAG_LATENCY_CTRL);
+- writel_relaxed(l2x0_saved_regs.data_latency,
+- l2x0_base + L2X0_DATA_LATENCY_CTRL);
+- writel_relaxed(l2x0_saved_regs.filter_end,
+- l2x0_base + L2X0_ADDR_FILTER_END);
+- writel_relaxed(l2x0_saved_regs.filter_start,
+- l2x0_base + L2X0_ADDR_FILTER_START);
+-
+- l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+- L2X0_CACHE_ID_RTL_MASK;
+-
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+- writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+- l2x0_base + L2X0_PREFETCH_CTRL);
+- if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+- writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+- l2x0_base + L2X0_POWER_CTRL);
+- }
+- }
+-
+- l2x0_resume();
+-}
+-
+-static void aurora_resume(void)
+-{
+- if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- writel_relaxed(l2x0_saved_regs.aux_ctrl,
+- l2x0_base + L2X0_AUX_CTRL);
+- writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
+- }
++ readl_relaxed(base + L310_PREFETCH_CTRL);
+ }
+
+ static void tauros3_resume(void)
+ {
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
++ void __iomem *base = l2x0_base;
++
++ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+ writel_relaxed(l2x0_saved_regs.aux2_ctrl,
+- l2x0_base + TAUROS3_AUX2_CTRL);
++ base + TAUROS3_AUX2_CTRL);
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+- l2x0_base + L2X0_PREFETCH_CTRL);
+- }
+-
+- l2x0_resume();
+-}
+-
+-static void __init aurora_broadcast_l2_commands(void)
+-{
+- __u32 u;
+- /* Enable Broadcasting of cache commands to L2*/
+- __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
+- u |= AURORA_CTRL_FW; /* Set the FW bit */
+- __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
+- isb();
+-}
+-
+-static void __init aurora_of_setup(const struct device_node *np,
+- u32 *aux_val, u32 *aux_mask)
+-{
+- u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
+- u32 mask = AURORA_ACR_REPLACEMENT_MASK;
++ base + L310_PREFETCH_CTRL);
+
+- of_property_read_u32(np, "cache-id-part",
+- &cache_id_part_number_from_dt);
+-
+- /* Determine and save the write policy */
+- l2_wt_override = of_property_read_bool(np, "wt-override");
+-
+- if (l2_wt_override) {
+- val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
+- mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
++ l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
+ }
+-
+- *aux_val &= ~mask;
+- *aux_val |= val;
+- *aux_mask &= ~mask;
+ }
+
+-static const struct l2x0_of_data pl310_data = {
+- .setup = pl310_of_setup,
+- .save = pl310_save,
+- .outer_cache = {
+- .resume = pl310_resume,
+- .inv_range = l2x0_inv_range,
+- .clean_range = l2x0_clean_range,
+- .flush_range = l2x0_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
+-static const struct l2x0_of_data l2x0_data = {
+- .setup = l2x0_of_setup,
+- .save = NULL,
+- .outer_cache = {
+- .resume = l2x0_resume,
+- .inv_range = l2x0_inv_range,
+- .clean_range = l2x0_clean_range,
+- .flush_range = l2x0_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
+-static const struct l2x0_of_data aurora_with_outer_data = {
+- .setup = aurora_of_setup,
+- .save = aurora_save,
+- .outer_cache = {
+- .resume = aurora_resume,
+- .inv_range = aurora_inv_range,
+- .clean_range = aurora_clean_range,
+- .flush_range = aurora_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
+-static const struct l2x0_of_data aurora_no_outer_data = {
+- .setup = aurora_of_setup,
+- .save = aurora_save,
+- .outer_cache = {
+- .resume = aurora_resume,
+- },
+-};
+-
+-static const struct l2x0_of_data tauros3_data = {
+- .setup = NULL,
++static const struct l2c_init_data of_tauros3_data __initconst = {
++ .type = "Tauros3",
++ .way_size_0 = SZ_8K,
++ .num_lock = 8,
++ .enable = l2c_enable,
+ .save = tauros3_save,
+ /* Tauros3 broadcasts L1 cache operations to L2 */
+ .outer_cache = {
+@@ -936,43 +1451,26 @@
+ },
+ };
+
+-static const struct l2x0_of_data bcm_l2x0_data = {
+- .setup = pl310_of_setup,
+- .save = pl310_save,
+- .outer_cache = {
+- .resume = pl310_resume,
+- .inv_range = bcm_inv_range,
+- .clean_range = bcm_clean_range,
+- .flush_range = bcm_flush_range,
+- .sync = l2x0_cache_sync,
+- .flush_all = l2x0_flush_all,
+- .inv_all = l2x0_inv_all,
+- .disable = l2x0_disable,
+- },
+-};
+-
++#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
+ static const struct of_device_id l2x0_ids[] __initconst = {
+- { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+- { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+- { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+- { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
+- .data = (void *)&bcm_l2x0_data},
+- { .compatible = "brcm,bcm11351-a2-pl310-cache",
+- .data = (void *)&bcm_l2x0_data},
+- { .compatible = "marvell,aurora-outer-cache",
+- .data = (void *)&aurora_with_outer_data},
+- { .compatible = "marvell,aurora-system-cache",
+- .data = (void *)&aurora_no_outer_data},
+- { .compatible = "marvell,tauros3-cache",
+- .data = (void *)&tauros3_data },
++ L2C_ID("arm,l210-cache", of_l2c210_data),
++ L2C_ID("arm,l220-cache", of_l2c220_data),
++ L2C_ID("arm,pl310-cache", of_l2c310_data),
++ L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
++ L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
++ L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
++ L2C_ID("marvell,tauros3-cache", of_tauros3_data),
++ /* Deprecated IDs */
++ L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
+ {}
+ };
+
+ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
+ {
++ const struct l2c_init_data *data;
+ struct device_node *np;
+- const struct l2x0_of_data *data;
+ struct resource res;
++ u32 cache_id, old_aux;
+
+ np = of_find_matching_node(NULL, l2x0_ids);
+ if (!np)
+@@ -989,23 +1487,29 @@
+
+ data = of_match_node(l2x0_ids, np)->data;
+
+- /* L2 configuration can only be changed if the cache is disabled */
+- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+- if (data->setup)
+- data->setup(np, &aux_val, &aux_mask);
+-
+- /* For aurora cache in no outer mode select the
+- * correct mode using the coprocessor*/
+- if (data == &aurora_no_outer_data)
+- aurora_broadcast_l2_commands();
++ old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
++ if (old_aux != ((old_aux & aux_mask) | aux_val)) {
++ pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
++ old_aux, (old_aux & aux_mask) | aux_val);
++ } else if (aux_mask != ~0U && aux_val != 0) {
++ pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
+ }
+
+- if (data->save)
+- data->save();
++ /* All L2 caches are unified, so this property should be specified */
++ if (!of_property_read_bool(np, "cache-unified"))
++ pr_err("L2C: device tree omits to specify unified cache\n");
++
++ /* L2 configuration can only be changed if the cache is disabled */
++ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
++ if (data->of_parse)
++ data->of_parse(np, &aux_val, &aux_mask);
++
++ if (cache_id_part_number_from_dt)
++ cache_id = cache_id_part_number_from_dt;
++ else
++ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+
+- of_init = true;
+- memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+- l2x0_init(l2x0_base, aux_val, aux_mask);
++ __l2c_init(data, aux_val, aux_mask, cache_id);
+
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/arch/arm/mm/dma-mapping.c linux-3.14.40/arch/arm/mm/dma-mapping.c
+--- linux-3.14.40.orig/arch/arm/mm/dma-mapping.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/dma-mapping.c 2015-05-01 14:57:58.051427001 -0500
+@@ -26,6 +26,7 @@
+ #include <linux/io.h>
+ #include <linux/vmalloc.h>
+ #include <linux/sizes.h>
++#include <linux/cma.h>
+
+ #include <asm/memory.h>
+ #include <asm/highmem.h>
+diff -Nur linux-3.14.40.orig/arch/arm/mm/fault.c linux-3.14.40/arch/arm/mm/fault.c
+--- linux-3.14.40.orig/arch/arm/mm/fault.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/fault.c 2015-05-01 14:57:58.087427001 -0500
+@@ -449,8 +449,16 @@
+
+ if (pud_none(*pud_k))
+ goto bad_area;
+- if (!pud_present(*pud))
++ if (!pud_present(*pud)) {
+ set_pud(pud, *pud_k);
++ /*
++ * There is a small window during free_pgtables() where the
++ * user *pud entry is 0 but the TLB has not been invalidated
++ * and we get a level 2 (pmd) translation fault caused by the
++ * intermediate TLB caching of the old level 1 (pud) entry.
++ */
++ flush_tlb_kernel_page(addr);
++ }
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+@@ -473,8 +481,9 @@
+ #endif
+ if (pmd_none(pmd_k[index]))
+ goto bad_area;
++ if (!pmd_present(pmd[index]))
++ copy_pmd(pmd, pmd_k);
+
+- copy_pmd(pmd, pmd_k);
+ return 0;
+
+ bad_area:
+diff -Nur linux-3.14.40.orig/arch/arm/mm/init.c linux-3.14.40/arch/arm/mm/init.c
+--- linux-3.14.40.orig/arch/arm/mm/init.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/init.c 2015-05-01 14:57:58.091427001 -0500
+@@ -327,7 +327,7 @@
+ * reserve memory for DMA contigouos allocations,
+ * must come from DMA area inside low memory
+ */
+- dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
++ dma_contiguous_reserve(arm_dma_limit);
+
+ arm_memblock_steal_permitted = false;
+ memblock_dump_all();
+diff -Nur linux-3.14.40.orig/arch/arm/mm/Kconfig linux-3.14.40/arch/arm/mm/Kconfig
+--- linux-3.14.40.orig/arch/arm/mm/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/Kconfig 2015-05-01 14:57:58.095427001 -0500
+@@ -898,6 +898,57 @@
+ This option enables optimisations for the PL310 cache
+ controller.
+
++config PL310_ERRATA_588369
++ bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
++ depends on CACHE_L2X0
++ help
++ The PL310 L2 cache controller implements three types of Clean &
++ Invalidate maintenance operations: by Physical Address
++ (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
++ They are architecturally defined to behave as the execution of a
++ clean operation followed immediately by an invalidate operation,
++	  both performed on the same memory location. This functionality
++ is not correctly implemented in PL310 as clean lines are not
++ invalidated as a result of these operations.
++
++config PL310_ERRATA_727915
++ bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
++ depends on CACHE_L2X0
++ help
++ PL310 implements the Clean & Invalidate by Way L2 cache maintenance
++ operation (offset 0x7FC). This operation runs in background so that
++ PL310 can handle normal accesses while it is in progress. Under very
++ rare circumstances, due to this erratum, write data can be lost when
++ PL310 treats a cacheable write transaction during a Clean &
++ Invalidate by Way operation.
++
++config PL310_ERRATA_753970
++ bool "PL310 errata: cache sync operation may be faulty"
++ depends on CACHE_PL310
++ help
++ This option enables the workaround for the 753970 PL310 (r3p0) erratum.
++
++ Under some condition the effect of cache sync operation on
++ the store buffer still remains when the operation completes.
++ This means that the store buffer is always asked to drain and
++ this prevents it from merging any further writes. The workaround
++ is to replace the normal offset of cache sync operation (0x730)
++ by another offset targeting an unmapped PL310 register 0x740.
++ This has the same effect as the cache sync operation: store buffer
++ drain and waiting for all buffers empty.
++
++config PL310_ERRATA_769419
++ bool "PL310 errata: no automatic Store Buffer drain"
++ depends on CACHE_L2X0
++ help
++ On revisions of the PL310 prior to r3p2, the Store Buffer does
++ not automatically drain. This can cause normal, non-cacheable
++ writes to be retained when the memory system is idle, leading
++ to suboptimal I/O performance for drivers using coherent DMA.
++ This option adds a write barrier to the cpu_idle loop so that,
++ on systems with an outer cache, the store buffer is drained
++ explicitly.
++
+ config CACHE_TAUROS2
+ bool "Enable the Tauros2 L2 cache controller"
+ depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
+diff -Nur linux-3.14.40.orig/arch/arm/mm/l2c-common.c linux-3.14.40/arch/arm/mm/l2c-common.c
+--- linux-3.14.40.orig/arch/arm/mm/l2c-common.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mm/l2c-common.c 2015-05-01 14:57:58.095427001 -0500
+@@ -0,0 +1,20 @@
++/*
++ * Copyright (C) 2010 ARM Ltd.
++ * Written by Catalin Marinas <catalin.marinas@arm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <linux/bug.h>
++#include <linux/smp.h>
++#include <asm/outercache.h>
++
++void outer_disable(void)
++{
++ WARN_ON(!irqs_disabled());
++ WARN_ON(num_online_cpus() > 1);
++
++ if (outer_cache.disable)
++ outer_cache.disable();
++}
+diff -Nur linux-3.14.40.orig/arch/arm/mm/l2c-l2x0-resume.S linux-3.14.40/arch/arm/mm/l2c-l2x0-resume.S
+--- linux-3.14.40.orig/arch/arm/mm/l2c-l2x0-resume.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm/mm/l2c-l2x0-resume.S 2015-05-01 14:57:58.095427001 -0500
+@@ -0,0 +1,58 @@
++/*
++ * L2C-310 early resume code. This can be used by platforms to restore
++ * the settings of their L2 cache controller before restoring the
++ * processor state.
++ *
++ * This code can only be used if you are running in the secure world.
++ */
++#include <linux/linkage.h>
++#include <asm/hardware/cache-l2x0.h>
++
++ .text
++
++ENTRY(l2c310_early_resume)
++ adr r0, 1f
++ ldr r2, [r0]
++ add r0, r2, r0
++
++ ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
++ @ r1 = phys address of L2C-310 controller
++ @ r2 = aux_ctrl
++ @ r3 = tag_latency
++ @ r4 = data_latency
++ @ r5 = filter_start
++ @ r6 = filter_end
++ @ r7 = prefetch_ctrl
++ @ r8 = pwr_ctrl
++
++ @ Check that the address has been initialised
++ teq r1, #0
++ moveq pc, lr
++
++ @ The prefetch and power control registers are revision dependent
++ @ and can be written whether or not the L2 cache is enabled
++ ldr r0, [r1, #L2X0_CACHE_ID]
++ and r0, r0, #L2X0_CACHE_ID_RTL_MASK
++ cmp r0, #L310_CACHE_ID_RTL_R2P0
++ strcs r7, [r1, #L310_PREFETCH_CTRL]
++ cmp r0, #L310_CACHE_ID_RTL_R3P0
++ strcs r8, [r1, #L310_POWER_CTRL]
++
++	@ Don't set up the L2 cache if it is already enabled
++ ldr r0, [r1, #L2X0_CTRL]
++ tst r0, #L2X0_CTRL_EN
++ movne pc, lr
++
++ str r3, [r1, #L310_TAG_LATENCY_CTRL]
++ str r4, [r1, #L310_DATA_LATENCY_CTRL]
++ str r6, [r1, #L310_ADDR_FILTER_END]
++ str r5, [r1, #L310_ADDR_FILTER_START]
++
++ str r2, [r1, #L2X0_AUX_CTRL]
++ mov r9, #L2X0_CTRL_EN
++ str r9, [r1, #L2X0_CTRL]
++ mov pc, lr
++ENDPROC(l2c310_early_resume)
++
++ .align
++1: .long l2x0_saved_regs - .
+diff -Nur linux-3.14.40.orig/arch/arm/mm/Makefile linux-3.14.40/arch/arm/mm/Makefile
+--- linux-3.14.40.orig/arch/arm/mm/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/Makefile 2015-05-01 14:57:58.111427001 -0500
+@@ -95,7 +95,8 @@
+ AFLAGS_proc-v6.o :=-Wa,-march=armv6
+ AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
+
++obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
+ obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
+-obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
++obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
+ obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
+ obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
+diff -Nur linux-3.14.40.orig/arch/arm/mm/proc-v7.S linux-3.14.40/arch/arm/mm/proc-v7.S
+--- linux-3.14.40.orig/arch/arm/mm/proc-v7.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm/mm/proc-v7.S 2015-05-01 14:57:58.119427001 -0500
+@@ -334,6 +334,17 @@
+ mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
+ 1:
+ #endif
++#ifdef CONFIG_ARM_ERRATA_794072
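++	@ work around erratum 794072 (a short loop containing a DMB can deny
++	@ service to another CPU) by setting bit 4 of the diagnostic register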
++ mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
++ orr r10, r10, #1 << 4 @ set bit #4
++ mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
++#endif
++#ifdef CONFIG_ARM_ERRATA_761320
++ cmp r6, #0x40 @ present prior to r4p0
++ mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
++ orrlt r10, r10, #1 << 21 @ set bit #21
++ mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
++#endif
+
+ /* Cortex-A15 Errata */
+ 3: ldr r10, =0x00000c0f @ Cortex-A15 primary part number
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/apm-mustang.dts linux-3.14.40/arch/arm64/boot/dts/apm-mustang.dts
+--- linux-3.14.40.orig/arch/arm64/boot/dts/apm-mustang.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/boot/dts/apm-mustang.dts 2015-05-01 14:57:58.127427001 -0500
+@@ -24,3 +24,7 @@
+ reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
+ };
+ };
++
++&serial0 {
++ status = "ok";
++};
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/apm-storm.dtsi linux-3.14.40/arch/arm64/boot/dts/apm-storm.dtsi
+--- linux-3.14.40.orig/arch/arm64/boot/dts/apm-storm.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/boot/dts/apm-storm.dtsi 2015-05-01 14:57:58.127427001 -0500
+@@ -176,16 +176,226 @@
+ reg-names = "csr-reg";
+ clock-output-names = "eth8clk";
+ };
++
++ sataphy1clk: sataphy1clk@1f21c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f21c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sataphy1clk";
++ status = "disabled";
++ csr-offset = <0x4>;
++ csr-mask = <0x00>;
++ enable-offset = <0x0>;
++ enable-mask = <0x06>;
++ };
++
++			sataphy2clk: sataphy2clk@1f22c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f22c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sataphy2clk";
++ status = "ok";
++ csr-offset = <0x4>;
++ csr-mask = <0x3a>;
++ enable-offset = <0x0>;
++ enable-mask = <0x06>;
++ };
++
++			sataphy3clk: sataphy3clk@1f23c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f23c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sataphy3clk";
++ status = "ok";
++ csr-offset = <0x4>;
++ csr-mask = <0x3a>;
++ enable-offset = <0x0>;
++ enable-mask = <0x06>;
++ };
++
++ sata01clk: sata01clk@1f21c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f21c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sata01clk";
++ csr-offset = <0x4>;
++ csr-mask = <0x05>;
++ enable-offset = <0x0>;
++ enable-mask = <0x39>;
++ };
++
++ sata23clk: sata23clk@1f22c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f22c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sata23clk";
++ csr-offset = <0x4>;
++ csr-mask = <0x05>;
++ enable-offset = <0x0>;
++ enable-mask = <0x39>;
++ };
++
++ sata45clk: sata45clk@1f23c000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x1f23c000 0x0 0x1000>;
++ reg-names = "csr-reg";
++ clock-output-names = "sata45clk";
++ csr-offset = <0x4>;
++ csr-mask = <0x05>;
++ enable-offset = <0x0>;
++ enable-mask = <0x39>;
++ };
++
++ rtcclk: rtcclk@17000000 {
++ compatible = "apm,xgene-device-clock";
++ #clock-cells = <1>;
++ clocks = <&socplldiv2 0>;
++ reg = <0x0 0x17000000 0x0 0x2000>;
++ reg-names = "csr-reg";
++ csr-offset = <0xc>;
++ csr-mask = <0x2>;
++ enable-offset = <0x10>;
++ enable-mask = <0x2>;
++ clock-output-names = "rtcclk";
++ };
+ };
+
+ serial0: serial@1c020000 {
++ status = "disabled";
+ device_type = "serial";
+- compatible = "ns16550";
++ compatible = "ns16550a";
+ reg = <0 0x1c020000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4c 0x4>;
+ };
++
++ serial1: serial@1c021000 {
++ status = "disabled";
++ device_type = "serial";
++ compatible = "ns16550a";
++ reg = <0 0x1c021000 0x0 0x1000>;
++ reg-shift = <2>;
++ clock-frequency = <10000000>; /* Updated by bootloader */
++ interrupt-parent = <&gic>;
++ interrupts = <0x0 0x4d 0x4>;
++ };
++
++ serial2: serial@1c022000 {
++ status = "disabled";
++ device_type = "serial";
++ compatible = "ns16550a";
++ reg = <0 0x1c022000 0x0 0x1000>;
++ reg-shift = <2>;
++ clock-frequency = <10000000>; /* Updated by bootloader */
++ interrupt-parent = <&gic>;
++ interrupts = <0x0 0x4e 0x4>;
++ };
++
++ serial3: serial@1c023000 {
++ status = "disabled";
++ device_type = "serial";
++ compatible = "ns16550a";
++ reg = <0 0x1c023000 0x0 0x1000>;
++ reg-shift = <2>;
++ clock-frequency = <10000000>; /* Updated by bootloader */
++ interrupt-parent = <&gic>;
++ interrupts = <0x0 0x4f 0x4>;
++ };
++
++ phy1: phy@1f21a000 {
++ compatible = "apm,xgene-phy";
++ reg = <0x0 0x1f21a000 0x0 0x100>;
++ #phy-cells = <1>;
++ clocks = <&sataphy1clk 0>;
++ status = "disabled";
++ apm,tx-boost-gain = <30 30 30 30 30 30>;
++ apm,tx-eye-tuning = <2 10 10 2 10 10>;
++ };
++
++ phy2: phy@1f22a000 {
++ compatible = "apm,xgene-phy";
++ reg = <0x0 0x1f22a000 0x0 0x100>;
++ #phy-cells = <1>;
++ clocks = <&sataphy2clk 0>;
++ status = "ok";
++ apm,tx-boost-gain = <30 30 30 30 30 30>;
++ apm,tx-eye-tuning = <1 10 10 2 10 10>;
++ };
++
++ phy3: phy@1f23a000 {
++ compatible = "apm,xgene-phy";
++ reg = <0x0 0x1f23a000 0x0 0x100>;
++ #phy-cells = <1>;
++ clocks = <&sataphy3clk 0>;
++ status = "ok";
++ apm,tx-boost-gain = <31 31 31 31 31 31>;
++ apm,tx-eye-tuning = <2 10 10 2 10 10>;
++ };
++
++ sata1: sata@1a000000 {
++ compatible = "apm,xgene-ahci";
++ reg = <0x0 0x1a000000 0x0 0x1000>,
++ <0x0 0x1f210000 0x0 0x1000>,
++ <0x0 0x1f21d000 0x0 0x1000>,
++ <0x0 0x1f21e000 0x0 0x1000>,
++ <0x0 0x1f217000 0x0 0x1000>;
++ interrupts = <0x0 0x86 0x4>;
++ dma-coherent;
++ status = "disabled";
++ clocks = <&sata01clk 0>;
++ phys = <&phy1 0>;
++ phy-names = "sata-phy";
++ };
++
++ sata2: sata@1a400000 {
++ compatible = "apm,xgene-ahci";
++ reg = <0x0 0x1a400000 0x0 0x1000>,
++ <0x0 0x1f220000 0x0 0x1000>,
++ <0x0 0x1f22d000 0x0 0x1000>,
++ <0x0 0x1f22e000 0x0 0x1000>,
++ <0x0 0x1f227000 0x0 0x1000>;
++ interrupts = <0x0 0x87 0x4>;
++ dma-coherent;
++ status = "ok";
++ clocks = <&sata23clk 0>;
++ phys = <&phy2 0>;
++ phy-names = "sata-phy";
++ };
++
++ sata3: sata@1a800000 {
++ compatible = "apm,xgene-ahci";
++ reg = <0x0 0x1a800000 0x0 0x1000>,
++ <0x0 0x1f230000 0x0 0x1000>,
++ <0x0 0x1f23d000 0x0 0x1000>,
++ <0x0 0x1f23e000 0x0 0x1000>;
++ interrupts = <0x0 0x88 0x4>;
++ dma-coherent;
++ status = "ok";
++ clocks = <&sata45clk 0>;
++ phys = <&phy3 0>;
++ phy-names = "sata-phy";
++ };
++
++ rtc: rtc@10510000 {
++ compatible = "apm,xgene-rtc";
++ reg = <0x0 0x10510000 0x0 0x400>;
++ interrupts = <0x0 0x46 0x4>;
++ #clock-cells = <1>;
++ clocks = <&rtcclk 0>;
++ };
+ };
+ };
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/clcd-panels.dtsi linux-3.14.40/arch/arm64/boot/dts/clcd-panels.dtsi
+--- linux-3.14.40.orig/arch/arm64/boot/dts/clcd-panels.dtsi 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/boot/dts/clcd-panels.dtsi 2015-05-01 14:57:58.127427001 -0500
+@@ -0,0 +1,52 @@
++/*
++ * ARM Ltd. Versatile Express
++ *
++ */
++
++/ {
++ panels {
++ panel@0 {
++ compatible = "panel";
++ mode = "VGA";
++ refresh = <60>;
++ xres = <640>;
++ yres = <480>;
++ pixclock = <39721>;
++ left_margin = <40>;
++ right_margin = <24>;
++ upper_margin = <32>;
++ lower_margin = <11>;
++ hsync_len = <96>;
++ vsync_len = <2>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++
++ panel@1 {
++ compatible = "panel";
++ mode = "XVGA";
++ refresh = <60>;
++ xres = <1024>;
++ yres = <768>;
++ pixclock = <15748>;
++ left_margin = <152>;
++ right_margin = <48>;
++ upper_margin = <23>;
++ lower_margin = <3>;
++ hsync_len = <104>;
++ vsync_len = <4>;
++ sync = <0>;
++ vmode = "FB_VMODE_NONINTERLACED";
++
++ tim2 = "TIM2_BCD", "TIM2_IPC";
++ cntl = "CNTL_LCDTFT", "CNTL_BGR", "CNTL_LCDVCOMP(1)";
++ caps = "CLCD_CAP_5551", "CLCD_CAP_565", "CLCD_CAP_888";
++ bpp = <16>;
++ };
++ };
++};
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts linux-3.14.40/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts
+--- linux-3.14.40.orig/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/boot/dts/fvp-base-gicv2-psci.dts 2015-05-01 14:57:58.127427001 -0500
+@@ -0,0 +1,266 @@
++/*
++ * Copyright (c) 2013, ARM Limited. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * Redistributions of source code must retain the above copyright notice, this
++ * list of conditions and the following disclaimer.
++ *
++ * Redistributions in binary form must reproduce the above copyright notice,
++ * this list of conditions and the following disclaimer in the documentation
++ * and/or other materials provided with the distribution.
++ *
++ * Neither the name of ARM nor the names of its contributors may be used
++ * to endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/dts-v1/;
++
++/memreserve/ 0x80000000 0x00010000;
++
++/ {
++};
++
++/ {
++ model = "FVP Base";
++ compatible = "arm,vfp-base", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ chosen { };
++
++ aliases {
++ serial0 = &v2m_serial0;
++ serial1 = &v2m_serial1;
++ serial2 = &v2m_serial2;
++ serial3 = &v2m_serial3;
++ };
++
++ psci {
++ compatible = "arm,psci";
++ method = "smc";
++ cpu_suspend = <0xc4000001>;
++ cpu_off = <0x84000002>;
++ cpu_on = <0xc4000003>;
++ };
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ big0: cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x0>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ big1: cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x1>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ big2: cpu@2 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x2>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ big3: cpu@3 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57", "arm,armv8";
++ reg = <0x0 0x3>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little0: cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x100>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little1: cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x101>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little2: cpu@102 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x102>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++ little3: cpu@103 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53", "arm,armv8";
++ reg = <0x0 0x103>;
++ enable-method = "psci";
++ clock-frequency = <1000000>;
++ };
++
++ cpu-map {
++ cluster0 {
++ core0 {
++ cpu = <&big0>;
++ };
++ core1 {
++ cpu = <&big1>;
++ };
++ core2 {
++ cpu = <&big2>;
++ };
++ core3 {
++ cpu = <&big3>;
++ };
++ };
++ cluster1 {
++ core0 {
++ cpu = <&little0>;
++ };
++ core1 {
++ cpu = <&little1>;
++ };
++ core2 {
++ cpu = <&little2>;
++ };
++ core3 {
++ cpu = <&little3>;
++ };
++ };
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>,
++ <0x00000008 0x80000000 0 0x80000000>;
++ };
++
++ gic: interrupt-controller@2f000000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x0 0x2f000000 0 0x10000>,
++ <0x0 0x2c000000 0 0x2000>,
++ <0x0 0x2c010000 0 0x2000>,
++ <0x0 0x2c02F000 0 0x2000>;
++ interrupts = <1 9 0xf04>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <1 13 0xff01>,
++ <1 14 0xff01>,
++ <1 11 0xff01>,
++ <1 10 0xff01>;
++ clock-frequency = <100000000>;
++ };
++
++ timer@2a810000 {
++ compatible = "arm,armv7-timer-mem";
++ reg = <0x0 0x2a810000 0x0 0x10000>;
++ clock-frequency = <100000000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ frame@2a820000 {
++ frame-number = <0>;
++ interrupts = <0 25 4>;
++ reg = <0x0 0x2a820000 0x0 0x10000>;
++ };
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <0 60 4>,
++ <0 61 4>,
++ <0 62 4>,
++ <0 63 4>;
++ };
++
++ smb {
++ compatible = "simple-bus";
++
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 63>;
++ interrupt-map = <0 0 0 &gic 0 0 4>,
++ <0 0 1 &gic 0 1 4>,
++ <0 0 2 &gic 0 2 4>,
++ <0 0 3 &gic 0 3 4>,
++ <0 0 4 &gic 0 4 4>,
++ <0 0 5 &gic 0 5 4>,
++ <0 0 6 &gic 0 6 4>,
++ <0 0 7 &gic 0 7 4>,
++ <0 0 8 &gic 0 8 4>,
++ <0 0 9 &gic 0 9 4>,
++ <0 0 10 &gic 0 10 4>,
++ <0 0 11 &gic 0 11 4>,
++ <0 0 12 &gic 0 12 4>,
++ <0 0 13 &gic 0 13 4>,
++ <0 0 14 &gic 0 14 4>,
++ <0 0 15 &gic 0 15 4>,
++ <0 0 16 &gic 0 16 4>,
++ <0 0 17 &gic 0 17 4>,
++ <0 0 18 &gic 0 18 4>,
++ <0 0 19 &gic 0 19 4>,
++ <0 0 20 &gic 0 20 4>,
++ <0 0 21 &gic 0 21 4>,
++ <0 0 22 &gic 0 22 4>,
++ <0 0 23 &gic 0 23 4>,
++ <0 0 24 &gic 0 24 4>,
++ <0 0 25 &gic 0 25 4>,
++ <0 0 26 &gic 0 26 4>,
++ <0 0 27 &gic 0 27 4>,
++ <0 0 28 &gic 0 28 4>,
++ <0 0 29 &gic 0 29 4>,
++ <0 0 30 &gic 0 30 4>,
++ <0 0 31 &gic 0 31 4>,
++ <0 0 32 &gic 0 32 4>,
++ <0 0 33 &gic 0 33 4>,
++ <0 0 34 &gic 0 34 4>,
++ <0 0 35 &gic 0 35 4>,
++ <0 0 36 &gic 0 36 4>,
++ <0 0 37 &gic 0 37 4>,
++ <0 0 38 &gic 0 38 4>,
++ <0 0 39 &gic 0 39 4>,
++ <0 0 40 &gic 0 40 4>,
++ <0 0 41 &gic 0 41 4>,
++ <0 0 42 &gic 0 42 4>;
++
++ /include/ "rtsm_ve-motherboard.dtsi"
++ };
++};
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/juno.dts linux-3.14.40/arch/arm64/boot/dts/juno.dts
+--- linux-3.14.40.orig/arch/arm64/boot/dts/juno.dts 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/boot/dts/juno.dts 2015-05-01 14:57:58.127427001 -0500
+@@ -0,0 +1,498 @@
++/*
++ * ARM Ltd. Juno Platform
++ *
++ * Fast Models FVP v2 support
++ */
++
++/dts-v1/;
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
++/ {
++ model = "Juno";
++ compatible = "arm,juno", "arm,vexpress";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ aliases {
++ serial0 = &soc_uart0;
++ };
++
++ cpus {
++ #address-cells = <2>;
++ #size-cells = <0>;
++
++ cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x100>;
++ enable-method = "psci";
++ };
++
++ cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x101>;
++ enable-method = "psci";
++ };
++
++ cpu@102 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x102>;
++ enable-method = "psci";
++ };
++
++ cpu@103 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a53","arm,armv8";
++ reg = <0x0 0x103>;
++ enable-method = "psci";
++ };
++
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57","arm,armv8";
++ reg = <0x0 0x0>;
++ enable-method = "psci";
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a57","arm,armv8";
++ reg = <0x0 0x1>;
++ enable-method = "psci";
++ };
++ };
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0x0 0x80000000>,
++ <0x00000008 0x80000000 0x1 0x80000000>;
++ };
++
++ /* memory@14000000 {
++ device_type = "memory";
++ reg = <0x00000000 0x14000000 0x0 0x02000000>;
++ }; */
++
++ gic: interrupt-controller@2c001000 {
++ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
++ #interrupt-cells = <3>;
++ #address-cells = <0>;
++ interrupt-controller;
++ reg = <0x0 0x2c010000 0 0x1000>,
++ <0x0 0x2c02f000 0 0x1000>,
++ <0x0 0x2c04f000 0 0x2000>,
++ <0x0 0x2c06f000 0 0x2000>;
++ interrupts = <GIC_PPI 9 0xf04>;
++ };
++
++ msi0: msi@2c1c0000 {
++ compatible = "arm,gic-msi";
++ reg = <0x0 0x2c1c0000 0 0x10000
++ 0x0 0x2c1d0000 0 0x10000
++ 0x0 0x2c1e0000 0 0x10000
++ 0x0 0x2c1f0000 0 0x10000>;
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <GIC_PPI 13 0xff01>,
++ <GIC_PPI 14 0xff01>,
++ <GIC_PPI 11 0xff01>,
++ <GIC_PPI 10 0xff01>;
++ };
++
++ pmu {
++ compatible = "arm,armv8-pmuv3";
++ interrupts = <GIC_SPI 60 4>,
++ <GIC_SPI 61 4>,
++ <GIC_SPI 62 4>,
++ <GIC_SPI 63 4>;
++ };
++
++ psci {
++ compatible = "arm,psci";
++ method = "smc";
++ cpu_suspend = <0xC4000001>;
++ cpu_off = <0x84000002>;
++ cpu_on = <0xC4000003>;
++ migrate = <0xC4000005>;
++ };
++
++ pci0: pci@30000000 {
++ compatible = "arm,pcie-xr3";
++ device_type = "pci";
++ reg = <0 0x7ff30000 0 0x1000
++ 0 0x7ff20000 0 0x10000
++ 0 0x40000000 0 0x10000000>;
++ bus-range = <0 255>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ ranges = <0x01000000 0x0 0x00000000 0x00 0x5ff00000 0x0 0x00100000
++ 0x02000000 0x0 0x00000000 0x40 0x00000000 0x0 0x80000000
++ 0x42000000 0x0 0x80000000 0x40 0x80000000 0x0 0x80000000>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0 0 0 1 &gic 0 136 4
++ 0 0 0 2 &gic 0 137 4
++ 0 0 0 3 &gic 0 138 4
++ 0 0 0 4 &gic 0 139 4>;
++ };
++
++ scpi: scpi@2b1f0000 {
++ compatible = "arm,scpi-mhu";
++ reg = <0x0 0x2b1f0000 0x0 0x10000>, /* MHU registers */
++ <0x0 0x2e000000 0x0 0x10000>; /* Payload area */
++ interrupts = <0 36 4>, /* low priority interrupt */
++ <0 35 4>, /* high priority interrupt */
++ <0 37 4>; /* secure channel interrupt */
++ #clock-cells = <1>;
++ clock-output-names = "a57", "a53", "gpu", "hdlcd0", "hdlcd1";
++ };
++
++ hdlcd0_osc: scpi_osc@3 {
++ compatible = "arm,scpi-osc";
++ #clock-cells = <0>;
++ clocks = <&scpi 3>;
++ frequency-range = <23000000 210000000>;
++ clock-output-names = "pxlclk0";
++ };
++
++ hdlcd1_osc: scpi_osc@4 {
++ compatible = "arm,scpi-osc";
++ #clock-cells = <0>;
++ clocks = <&scpi 4>;
++ frequency-range = <23000000 210000000>;
++ clock-output-names = "pxlclk1";
++ };
++
++ soc_uartclk: refclk72738khz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <7273800>;
++ clock-output-names = "juno:uartclk";
++ };
++
++ soc_refclk24mhz: clk24mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ clock-output-names = "juno:clk24mhz";
++ };
++
++ mb_eth25mhz: clk25mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <25000000>;
++ clock-output-names = "ethclk25mhz";
++ };
++
++ soc_usb48mhz: clk48mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <48000000>;
++ clock-output-names = "clk48mhz";
++ };
++
++ soc_smc50mhz: clk50mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <50000000>;
++ clock-output-names = "smc_clk";
++ };
++
++ soc_refclk100mhz: refclk100mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "apb_pclk";
++ };
++
++ soc_faxiclk: refclk533mhz {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <533000000>;
++ clock-output-names = "faxi_clk";
++ };
++
++ soc_fixed_3v3: fixedregulator@0 {
++ compatible = "regulator-fixed";
++ regulator-name = "3V3";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ memory-controller@7ffd0000 {
++ compatible = "arm,pl354", "arm,primecell";
++ reg = <0 0x7ffd0000 0 0x1000>;
++ interrupts = <0 86 4>,
++ <0 87 4>;
++ clocks = <&soc_smc50mhz>;
++ clock-names = "apb_pclk";
++ chip5-memwidth = <16>;
++ };
++
++ dma0: dma@0x7ff00000 {
++ compatible = "arm,pl330", "arm,primecell";
++ reg = <0x0 0x7ff00000 0 0x1000>;
++ interrupts = <0 95 4>,
++ <0 88 4>,
++ <0 89 4>,
++ <0 90 4>,
++ <0 91 4>,
++ <0 108 4>,
++ <0 109 4>,
++ <0 110 4>,
++ <0 111 4>;
++ #dma-cells = <1>;
++ #dma-channels = <8>;
++ #dma-requests = <32>;
++ clocks = <&soc_faxiclk>;
++ clock-names = "apb_pclk";
++ };
++
++ soc_uart0: uart@7ff80000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0 0x7ff80000 0x0 0x1000>;
++ interrupts = <0 83 4>;
++ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
++ clock-names = "uartclk", "apb_pclk";
++ dmas = <&dma0 1
++ &dma0 2>;
++ dma-names = "rx", "tx";
++ };
++
++ /* this UART is reserved for secure software.
++ soc_uart1: uart@7ff70000 {
++ compatible = "arm,pl011", "arm,primecell";
++ reg = <0x0 0x7ff70000 0x0 0x1000>;
++ interrupts = <0 84 4>;
++ clocks = <&soc_uartclk>, <&soc_refclk100mhz>;
++ clock-names = "uartclk", "apb_pclk";
++ }; */
++
++ ulpi_phy: phy@0 {
++ compatible = "phy-ulpi-generic";
++ reg = <0x0 0x94 0x0 0x4>;
++ phy-id = <0>;
++ };
++
++ ehci@7ffc0000 {
++ compatible = "snps,ehci-h20ahb";
++ /* compatible = "arm,h20ahb-ehci"; */
++ reg = <0x0 0x7ffc0000 0x0 0x10000>;
++ interrupts = <0 117 4>;
++ clocks = <&soc_usb48mhz>;
++ clock-names = "otg";
++ phys = <&ulpi_phy>;
++ };
++
++ ohci@0x7ffb0000 {
++ compatible = "generic-ohci";
++ reg = <0x0 0x7ffb0000 0x0 0x10000>;
++ interrupts = <0 116 4>;
++ clocks = <&soc_usb48mhz>;
++ clock-names = "otg";
++ };
++
++ i2c@0x7ffa0000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "snps,designware-i2c";
++ reg = <0x0 0x7ffa0000 0x0 0x1000>;
++ interrupts = <0 104 4>;
++ clock-frequency = <400000>;
++ i2c-sda-hold-time-ns = <500>;
++ clocks = <&soc_smc50mhz>;
++
++ dvi0: dvi-transmitter@70 {
++ compatible = "nxp,tda998x";
++ reg = <0x70>;
++ };
++
++ dvi1: dvi-transmitter@71 {
++ compatible = "nxp,tda998x";
++ reg = <0x71>;
++ };
++ };
++
++ /* mmci@1c050000 {
++ compatible = "arm,pl180", "arm,primecell";
++ reg = <0x0 0x1c050000 0x0 0x1000>;
++ interrupts = <0 73 4>,
++ <0 74 4>;
++ max-frequency = <12000000>;
++ vmmc-supply = <&soc_fixed_3v3>;
++ clocks = <&soc_refclk24mhz>, <&soc_refclk100mhz>;
++ clock-names = "mclk", "apb_pclk";
++ }; */
++
++ hdlcd@7ff60000 {
++ compatible = "arm,hdlcd";
++ reg = <0 0x7ff60000 0 0x1000>;
++ interrupts = <0 85 4>;
++ clocks = <&hdlcd0_osc>;
++ clock-names = "pxlclk";
++ i2c-slave = <&dvi0>;
++
++ /* display-timings {
++ native-mode = <&timing0>;
++ timing0: timing@0 {
++				/* 1024 x 768 framebuffer, standard VGA timings * /
++ clock-frequency = <65000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hfront-porch = <24>;
++ hback-porch = <160>;
++ hsync-len = <136>;
++ vfront-porch = <3>;
++ vback-porch = <29>;
++ vsync-len = <6>;
++ };
++ }; */
++ };
++
++ hdlcd@7ff50000 {
++ compatible = "arm,hdlcd";
++ reg = <0 0x7ff50000 0 0x1000>;
++ interrupts = <0 93 4>;
++ clocks = <&hdlcd1_osc>;
++ clock-names = "pxlclk";
++ i2c-slave = <&dvi1>;
++
++ display-timings {
++ native-mode = <&timing1>;
++ timing1: timing@1 {
++				/* 1024 x 768 framebuffer, standard VGA timings */
++ clock-frequency = <65000>;
++ hactive = <1024>;
++ vactive = <768>;
++ hfront-porch = <24>;
++ hback-porch = <160>;
++ hsync-len = <136>;
++ vfront-porch = <3>;
++ vback-porch = <29>;
++ vsync-len = <6>;
++ };
++ };
++ };
++
++ smb {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <1>;
++ ranges = <0 0 0 0x08000000 0x04000000>,
++ <1 0 0 0x14000000 0x04000000>,
++ <2 0 0 0x18000000 0x04000000>,
++ <3 0 0 0x1c000000 0x04000000>,
++ <4 0 0 0x0c000000 0x04000000>,
++ <5 0 0 0x10000000 0x04000000>;
++
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 15>;
++ interrupt-map = <0 0 0 &gic 0 68 4>,
++ <0 0 1 &gic 0 69 4>,
++ <0 0 2 &gic 0 70 4>,
++ <0 0 3 &gic 0 160 4>,
++ <0 0 4 &gic 0 161 4>,
++ <0 0 5 &gic 0 162 4>,
++ <0 0 6 &gic 0 163 4>,
++ <0 0 7 &gic 0 164 4>,
++ <0 0 8 &gic 0 165 4>,
++ <0 0 9 &gic 0 166 4>,
++ <0 0 10 &gic 0 167 4>,
++ <0 0 11 &gic 0 168 4>,
++ <0 0 12 &gic 0 169 4>;
++
++ motherboard {
++ model = "V2M-Juno";
++ arm,hbi = <0x252>;
++ arm,vexpress,site = <0>;
++ arm,v2m-memory-map = "rs1";
++ compatible = "arm,vexpress,v2p-p1", "simple-bus";
++ #address-cells = <2>; /* SMB chipselect number and offset */
++ #size-cells = <1>;
++ #interrupt-cells = <1>;
++ ranges;
++
++ usb@5,00000000 {
++ compatible = "nxp,usb-isp1763";
++ reg = <5 0x00000000 0x20000>;
++ bus-width = <16>;
++ interrupts = <4>;
++ };
++
++ ethernet@2,00000000 {
++ compatible = "smsc,lan9118", "smsc,lan9115";
++ reg = <2 0x00000000 0x10000>;
++ interrupts = <3>;
++ phy-mode = "mii";
++ reg-io-width = <4>;
++ smsc,irq-active-high;
++ smsc,irq-push-pull;
++ clocks = <&mb_eth25mhz>;
++ vdd33a-supply = <&soc_fixed_3v3>; /* change this */
++ vddvario-supply = <&soc_fixed_3v3>; /* and this */
++ };
++
++ iofpga@3,00000000 {
++ compatible = "arm,amba-bus", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0 3 0 0x200000>;
++
++ kmi@060000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x060000 0x1000>;
++ interrupts = <8>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ kmi@070000 {
++ compatible = "arm,pl050", "arm,primecell";
++ reg = <0x070000 0x1000>;
++ interrupts = <8>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "KMIREFCLK", "apb_pclk";
++ };
++
++ wdt@0f0000 {
++ compatible = "arm,sp805", "arm,primecell";
++ reg = <0x0f0000 0x10000>;
++ interrupts = <7>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "wdogclk", "apb_pclk";
++ };
++
++ v2m_timer01: timer@110000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x110000 0x10000>;
++ interrupts = <9>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "timclken1", "apb_pclk";
++ };
++
++ v2m_timer23: timer@120000 {
++ compatible = "arm,sp804", "arm,primecell";
++ reg = <0x120000 0x10000>;
++ interrupts = <9>;
++ clocks = <&soc_refclk24mhz>, <&soc_smc50mhz>;
++ clock-names = "timclken1", "apb_pclk";
++ };
++
++ rtc@170000 {
++ compatible = "arm,pl031", "arm,primecell";
++ reg = <0x170000 0x10000>;
++ interrupts = <0>;
++ clocks = <&soc_smc50mhz>;
++ clock-names = "apb_pclk";
++ };
++ };
++ };
++ };
++};
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/Makefile linux-3.14.40/arch/arm64/boot/dts/Makefile
+--- linux-3.14.40.orig/arch/arm64/boot/dts/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/boot/dts/Makefile 2015-05-01 14:57:58.127427001 -0500
+@@ -1,5 +1,7 @@
+-dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
++dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb \
++ fvp-base-gicv2-psci.dtb
+ dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
++dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb
+
+ targets += dtbs
+ targets += $(dtb-y)
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts linux-3.14.40/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
+--- linux-3.14.40.orig/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts 2015-05-01 14:57:58.131427001 -0500
+@@ -157,3 +157,5 @@
+ /include/ "rtsm_ve-motherboard.dtsi"
+ };
+ };
++
++/include/ "clcd-panels.dtsi"
+diff -Nur linux-3.14.40.orig/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi linux-3.14.40/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+--- linux-3.14.40.orig/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi 2015-05-01 14:57:58.131427001 -0500
+@@ -182,6 +182,9 @@
+ interrupts = <14>;
+ clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
+ clock-names = "clcdclk", "apb_pclk";
++ mode = "XVGA";
++ use_dma = <0>;
++ framebuffer = <0x18000000 0x00180000>;
+ };
+
+ virtio_block@0130000 {
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-ce-ccm-core.S linux-3.14.40/arch/arm64/crypto/aes-ce-ccm-core.S
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-ce-ccm-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-ce-ccm-core.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,222 @@
++/*
++ * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
++ *
++ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++ .text
++ .arch armv8-a+crypto
++
++ /*
++ * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
++ * u32 *macp, u8 const rk[], u32 rounds);
++ */
++ENTRY(ce_aes_ccm_auth_data)
++ ldr w8, [x3] /* leftover from prev round? */
++ ld1 {v0.2d}, [x0] /* load mac */
++ cbz w8, 1f
++ sub w8, w8, #16
++ eor v1.16b, v1.16b, v1.16b
++0: ldrb w7, [x1], #1 /* get 1 byte of input */
++ subs w2, w2, #1
++ add w8, w8, #1
++ ins v1.b[0], w7
++ ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
++ beq 8f /* out of input? */
++ cbnz w8, 0b
++ eor v0.16b, v0.16b, v1.16b
++1: ld1 {v3.2d}, [x4] /* load first round key */
++ prfm pldl1strm, [x1]
++ cmp w5, #12 /* which key size? */
++ add x6, x4, #16
++ sub w7, w5, #2 /* modified # of rounds */
++ bmi 2f
++ bne 5f
++ mov v5.16b, v3.16b
++ b 4f
++2: mov v4.16b, v3.16b
++ ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
++3: aese v0.16b, v4.16b
++ aesmc v0.16b, v0.16b
++4: ld1 {v3.2d}, [x6], #16 /* load next round key */
++ aese v0.16b, v5.16b
++ aesmc v0.16b, v0.16b
++5: ld1 {v4.2d}, [x6], #16 /* load next round key */
++ subs w7, w7, #3
++ aese v0.16b, v3.16b
++ aesmc v0.16b, v0.16b
++ ld1 {v5.2d}, [x6], #16 /* load next round key */
++ bpl 3b
++ aese v0.16b, v4.16b
++ subs w2, w2, #16 /* last data? */
++ eor v0.16b, v0.16b, v5.16b /* final round */
++ bmi 6f
++ ld1 {v1.16b}, [x1], #16 /* load next input block */
++ eor v0.16b, v0.16b, v1.16b /* xor with mac */
++ bne 1b
++6: st1 {v0.2d}, [x0] /* store mac */
++ beq 10f
++ adds w2, w2, #16
++ beq 10f
++ mov w8, w2
++7: ldrb w7, [x1], #1
++ umov w6, v0.b[0]
++ eor w6, w6, w7
++ strb w6, [x0], #1
++ subs w2, w2, #1
++ beq 10f
++ ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
++ b 7b
++8: mov w7, w8
++ add w8, w8, #16
++9: ext v1.16b, v1.16b, v1.16b, #1
++ adds w7, w7, #1
++ bne 9b
++ eor v0.16b, v0.16b, v1.16b
++ st1 {v0.2d}, [x0]
++10: str w8, [x3]
++ ret
++ENDPROC(ce_aes_ccm_auth_data)
++
++ /*
++ * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
++ * u32 rounds);
++ */
++ENTRY(ce_aes_ccm_final)
++ ld1 {v3.2d}, [x2], #16 /* load first round key */
++ ld1 {v0.2d}, [x0] /* load mac */
++ cmp w3, #12 /* which key size? */
++ sub w3, w3, #2 /* modified # of rounds */
++ ld1 {v1.2d}, [x1] /* load 1st ctriv */
++ bmi 0f
++ bne 3f
++ mov v5.16b, v3.16b
++ b 2f
++0: mov v4.16b, v3.16b
++1: ld1 {v5.2d}, [x2], #16 /* load next round key */
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++2: ld1 {v3.2d}, [x2], #16 /* load next round key */
++ aese v0.16b, v5.16b
++ aese v1.16b, v5.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++3: ld1 {v4.2d}, [x2], #16 /* load next round key */
++ subs w3, w3, #3
++ aese v0.16b, v3.16b
++ aese v1.16b, v3.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++ bpl 1b
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ /* final round key cancels out */
++ eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
++ st1 {v0.2d}, [x0] /* store result */
++ ret
++ENDPROC(ce_aes_ccm_final)
++
++ .macro aes_ccm_do_crypt,enc
++ ldr x8, [x6, #8] /* load lower ctr */
++ ld1 {v0.2d}, [x5] /* load mac */
++ rev x8, x8 /* keep swabbed ctr in reg */
++0: /* outer loop */
++ ld1 {v1.1d}, [x6] /* load upper ctr */
++ prfm pldl1strm, [x1]
++ add x8, x8, #1
++ rev x9, x8
++ cmp w4, #12 /* which key size? */
++ sub w7, w4, #2 /* get modified # of rounds */
++ ins v1.d[1], x9 /* no carry in lower ctr */
++ ld1 {v3.2d}, [x3] /* load first round key */
++ add x10, x3, #16
++ bmi 1f
++ bne 4f
++ mov v5.16b, v3.16b
++ b 3f
++1: mov v4.16b, v3.16b
++ ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
++2: /* inner loop: 3 rounds, 2x interleaved */
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++3: ld1 {v3.2d}, [x10], #16 /* load next round key */
++ aese v0.16b, v5.16b
++ aese v1.16b, v5.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++4: ld1 {v4.2d}, [x10], #16 /* load next round key */
++ subs w7, w7, #3
++ aese v0.16b, v3.16b
++ aese v1.16b, v3.16b
++ aesmc v0.16b, v0.16b
++ aesmc v1.16b, v1.16b
++ ld1 {v5.2d}, [x10], #16 /* load next round key */
++ bpl 2b
++ aese v0.16b, v4.16b
++ aese v1.16b, v4.16b
++ subs w2, w2, #16
++ bmi 6f /* partial block? */
++ ld1 {v2.16b}, [x1], #16 /* load next input block */
++ .if \enc == 1
++ eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
++ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
++ .else
++ eor v2.16b, v2.16b, v1.16b /* xor with crypted ctr */
++ eor v1.16b, v2.16b, v5.16b /* final round enc */
++ .endif
++ eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
++ st1 {v1.16b}, [x0], #16 /* write output block */
++ bne 0b
++ rev x8, x8
++ st1 {v0.2d}, [x5] /* store mac */
++ str x8, [x6, #8] /* store lsb end of ctr (BE) */
++5: ret
++
++6: eor v0.16b, v0.16b, v5.16b /* final round mac */
++ eor v1.16b, v1.16b, v5.16b /* final round enc */
++ st1 {v0.2d}, [x5] /* store mac */
++ add w2, w2, #16 /* process partial tail block */
++7: ldrb w9, [x1], #1 /* get 1 byte of input */
++ umov w6, v1.b[0] /* get top crypted ctr byte */
++ umov w7, v0.b[0] /* get top mac byte */
++ .if \enc == 1
++ eor w7, w7, w9
++ eor w9, w9, w6
++ .else
++ eor w9, w9, w6
++ eor w7, w7, w9
++ .endif
++ strb w9, [x0], #1 /* store out byte */
++ strb w7, [x5], #1 /* store mac byte */
++ subs w2, w2, #1
++ beq 5b
++ ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
++ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
++ b 7b
++ .endm
++
++ /*
++ * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
++ * u8 const rk[], u32 rounds, u8 mac[],
++ * u8 ctr[]);
++ * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
++ * u8 const rk[], u32 rounds, u8 mac[],
++ * u8 ctr[]);
++ */
++ENTRY(ce_aes_ccm_encrypt)
++ aes_ccm_do_crypt 1
++ENDPROC(ce_aes_ccm_encrypt)
++
++ENTRY(ce_aes_ccm_decrypt)
++ aes_ccm_do_crypt 0
++ENDPROC(ce_aes_ccm_decrypt)
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-ce-ccm-glue.c linux-3.14.40/arch/arm64/crypto/aes-ce-ccm-glue.c
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-ce-ccm-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-ce-ccm-glue.c 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,297 @@
++/*
++ * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
++ *
++ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/aes.h>
++#include <crypto/algapi.h>
++#include <crypto/scatterwalk.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++static int num_rounds(struct crypto_aes_ctx *ctx)
++{
++ /*
++ * # of rounds specified by AES:
++ * 128 bit key 10 rounds
++ * 192 bit key 12 rounds
++ * 256 bit key 14 rounds
++ * => n byte key => 6 + (n/4) rounds
++ */
++ return 6 + ctx->key_length / 4;
++}
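++/*
++ * For example, a 16 byte (128 bit) key gives 6 + 16/4 = 10 rounds and a
++ * 32 byte (256 bit) key gives 6 + 32/4 = 14 rounds.
++ */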
++
++asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
++ u32 *macp, u32 const rk[], u32 rounds);
++
++asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
++ u32 const rk[], u32 rounds, u8 mac[],
++ u8 ctr[]);
++
++asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
++ u32 const rk[], u32 rounds, u8 mac[],
++ u8 ctr[]);
++
++asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
++ u32 rounds);
++
++static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
++ unsigned int key_len)
++{
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
++ int ret;
++
++ ret = crypto_aes_expand_key(ctx, in_key, key_len);
++ if (!ret)
++ return 0;
++
++ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++ return -EINVAL;
++}
++
++static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
++{
++ if ((authsize & 1) || authsize < 4)
++ return -EINVAL;
++ return 0;
++}
++
++static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ __be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
++ u32 l = req->iv[0] + 1;
++
++ /* verify that CCM dimension 'L' is set correctly in the IV */
++ if (l < 2 || l > 8)
++ return -EINVAL;
++
++ /* verify that msglen can in fact be represented in L bytes */
++ if (l < 4 && msglen >> (8 * l))
++ return -EOVERFLOW;
++
++ /*
++ * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
++ * uses a u32 type to represent msglen so the top 4 bytes are always 0.
++ */
++ n[0] = 0;
++ n[1] = cpu_to_be32(msglen);
++
++ memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
++
++ /*
++ * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
++ * - bits 0..2 : max # of bytes required to represent msglen, minus 1
++ * (already set by caller)
++ * - bits 3..5 : size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
++ * - bit 6 : indicates presence of authenticate-only data
++ */
++ maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
++ if (req->assoclen)
++ maciv[0] |= 0x40;
++
++ memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
++ return 0;
++}
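++/*
++ * Worked example: with an 8 byte auth tag, L == 2 (so req->iv[0] == 1) and
++ * associated data present, the flags byte works out to
++ * 0x01 | ((8 - 2) << 2) | 0x40 == 0x59, as prescribed by RFC 3610.
++ */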
++
++static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
++ struct __packed { __be16 l; __be32 h; u16 len; } ltag;
++ struct scatter_walk walk;
++ u32 len = req->assoclen;
++ u32 macp = 0;
++
++ /* prepend the AAD with a length tag */
++ if (len < 0xff00) {
++ ltag.l = cpu_to_be16(len);
++ ltag.len = 2;
++ } else {
++ ltag.l = cpu_to_be16(0xfffe);
++ put_unaligned_be32(len, &ltag.h);
++ ltag.len = 6;
++ }
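++	/* e.g. assoclen == 24 is encoded as the two bytes 0x00 0x18, while
++	 * lengths of 0xff00 and above use the 0xff 0xfe marker plus a __be32 */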
++
++ ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
++ num_rounds(ctx));
++ scatterwalk_start(&walk, req->assoc);
++
++ do {
++ u32 n = scatterwalk_clamp(&walk, len);
++ u8 *p;
++
++ if (!n) {
++ scatterwalk_start(&walk, sg_next(walk.sg));
++ n = scatterwalk_clamp(&walk, len);
++ }
++ p = scatterwalk_map(&walk);
++ ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
++ num_rounds(ctx));
++ len -= n;
++
++ scatterwalk_unmap(p);
++ scatterwalk_advance(&walk, n);
++ scatterwalk_done(&walk, 0, len);
++ } while (len);
++}
++
++static int ccm_encrypt(struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
++ struct blkcipher_desc desc = { .info = req->iv };
++ struct blkcipher_walk walk;
++ u8 __aligned(8) mac[AES_BLOCK_SIZE];
++ u8 buf[AES_BLOCK_SIZE];
++ u32 len = req->cryptlen;
++ int err;
++
++ err = ccm_init_mac(req, mac, len);
++ if (err)
++ return err;
++
++ kernel_neon_begin_partial(6);
++
++ if (req->assoclen)
++ ccm_calculate_auth_mac(req, mac);
++
++ /* preserve the original iv for the final round */
++ memcpy(buf, req->iv, AES_BLOCK_SIZE);
++
++ blkcipher_walk_init(&walk, req->dst, req->src, len);
++ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
++ AES_BLOCK_SIZE);
++
++ while (walk.nbytes) {
++ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
++
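++		/* on the final chunk, hand any partial trailing block to the
++		 * core routine as well instead of leaving it as a tail */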
++ if (walk.nbytes == len)
++ tail = 0;
++
++ ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ walk.nbytes - tail, ctx->key_enc,
++ num_rounds(ctx), mac, walk.iv);
++
++ len -= walk.nbytes - tail;
++ err = blkcipher_walk_done(&desc, &walk, tail);
++ }
++ if (!err)
++ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
++
++ kernel_neon_end();
++
++ if (err)
++ return err;
++
++ /* copy authtag to end of dst */
++ scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
++ crypto_aead_authsize(aead), 1);
++
++ return 0;
++}
++
++static int ccm_decrypt(struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int authsize = crypto_aead_authsize(aead);
++ struct blkcipher_desc desc = { .info = req->iv };
++ struct blkcipher_walk walk;
++ u8 __aligned(8) mac[AES_BLOCK_SIZE];
++ u8 buf[AES_BLOCK_SIZE];
++ u32 len = req->cryptlen - authsize;
++ int err;
++
++ err = ccm_init_mac(req, mac, len);
++ if (err)
++ return err;
++
++ kernel_neon_begin_partial(6);
++
++ if (req->assoclen)
++ ccm_calculate_auth_mac(req, mac);
++
++ /* preserve the original iv for the final round */
++ memcpy(buf, req->iv, AES_BLOCK_SIZE);
++
++ blkcipher_walk_init(&walk, req->dst, req->src, len);
++ err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
++ AES_BLOCK_SIZE);
++
++ while (walk.nbytes) {
++ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
++
++ if (walk.nbytes == len)
++ tail = 0;
++
++ ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ walk.nbytes - tail, ctx->key_enc,
++ num_rounds(ctx), mac, walk.iv);
++
++ len -= walk.nbytes - tail;
++ err = blkcipher_walk_done(&desc, &walk, tail);
++ }
++ if (!err)
++ ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
++
++ kernel_neon_end();
++
++ if (err)
++ return err;
++
++ /* compare calculated auth tag with the stored one */
++ scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
++ authsize, 0);
++
++ if (memcmp(mac, buf, authsize))
++ return -EBADMSG;
++ return 0;
++}
++
++static struct crypto_alg ccm_aes_alg = {
++ .cra_name = "ccm(aes)",
++ .cra_driver_name = "ccm-aes-ce",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_aead_type,
++ .cra_module = THIS_MODULE,
++ .cra_aead = {
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = AES_BLOCK_SIZE,
++ .setkey = ccm_setkey,
++ .setauthsize = ccm_setauthsize,
++ .encrypt = ccm_encrypt,
++ .decrypt = ccm_decrypt,
++ }
++};
++
++static int __init aes_mod_init(void)
++{
++ if (!(elf_hwcap & HWCAP_AES))
++ return -ENODEV;
++ return crypto_register_alg(&ccm_aes_alg);
++}
++
++static void __exit aes_mod_exit(void)
++{
++ crypto_unregister_alg(&ccm_aes_alg);
++}
++
++module_init(aes_mod_init);
++module_exit(aes_mod_exit);
++
++MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("ccm(aes)");
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-ce-cipher.c linux-3.14.40/arch/arm64/crypto/aes-ce-cipher.c
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-ce-cipher.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-ce-cipher.c 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,155 @@
++/*
++ * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <crypto/aes.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++struct aes_block {
++ u8 b[AES_BLOCK_SIZE];
++};
++
++static int num_rounds(struct crypto_aes_ctx *ctx)
++{
++ /*
++ * # of rounds specified by AES:
++ * 128 bit key 10 rounds
++ * 192 bit key 12 rounds
++ * 256 bit key 14 rounds
++ * => n byte key => 6 + (n/4) rounds
++ */
++ return 6 + ctx->key_length / 4;
++}
++
++static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
++{
++ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct aes_block *out = (struct aes_block *)dst;
++ struct aes_block const *in = (struct aes_block *)src;
++ void *dummy0;
++ int dummy1;
++
++ kernel_neon_begin_partial(4);
++
++ __asm__(" ld1 {v0.16b}, %[in] ;"
++ " ld1 {v1.2d}, [%[key]], #16 ;"
++ " cmp %w[rounds], #10 ;"
++ " bmi 0f ;"
++ " bne 3f ;"
++ " mov v3.16b, v1.16b ;"
++ " b 2f ;"
++ "0: mov v2.16b, v1.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ "1: aese v0.16b, v2.16b ;"
++ " aesmc v0.16b, v0.16b ;"
++ "2: ld1 {v1.2d}, [%[key]], #16 ;"
++ " aese v0.16b, v3.16b ;"
++ " aesmc v0.16b, v0.16b ;"
++ "3: ld1 {v2.2d}, [%[key]], #16 ;"
++ " subs %w[rounds], %w[rounds], #3 ;"
++ " aese v0.16b, v1.16b ;"
++ " aesmc v0.16b, v0.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ " bpl 1b ;"
++ " aese v0.16b, v2.16b ;"
++ " eor v0.16b, v0.16b, v3.16b ;"
++ " st1 {v0.16b}, %[out] ;"
++
++ : [out] "=Q"(*out),
++ [key] "=r"(dummy0),
++ [rounds] "=r"(dummy1)
++ : [in] "Q"(*in),
++ "1"(ctx->key_enc),
++ "2"(num_rounds(ctx) - 2)
++ : "cc");
++
++ kernel_neon_end();
++}
++
++static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
++{
++ struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct aes_block *out = (struct aes_block *)dst;
++ struct aes_block const *in = (struct aes_block *)src;
++ void *dummy0;
++ int dummy1;
++
++ kernel_neon_begin_partial(4);
++
++ __asm__(" ld1 {v0.16b}, %[in] ;"
++ " ld1 {v1.2d}, [%[key]], #16 ;"
++ " cmp %w[rounds], #10 ;"
++ " bmi 0f ;"
++ " bne 3f ;"
++ " mov v3.16b, v1.16b ;"
++ " b 2f ;"
++ "0: mov v2.16b, v1.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ "1: aesd v0.16b, v2.16b ;"
++ " aesimc v0.16b, v0.16b ;"
++ "2: ld1 {v1.2d}, [%[key]], #16 ;"
++ " aesd v0.16b, v3.16b ;"
++ " aesimc v0.16b, v0.16b ;"
++ "3: ld1 {v2.2d}, [%[key]], #16 ;"
++ " subs %w[rounds], %w[rounds], #3 ;"
++ " aesd v0.16b, v1.16b ;"
++ " aesimc v0.16b, v0.16b ;"
++ " ld1 {v3.2d}, [%[key]], #16 ;"
++ " bpl 1b ;"
++ " aesd v0.16b, v2.16b ;"
++ " eor v0.16b, v0.16b, v3.16b ;"
++ " st1 {v0.16b}, %[out] ;"
++
++ : [out] "=Q"(*out),
++ [key] "=r"(dummy0),
++ [rounds] "=r"(dummy1)
++ : [in] "Q"(*in),
++ "1"(ctx->key_dec),
++ "2"(num_rounds(ctx) - 2)
++ : "cc");
++
++ kernel_neon_end();
++}
++
++static struct crypto_alg aes_alg = {
++ .cra_name = "aes",
++ .cra_driver_name = "aes-ce",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_cipher = {
++ .cia_min_keysize = AES_MIN_KEY_SIZE,
++ .cia_max_keysize = AES_MAX_KEY_SIZE,
++ .cia_setkey = crypto_aes_set_key,
++ .cia_encrypt = aes_cipher_encrypt,
++ .cia_decrypt = aes_cipher_decrypt
++ }
++};
++
++static int __init aes_mod_init(void)
++{
++ return crypto_register_alg(&aes_alg);
++}
++
++static void __exit aes_mod_exit(void)
++{
++ crypto_unregister_alg(&aes_alg);
++}
++
++module_cpu_feature_match(AES, aes_mod_init);
++module_exit(aes_mod_exit);
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-ce.S linux-3.14.40/arch/arm64/crypto/aes-ce.S
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-ce.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-ce.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,133 @@
++/*
++ * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
++ * Crypto Extensions
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++#define AES_ENTRY(func) ENTRY(ce_ ## func)
++#define AES_ENDPROC(func) ENDPROC(ce_ ## func)
++
++ .arch armv8-a+crypto
++
++ /* preload all round keys */
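++	/* 256 bit keys load v17-v31 below, 192 bit keys skip v17-v18 and
++	 * 128 bit keys load only the common v21-v31 range */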
++ .macro load_round_keys, rounds, rk
++ cmp \rounds, #12
++ blo 2222f /* 128 bits */
++ beq 1111f /* 192 bits */
++ ld1 {v17.16b-v18.16b}, [\rk], #32
++1111: ld1 {v19.16b-v20.16b}, [\rk], #32
++2222: ld1 {v21.16b-v24.16b}, [\rk], #64
++ ld1 {v25.16b-v28.16b}, [\rk], #64
++ ld1 {v29.16b-v31.16b}, [\rk]
++ .endm
++
++ /* prepare for encryption with key in rk[] */
++ .macro enc_prepare, rounds, rk, ignore
++ load_round_keys \rounds, \rk
++ .endm
++
++ /* prepare for encryption (again) but with new key in rk[] */
++ .macro enc_switch_key, rounds, rk, ignore
++ load_round_keys \rounds, \rk
++ .endm
++
++ /* prepare for decryption with key in rk[] */
++ .macro dec_prepare, rounds, rk, ignore
++ load_round_keys \rounds, \rk
++ .endm
++
++ .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
++ aes\de \i0\().16b, \k\().16b
++ .ifnb \i1
++ aes\de \i1\().16b, \k\().16b
++ .ifnb \i3
++ aes\de \i2\().16b, \k\().16b
++ aes\de \i3\().16b, \k\().16b
++ .endif
++ .endif
++ aes\mc \i0\().16b, \i0\().16b
++ .ifnb \i1
++ aes\mc \i1\().16b, \i1\().16b
++ .ifnb \i3
++ aes\mc \i2\().16b, \i2\().16b
++ aes\mc \i3\().16b, \i3\().16b
++ .endif
++ .endif
++ .endm
++
++ /* up to 4 interleaved encryption rounds with the same round key */
++ .macro round_Nx, enc, k, i0, i1, i2, i3
++ .ifc \enc, e
++ do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3
++ .else
++ do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3
++ .endif
++ .endm
++
++ /* up to 4 interleaved final rounds */
++ .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3
++ aes\de \i0\().16b, \k\().16b
++ .ifnb \i1
++ aes\de \i1\().16b, \k\().16b
++ .ifnb \i3
++ aes\de \i2\().16b, \k\().16b
++ aes\de \i3\().16b, \k\().16b
++ .endif
++ .endif
++ eor \i0\().16b, \i0\().16b, \k2\().16b
++ .ifnb \i1
++ eor \i1\().16b, \i1\().16b, \k2\().16b
++ .ifnb \i3
++ eor \i2\().16b, \i2\().16b, \k2\().16b
++ eor \i3\().16b, \i3\().16b, \k2\().16b
++ .endif
++ .endif
++ .endm
++
++ /* up to 4 interleaved blocks */
++ .macro do_block_Nx, enc, rounds, i0, i1, i2, i3
++ cmp \rounds, #12
++ blo 2222f /* 128 bits */
++ beq 1111f /* 192 bits */
++ round_Nx \enc, v17, \i0, \i1, \i2, \i3
++ round_Nx \enc, v18, \i0, \i1, \i2, \i3
++1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3
++ round_Nx \enc, v20, \i0, \i1, \i2, \i3
++2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
++ round_Nx \enc, \key, \i0, \i1, \i2, \i3
++ .endr
++ fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3
++ .endm
++
++ .macro encrypt_block, in, rounds, t0, t1, t2
++ do_block_Nx e, \rounds, \in
++ .endm
++
++ .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2
++ do_block_Nx e, \rounds, \i0, \i1
++ .endm
++
++ .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
++ do_block_Nx e, \rounds, \i0, \i1, \i2, \i3
++ .endm
++
++ .macro decrypt_block, in, rounds, t0, t1, t2
++ do_block_Nx d, \rounds, \in
++ .endm
++
++ .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2
++ do_block_Nx d, \rounds, \i0, \i1
++ .endm
++
++ .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
++ do_block_Nx d, \rounds, \i0, \i1, \i2, \i3
++ .endm
++
++#include "aes-modes.S"
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-glue.c linux-3.14.40/arch/arm64/crypto/aes-glue.c
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-glue.c 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,446 @@
++/*
++ * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/hwcap.h>
++#include <crypto/aes.h>
++#include <crypto/ablk_helper.h>
++#include <crypto/algapi.h>
++#include <linux/module.h>
++#include <linux/cpufeature.h>
++
++#ifdef USE_V8_CRYPTO_EXTENSIONS
++#define MODE "ce"
++#define PRIO 300
++#define aes_ecb_encrypt ce_aes_ecb_encrypt
++#define aes_ecb_decrypt ce_aes_ecb_decrypt
++#define aes_cbc_encrypt ce_aes_cbc_encrypt
++#define aes_cbc_decrypt ce_aes_cbc_decrypt
++#define aes_ctr_encrypt ce_aes_ctr_encrypt
++#define aes_xts_encrypt ce_aes_xts_encrypt
++#define aes_xts_decrypt ce_aes_xts_decrypt
++MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
++#else
++#define MODE "neon"
++#define PRIO 200
++#define aes_ecb_encrypt neon_aes_ecb_encrypt
++#define aes_ecb_decrypt neon_aes_ecb_decrypt
++#define aes_cbc_encrypt neon_aes_cbc_encrypt
++#define aes_cbc_decrypt neon_aes_cbc_decrypt
++#define aes_ctr_encrypt neon_aes_ctr_encrypt
++#define aes_xts_encrypt neon_aes_xts_encrypt
++#define aes_xts_decrypt neon_aes_xts_decrypt
++MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
++MODULE_ALIAS("ecb(aes)");
++MODULE_ALIAS("cbc(aes)");
++MODULE_ALIAS("ctr(aes)");
++MODULE_ALIAS("xts(aes)");
++#endif
++
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++/* defined in aes-modes.S */
++asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, int first);
++asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, int first);
++
++asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, u8 iv[], int first);
++asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, u8 iv[], int first);
++
++asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
++ int rounds, int blocks, u8 ctr[], int first);
++
++asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
++ int rounds, int blocks, u8 const rk2[], u8 iv[],
++ int first);
++asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
++ int rounds, int blocks, u8 const rk2[], u8 iv[],
++ int first);
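++
++/*
++ * Note on the prototypes above: 'rounds' is 10, 12 or 14 for 128, 192 and
++ * 256 bit keys (computed as 6 + key_length / 4 below), and a non-zero
++ * 'first' tells the assembly code to run its enc/dec prepare step (loading
++ * the round keys or lookup tables) before processing the first batch.
++ */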
++
++struct crypto_aes_xts_ctx {
++ struct crypto_aes_ctx key1;
++ struct crypto_aes_ctx __aligned(8) key2;
++};
++
++static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
++ unsigned int key_len)
++{
++ struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
++ if (!ret)
++ ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
++ key_len / 2);
++ if (!ret)
++ return 0;
++
++ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++ return -EINVAL;
++}
++
++static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_enc, rounds, blocks, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_dec, rounds, blocks, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
++ first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
++ first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++ return err;
++}
++
++static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key_length / 4;
++ struct blkcipher_walk walk;
++ int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
++
++ first = 1;
++ kernel_neon_begin();
++ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
++ aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
++ first);
++ first = 0;
++ nbytes -= blocks * AES_BLOCK_SIZE;
++ if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
++ break;
++ err = blkcipher_walk_done(desc, &walk,
++ walk.nbytes % AES_BLOCK_SIZE);
++ }
++ if (nbytes) {
++ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
++ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
++ u8 __aligned(8) tail[AES_BLOCK_SIZE];
++
++ /*
++ * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
++ * to tell aes_ctr_encrypt() to only read half a block.
++ */
++ blocks = (nbytes <= 8) ? -1 : 1;
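++ /*
++ * (A negative block count steers the assembly into its
++ * .Lctrhalfblock path, which loads and stores only 8 bytes.)
++ */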
++
++ aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
++ blocks, walk.iv, first);
++ memcpy(tdst, tail, nbytes);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++
++ return err;
++}
++
++static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key1.key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key1.key_enc, rounds, blocks,
++ (u8 *)ctx->key2.key_enc, walk.iv, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++
++ return err;
++}
++
++static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
++ struct scatterlist *src, unsigned int nbytes)
++{
++ struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
++ int err, first, rounds = 6 + ctx->key1.key_length / 4;
++ struct blkcipher_walk walk;
++ unsigned int blocks;
++
++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++ blkcipher_walk_init(&walk, dst, src, nbytes);
++ err = blkcipher_walk_virt(desc, &walk);
++
++ kernel_neon_begin();
++ for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
++ aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
++ (u8 *)ctx->key1.key_dec, rounds, blocks,
++ (u8 *)ctx->key2.key_enc, walk.iv, first);
++ err = blkcipher_walk_done(desc, &walk, 0);
++ }
++ kernel_neon_end();
++
++ return err;
++}
++
++static struct crypto_alg aes_algs[] = { {
++ .cra_name = "__ecb-aes-" MODE,
++ .cra_driver_name = "__driver-ecb-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = crypto_aes_set_key,
++ .encrypt = ecb_encrypt,
++ .decrypt = ecb_decrypt,
++ },
++}, {
++ .cra_name = "__cbc-aes-" MODE,
++ .cra_driver_name = "__driver-cbc-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = crypto_aes_set_key,
++ .encrypt = cbc_encrypt,
++ .decrypt = cbc_decrypt,
++ },
++}, {
++ .cra_name = "__ctr-aes-" MODE,
++ .cra_driver_name = "__driver-ctr-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = crypto_aes_set_key,
++ .encrypt = ctr_encrypt,
++ .decrypt = ctr_encrypt,
++ },
++}, {
++ .cra_name = "__xts-aes-" MODE,
++ .cra_driver_name = "__driver-xts-aes-" MODE,
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_blkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_blkcipher = {
++ .min_keysize = 2 * AES_MIN_KEY_SIZE,
++ .max_keysize = 2 * AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = xts_set_key,
++ .encrypt = xts_encrypt,
++ .decrypt = xts_decrypt,
++ },
++}, {
++ .cra_name = "ecb(aes)",
++ .cra_driver_name = "ecb-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++}, {
++ .cra_name = "cbc(aes)",
++ .cra_driver_name = "cbc-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++}, {
++ .cra_name = "ctr(aes)",
++ .cra_driver_name = "ctr-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++}, {
++ .cra_name = "xts(aes)",
++ .cra_driver_name = "xts-aes-" MODE,
++ .cra_priority = PRIO,
++ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct async_helper_ctx),
++ .cra_alignmask = 7,
++ .cra_type = &crypto_ablkcipher_type,
++ .cra_module = THIS_MODULE,
++ .cra_init = ablk_init,
++ .cra_exit = ablk_exit,
++ .cra_ablkcipher = {
++ .min_keysize = 2 * AES_MIN_KEY_SIZE,
++ .max_keysize = 2 * AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablk_set_key,
++ .encrypt = ablk_encrypt,
++ .decrypt = ablk_decrypt,
++ }
++} };
++
++static int __init aes_init(void)
++{
++ return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
++}
++
++static void __exit aes_exit(void)
++{
++ crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
++}
++
++#ifdef USE_V8_CRYPTO_EXTENSIONS
++module_cpu_feature_match(AES, aes_init);
++#else
++module_init(aes_init);
++#endif
++module_exit(aes_exit);
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-modes.S linux-3.14.40/arch/arm64/crypto/aes-modes.S
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-modes.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-modes.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,532 @@
++/*
++ * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* included by aes-ce.S and aes-neon.S */
++
++ .text
++ .align 4
++
++/*
++ * There are several ways to instantiate this code:
++ * - no interleave, all inline
++ * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
++ * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
++ * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
++ * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
++ *
++ * Macros imported by this code:
++ * - enc_prepare - setup NEON registers for encryption
++ * - dec_prepare - setup NEON registers for decryption
++ * - enc_switch_key - change to new key after having prepared for encryption
++ * - encrypt_block - encrypt a single block
++ * - decrypt_block - decrypt a single block
++ * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
++ * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
++ * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
++ * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
++ */
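++
++/*
++ * In this patch, the Makefile builds the Crypto Extensions flavour with
++ * -DINTERLEAVE=2 -DINTERLEAVE_INLINE and the NEON flavour with
++ * -DINTERLEAVE=4 (see arch/arm64/crypto/Makefile later in this patch).
++ */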
++
++#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
++#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
++#define FRAME_POP ldp x29, x30, [sp],#16
++
++#if INTERLEAVE == 2
++
++aes_encrypt_block2x:
++ encrypt_block2x v0, v1, w3, x2, x6, w7
++ ret
++ENDPROC(aes_encrypt_block2x)
++
++aes_decrypt_block2x:
++ decrypt_block2x v0, v1, w3, x2, x6, w7
++ ret
++ENDPROC(aes_decrypt_block2x)
++
++#elif INTERLEAVE == 4
++
++aes_encrypt_block4x:
++ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ ret
++ENDPROC(aes_encrypt_block4x)
++
++aes_decrypt_block4x:
++ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ ret
++ENDPROC(aes_decrypt_block4x)
++
++#else
++#error INTERLEAVE should equal 2 or 4
++#endif
++
++ .macro do_encrypt_block2x
++ bl aes_encrypt_block2x
++ .endm
++
++ .macro do_decrypt_block2x
++ bl aes_decrypt_block2x
++ .endm
++
++ .macro do_encrypt_block4x
++ bl aes_encrypt_block4x
++ .endm
++
++ .macro do_decrypt_block4x
++ bl aes_decrypt_block4x
++ .endm
++
++#else
++#define FRAME_PUSH
++#define FRAME_POP
++
++ .macro do_encrypt_block2x
++ encrypt_block2x v0, v1, w3, x2, x6, w7
++ .endm
++
++ .macro do_decrypt_block2x
++ decrypt_block2x v0, v1, w3, x2, x6, w7
++ .endm
++
++ .macro do_encrypt_block4x
++ encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ .endm
++
++ .macro do_decrypt_block4x
++ decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
++ .endm
++
++#endif
++
++ /*
++ * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, int first)
++ * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, int first)
++ */
++
++AES_ENTRY(aes_ecb_encrypt)
++ FRAME_PUSH
++ cbz w5, .LecbencloopNx
++
++ enc_prepare w3, x2, x5
++
++.LecbencloopNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lecbenc1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
++ do_encrypt_block2x
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
++ do_encrypt_block4x
++ st1 {v0.16b-v3.16b}, [x0], #64
++#endif
++ b .LecbencloopNx
++.Lecbenc1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lecbencout
++#endif
++.Lecbencloop:
++ ld1 {v0.16b}, [x1], #16 /* get next pt block */
++ encrypt_block v0, w3, x2, x5, w6
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lecbencloop
++.Lecbencout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_ecb_encrypt)
++
++
++AES_ENTRY(aes_ecb_decrypt)
++ FRAME_PUSH
++ cbz w5, .LecbdecloopNx
++
++ dec_prepare w3, x2, x5
++
++.LecbdecloopNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lecbdec1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
++ do_decrypt_block2x
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
++ do_decrypt_block4x
++ st1 {v0.16b-v3.16b}, [x0], #64
++#endif
++ b .LecbdecloopNx
++.Lecbdec1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lecbdecout
++#endif
++.Lecbdecloop:
++ ld1 {v0.16b}, [x1], #16 /* get next ct block */
++ decrypt_block v0, w3, x2, x5, w6
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lecbdecloop
++.Lecbdecout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_ecb_decrypt)
++
++
++ /*
++ * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, u8 iv[], int first)
++ * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, u8 iv[], int first)
++ */
++
++AES_ENTRY(aes_cbc_encrypt)
++ cbz w6, .Lcbcencloop
++
++ ld1 {v0.16b}, [x5] /* get iv */
++ enc_prepare w3, x2, x5
++
++.Lcbcencloop:
++ ld1 {v1.16b}, [x1], #16 /* get next pt block */
++ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
++ encrypt_block v0, w3, x2, x5, w6
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lcbcencloop
++ ret
++AES_ENDPROC(aes_cbc_encrypt)
++
++
++AES_ENTRY(aes_cbc_decrypt)
++ FRAME_PUSH
++ cbz w6, .LcbcdecloopNx
++
++ ld1 {v7.16b}, [x5] /* get iv */
++ dec_prepare w3, x2, x5
++
++.LcbcdecloopNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lcbcdec1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
++ mov v2.16b, v0.16b
++ mov v3.16b, v1.16b
++ do_decrypt_block2x
++ eor v0.16b, v0.16b, v7.16b
++ eor v1.16b, v1.16b, v2.16b
++ mov v7.16b, v3.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
++ mov v4.16b, v0.16b
++ mov v5.16b, v1.16b
++ mov v6.16b, v2.16b
++ do_decrypt_block4x
++ sub x1, x1, #16
++ eor v0.16b, v0.16b, v7.16b
++ eor v1.16b, v1.16b, v4.16b
++ ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
++ eor v2.16b, v2.16b, v5.16b
++ eor v3.16b, v3.16b, v6.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++#endif
++ b .LcbcdecloopNx
++.Lcbcdec1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lcbcdecout
++#endif
++.Lcbcdecloop:
++ ld1 {v1.16b}, [x1], #16 /* get next ct block */
++ mov v0.16b, v1.16b /* ...and copy to v0 */
++ decrypt_block v0, w3, x2, x5, w6
++ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
++ mov v7.16b, v1.16b /* ct is next iv */
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ bne .Lcbcdecloop
++.Lcbcdecout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_cbc_decrypt)
++
++
++ /*
++ * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
++ * int blocks, u8 ctr[], int first)
++ */
++
++AES_ENTRY(aes_ctr_encrypt)
++ FRAME_PUSH
++ cbnz w6, .Lctrfirst /* 1st time around? */
++ umov x5, v4.d[1] /* keep swabbed ctr in reg */
++ rev x5, x5
++#if INTERLEAVE >= 2
++ cmn w5, w4 /* 32 bit overflow? */
++ bcs .Lctrinc
++ add x5, x5, #1 /* increment BE ctr */
++ b .LctrincNx
++#else
++ b .Lctrinc
++#endif
++.Lctrfirst:
++ enc_prepare w3, x2, x6
++ ld1 {v4.16b}, [x5]
++ umov x5, v4.d[1] /* keep swabbed ctr in reg */
++ rev x5, x5
++#if INTERLEAVE >= 2
++ cmn w5, w4 /* 32 bit overflow? */
++ bcs .Lctrloop
++.LctrloopNx:
++ subs w4, w4, #INTERLEAVE
++ bmi .Lctr1x
++#if INTERLEAVE == 2
++ mov v0.8b, v4.8b
++ mov v1.8b, v4.8b
++ rev x7, x5
++ add x5, x5, #1
++ ins v0.d[1], x7
++ rev x7, x5
++ add x5, x5, #1
++ ins v1.d[1], x7
++ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
++ do_encrypt_block2x
++ eor v0.16b, v0.16b, v2.16b
++ eor v1.16b, v1.16b, v3.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++#else
++ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
++ dup v7.4s, w5
++ mov v0.16b, v4.16b
++ add v7.4s, v7.4s, v8.4s
++ mov v1.16b, v4.16b
++ rev32 v8.16b, v7.16b
++ mov v2.16b, v4.16b
++ mov v3.16b, v4.16b
++ mov v1.s[3], v8.s[0]
++ mov v2.s[3], v8.s[1]
++ mov v3.s[3], v8.s[2]
++ ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
++ do_encrypt_block4x
++ eor v0.16b, v5.16b, v0.16b
++ ld1 {v5.16b}, [x1], #16 /* get 1 input block */
++ eor v1.16b, v6.16b, v1.16b
++ eor v2.16b, v7.16b, v2.16b
++ eor v3.16b, v5.16b, v3.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++ add x5, x5, #INTERLEAVE
++#endif
++ cbz w4, .LctroutNx
++.LctrincNx:
++ rev x7, x5
++ ins v4.d[1], x7
++ b .LctrloopNx
++.LctroutNx:
++ sub x5, x5, #1
++ rev x7, x5
++ ins v4.d[1], x7
++ b .Lctrout
++.Lctr1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lctrout
++#endif
++.Lctrloop:
++ mov v0.16b, v4.16b
++ encrypt_block v0, w3, x2, x6, w7
++ subs w4, w4, #1
++ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
++ ld1 {v3.16b}, [x1], #16
++ eor v3.16b, v0.16b, v3.16b
++ st1 {v3.16b}, [x0], #16
++ beq .Lctrout
++.Lctrinc:
++ adds x5, x5, #1 /* increment BE ctr */
++ rev x7, x5
++ ins v4.d[1], x7
++ bcc .Lctrloop /* no overflow? */
++ umov x7, v4.d[0] /* load upper word of ctr */
++ rev x7, x7 /* ... to handle the carry */
++ add x7, x7, #1
++ rev x7, x7
++ ins v4.d[0], x7
++ b .Lctrloop
++.Lctrhalfblock:
++ ld1 {v3.8b}, [x1]
++ eor v3.8b, v0.8b, v3.8b
++ st1 {v3.8b}, [x0]
++.Lctrout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_ctr_encrypt)
++ .ltorg
++
++
++ /*
++ * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
++ * int blocks, u8 const rk2[], u8 iv[], int first)
++ * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
++ * int blocks, u8 const rk2[], u8 iv[], int first)
++ */
++
++ .macro next_tweak, out, in, const, tmp
++ sshr \tmp\().2d, \in\().2d, #63
++ and \tmp\().16b, \tmp\().16b, \const\().16b
++ add \out\().2d, \in\().2d, \in\().2d
++ ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
++ eor \out\().16b, \out\().16b, \tmp\().16b
++ .endm
++
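++ /*
++ * next_tweak doubles the 128-bit tweak in GF(2^128): the carry out of the
++ * low 64 bits is propagated into the high half, and when bit 127 overflows
++ * the constant 0x87 (x^128 = x^7 + x^2 + x + 1, as used by XTS/IEEE P1619)
++ * is XORed into the low lane.
++ */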
++.Lxts_mul_x:
++ .word 1, 0, 0x87, 0
++
++AES_ENTRY(aes_xts_encrypt)
++ FRAME_PUSH
++ cbz w7, .LxtsencloopNx
++
++ ld1 {v4.16b}, [x6]
++ enc_prepare w3, x5, x6
++ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
++ enc_switch_key w3, x2, x6
++ ldr q7, .Lxts_mul_x
++ b .LxtsencNx
++
++.LxtsencloopNx:
++ ldr q7, .Lxts_mul_x
++ next_tweak v4, v4, v7, v8
++.LxtsencNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lxtsenc1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ do_encrypt_block2x
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++ cbz w4, .LxtsencoutNx
++ next_tweak v4, v5, v7, v8
++ b .LxtsencNx
++.LxtsencoutNx:
++ mov v4.16b, v5.16b
++ b .Lxtsencout
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ next_tweak v6, v5, v7, v8
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ next_tweak v7, v6, v7, v8
++ eor v3.16b, v3.16b, v7.16b
++ do_encrypt_block4x
++ eor v3.16b, v3.16b, v7.16b
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++ mov v4.16b, v7.16b
++ cbz w4, .Lxtsencout
++ b .LxtsencloopNx
++#endif
++.Lxtsenc1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lxtsencout
++#endif
++.Lxtsencloop:
++ ld1 {v1.16b}, [x1], #16
++ eor v0.16b, v1.16b, v4.16b
++ encrypt_block v0, w3, x2, x6, w7
++ eor v0.16b, v0.16b, v4.16b
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ beq .Lxtsencout
++ next_tweak v4, v4, v7, v8
++ b .Lxtsencloop
++.Lxtsencout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_xts_encrypt)
++
++
++AES_ENTRY(aes_xts_decrypt)
++ FRAME_PUSH
++ cbz w7, .LxtsdecloopNx
++
++ ld1 {v4.16b}, [x6]
++ enc_prepare w3, x5, x6
++ encrypt_block v4, w3, x5, x6, w7 /* first tweak */
++ dec_prepare w3, x2, x6
++ ldr q7, .Lxts_mul_x
++ b .LxtsdecNx
++
++.LxtsdecloopNx:
++ ldr q7, .Lxts_mul_x
++ next_tweak v4, v4, v7, v8
++.LxtsdecNx:
++#if INTERLEAVE >= 2
++ subs w4, w4, #INTERLEAVE
++ bmi .Lxtsdec1x
++#if INTERLEAVE == 2
++ ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ do_decrypt_block2x
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ st1 {v0.16b-v1.16b}, [x0], #32
++ cbz w4, .LxtsdecoutNx
++ next_tweak v4, v5, v7, v8
++ b .LxtsdecNx
++.LxtsdecoutNx:
++ mov v4.16b, v5.16b
++ b .Lxtsdecout
++#else
++ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
++ next_tweak v5, v4, v7, v8
++ eor v0.16b, v0.16b, v4.16b
++ next_tweak v6, v5, v7, v8
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ next_tweak v7, v6, v7, v8
++ eor v3.16b, v3.16b, v7.16b
++ do_decrypt_block4x
++ eor v3.16b, v3.16b, v7.16b
++ eor v0.16b, v0.16b, v4.16b
++ eor v1.16b, v1.16b, v5.16b
++ eor v2.16b, v2.16b, v6.16b
++ st1 {v0.16b-v3.16b}, [x0], #64
++ mov v4.16b, v7.16b
++ cbz w4, .Lxtsdecout
++ b .LxtsdecloopNx
++#endif
++.Lxtsdec1x:
++ adds w4, w4, #INTERLEAVE
++ beq .Lxtsdecout
++#endif
++.Lxtsdecloop:
++ ld1 {v1.16b}, [x1], #16
++ eor v0.16b, v1.16b, v4.16b
++ decrypt_block v0, w3, x2, x6, w7
++ eor v0.16b, v0.16b, v4.16b
++ st1 {v0.16b}, [x0], #16
++ subs w4, w4, #1
++ beq .Lxtsdecout
++ next_tweak v4, v4, v7, v8
++ b .Lxtsdecloop
++.Lxtsdecout:
++ FRAME_POP
++ ret
++AES_ENDPROC(aes_xts_decrypt)
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/aes-neon.S linux-3.14.40/arch/arm64/crypto/aes-neon.S
+--- linux-3.14.40.orig/arch/arm64/crypto/aes-neon.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/aes-neon.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,382 @@
++/*
++ * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
++ *
++ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++#define AES_ENTRY(func) ENTRY(neon_ ## func)
++#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
++
++ /* multiply by polynomial 'x' in GF(2^8) */
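++ /* (per byte: out = (in << 1) ^ (in & 0x80 ? 0x1b : 0), with the 0x1b
++ * reduction constant supplied in \const, normally v14) */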
++ .macro mul_by_x, out, in, temp, const
++ sshr \temp, \in, #7
++ add \out, \in, \in
++ and \temp, \temp, \const
++ eor \out, \out, \temp
++ .endm
++
++ /* preload the entire Sbox */
++ .macro prepare, sbox, shiftrows, temp
++ adr \temp, \sbox
++ movi v12.16b, #0x40
++ ldr q13, \shiftrows
++ movi v14.16b, #0x1b
++ ld1 {v16.16b-v19.16b}, [\temp], #64
++ ld1 {v20.16b-v23.16b}, [\temp], #64
++ ld1 {v24.16b-v27.16b}, [\temp], #64
++ ld1 {v28.16b-v31.16b}, [\temp]
++ .endm
++
++ /* do preload for encryption */
++ .macro enc_prepare, ignore0, ignore1, temp
++ prepare .LForward_Sbox, .LForward_ShiftRows, \temp
++ .endm
++
++ .macro enc_switch_key, ignore0, ignore1, temp
++ /* do nothing */
++ .endm
++
++ /* do preload for decryption */
++ .macro dec_prepare, ignore0, ignore1, temp
++ prepare .LReverse_Sbox, .LReverse_ShiftRows, \temp
++ .endm
++
++ /* apply SubBytes transformation using the preloaded Sbox */
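++ /* (the 256-byte Sbox is preloaded into v16-v31; each tbl/tbx covers 64
++ * entries, with v12 == 0x40 rebasing the index for the next group) */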
++ .macro sub_bytes, in
++ sub v9.16b, \in\().16b, v12.16b
++ tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
++ sub v10.16b, v9.16b, v12.16b
++ tbx \in\().16b, {v20.16b-v23.16b}, v9.16b
++ sub v11.16b, v10.16b, v12.16b
++ tbx \in\().16b, {v24.16b-v27.16b}, v10.16b
++ tbx \in\().16b, {v28.16b-v31.16b}, v11.16b
++ .endm
++
++ /* apply MixColumns transformation */
++ .macro mix_columns, in
++ mul_by_x v10.16b, \in\().16b, v9.16b, v14.16b
++ rev32 v8.8h, \in\().8h
++ eor \in\().16b, v10.16b, \in\().16b
++ shl v9.4s, v8.4s, #24
++ shl v11.4s, \in\().4s, #24
++ sri v9.4s, v8.4s, #8
++ sri v11.4s, \in\().4s, #8
++ eor v9.16b, v9.16b, v8.16b
++ eor v10.16b, v10.16b, v9.16b
++ eor \in\().16b, v10.16b, v11.16b
++ .endm
++
++ /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
++ .macro inv_mix_columns, in
++ mul_by_x v11.16b, \in\().16b, v10.16b, v14.16b
++ mul_by_x v11.16b, v11.16b, v10.16b, v14.16b
++ eor \in\().16b, \in\().16b, v11.16b
++ rev32 v11.8h, v11.8h
++ eor \in\().16b, \in\().16b, v11.16b
++ mix_columns \in
++ .endm
++
++ .macro do_block, enc, in, rounds, rk, rkp, i
++ ld1 {v15.16b}, [\rk]
++ add \rkp, \rk, #16
++ mov \i, \rounds
++1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
++ tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
++ sub_bytes \in
++ ld1 {v15.16b}, [\rkp], #16
++ subs \i, \i, #1
++ beq 2222f
++ .if \enc == 1
++ mix_columns \in
++ .else
++ inv_mix_columns \in
++ .endif
++ b 1111b
++2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
++ .endm
++
++ .macro encrypt_block, in, rounds, rk, rkp, i
++ do_block 1, \in, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro decrypt_block, in, rounds, rk, rkp, i
++ do_block 0, \in, \rounds, \rk, \rkp, \i
++ .endm
++
++ /*
++ * Interleaved versions: functionally equivalent to the
++ * ones above, but applied to 2 or 4 AES states in parallel.
++ */
++
++ .macro sub_bytes_2x, in0, in1
++ sub v8.16b, \in0\().16b, v12.16b
++ sub v9.16b, \in1\().16b, v12.16b
++ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
++ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
++ sub v10.16b, v8.16b, v12.16b
++ sub v11.16b, v9.16b, v12.16b
++ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
++ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
++ sub v8.16b, v10.16b, v12.16b
++ sub v9.16b, v11.16b, v12.16b
++ tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b
++ tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b
++ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
++ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
++ .endm
++
++ .macro sub_bytes_4x, in0, in1, in2, in3
++ sub v8.16b, \in0\().16b, v12.16b
++ tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
++ sub v9.16b, \in1\().16b, v12.16b
++ tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
++ sub v10.16b, \in2\().16b, v12.16b
++ tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
++ sub v11.16b, \in3\().16b, v12.16b
++ tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
++ tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b
++ tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b
++ sub v8.16b, v8.16b, v12.16b
++ tbx \in2\().16b, {v20.16b-v23.16b}, v10.16b
++ sub v9.16b, v9.16b, v12.16b
++ tbx \in3\().16b, {v20.16b-v23.16b}, v11.16b
++ sub v10.16b, v10.16b, v12.16b
++ tbx \in0\().16b, {v24.16b-v27.16b}, v8.16b
++ sub v11.16b, v11.16b, v12.16b
++ tbx \in1\().16b, {v24.16b-v27.16b}, v9.16b
++ sub v8.16b, v8.16b, v12.16b
++ tbx \in2\().16b, {v24.16b-v27.16b}, v10.16b
++ sub v9.16b, v9.16b, v12.16b
++ tbx \in3\().16b, {v24.16b-v27.16b}, v11.16b
++ sub v10.16b, v10.16b, v12.16b
++ tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b
++ sub v11.16b, v11.16b, v12.16b
++ tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b
++ tbx \in2\().16b, {v28.16b-v31.16b}, v10.16b
++ tbx \in3\().16b, {v28.16b-v31.16b}, v11.16b
++ .endm
++
++ .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
++ sshr \tmp0\().16b, \in0\().16b, #7
++ add \out0\().16b, \in0\().16b, \in0\().16b
++ sshr \tmp1\().16b, \in1\().16b, #7
++ and \tmp0\().16b, \tmp0\().16b, \const\().16b
++ add \out1\().16b, \in1\().16b, \in1\().16b
++ and \tmp1\().16b, \tmp1\().16b, \const\().16b
++ eor \out0\().16b, \out0\().16b, \tmp0\().16b
++ eor \out1\().16b, \out1\().16b, \tmp1\().16b
++ .endm
++
++ .macro mix_columns_2x, in0, in1
++ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
++ rev32 v10.8h, \in0\().8h
++ rev32 v11.8h, \in1\().8h
++ eor \in0\().16b, v8.16b, \in0\().16b
++ eor \in1\().16b, v9.16b, \in1\().16b
++ shl v12.4s, v10.4s, #24
++ shl v13.4s, v11.4s, #24
++ eor v8.16b, v8.16b, v10.16b
++ sri v12.4s, v10.4s, #8
++ shl v10.4s, \in0\().4s, #24
++ eor v9.16b, v9.16b, v11.16b
++ sri v13.4s, v11.4s, #8
++ shl v11.4s, \in1\().4s, #24
++ sri v10.4s, \in0\().4s, #8
++ eor \in0\().16b, v8.16b, v12.16b
++ sri v11.4s, \in1\().4s, #8
++ eor \in1\().16b, v9.16b, v13.16b
++ eor \in0\().16b, v10.16b, \in0\().16b
++ eor \in1\().16b, v11.16b, \in1\().16b
++ .endm
++
++ .macro inv_mix_cols_2x, in0, in1
++ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
++ mul_by_x_2x v8, v9, v8, v9, v10, v11, v14
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ rev32 v8.8h, v8.8h
++ rev32 v9.8h, v9.8h
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ mix_columns_2x \in0, \in1
++ .endm
++
++ .macro inv_mix_cols_4x, in0, in1, in2, in3
++ mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v14
++ mul_by_x_2x v10, v11, \in2, \in3, v12, v13, v14
++ mul_by_x_2x v8, v9, v8, v9, v12, v13, v14
++ mul_by_x_2x v10, v11, v10, v11, v12, v13, v14
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ eor \in2\().16b, \in2\().16b, v10.16b
++ eor \in3\().16b, \in3\().16b, v11.16b
++ rev32 v8.8h, v8.8h
++ rev32 v9.8h, v9.8h
++ rev32 v10.8h, v10.8h
++ rev32 v11.8h, v11.8h
++ eor \in0\().16b, \in0\().16b, v8.16b
++ eor \in1\().16b, \in1\().16b, v9.16b
++ eor \in2\().16b, \in2\().16b, v10.16b
++ eor \in3\().16b, \in3\().16b, v11.16b
++ mix_columns_2x \in0, \in1
++ mix_columns_2x \in2, \in3
++ .endm
++
++ .macro do_block_2x, enc, in0, in1, rounds, rk, rkp, i
++ ld1 {v15.16b}, [\rk]
++ add \rkp, \rk, #16
++ mov \i, \rounds
++1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ sub_bytes_2x \in0, \in1
++ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
++ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
++ ld1 {v15.16b}, [\rkp], #16
++ subs \i, \i, #1
++ beq 2222f
++ .if \enc == 1
++ mix_columns_2x \in0, \in1
++ ldr q13, .LForward_ShiftRows
++ .else
++ inv_mix_cols_2x \in0, \in1
++ ldr q13, .LReverse_ShiftRows
++ .endif
++ movi v12.16b, #0x40
++ b 1111b
++2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ .endm
++
++ .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
++ ld1 {v15.16b}, [\rk]
++ add \rkp, \rk, #16
++ mov \i, \rounds
++1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
++ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
++ sub_bytes_4x \in0, \in1, \in2, \in3
++ tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
++ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
++ tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
++ tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
++ ld1 {v15.16b}, [\rkp], #16
++ subs \i, \i, #1
++ beq 2222f
++ .if \enc == 1
++ mix_columns_2x \in0, \in1
++ mix_columns_2x \in2, \in3
++ ldr q13, .LForward_ShiftRows
++ .else
++ inv_mix_cols_4x \in0, \in1, \in2, \in3
++ ldr q13, .LReverse_ShiftRows
++ .endif
++ movi v12.16b, #0x40
++ b 1111b
++2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */
++ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */
++ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */
++ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */
++ .endm
++
++ .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i
++ do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i
++ do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
++ do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
++ .endm
++
++ .macro decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
++ do_block_4x 0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
++ .endm
++
++#include "aes-modes.S"
++
++ .text
++ .align 4
++.LForward_ShiftRows:
++ .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
++ .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
++
++.LReverse_ShiftRows:
++ .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
++ .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
++
++.LForward_Sbox:
++ .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
++ .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
++ .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
++ .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
++ .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
++ .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
++ .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
++ .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
++ .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
++ .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
++ .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
++ .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
++ .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
++ .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
++ .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
++ .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
++ .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
++ .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
++ .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
++ .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
++ .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
++ .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
++ .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
++ .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
++ .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
++ .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
++ .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
++ .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
++ .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
++ .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
++ .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
++ .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
++
++.LReverse_Sbox:
++ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
++ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
++ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
++ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
++ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
++ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
++ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
++ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
++ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
++ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
++ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
++ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
++ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
++ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
++ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
++ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
++ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
++ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
++ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
++ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
++ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
++ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
++ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
++ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
++ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
++ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
++ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
++ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
++ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
++ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
++ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
++ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/ghash-ce-core.S linux-3.14.40/arch/arm64/crypto/ghash-ce-core.S
+--- linux-3.14.40.orig/arch/arm64/crypto/ghash-ce-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/ghash-ce-core.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,79 @@
++/*
++ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
++ *
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++
++ SHASH .req v0
++ SHASH2 .req v1
++ T1 .req v2
++ T2 .req v3
++ MASK .req v4
++ XL .req v5
++ XM .req v6
++ XH .req v7
++ IN1 .req v7
++
++ .text
++ .arch armv8-a+crypto
++
++ /*
++ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
++ * struct ghash_key const *k, const char *head)
++ */
++ENTRY(pmull_ghash_update)
++ ld1 {SHASH.16b}, [x3]
++ ld1 {XL.16b}, [x1]
++ movi MASK.16b, #0xe1
++ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
++ shl MASK.2d, MASK.2d, #57
++ eor SHASH2.16b, SHASH2.16b, SHASH.16b
++
++ /* do the head block first, if supplied */
++ cbz x4, 0f
++ ld1 {T1.2d}, [x4]
++ b 1f
++
++0: ld1 {T1.2d}, [x2], #16
++ sub w0, w0, #1
++
++1: /* multiply XL by SHASH in GF(2^128) */
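++ /*
++ * (Karatsuba: a1*b1, a0*b0 and (a1+a0)*(b1+b0) below form the 256-bit
++ * product, which is then reduced using MASK, i.e. 0xe1 shifted left
++ * by 57 bits.)
++ */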
++CPU_LE( rev64 T1.16b, T1.16b )
++
++ ext T2.16b, XL.16b, XL.16b, #8
++ ext IN1.16b, T1.16b, T1.16b, #8
++ eor T1.16b, T1.16b, T2.16b
++ eor XL.16b, XL.16b, IN1.16b
++
++ pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
++ eor T1.16b, T1.16b, XL.16b
++ pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
++ pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
++
++ ext T1.16b, XL.16b, XH.16b, #8
++ eor T2.16b, XL.16b, XH.16b
++ eor XM.16b, XM.16b, T1.16b
++ eor XM.16b, XM.16b, T2.16b
++ pmull T2.1q, XL.1d, MASK.1d
++
++ mov XH.d[0], XM.d[1]
++ mov XM.d[1], XL.d[0]
++
++ eor XL.16b, XM.16b, T2.16b
++ ext T2.16b, XL.16b, XL.16b, #8
++ pmull XL.1q, XL.1d, MASK.1d
++ eor T2.16b, T2.16b, XH.16b
++ eor XL.16b, XL.16b, T2.16b
++
++ cbnz w0, 0b
++
++ st1 {XL.16b}, [x1]
++ ret
++ENDPROC(pmull_ghash_update)
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/ghash-ce-glue.c linux-3.14.40/arch/arm64/crypto/ghash-ce-glue.c
+--- linux-3.14.40.orig/arch/arm64/crypto/ghash-ce-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/ghash-ce-glue.c 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,156 @@
++/*
++ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
++ *
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++#define GHASH_BLOCK_SIZE 16
++#define GHASH_DIGEST_SIZE 16
++
++struct ghash_key {
++ u64 a;
++ u64 b;
++};
++
++struct ghash_desc_ctx {
++ u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
++ u8 buf[GHASH_BLOCK_SIZE];
++ u32 count;
++};
++
++asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
++ struct ghash_key const *k, const char *head);
++
++static int ghash_init(struct shash_desc *desc)
++{
++ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
++
++ *ctx = (struct ghash_desc_ctx){};
++ return 0;
++}
++
++static int ghash_update(struct shash_desc *desc, const u8 *src,
++ unsigned int len)
++{
++ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
++ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
++
++ ctx->count += len;
++
++ if ((partial + len) >= GHASH_BLOCK_SIZE) {
++ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
++ int blocks;
++
++ if (partial) {
++ int p = GHASH_BLOCK_SIZE - partial;
++
++ memcpy(ctx->buf + partial, src, p);
++ src += p;
++ len -= p;
++ }
++
++ blocks = len / GHASH_BLOCK_SIZE;
++ len %= GHASH_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(8);
++ pmull_ghash_update(blocks, ctx->digest, src, key,
++ partial ? ctx->buf : NULL);
++ kernel_neon_end();
++ src += blocks * GHASH_BLOCK_SIZE;
++ partial = 0;
++ }
++ if (len)
++ memcpy(ctx->buf + partial, src, len);
++ return 0;
++}
++
++static int ghash_final(struct shash_desc *desc, u8 *dst)
++{
++ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
++ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
++
++ if (partial) {
++ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
++
++ memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
++
++ kernel_neon_begin_partial(8);
++ pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
++ kernel_neon_end();
++ }
++ put_unaligned_be64(ctx->digest[1], dst);
++ put_unaligned_be64(ctx->digest[0], dst + 8);
++
++ *ctx = (struct ghash_desc_ctx){};
++ return 0;
++}
++
++static int ghash_setkey(struct crypto_shash *tfm,
++ const u8 *inkey, unsigned int keylen)
++{
++ struct ghash_key *key = crypto_shash_ctx(tfm);
++ u64 a, b;
++
++ if (keylen != GHASH_BLOCK_SIZE) {
++ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ /* perform multiplication by 'x' in GF(2^128) */
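++ /* (i.e. compute H.x in GF(2^128), the field defined by
++ * x^128 + x^7 + x^2 + x + 1; the 0xc2...00 constant performs the
++ * modular reduction when the shift overflows) */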
++ b = get_unaligned_be64(inkey);
++ a = get_unaligned_be64(inkey + 8);
++
++ key->a = (a << 1) | (b >> 63);
++ key->b = (b << 1) | (a >> 63);
++
++ if (b >> 63)
++ key->b ^= 0xc200000000000000UL;
++
++ return 0;
++}
++
++static struct shash_alg ghash_alg = {
++ .digestsize = GHASH_DIGEST_SIZE,
++ .init = ghash_init,
++ .update = ghash_update,
++ .final = ghash_final,
++ .setkey = ghash_setkey,
++ .descsize = sizeof(struct ghash_desc_ctx),
++ .base = {
++ .cra_name = "ghash",
++ .cra_driver_name = "ghash-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = GHASH_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct ghash_key),
++ .cra_module = THIS_MODULE,
++ },
++};
++
++static int __init ghash_ce_mod_init(void)
++{
++ return crypto_register_shash(&ghash_alg);
++}
++
++static void __exit ghash_ce_mod_exit(void)
++{
++ crypto_unregister_shash(&ghash_alg);
++}
++
++module_cpu_feature_match(PMULL, ghash_ce_mod_init);
++module_exit(ghash_ce_mod_exit);
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/Kconfig linux-3.14.40/arch/arm64/crypto/Kconfig
+--- linux-3.14.40.orig/arch/arm64/crypto/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/Kconfig 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,53 @@
++
++menuconfig ARM64_CRYPTO
++ bool "ARM64 Accelerated Cryptographic Algorithms"
++ depends on ARM64
++ help
++ Say Y here to choose from a selection of cryptographic algorithms
++ implemented using ARM64-specific CPU features or instructions.
++
++if ARM64_CRYPTO
++
++config CRYPTO_SHA1_ARM64_CE
++ tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_HASH
++
++config CRYPTO_SHA2_ARM64_CE
++ tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_HASH
++
++config CRYPTO_GHASH_ARM64_CE
++ tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_HASH
++
++config CRYPTO_AES_ARM64_CE
++ tristate "AES core cipher using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_ALGAPI
++ select CRYPTO_AES
++
++config CRYPTO_AES_ARM64_CE_CCM
++ tristate "AES in CCM mode using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_ALGAPI
++ select CRYPTO_AES
++ select CRYPTO_AEAD
++
++config CRYPTO_AES_ARM64_CE_BLK
++ tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_BLKCIPHER
++ select CRYPTO_AES
++ select CRYPTO_ABLK_HELPER
++
++config CRYPTO_AES_ARM64_NEON_BLK
++ tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
++ depends on ARM64 && KERNEL_MODE_NEON
++ select CRYPTO_BLKCIPHER
++ select CRYPTO_AES
++ select CRYPTO_ABLK_HELPER
++
++endif
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/Makefile linux-3.14.40/arch/arm64/crypto/Makefile
+--- linux-3.14.40.orig/arch/arm64/crypto/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/Makefile 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,38 @@
++#
++# linux/arch/arm64/crypto/Makefile
++#
++# Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License version 2 as
++# published by the Free Software Foundation.
++#
++
++obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
++sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
++
++obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
++sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
++
++obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
++ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
++CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
++aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_CE_BLK) += aes-ce-blk.o
++aes-ce-blk-y := aes-glue-ce.o aes-ce.o
++
++obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
++aes-neon-blk-y := aes-glue-neon.o aes-neon.o
++
++AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE
++AFLAGS_aes-neon.o := -DINTERLEAVE=4
++
++CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
++
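++# aes-glue.c is compiled twice via the pattern rule below: as aes-glue-ce.o
++# (with -DUSE_V8_CRYPTO_EXTENSIONS) for the Crypto Extensions build and as
++# aes-glue-neon.o for the plain NEON build.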
++$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
++ $(call if_changed_dep,cc_o_c)
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/sha1-ce-core.S linux-3.14.40/arch/arm64/crypto/sha1-ce-core.S
+--- linux-3.14.40.orig/arch/arm64/crypto/sha1-ce-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/sha1-ce-core.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,153 @@
++/*
++ * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++
++ .text
++ .arch armv8-a+crypto
++
++ k0 .req v0
++ k1 .req v1
++ k2 .req v2
++ k3 .req v3
++
++ t0 .req v4
++ t1 .req v5
++
++ dga .req q6
++ dgav .req v6
++ dgb .req s7
++ dgbv .req v7
++
++ dg0q .req q12
++ dg0s .req s12
++ dg0v .req v12
++ dg1s .req s13
++ dg1v .req v13
++ dg2s .req s14
++
++ .macro add_only, op, ev, rc, s0, dg1
++ .ifc \ev, ev
++ add t1.4s, v\s0\().4s, \rc\().4s
++ sha1h dg2s, dg0s
++ .ifnb \dg1
++ sha1\op dg0q, \dg1, t0.4s
++ .else
++ sha1\op dg0q, dg1s, t0.4s
++ .endif
++ .else
++ .ifnb \s0
++ add t0.4s, v\s0\().4s, \rc\().4s
++ .endif
++ sha1h dg1s, dg0s
++ sha1\op dg0q, dg2s, t1.4s
++ .endif
++ .endm
++
++ .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1
++ sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s
++ add_only \op, \ev, \rc, \s1, \dg1
++ sha1su1 v\s0\().4s, v\s3\().4s
++ .endm
++
++ /*
++ * The SHA1 round constants
++ */
++ .align 4
++.Lsha1_rcon:
++ .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
++
++ /*
++ * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
++ * u8 *head, long bytes)
++ */
++ENTRY(sha1_ce_transform)
++ /* load round constants */
++ adr x6, .Lsha1_rcon
++ ld1r {k0.4s}, [x6], #4
++ ld1r {k1.4s}, [x6], #4
++ ld1r {k2.4s}, [x6], #4
++ ld1r {k3.4s}, [x6]
++
++ /* load state */
++ ldr dga, [x2]
++ ldr dgb, [x2, #16]
++
++ /* load partial state (if supplied) */
++ cbz x3, 0f
++ ld1 {v8.4s-v11.4s}, [x3]
++ b 1f
++
++ /* load input */
++0: ld1 {v8.4s-v11.4s}, [x1], #64
++ sub w0, w0, #1
++
++1:
++CPU_LE( rev32 v8.16b, v8.16b )
++CPU_LE( rev32 v9.16b, v9.16b )
++CPU_LE( rev32 v10.16b, v10.16b )
++CPU_LE( rev32 v11.16b, v11.16b )
++
++2: add t0.4s, v8.4s, k0.4s
++ mov dg0v.16b, dgav.16b
++
++ add_update c, ev, k0, 8, 9, 10, 11, dgb
++ add_update c, od, k0, 9, 10, 11, 8
++ add_update c, ev, k0, 10, 11, 8, 9
++ add_update c, od, k0, 11, 8, 9, 10
++ add_update c, ev, k1, 8, 9, 10, 11
++
++ add_update p, od, k1, 9, 10, 11, 8
++ add_update p, ev, k1, 10, 11, 8, 9
++ add_update p, od, k1, 11, 8, 9, 10
++ add_update p, ev, k1, 8, 9, 10, 11
++ add_update p, od, k2, 9, 10, 11, 8
++
++ add_update m, ev, k2, 10, 11, 8, 9
++ add_update m, od, k2, 11, 8, 9, 10
++ add_update m, ev, k2, 8, 9, 10, 11
++ add_update m, od, k2, 9, 10, 11, 8
++ add_update m, ev, k3, 10, 11, 8, 9
++
++ add_update p, od, k3, 11, 8, 9, 10
++ add_only p, ev, k3, 9
++ add_only p, od, k3, 10
++ add_only p, ev, k3, 11
++ add_only p, od
++
++ /* update state */
++ add dgbv.2s, dgbv.2s, dg1v.2s
++ add dgav.4s, dgav.4s, dg0v.4s
++
++ cbnz w0, 0b
++
++ /*
++ * Final block: add padding and total bit count.
++ * Skip if we have no total byte count in x4. In that case, the input
++ * size was not a round multiple of the block size, and the padding is
++ * handled by the C code.
++ */
++ cbz x4, 3f
++ movi v9.2d, #0
++ mov x8, #0x80000000
++ movi v10.2d, #0
++ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
++ fmov d8, x8
++ mov x4, #0
++ mov v11.d[0], xzr
++ mov v11.d[1], x7
++ b 2b
++
++ /* store new state */
++3: str dga, [x2]
++ str dgb, [x2, #16]
++ ret
++ENDPROC(sha1_ce_transform)
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/sha1-ce-glue.c linux-3.14.40/arch/arm64/crypto/sha1-ce-glue.c
+--- linux-3.14.40.orig/arch/arm64/crypto/sha1-ce-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/sha1-ce-glue.c 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,174 @@
++/*
++ * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <crypto/sha.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
++ u8 *head, long bytes);
++
++static int sha1_init(struct shash_desc *desc)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++
++ *sctx = (struct sha1_state){
++ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
++ };
++ return 0;
++}
++
++static int sha1_update(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
++
++ sctx->count += len;
++
++ if ((partial + len) >= SHA1_BLOCK_SIZE) {
++ int blocks;
++
++ if (partial) {
++ int p = SHA1_BLOCK_SIZE - partial;
++
++ memcpy(sctx->buffer + partial, data, p);
++ data += p;
++ len -= p;
++ }
++
++ blocks = len / SHA1_BLOCK_SIZE;
++ len %= SHA1_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(16);
++ sha1_ce_transform(blocks, data, sctx->state,
++ partial ? sctx->buffer : NULL, 0);
++ kernel_neon_end();
++
++ data += blocks * SHA1_BLOCK_SIZE;
++ partial = 0;
++ }
++ if (len)
++ memcpy(sctx->buffer + partial, data, len);
++ return 0;
++}
++
++static int sha1_final(struct shash_desc *desc, u8 *out)
++{
++ static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
++
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ __be64 bits = cpu_to_be64(sctx->count << 3);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ u32 padlen = SHA1_BLOCK_SIZE
++ - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
++
++ sha1_update(desc, padding, padlen);
++ sha1_update(desc, (const u8 *)&bits, sizeof(bits));
++
++ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha1_state){};
++ return 0;
++}
++
++static int sha1_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int blocks;
++ int i;
++
++ if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
++ sha1_update(desc, data, len);
++ return sha1_final(desc, out);
++ }
++
++ /*
++ * Use a fast path if the input is a multiple of 64 bytes. In
++ * this case, there is no need to copy data around, and we can
++ * perform the entire digest calculation in a single invocation
++ * of sha1_ce_transform()
++ */
++ blocks = len / SHA1_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(16);
++ sha1_ce_transform(blocks, data, sctx->state, NULL, len);
++ kernel_neon_end();
++
++ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha1_state){};
++ return 0;
++}
++
++static int sha1_export(struct shash_desc *desc, void *out)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ struct sha1_state *dst = out;
++
++ *dst = *sctx;
++ return 0;
++}
++
++static int sha1_import(struct shash_desc *desc, const void *in)
++{
++ struct sha1_state *sctx = shash_desc_ctx(desc);
++ struct sha1_state const *src = in;
++
++ *sctx = *src;
++ return 0;
++}
++
++static struct shash_alg alg = {
++ .init = sha1_init,
++ .update = sha1_update,
++ .final = sha1_final,
++ .finup = sha1_finup,
++ .export = sha1_export,
++ .import = sha1_import,
++ .descsize = sizeof(struct sha1_state),
++ .digestsize = SHA1_DIGEST_SIZE,
++ .statesize = sizeof(struct sha1_state),
++ .base = {
++ .cra_name = "sha1",
++ .cra_driver_name = "sha1-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = SHA1_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++};
++
++static int __init sha1_ce_mod_init(void)
++{
++ return crypto_register_shash(&alg);
++}
++
++static void __exit sha1_ce_mod_fini(void)
++{
++ crypto_unregister_shash(&alg);
++}
++
++module_cpu_feature_match(SHA1, sha1_ce_mod_init);
++module_exit(sha1_ce_mod_fini);
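Once this module is registered, the hash is reachable through the kernel's generic shash API. A rough usage sketch (hedged: error handling is trimmed, example_sha1 and its buffers are placeholders, and "sha1" typically resolves to sha1-ce here because its cra_priority of 200 outranks the generic implementation when the CPU supports the extensions):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_sha1(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	ret = crypto_shash_digest(desc, data, len, digest);	/* init + update + final */

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}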
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/sha2-ce-core.S linux-3.14.40/arch/arm64/crypto/sha2-ce-core.S
+--- linux-3.14.40.orig/arch/arm64/crypto/sha2-ce-core.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/sha2-ce-core.S 2015-05-01 14:57:58.131427001 -0500
+@@ -0,0 +1,156 @@
++/*
++ * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++
++ .text
++ .arch armv8-a+crypto
++
++ dga .req q20
++ dgav .req v20
++ dgb .req q21
++ dgbv .req v21
++
++ t0 .req v22
++ t1 .req v23
++
++ dg0q .req q24
++ dg0v .req v24
++ dg1q .req q25
++ dg1v .req v25
++ dg2q .req q26
++ dg2v .req v26
++
++ .macro add_only, ev, rc, s0
++ mov dg2v.16b, dg0v.16b
++ .ifeq \ev
++ add t1.4s, v\s0\().4s, \rc\().4s
++ sha256h dg0q, dg1q, t0.4s
++ sha256h2 dg1q, dg2q, t0.4s
++ .else
++ .ifnb \s0
++ add t0.4s, v\s0\().4s, \rc\().4s
++ .endif
++ sha256h dg0q, dg1q, t1.4s
++ sha256h2 dg1q, dg2q, t1.4s
++ .endif
++ .endm
++
++ .macro add_update, ev, rc, s0, s1, s2, s3
++ sha256su0 v\s0\().4s, v\s1\().4s
++ add_only \ev, \rc, \s1
++ sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s
++ .endm
++
++ /*
++ * The SHA-256 round constants
++ */
++ .align 4
++.Lsha2_rcon:
++ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
++ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
++ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
++ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
++ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
++ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
++ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
++ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
++ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
++ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
++ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
++ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
++ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
++ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
++ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
++ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
++
++ /*
++ * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
++ * u8 *head, long bytes)
++ */
++ENTRY(sha2_ce_transform)
++ /* load round constants */
++ adr x8, .Lsha2_rcon
++ ld1 { v0.4s- v3.4s}, [x8], #64
++ ld1 { v4.4s- v7.4s}, [x8], #64
++ ld1 { v8.4s-v11.4s}, [x8], #64
++ ld1 {v12.4s-v15.4s}, [x8]
++
++ /* load state */
++ ldp dga, dgb, [x2]
++
++ /* load partial input (if supplied) */
++ cbz x3, 0f
++ ld1 {v16.4s-v19.4s}, [x3]
++ b 1f
++
++ /* load input */
++0: ld1 {v16.4s-v19.4s}, [x1], #64
++ sub w0, w0, #1
++
++1:
++CPU_LE( rev32 v16.16b, v16.16b )
++CPU_LE( rev32 v17.16b, v17.16b )
++CPU_LE( rev32 v18.16b, v18.16b )
++CPU_LE( rev32 v19.16b, v19.16b )
++
++2: add t0.4s, v16.4s, v0.4s
++ mov dg0v.16b, dgav.16b
++ mov dg1v.16b, dgbv.16b
++
++ add_update 0, v1, 16, 17, 18, 19
++ add_update 1, v2, 17, 18, 19, 16
++ add_update 0, v3, 18, 19, 16, 17
++ add_update 1, v4, 19, 16, 17, 18
++
++ add_update 0, v5, 16, 17, 18, 19
++ add_update 1, v6, 17, 18, 19, 16
++ add_update 0, v7, 18, 19, 16, 17
++ add_update 1, v8, 19, 16, 17, 18
++
++ add_update 0, v9, 16, 17, 18, 19
++ add_update 1, v10, 17, 18, 19, 16
++ add_update 0, v11, 18, 19, 16, 17
++ add_update 1, v12, 19, 16, 17, 18
++
++ add_only 0, v13, 17
++ add_only 1, v14, 18
++ add_only 0, v15, 19
++ add_only 1
++
++ /* update state */
++ add dgav.4s, dgav.4s, dg0v.4s
++ add dgbv.4s, dgbv.4s, dg1v.4s
++
++ /* handled all input blocks? */
++ cbnz w0, 0b
++
++ /*
++ * Final block: add padding and total bit count.
++ * Skip if we have no total byte count in x4. In that case, the input
++ * size was not a round multiple of the block size, and the padding is
++ * handled by the C code.
++ */
++ cbz x4, 3f
++ movi v17.2d, #0
++ mov x8, #0x80000000
++ movi v18.2d, #0
++ ror x7, x4, #29 // ror(lsl(x4, 3), 32)
++ fmov d16, x8
++ mov x4, #0
++ mov v19.d[0], xzr
++ mov v19.d[1], x7
++ b 2b
++
++ /* store new state */
++3: stp dga, dgb, [x2]
++ ret
++ENDPROC(sha2_ce_transform)
+diff -Nur linux-3.14.40.orig/arch/arm64/crypto/sha2-ce-glue.c linux-3.14.40/arch/arm64/crypto/sha2-ce-glue.c
+--- linux-3.14.40.orig/arch/arm64/crypto/sha2-ce-glue.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/crypto/sha2-ce-glue.c 2015-05-01 14:57:58.135427001 -0500
+@@ -0,0 +1,255 @@
++/*
++ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
++ *
++ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <asm/neon.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <crypto/sha.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
++MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
++MODULE_LICENSE("GPL v2");
++
++asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
++ u8 *head, long bytes);
++
++static int sha224_init(struct shash_desc *desc)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++
++ *sctx = (struct sha256_state){
++ .state = {
++ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
++ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
++ }
++ };
++ return 0;
++}
++
++static int sha256_init(struct shash_desc *desc)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++
++ *sctx = (struct sha256_state){
++ .state = {
++ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
++ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
++ }
++ };
++ return 0;
++}
++
++static int sha2_update(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
++
++ sctx->count += len;
++
++ if ((partial + len) >= SHA256_BLOCK_SIZE) {
++ int blocks;
++
++ if (partial) {
++ int p = SHA256_BLOCK_SIZE - partial;
++
++ memcpy(sctx->buf + partial, data, p);
++ data += p;
++ len -= p;
++ }
++
++ blocks = len / SHA256_BLOCK_SIZE;
++ len %= SHA256_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(28);
++ sha2_ce_transform(blocks, data, sctx->state,
++ partial ? sctx->buf : NULL, 0);
++ kernel_neon_end();
++
++ data += blocks * SHA256_BLOCK_SIZE;
++ partial = 0;
++ }
++ if (len)
++ memcpy(sctx->buf + partial, data, len);
++ return 0;
++}
++
++static void sha2_final(struct shash_desc *desc)
++{
++ static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
++
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be64 bits = cpu_to_be64(sctx->count << 3);
++ u32 padlen = SHA256_BLOCK_SIZE
++ - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
++
++ sha2_update(desc, padding, padlen);
++ sha2_update(desc, (const u8 *)&bits, sizeof(bits));
++}
++
++static int sha224_final(struct shash_desc *desc, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_final(desc);
++
++ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static int sha256_final(struct shash_desc *desc, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_final(desc);
++
++ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static void sha2_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ int blocks;
++
++ if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
++ sha2_update(desc, data, len);
++ sha2_final(desc);
++ return;
++ }
++
++ /*
++ * Use a fast path if the input is a multiple of 64 bytes. In
++ * this case, there is no need to copy data around, and we can
++ * perform the entire digest calculation in a single invocation
++ * of sha2_ce_transform()
++ */
++ blocks = len / SHA256_BLOCK_SIZE;
++
++ kernel_neon_begin_partial(28);
++ sha2_ce_transform(blocks, data, sctx->state, NULL, len);
++ kernel_neon_end();
++ data += blocks * SHA256_BLOCK_SIZE;
++}
++
++static int sha224_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_finup(desc, data, len);
++
++ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static int sha256_finup(struct shash_desc *desc, const u8 *data,
++ unsigned int len, u8 *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ __be32 *dst = (__be32 *)out;
++ int i;
++
++ sha2_finup(desc, data, len);
++
++ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
++ put_unaligned_be32(sctx->state[i], dst++);
++
++ *sctx = (struct sha256_state){};
++ return 0;
++}
++
++static int sha2_export(struct shash_desc *desc, void *out)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ struct sha256_state *dst = out;
++
++ *dst = *sctx;
++ return 0;
++}
++
++static int sha2_import(struct shash_desc *desc, const void *in)
++{
++ struct sha256_state *sctx = shash_desc_ctx(desc);
++ struct sha256_state const *src = in;
++
++ *sctx = *src;
++ return 0;
++}
++
++static struct shash_alg algs[] = { {
++ .init = sha224_init,
++ .update = sha2_update,
++ .final = sha224_final,
++ .finup = sha224_finup,
++ .export = sha2_export,
++ .import = sha2_import,
++ .descsize = sizeof(struct sha256_state),
++ .digestsize = SHA224_DIGEST_SIZE,
++ .statesize = sizeof(struct sha256_state),
++ .base = {
++ .cra_name = "sha224",
++ .cra_driver_name = "sha224-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = SHA256_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++}, {
++ .init = sha256_init,
++ .update = sha2_update,
++ .final = sha256_final,
++ .finup = sha256_finup,
++ .export = sha2_export,
++ .import = sha2_import,
++ .descsize = sizeof(struct sha256_state),
++ .digestsize = SHA256_DIGEST_SIZE,
++ .statesize = sizeof(struct sha256_state),
++ .base = {
++ .cra_name = "sha256",
++ .cra_driver_name = "sha256-ce",
++ .cra_priority = 200,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
++ .cra_blocksize = SHA256_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++} };
++
++static int __init sha2_ce_mod_init(void)
++{
++ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
++}
++
++static void __exit sha2_ce_mod_fini(void)
++{
++ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
++}
++
++module_cpu_feature_match(SHA2, sha2_ce_mod_init);
++module_exit(sha2_ce_mod_fini);
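As the comment above notes, sha2_finup() avoids copying only when nothing has been buffered yet and the length is a non-zero multiple of the block size. A hedged illustration (desc, buf and out are placeholders for an already set-up shash descriptor and suitably sized buffers):

/* whole blocks, nothing buffered: one sha2_ce_transform() call, no copying */
crypto_shash_init(desc);
crypto_shash_finup(desc, buf, 2 * SHA256_BLOCK_SIZE, out);

/* 100 bytes is not a multiple of 64, so this falls back to update + final */
crypto_shash_init(desc);
crypto_shash_finup(desc, buf, 100, out);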
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/bL_switcher.h linux-3.14.40/arch/arm64/include/asm/bL_switcher.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/bL_switcher.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/include/asm/bL_switcher.h 2015-05-01 14:57:58.135427001 -0500
+@@ -0,0 +1,54 @@
++/*
++ * Based on the stubs for the ARM implementation which is:
++ *
++ * Created by: Nicolas Pitre, April 2012
++ * Copyright: (C) 2012-2013 Linaro Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef ASM_BL_SWITCHER_H
++#define ASM_BL_SWITCHER_H
++
++#include <linux/notifier.h>
++#include <linux/types.h>
++
++typedef void (*bL_switch_completion_handler)(void *cookie);
++
++static inline int bL_switch_request(unsigned int cpu,
++ unsigned int new_cluster_id)
++{
++ return -ENOTSUPP;
++}
++
++/*
++ * Register here to be notified about runtime enabling/disabling of
++ * the switcher.
++ *
++ * The notifier chain is called with the switcher activation lock held:
++ * the switcher will not be enabled or disabled during callbacks.
++ * Callbacks must not call bL_switcher_{get,put}_enabled().
++ */
++#define BL_NOTIFY_PRE_ENABLE 0
++#define BL_NOTIFY_POST_ENABLE 1
++#define BL_NOTIFY_PRE_DISABLE 2
++#define BL_NOTIFY_POST_DISABLE 3
++
++static inline int bL_switcher_register_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline bool bL_switcher_get_enabled(void) { return false; }
++static inline void bL_switcher_put_enabled(void) { }
++static inline int bL_switcher_trace_trigger(void) { return 0; }
++static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
++
++#endif
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/cacheflush.h linux-3.14.40/arch/arm64/include/asm/cacheflush.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/cacheflush.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/cacheflush.h 2015-05-01 14:57:58.159427001 -0500
+@@ -85,6 +85,13 @@
+ }
+
+ /*
++ * Cache maintenance functions used by the DMA API. Not to be used directly.
++ */
++extern void __dma_map_area(const void *, size_t, int);
++extern void __dma_unmap_area(const void *, size_t, int);
++extern void __dma_flush_range(const void *, const void *);
++
++/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space. Really, we want to allow our "user
+ * space" model to handle this.
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/compat.h linux-3.14.40/arch/arm64/include/asm/compat.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/compat.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/compat.h 2015-05-01 14:57:58.167427001 -0500
+@@ -228,7 +228,7 @@
+ return (u32)(unsigned long)uptr;
+ }
+
+-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
++#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+
+ static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+@@ -305,11 +305,6 @@
+
+ #else /* !CONFIG_COMPAT */
+
+-static inline int is_compat_task(void)
+-{
+- return 0;
+-}
+-
+ static inline int is_compat_thread(struct thread_info *thread)
+ {
+ return 0;
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/cpufeature.h linux-3.14.40/arch/arm64/include/asm/cpufeature.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/cpufeature.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/include/asm/cpufeature.h 2015-05-01 14:57:58.167427001 -0500
+@@ -0,0 +1,29 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_CPUFEATURE_H
++#define __ASM_CPUFEATURE_H
++
++#include <asm/hwcap.h>
++
++/*
++ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
++ * in the kernel and for user space to keep track of which optional features
++ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
++ * Note that HWCAP_x constants are bit fields so we need to take the log.
++ */
++
++#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
++#define cpu_feature(x) ilog2(HWCAP_ ## x)
++
++static inline bool cpu_have_feature(unsigned int num)
++{
++ return elf_hwcap & (1UL << num);
++}
++
++#endif
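The mapping above means a feature number is simply the bit position of the corresponding HWCAP flag. A small hedged sketch of how a caller (or module_cpu_feature_match(), which additionally emits a module alias so the module can be autoloaded) would use it; my_feature_init is a hypothetical hook:

static int __init my_feature_init(void)
{
	/* cpu_feature(SHA1) == ilog2(HWCAP_SHA1); test that bit in elf_hwcap */
	if (!cpu_have_feature(cpu_feature(SHA1)))
		return -ENODEV;
	return 0;
}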
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/debug-monitors.h linux-3.14.40/arch/arm64/include/asm/debug-monitors.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/debug-monitors.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/debug-monitors.h 2015-05-01 14:57:58.175427001 -0500
+@@ -26,6 +26,53 @@
+ #define DBG_ESR_EVT_HWWP 0x2
+ #define DBG_ESR_EVT_BRK 0x6
+
++/*
++ * Break point instruction encoding
++ */
++#define BREAK_INSTR_SIZE 4
++
++/*
++ * ESR values expected for dynamic and compile time BRK instruction
++ */
++#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
++
++/*
++ * #imm16 values used for BRK instruction generation
++ * Allowed values for kgdb are 0x400 - 0x7ff
++ * 0x400: for dynamic BRK instruction
++ * 0x401: for compile time BRK instruction
++ */
++#define KGDB_DYN_DGB_BRK_IMM 0x400
++#define KDBG_COMPILED_DBG_BRK_IMM 0x401
++
++/*
++ * BRK instruction encoding
++ * The #imm16 value should be placed at bits[20:5] within BRK ins
++ */
++#define AARCH64_BREAK_MON 0xd4200000
++
++/*
++ * Extract byte from BRK instruction
++ */
++#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
++ ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
++
++/*
++ * Extract byte from BRK #imm16
++ */
++#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
++ (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
++
++#define KGDB_DYN_DGB_BRK_BYTE(x) \
++ (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
++
++#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
++#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
++#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
++#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
++
++#define CACHE_FLUSH_IS_SAFE 1
++
+ enum debug_el {
+ DBG_ACTIVE_EL0 = 0,
+ DBG_ACTIVE_EL1,
+@@ -43,23 +90,6 @@
+ #ifndef __ASSEMBLY__
+ struct task_struct;
+
+-#define local_dbg_save(flags) \
+- do { \
+- typecheck(unsigned long, flags); \
+- asm volatile( \
+- "mrs %0, daif // local_dbg_save\n" \
+- "msr daifset, #8" \
+- : "=r" (flags) : : "memory"); \
+- } while (0)
+-
+-#define local_dbg_restore(flags) \
+- do { \
+- typecheck(unsigned long, flags); \
+- asm volatile( \
+- "msr daif, %0 // local_dbg_restore\n" \
+- : : "r" (flags) : "memory"); \
+- } while (0)
+-
+ #define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+
+ #define DBG_HOOK_HANDLED 0
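As a worked check of the byte-extraction macros above (not part of the patch): with #imm16 = KGDB_DYN_DGB_BRK_IMM placed at bits[20:5],

	encoded BRK               = AARCH64_BREAK_MON | (0x400 << 5)
	                          = 0xd4200000 | 0x8000 = 0xd4208000
	KGDB_DYN_BRK_INS_BYTE0..3 = 0x00, 0x80, 0x20, 0xd4

which is the little-endian in-memory byte order of the BRK instruction KGDB patches in for a dynamic breakpoint.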
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/dma-mapping.h linux-3.14.40/arch/arm64/include/asm/dma-mapping.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/dma-mapping.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/dma-mapping.h 2015-05-01 14:57:58.175427001 -0500
+@@ -28,6 +28,8 @@
+
+ #define DMA_ERROR_CODE (~(dma_addr_t)0)
+ extern struct dma_map_ops *dma_ops;
++extern struct dma_map_ops coherent_swiotlb_dma_ops;
++extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
+
+ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+ {
+@@ -45,6 +47,11 @@
+ return __generic_dma_ops(dev);
+ }
+
++static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++{
++ dev->archdata.dma_ops = ops;
++}
++
+ #include <asm-generic/dma-mapping-common.h>
+
+ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/ftrace.h linux-3.14.40/arch/arm64/include/asm/ftrace.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/ftrace.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/include/asm/ftrace.h 2015-05-01 14:57:58.175427001 -0500
+@@ -0,0 +1,59 @@
++/*
++ * arch/arm64/include/asm/ftrace.h
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#ifndef __ASM_FTRACE_H
++#define __ASM_FTRACE_H
++
++#include <asm/insn.h>
++
++#define MCOUNT_ADDR ((unsigned long)_mcount)
++#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
++
++#ifndef __ASSEMBLY__
++#include <linux/compat.h>
++
++extern void _mcount(unsigned long);
++extern void *return_address(unsigned int);
++
++struct dyn_arch_ftrace {
++ /* No extra data needed for arm64 */
++};
++
++extern unsigned long ftrace_graph_call;
++
++static inline unsigned long ftrace_call_adjust(unsigned long addr)
++{
++ /*
++ * addr is the address of the mcount call instruction.
++ * recordmcount does the necessary offset calculation.
++ */
++ return addr;
++}
++
++#define ftrace_return_address(n) return_address(n)
++
++/*
++ * Because AArch32 mode does not share the same syscall table with AArch64,
++ * tracing compat syscalls may result in reporting bogus syscalls or even
++ * hang-up, so just do not trace them.
++ * See kernel/trace/trace_syscalls.c
++ *
++ * x86 code says:
++ * If the user really wants these, then they should use the
++ * raw syscall tracepoints with filtering.
++ */
++#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
++static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
++{
++ return is_compat_task();
++}
++#endif /* ifndef __ASSEMBLY__ */
++
++#endif /* __ASM_FTRACE_H */
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/hwcap.h linux-3.14.40/arch/arm64/include/asm/hwcap.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/hwcap.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/hwcap.h 2015-05-01 14:57:58.175427001 -0500
+@@ -33,6 +33,12 @@
+ #define COMPAT_HWCAP_LPAE (1 << 20)
+ #define COMPAT_HWCAP_EVTSTRM (1 << 21)
+
++#define COMPAT_HWCAP2_AES (1 << 0)
++#define COMPAT_HWCAP2_PMULL (1 << 1)
++#define COMPAT_HWCAP2_SHA1 (1 << 2)
++#define COMPAT_HWCAP2_SHA2 (1 << 3)
++#define COMPAT_HWCAP2_CRC32 (1 << 4)
++
+ #ifndef __ASSEMBLY__
+ /*
+ * This yields a mask that user programs can use to figure out what
+@@ -42,7 +48,8 @@
+
+ #ifdef CONFIG_COMPAT
+ #define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+-extern unsigned int compat_elf_hwcap;
++#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2)
++extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
+ #endif
+
+ extern unsigned long elf_hwcap;
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/insn.h linux-3.14.40/arch/arm64/include/asm/insn.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/insn.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/insn.h 2015-05-01 14:57:58.175427001 -0500
+@@ -16,11 +16,14 @@
+ */
+ #ifndef __ASM_INSN_H
+ #define __ASM_INSN_H
++
+ #include <linux/types.h>
+
+ /* A64 instructions are always 32 bits. */
+ #define AARCH64_INSN_SIZE 4
+
++#ifndef __ASSEMBLY__
++
+ /*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+@@ -105,4 +108,6 @@
+ int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+ int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
++#endif /* __ASSEMBLY__ */
++
+ #endif /* __ASM_INSN_H */
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/irqflags.h linux-3.14.40/arch/arm64/include/asm/irqflags.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/irqflags.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/irqflags.h 2015-05-01 14:57:58.175427001 -0500
+@@ -90,5 +90,28 @@
+ return flags & PSR_I_BIT;
+ }
+
++/*
++ * save and restore debug state
++ */
++#define local_dbg_save(flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ asm volatile( \
++ "mrs %0, daif // local_dbg_save\n" \
++ "msr daifset, #8" \
++ : "=r" (flags) : : "memory"); \
++ } while (0)
++
++#define local_dbg_restore(flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ asm volatile( \
++ "msr daif, %0 // local_dbg_restore\n" \
++ : : "r" (flags) : "memory"); \
++ } while (0)
++
++#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
++#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
++
+ #endif
+ #endif
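A brief usage sketch of the macros moved here (assumed caller code, mirroring the usual local_irq_save() pattern):

	unsigned long flags;

	local_dbg_save(flags);		/* mask debug exceptions (DAIF bit D) */
	/* ... region that must not take hardware debug exceptions ... */
	local_dbg_restore(flags);	/* put the previous D flag back */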
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/Kbuild linux-3.14.40/arch/arm64/include/asm/Kbuild
+--- linux-3.14.40.orig/arch/arm64/include/asm/Kbuild 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/Kbuild 2015-05-01 14:57:58.175427001 -0500
+@@ -35,6 +35,7 @@
+ generic-y += sembuf.h
+ generic-y += serial.h
+ generic-y += shmbuf.h
++generic-y += simd.h
+ generic-y += sizes.h
+ generic-y += socket.h
+ generic-y += sockios.h
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/kgdb.h linux-3.14.40/arch/arm64/include/asm/kgdb.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/kgdb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/include/asm/kgdb.h 2015-05-01 14:57:58.175427001 -0500
+@@ -0,0 +1,84 @@
++/*
++ * AArch64 KGDB support
++ *
++ * Based on arch/arm/include/kgdb.h
++ *
++ * Copyright (C) 2013 Cavium Inc.
++ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef __ARM_KGDB_H
++#define __ARM_KGDB_H
++
++#include <linux/ptrace.h>
++#include <asm/debug-monitors.h>
++
++#ifndef __ASSEMBLY__
++
++static inline void arch_kgdb_breakpoint(void)
++{
++ asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
++}
++
++extern void kgdb_handle_bus_error(void);
++extern int kgdb_fault_expected;
++
++#endif /* !__ASSEMBLY__ */
++
++/*
++ * gdb is expecting the following registers layout.
++ *
++ * General purpose regs:
++ * r0-r30: 64 bit
++ * sp,pc : 64 bit
++ * pstate : 64 bit
++ * Total: 34
++ * FPU regs:
++ * f0-f31: 128 bit
++ * Total: 32
++ * Extra regs
++ * fpsr & fpcr: 32 bit
++ * Total: 2
++ *
++ */
++
++#define _GP_REGS 34
++#define _FP_REGS 32
++#define _EXTRA_REGS 2
++/*
++ * general purpose registers size in bytes.
++ * pstate is only 4 bytes. subtract 4 bytes
++ */
++#define GP_REG_BYTES (_GP_REGS * 8)
++#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
++
++/*
++ * Size of the I/O buffer for gdb packets,
++ * sized so that it can hold the contents of all registers.
++ */
++
++#define BUFMAX 2048
++
++/*
++ * Number of bytes required for gdb_regs buffer.
++ * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each
++ * GDB fails to connect for sizes beyond this with the error
++ * "'g' packet reply is too long"
++ */
++
++#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
++ (_EXTRA_REGS * 4))
++
++#endif /* __ASM_KGDB_H */
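For reference, the sizes defined above work out to:

	GP_REG_BYTES = 34 * 8              = 272 bytes
	NUMREGBYTES  = 34*8 + 32*16 + 2*4  = 272 + 512 + 8 = 792 bytes

so a full 'g' register reply at roughly two hex characters per byte (about 1584 characters) still fits in the 2048-byte BUFMAX, which is presumably why that value was chosen.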
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/page.h linux-3.14.40/arch/arm64/include/asm/page.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/page.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/page.h 2015-05-01 14:57:58.175427001 -0500
+@@ -31,6 +31,15 @@
+ /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
+ #define __HAVE_ARCH_GATE_AREA 1
+
++/*
++ * The idmap and swapper page tables need some space reserved in the kernel
++ * image. The idmap only requires a pgd and a next level table to (section) map
++ * the kernel, while the swapper also maps the FDT and requires an additional
++ * table to map an early UART. See __create_page_tables for more information.
++ */
++#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
++#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
++
+ #ifndef __ASSEMBLY__
+
+ #ifdef CONFIG_ARM64_64K_PAGES
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/pgtable.h linux-3.14.40/arch/arm64/include/asm/pgtable.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/pgtable.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/pgtable.h 2015-05-01 14:57:58.175427001 -0500
+@@ -227,36 +227,36 @@
+
+ #define __HAVE_ARCH_PTE_SPECIAL
+
+-/*
+- * Software PMD bits for THP
+- */
++static inline pte_t pmd_pte(pmd_t pmd)
++{
++ return __pte(pmd_val(pmd));
++}
+
+-#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+-#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57)
++static inline pmd_t pte_pmd(pte_t pte)
++{
++ return __pmd(pte_val(pte));
++}
+
+ /*
+ * THP definitions.
+ */
+-#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+-
+-#define __HAVE_ARCH_PMD_WRITE
+-#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
++#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
+ #endif
+
+-#define PMD_BIT_FUNC(fn,op) \
+-static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
++#define pmd_young(pmd) pte_young(pmd_pte(pmd))
++#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
++#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
++#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
++#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
++#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
++#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+
+-PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+-PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+-PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+-PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+-PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+-PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
++#define __HAVE_ARCH_PMD_WRITE
++#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+
+ #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+@@ -266,16 +266,7 @@
+
+ #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+-static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+-{
+- const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
+- PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
+- PMD_SECT_VALID;
+- pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+- return pmd;
+-}
+-
+-#define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd)
++#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+
+ static inline int has_transparent_hugepage(void)
+ {
+@@ -383,12 +374,14 @@
+ return pte;
+ }
+
++static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
++{
++ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
++}
++
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+-
+ /*
+ * Encode and decode a swap entry:
+ * bits 0-1: present (must be zero)
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/ptrace.h linux-3.14.40/arch/arm64/include/asm/ptrace.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/ptrace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/ptrace.h 2015-05-01 14:57:58.179427001 -0500
+@@ -68,6 +68,7 @@
+
+ /* Architecturally defined mapping between AArch32 and AArch64 registers */
+ #define compat_usr(x) regs[(x)]
++#define compat_fp regs[11]
+ #define compat_sp regs[13]
+ #define compat_lr regs[14]
+ #define compat_sp_hyp regs[15]
+@@ -132,7 +133,12 @@
+ (!((regs)->pstate & PSR_F_BIT))
+
+ #define user_stack_pointer(regs) \
+- ((regs)->sp)
++ (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
++
++static inline unsigned long regs_return_value(struct pt_regs *regs)
++{
++ return regs->regs[0];
++}
+
+ /*
+ * Are the current registers suitable for user mode? (used to maintain
+@@ -164,7 +170,7 @@
+ return 0;
+ }
+
+-#define instruction_pointer(regs) (regs)->pc
++#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
+
+ #ifdef CONFIG_SMP
+ extern unsigned long profile_pc(struct pt_regs *regs);
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/syscall.h linux-3.14.40/arch/arm64/include/asm/syscall.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/syscall.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/syscall.h 2015-05-01 14:57:58.195427001 -0500
+@@ -18,6 +18,7 @@
+
+ #include <linux/err.h>
+
++extern const void *sys_call_table[];
+
+ static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/thread_info.h linux-3.14.40/arch/arm64/include/asm/thread_info.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/thread_info.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/thread_info.h 2015-05-01 14:57:58.195427001 -0500
+@@ -91,6 +91,9 @@
+ /*
+ * thread information flags:
+ * TIF_SYSCALL_TRACE - syscall trace active
++ * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
++ * TIF_SYSCALL_AUDIT - syscall auditing
++ * TIF_SECCOMP		- syscall secure computing
+ * TIF_SIGPENDING - signal pending
+ * TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
+@@ -101,6 +104,9 @@
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_SYSCALL_TRACE 8
++#define TIF_SYSCALL_AUDIT 9
++#define TIF_SYSCALL_TRACEPOINT 10
++#define TIF_SECCOMP 11
+ #define TIF_POLLING_NRFLAG 16
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_FREEZE 19
+@@ -112,10 +118,17 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
++#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
++#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
++#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_32BIT (1 << TIF_32BIT)
+
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_THREAD_INFO_H */
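Worked out, the combined mask added above is:

	_TIF_SYSCALL_WORK = (1 << 8) | (1 << 9) | (1 << 10) | (1 << 11) = 0x0f00

so the single `tst x16, #_TIF_SYSCALL_WORK` test in the entry.S hunk later in this patch covers tracing, auditing, tracepoints and seccomp in one instruction.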
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/topology.h linux-3.14.40/arch/arm64/include/asm/topology.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/topology.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/include/asm/topology.h 2015-05-01 14:57:58.195427001 -0500
+@@ -0,0 +1,70 @@
++#ifndef __ASM_TOPOLOGY_H
++#define __ASM_TOPOLOGY_H
++
++#ifdef CONFIG_SMP
++
++#include <linux/cpumask.h>
++
++struct cpu_topology {
++ int thread_id;
++ int core_id;
++ int cluster_id;
++ cpumask_t thread_sibling;
++ cpumask_t core_sibling;
++};
++
++extern struct cpu_topology cpu_topology[NR_CPUS];
++
++#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
++#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
++#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
++#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
++
++#define mc_capable() (cpu_topology[0].cluster_id != -1)
++#define smt_capable() (cpu_topology[0].thread_id != -1)
++
++void init_cpu_topology(void);
++void store_cpu_topology(unsigned int cpuid);
++const struct cpumask *cpu_coregroup_mask(int cpu);
++
++#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
++/* Common values for CPUs */
++#ifndef SD_CPU_INIT
++#define SD_CPU_INIT (struct sched_domain) { \
++ .min_interval = 1, \
++ .max_interval = 4, \
++ .busy_factor = 64, \
++ .imbalance_pct = 125, \
++ .cache_nice_tries = 1, \
++ .busy_idx = 2, \
++ .idle_idx = 1, \
++ .newidle_idx = 0, \
++ .wake_idx = 0, \
++ .forkexec_idx = 0, \
++ \
++ .flags = 0*SD_LOAD_BALANCE \
++ | 1*SD_BALANCE_NEWIDLE \
++ | 1*SD_BALANCE_EXEC \
++ | 1*SD_BALANCE_FORK \
++ | 0*SD_BALANCE_WAKE \
++ | 1*SD_WAKE_AFFINE \
++ | 0*SD_SHARE_CPUPOWER \
++ | 0*SD_SHARE_PKG_RESOURCES \
++ | 0*SD_SERIALIZE \
++ , \
++ .last_balance = jiffies, \
++ .balance_interval = 1, \
++}
++#endif
++#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
++
++#else
++
++static inline void init_cpu_topology(void) { }
++static inline void store_cpu_topology(unsigned int cpuid) { }
++
++#endif
++
++#include <asm-generic/topology.h>
++
++#endif /* _ASM_ARM_TOPOLOGY_H */
+diff -Nur linux-3.14.40.orig/arch/arm64/include/asm/unistd.h linux-3.14.40/arch/arm64/include/asm/unistd.h
+--- linux-3.14.40.orig/arch/arm64/include/asm/unistd.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/asm/unistd.h 2015-05-01 14:57:58.195427001 -0500
+@@ -28,3 +28,5 @@
+ #endif
+ #define __ARCH_WANT_SYS_CLONE
+ #include <uapi/asm/unistd.h>
++
++#define NR_syscalls (__NR_syscalls)
+diff -Nur linux-3.14.40.orig/arch/arm64/include/uapi/asm/Kbuild linux-3.14.40/arch/arm64/include/uapi/asm/Kbuild
+--- linux-3.14.40.orig/arch/arm64/include/uapi/asm/Kbuild 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/include/uapi/asm/Kbuild 2015-05-01 14:57:58.195427001 -0500
+@@ -9,6 +9,7 @@
+ header-y += fcntl.h
+ header-y += hwcap.h
+ header-y += kvm_para.h
++header-y += perf_regs.h
+ header-y += param.h
+ header-y += ptrace.h
+ header-y += setup.h
+diff -Nur linux-3.14.40.orig/arch/arm64/include/uapi/asm/perf_regs.h linux-3.14.40/arch/arm64/include/uapi/asm/perf_regs.h
+--- linux-3.14.40.orig/arch/arm64/include/uapi/asm/perf_regs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/include/uapi/asm/perf_regs.h 2015-05-01 14:57:58.195427001 -0500
+@@ -0,0 +1,40 @@
++#ifndef _ASM_ARM64_PERF_REGS_H
++#define _ASM_ARM64_PERF_REGS_H
++
++enum perf_event_arm_regs {
++ PERF_REG_ARM64_X0,
++ PERF_REG_ARM64_X1,
++ PERF_REG_ARM64_X2,
++ PERF_REG_ARM64_X3,
++ PERF_REG_ARM64_X4,
++ PERF_REG_ARM64_X5,
++ PERF_REG_ARM64_X6,
++ PERF_REG_ARM64_X7,
++ PERF_REG_ARM64_X8,
++ PERF_REG_ARM64_X9,
++ PERF_REG_ARM64_X10,
++ PERF_REG_ARM64_X11,
++ PERF_REG_ARM64_X12,
++ PERF_REG_ARM64_X13,
++ PERF_REG_ARM64_X14,
++ PERF_REG_ARM64_X15,
++ PERF_REG_ARM64_X16,
++ PERF_REG_ARM64_X17,
++ PERF_REG_ARM64_X18,
++ PERF_REG_ARM64_X19,
++ PERF_REG_ARM64_X20,
++ PERF_REG_ARM64_X21,
++ PERF_REG_ARM64_X22,
++ PERF_REG_ARM64_X23,
++ PERF_REG_ARM64_X24,
++ PERF_REG_ARM64_X25,
++ PERF_REG_ARM64_X26,
++ PERF_REG_ARM64_X27,
++ PERF_REG_ARM64_X28,
++ PERF_REG_ARM64_X29,
++ PERF_REG_ARM64_LR,
++ PERF_REG_ARM64_SP,
++ PERF_REG_ARM64_PC,
++ PERF_REG_ARM64_MAX,
++};
++#endif /* _ASM_ARM64_PERF_REGS_H */
+diff -Nur linux-3.14.40.orig/arch/arm64/Kconfig linux-3.14.40/arch/arm64/Kconfig
+--- linux-3.14.40.orig/arch/arm64/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/Kconfig 2015-05-01 14:57:58.199427001 -0500
+@@ -4,6 +4,7 @@
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
++ select ARCH_HAS_OPP
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select ARCH_WANT_FRAME_POINTERS
+@@ -17,6 +18,7 @@
+ select DCACHE_WORD_ACCESS
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
++ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_IOMAP
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+@@ -27,18 +29,27 @@
+ select GENERIC_TIME_VSYSCALL
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_JUMP_LABEL
++ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
++ select HAVE_C_RECORDMCOUNT
+ select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
++ select HAVE_DYNAMIC_FTRACE
++ select HAVE_FTRACE_MCOUNT_RECORD
++ select HAVE_FUNCTION_TRACER
++ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS
+ select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
+ select HAVE_PERF_EVENTS
++ select HAVE_PERF_REGS
++ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_SYSCALL_TRACEPOINTS
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+@@ -86,7 +97,7 @@
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+-config ZONE_DMA32
++config ZONE_DMA
+ def_bool y
+
+ config ARCH_DMA_ADDR_T_64BIT
+@@ -165,6 +176,134 @@
+
+ If you don't know what to do here, say N.
+
++config SCHED_MC
++ bool "Multi-core scheduler support"
++ depends on SMP
++ help
++ Multi-core scheduler support improves the CPU scheduler's decision
++ making when dealing with multi-core CPU chips at a cost of slightly
++ increased overhead in some places. If unsure say N here.
++
++config SCHED_SMT
++ bool "SMT scheduler support"
++ depends on SMP
++ help
++ Improves the CPU scheduler's decision making when dealing with
++ MultiThreading at a cost of slightly increased overhead in some
++ places. If unsure say N here.
++
++config SCHED_MC
++ bool "Multi-core scheduler support"
++ depends on ARM_CPU_TOPOLOGY
++ help
++ Multi-core scheduler support improves the CPU scheduler's decision
++ making when dealing with multi-core CPU chips at a cost of slightly
++ increased overhead in some places. If unsure say N here.
++
++config SCHED_SMT
++ bool "SMT scheduler support"
++ depends on ARM_CPU_TOPOLOGY
++ help
++ Improves the CPU scheduler's decision making when dealing with
++ MultiThreading at a cost of slightly increased overhead in some
++ places. If unsure say N here.
++
++config DISABLE_CPU_SCHED_DOMAIN_BALANCE
++ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
++ help
++ Disables scheduler load-balancing at CPU sched domain level.
++
++config SCHED_HMP
++	bool "(EXPERIMENTAL) Heterogeneous multiprocessor scheduling"
++ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
++ help
++ Experimental scheduler optimizations for heterogeneous platforms.
++ Attempts to introspectively select task affinity to optimize power
++ and performance. Basic support for multiple (>2) cpu types is in place,
++ but it has only been tested with two types of cpus.
++ There is currently no support for migration of task groups, hence
++ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
++ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
++
++config SCHED_HMP_PRIO_FILTER
++ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
++ depends on SCHED_HMP
++ help
++ Enables task priority based HMP migration filter. Any task with
++ a NICE value above the threshold will always be on low-power cpus
++ with less compute capacity.
++
++config SCHED_HMP_PRIO_FILTER_VAL
++ int "NICE priority threshold"
++ default 5
++ depends on SCHED_HMP_PRIO_FILTER
++
++config HMP_FAST_CPU_MASK
++ string "HMP scheduler fast CPU mask"
++ depends on SCHED_HMP
++ help
++ Leave empty to use device tree information.
++ Specify the cpuids of the fast CPUs in the system as a list string,
++ e.g. cpuid 0+1 should be specified as 0-1.
++
++config HMP_SLOW_CPU_MASK
++ string "HMP scheduler slow CPU mask"
++ depends on SCHED_HMP
++ help
++ Leave empty to use device tree information.
++ Specify the cpuids of the slow CPUs in the system as a list string,
++ e.g. cpuid 0+1 should be specified as 0-1.
++
++config HMP_VARIABLE_SCALE
++ bool "Allows changing the load tracking scale through sysfs"
++ depends on SCHED_HMP
++ help
++ When turned on, this option exports the thresholds and load average
++ period value for the load tracking patches through sysfs.
++ The values can be modified to change the rate of load accumulation
++ and the thresholds used for HMP migration.
++ The load_avg_period_ms is the time in ms to reach a load average of
++	  0.5 for an idle task of 0 load average ratio that starts a busy loop.
++	  The up_threshold and down_threshold are the values used to move a task
++	  to a faster CPU or back to a slower CPU.
++	  The {up,down}_threshold are divided by 1024 before being compared
++	  to the load average.
++	  For example, with load_avg_period_ms = 128 and up_threshold = 512,
++	  a running task with a load of 0 will be migrated to a bigger CPU after
++	  128ms, because after 128ms its load_avg_ratio is 0.5 and the real
++	  up_threshold is 0.5.
++	  This patch has the same behavior as changing the decay factor Y of the
++	  load average computation to
++	  (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
++	  but it removes intermediate overflows in the computation.
++
++config HMP_FREQUENCY_INVARIANT_SCALE
++ bool "(EXPERIMENTAL) Frequency-Invariant Tracked Load for HMP"
++ depends on HMP_VARIABLE_SCALE && CPU_FREQ
++ help
++ Scales the current load contribution in line with the frequency
++ of the CPU that the task was executed on.
++ In this version, we use a simple linear scale derived from the
++ maximum frequency reported by CPUFreq.
++ Restricting tracked load to be scaled by the CPU's frequency
++ represents the consumption of possible compute capacity
++ (rather than consumption of actual instantaneous capacity as
++ normal) and allows the HMP migration's simple threshold
++ migration strategy to interact more predictably with CPUFreq's
++ asynchronous compute capacity changes.
++
++config SCHED_HMP_LITTLE_PACKING
++ bool "Small task packing for HMP"
++ depends on SCHED_HMP
++ default n
++ help
++ Allows the HMP Scheduler to pack small tasks into CPUs in the
++ smallest HMP domain.
++ Controlled by two sysfs files in sys/kernel/hmp.
++ packing_enable: 1 to enable, 0 to disable packing. Default 1.
++ packing_limit: runqueue load ratio where a RQ is considered
++ to be full. Default is NICE_0_LOAD * 9/8.
++
+ config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+@@ -317,5 +456,8 @@
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
++if CRYPTO
++source "arch/arm64/crypto/Kconfig"
++endif
+
+ source "lib/Kconfig"
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/arm64ksyms.c linux-3.14.40/arch/arm64/kernel/arm64ksyms.c
+--- linux-3.14.40.orig/arch/arm64/kernel/arm64ksyms.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/arm64ksyms.c 2015-05-01 14:57:58.211427001 -0500
+@@ -56,3 +56,7 @@
+ EXPORT_SYMBOL(test_and_clear_bit);
+ EXPORT_SYMBOL(change_bit);
+ EXPORT_SYMBOL(test_and_change_bit);
++
++#ifdef CONFIG_FUNCTION_TRACER
++EXPORT_SYMBOL(_mcount);
++#endif
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/debug-monitors.c linux-3.14.40/arch/arm64/kernel/debug-monitors.c
+--- linux-3.14.40.orig/arch/arm64/kernel/debug-monitors.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/debug-monitors.c 2015-05-01 14:57:58.215427001 -0500
+@@ -138,6 +138,7 @@
+ {
+ asm volatile("msr oslar_el1, %0" : : "r" (0));
+ isb();
++ local_dbg_enable();
+ }
+
+ static int os_lock_notify(struct notifier_block *self,
+@@ -314,9 +315,6 @@
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
+- pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
+- (long)instruction_pointer(regs), esr);
+-
+ if (!user_mode(regs))
+ return -EFAULT;
+
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/entry-ftrace.S linux-3.14.40/arch/arm64/kernel/entry-ftrace.S
+--- linux-3.14.40.orig/arch/arm64/kernel/entry-ftrace.S 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/kernel/entry-ftrace.S 2015-05-01 14:57:58.215427001 -0500
+@@ -0,0 +1,218 @@
++/*
++ * arch/arm64/kernel/entry-ftrace.S
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++#include <asm/ftrace.h>
++#include <asm/insn.h>
++
++/*
++ * Gcc with -pg will put the following code in the beginning of each function:
++ * mov x0, x30
++ * bl _mcount
++ * [function's body ...]
++ * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
++ * ftrace is enabled.
++ *
++ * Please note that x0 as an argument will not be used here because we can
++ * get the lr (x30) of the instrumented function at any time by unwinding the
++ * call stack, as long as the kernel is compiled without -fomit-frame-pointer.
++ * (or CONFIG_FRAME_POINTER, this is forced on arm64)
++ *
++ * stack layout after mcount_enter in _mcount():
++ *
++ * current sp/fp => 0:+-----+
++ * in _mcount() | x29 | -> instrumented function's fp
++ * +-----+
++ * | x30 | -> _mcount()'s lr (= instrumented function's pc)
++ * old sp => +16:+-----+
++ * when instrumented | |
++ * function calls | ... |
++ * _mcount() | |
++ * | |
++ * instrumented => +xx:+-----+
++ * function's fp | x29 | -> parent's fp
++ * +-----+
++ * | x30 | -> instrumented function's lr (= parent's pc)
++ * +-----+
++ * | ... |
++ */
++
++ .macro mcount_enter
++ stp x29, x30, [sp, #-16]!
++ mov x29, sp
++ .endm
++
++ .macro mcount_exit
++ ldp x29, x30, [sp], #16
++ ret
++ .endm
++
++ .macro mcount_adjust_addr rd, rn
++ sub \rd, \rn, #AARCH64_INSN_SIZE
++ .endm
++
++ /* for instrumented function's parent */
++ .macro mcount_get_parent_fp reg
++ ldr \reg, [x29]
++ ldr \reg, [\reg]
++ .endm
++
++ /* for instrumented function */
++ .macro mcount_get_pc0 reg
++ mcount_adjust_addr \reg, x30
++ .endm
++
++ .macro mcount_get_pc reg
++ ldr \reg, [x29, #8]
++ mcount_adjust_addr \reg, \reg
++ .endm
++
++ .macro mcount_get_lr reg
++ ldr \reg, [x29]
++ ldr \reg, [\reg, #8]
++ mcount_adjust_addr \reg, \reg
++ .endm
++
++ .macro mcount_get_lr_addr reg
++ ldr \reg, [x29]
++ add \reg, \reg, #8
++ .endm
++
++#ifndef CONFIG_DYNAMIC_FTRACE
++/*
++ * void _mcount(unsigned long return_address)
++ * @return_address: return address to instrumented function
++ *
++ * This function makes calls, if enabled, to:
++ * - tracer function to probe instrumented function's entry,
++ * - ftrace_graph_caller to set up an exit hook
++ */
++ENTRY(_mcount)
++#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
++ ldr x0, =ftrace_trace_stop
++ ldr x0, [x0] // if ftrace_trace_stop
++ ret // return;
++#endif
++ mcount_enter
++
++ ldr x0, =ftrace_trace_function
++ ldr x2, [x0]
++ adr x0, ftrace_stub
++ cmp x0, x2 // if (ftrace_trace_function
++ b.eq skip_ftrace_call // != ftrace_stub) {
++
++ mcount_get_pc x0 // function's pc
++ mcount_get_lr x1 // function's lr (= parent's pc)
++ blr x2 // (*ftrace_trace_function)(pc, lr);
++
++#ifndef CONFIG_FUNCTION_GRAPH_TRACER
++skip_ftrace_call: // return;
++ mcount_exit // }
++#else
++ mcount_exit // return;
++ // }
++skip_ftrace_call:
++ ldr x1, =ftrace_graph_return
++ ldr x2, [x1] // if ((ftrace_graph_return
++ cmp x0, x2 // != ftrace_stub)
++ b.ne ftrace_graph_caller
++
++ ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
++ ldr x2, [x1] // != ftrace_graph_entry_stub))
++ ldr x0, =ftrace_graph_entry_stub
++ cmp x0, x2
++ b.ne ftrace_graph_caller // ftrace_graph_caller();
++
++ mcount_exit
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
++ENDPROC(_mcount)
++
++#else /* CONFIG_DYNAMIC_FTRACE */
++/*
++ * _mcount() is used to build the kernel with -pg option, but all the branch
++ * instructions to _mcount() are replaced with NOPs initially at kernel start up,
++ * and later on, each NOP is patched to branch to ftrace_caller() when enabled,
++ * or back to a NOP when disabled, on a per-function basis.
++ */
++ENTRY(_mcount)
++ ret
++ENDPROC(_mcount)
++
++/*
++ * void ftrace_caller(unsigned long return_address)
++ * @return_address: return address to instrumented function
++ *
++ * This function is a counterpart of _mcount() in 'static' ftrace, and
++ * makes calls to:
++ * - tracer function to probe instrumented function's entry,
++ * - ftrace_graph_caller to set up an exit hook
++ */
++ENTRY(ftrace_caller)
++ mcount_enter
++
++ mcount_get_pc0 x0 // function's pc
++ mcount_get_lr x1 // function's lr
++
++ .global ftrace_call
++ftrace_call: // tracer(pc, lr);
++ nop // This will be replaced with "bl xxx"
++ // where xxx can be any kind of tracer.
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++ .global ftrace_graph_call
++ftrace_graph_call: // ftrace_graph_caller();
++ nop // If enabled, this will be replaced
++ // "b ftrace_graph_caller"
++#endif
++
++ mcount_exit
++ENDPROC(ftrace_caller)
++#endif /* CONFIG_DYNAMIC_FTRACE */
++
++ENTRY(ftrace_stub)
++ ret
++ENDPROC(ftrace_stub)
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++/*
++ * void ftrace_graph_caller(void)
++ *
++ * Called from _mcount() or ftrace_caller() when function_graph tracer is
++ * selected.
++ * This function w/ prepare_ftrace_return() fakes link register's value on
++ * the call stack in order to intercept instrumented function's return path
++ * and run return_to_handler() later on its exit.
++ */
++ENTRY(ftrace_graph_caller)
++ mcount_get_lr_addr x0 // pointer to function's saved lr
++ mcount_get_pc x1 // function's pc
++ mcount_get_parent_fp x2 // parent's fp
++ bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
++
++ mcount_exit
++ENDPROC(ftrace_graph_caller)
++
++/*
++ * void return_to_handler(void)
++ *
++ * Run ftrace_return_to_handler() before going back to parent.
++ * @fp is checked against the value passed by ftrace_graph_caller()
++ * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
++ */
++ENTRY(return_to_handler)
++ str x0, [sp, #-16]!
++ mov x0, x29 // parent's fp
++	bl	ftrace_return_to_handler	// addr = ftrace_return_to_handler(fp);
++ mov x30, x0 // restore the original return address
++ ldr x0, [sp], #16
++ ret
++END(return_to_handler)
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/entry.S linux-3.14.40/arch/arm64/kernel/entry.S
+--- linux-3.14.40.orig/arch/arm64/kernel/entry.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/entry.S 2015-05-01 14:57:58.219427001 -0500
+@@ -630,8 +630,9 @@
+ enable_irq
+
+ get_thread_info tsk
+- ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
+- tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
++ ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
++ tst x16, #_TIF_SYSCALL_WORK
++ b.ne __sys_trace
+ adr lr, ret_fast_syscall // return address
+ cmp scno, sc_nr // check upper syscall limit
+ b.hs ni_sys
+@@ -647,9 +648,8 @@
+ * switches, and waiting for our parent to respond.
+ */
+ __sys_trace:
+- mov x1, sp
+- mov w0, #0 // trace entry
+- bl syscall_trace
++ mov x0, sp
++ bl syscall_trace_enter
+ adr lr, __sys_trace_return // return address
+ uxtw scno, w0 // syscall number (possibly new)
+ mov x1, sp // pointer to regs
+@@ -664,9 +664,8 @@
+
+ __sys_trace_return:
+ str x0, [sp] // save returned x0
+- mov x1, sp
+- mov w0, #1 // trace exit
+- bl syscall_trace
++ mov x0, sp
++ bl syscall_trace_exit
+ b ret_to_user
+
+ /*
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/ftrace.c linux-3.14.40/arch/arm64/kernel/ftrace.c
+--- linux-3.14.40.orig/arch/arm64/kernel/ftrace.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/kernel/ftrace.c 2015-05-01 14:57:58.219427001 -0500
+@@ -0,0 +1,177 @@
++/*
++ * arch/arm64/kernel/ftrace.c
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/ftrace.h>
++#include <linux/swab.h>
++#include <linux/uaccess.h>
++
++#include <asm/cacheflush.h>
++#include <asm/ftrace.h>
++#include <asm/insn.h>
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++/*
++ * Replace a single instruction, which may be a branch or NOP.
++ * If @validate == true, a replaced instruction is checked against 'old'.
++ */
++static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
++ bool validate)
++{
++ u32 replaced;
++
++ /*
++ * Note:
++ * Due to modules and __init, code can disappear and change,
++ * we need to protect against faulting as well as code changing.
++ * We do this by aarch64_insn_*() which use the probe_kernel_*().
++ *
++ * No lock is held here because all the modifications are run
++ * through stop_machine().
++ */
++ if (validate) {
++ if (aarch64_insn_read((void *)pc, &replaced))
++ return -EFAULT;
++
++ if (replaced != old)
++ return -EINVAL;
++ }
++ if (aarch64_insn_patch_text_nosync((void *)pc, new))
++ return -EPERM;
++
++ return 0;
++}
++
++/*
++ * Replace tracer function in ftrace_caller()
++ */
++int ftrace_update_ftrace_func(ftrace_func_t func)
++{
++ unsigned long pc;
++ u32 new;
++
++ pc = (unsigned long)&ftrace_call;
++ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
++
++ return ftrace_modify_code(pc, 0, new, false);
++}
++
++/*
++ * Turn on the call to ftrace_caller() in instrumented function
++ */
++int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
++{
++ unsigned long pc = rec->ip;
++ u32 old, new;
++
++ old = aarch64_insn_gen_nop();
++ new = aarch64_insn_gen_branch_imm(pc, addr, true);
++
++ return ftrace_modify_code(pc, old, new, true);
++}
++
++/*
++ * Turn off the call to ftrace_caller() in instrumented function
++ */
++int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
++ unsigned long addr)
++{
++ unsigned long pc = rec->ip;
++ u32 old, new;
++
++ old = aarch64_insn_gen_branch_imm(pc, addr, true);
++ new = aarch64_insn_gen_nop();
++
++ return ftrace_modify_code(pc, old, new, true);
++}
++
++int __init ftrace_dyn_arch_init(void *data)
++{
++ *(unsigned long *)data = 0;
++ return 0;
++}
++#endif /* CONFIG_DYNAMIC_FTRACE */
++
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++/*
++ * function_graph tracer expects ftrace_return_to_handler() to be called
++ * on the way back to parent. For this purpose, this function is called
++ * in _mcount() or ftrace_caller() to replace return address (*parent) on
++ * the call stack to return_to_handler.
++ *
++ * Note that @frame_pointer is used only for sanity check later.
++ */
++void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
++ unsigned long frame_pointer)
++{
++ unsigned long return_hooker = (unsigned long)&return_to_handler;
++ unsigned long old;
++ struct ftrace_graph_ent trace;
++ int err;
++
++ if (unlikely(atomic_read(&current->tracing_graph_pause)))
++ return;
++
++ /*
++ * Note:
++ * No protection against faulting at *parent, which may be seen
++ * on other archs. It's unlikely on AArch64.
++ */
++ old = *parent;
++ *parent = return_hooker;
++
++ trace.func = self_addr;
++ trace.depth = current->curr_ret_stack + 1;
++
++ /* Only trace if the calling function expects to */
++ if (!ftrace_graph_entry(&trace)) {
++ *parent = old;
++ return;
++ }
++
++ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
++ frame_pointer);
++ if (err == -EBUSY) {
++ *parent = old;
++ return;
++ }
++}
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++/*
++ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
++ * depending on @enable.
++ */
++static int ftrace_modify_graph_caller(bool enable)
++{
++ unsigned long pc = (unsigned long)&ftrace_graph_call;
++ u32 branch, nop;
++
++ branch = aarch64_insn_gen_branch_imm(pc,
++ (unsigned long)ftrace_graph_caller, false);
++ nop = aarch64_insn_gen_nop();
++
++ if (enable)
++ return ftrace_modify_code(pc, nop, branch, true);
++ else
++ return ftrace_modify_code(pc, branch, nop, true);
++}
++
++int ftrace_enable_ftrace_graph_caller(void)
++{
++ return ftrace_modify_graph_caller(true);
++}
++
++int ftrace_disable_ftrace_graph_caller(void)
++{
++ return ftrace_modify_graph_caller(false);
++}
++#endif /* CONFIG_DYNAMIC_FTRACE */
++#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
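Editor's note: ftrace_modify_code() above only ever swaps the architectural NOP and a PC-relative branch produced by aarch64_insn_gen_branch_imm(). A rough sketch of what such an encoder computes, assuming the standard AArch64 B/BL encodings (opcode plus a signed 26-bit word offset); the in-kernel helper in insn.c additionally range-checks the +/-128MB reach before anything is patched.

    #include <stdint.h>

    #define AARCH64_INSN_NOP   0xd503201fu   /* "nop" */
    #define AARCH64_B_OPCODE   0x14000000u   /* "b  imm26" */
    #define AARCH64_BL_OPCODE  0x94000000u   /* "bl imm26" */

    /* Illustrative only: encode a branch from 'pc' to 'target'.  The offset
     * is in 4-byte words and must fit in a signed 26-bit field, which the
     * kernel helper verifies before patching. */
    static uint32_t gen_branch_imm(unsigned long pc, unsigned long target, int link)
    {
            long offset = ((long)target - (long)pc) >> 2;
            uint32_t insn = link ? AARCH64_BL_OPCODE : AARCH64_B_OPCODE;

            return insn | ((uint32_t)offset & 0x03ffffffu);
    }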
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/head.S linux-3.14.40/arch/arm64/kernel/head.S
+--- linux-3.14.40.orig/arch/arm64/kernel/head.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/head.S 2015-05-01 14:57:58.219427001 -0500
+@@ -26,6 +26,7 @@
+ #include <asm/assembler.h>
+ #include <asm/ptrace.h>
+ #include <asm/asm-offsets.h>
++#include <asm/cache.h>
+ #include <asm/cputype.h>
+ #include <asm/memory.h>
+ #include <asm/thread_info.h>
+@@ -34,29 +35,17 @@
+ #include <asm/page.h>
+ #include <asm/virt.h>
+
+-/*
+- * swapper_pg_dir is the virtual address of the initial page table. We place
+- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
+- * 2 pages and is placed below swapper_pg_dir.
+- */
+ #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+
+ #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
+ #error KERNEL_RAM_VADDR must start at 0xXXX80000
+ #endif
+
+-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+-
+- .globl swapper_pg_dir
+- .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
+-
+- .globl idmap_pg_dir
+- .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
+-
+- .macro pgtbl, ttb0, ttb1, phys
+- add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
+- sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
++ .macro pgtbl, ttb0, ttb1, virt_to_phys
++ ldr \ttb1, =swapper_pg_dir
++ ldr \ttb0, =idmap_pg_dir
++ add \ttb1, \ttb1, \virt_to_phys
++ add \ttb0, \ttb0, \virt_to_phys
+ .endm
+
+ #ifdef CONFIG_ARM64_64K_PAGES
+@@ -229,7 +218,11 @@
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+-1: str w20, [x1] // This CPU has booted in EL1
++1: dc cvac, x1 // Clean potentially dirty cache line
++ dsb sy
++ str w20, [x1] // This CPU has booted in EL1
++ dc civac, x1 // Clean&invalidate potentially stale cache line
++ dsb sy
+ ret
+ ENDPROC(set_cpu_boot_mode_flag)
+
+@@ -240,8 +233,9 @@
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+- .pushsection .data
++ .pushsection .data..cacheline_aligned
+ ENTRY(__boot_cpu_mode)
++ .align L1_CACHE_SHIFT
+ .long BOOT_CPU_MODE_EL2
+ .long 0
+ .popsection
+@@ -298,7 +292,7 @@
+ mov x23, x0 // x23=current cpu_table
+ cbz x23, __error_p // invalid processor (x23=0)?
+
+- pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
++ pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
+ ldr x12, [x23, #CPU_INFO_SETUP]
+ add x12, x12, x28 // __virt_to_phys
+ blr x12 // initialise processor
+@@ -340,8 +334,13 @@
+ * x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
++ *
++ * We align the entire function to the smallest power of two larger than it to
++ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
++ * close to the end of a 512MB or 1GB block we might require an additional
++ * table to map the entire function.
+ */
+- .align 6
++ .align 4
+ __turn_mmu_on:
+ msr sctlr_el1, x0
+ isb
+@@ -384,26 +383,18 @@
+ * Preserves: tbl, flags
+ * Corrupts: phys, start, end, pstate
+ */
+- .macro create_block_map, tbl, flags, phys, start, end, idmap=0
++ .macro create_block_map, tbl, flags, phys, start, end
+ lsr \phys, \phys, #BLOCK_SHIFT
+- .if \idmap
+- and \start, \phys, #PTRS_PER_PTE - 1 // table index
+- .else
+ lsr \start, \start, #BLOCK_SHIFT
+ and \start, \start, #PTRS_PER_PTE - 1 // table index
+- .endif
+ orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
+- .ifnc \start,\end
+ lsr \end, \end, #BLOCK_SHIFT
+ and \end, \end, #PTRS_PER_PTE - 1 // table end index
+- .endif
+ 9999: str \phys, [\tbl, \start, lsl #3] // store the entry
+- .ifnc \start,\end
+ add \start, \start, #1 // next entry
+ add \phys, \phys, #BLOCK_SIZE // next block
+ cmp \start, \end
+ b.ls 9999b
+- .endif
+ .endm
+
+ /*
+@@ -415,7 +406,16 @@
+ * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
+ */
+ __create_page_tables:
+- pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
++ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
++ mov x27, lr
++
++ /*
++ * Invalidate the idmap and swapper page tables to avoid potential
++ * dirty cache lines being evicted.
++ */
++ mov x0, x25
++ add x1, x26, #SWAPPER_DIR_SIZE
++ bl __inval_cache_range
+
+ /*
+ * Clear the idmap and swapper page tables.
+@@ -435,9 +435,13 @@
+ * Create the identity mapping.
+ */
+ add x0, x25, #PAGE_SIZE // section table address
+- adr x3, __turn_mmu_on // virtual/physical address
++ ldr x3, =KERNEL_START
++ add x3, x3, x28 // __pa(KERNEL_START)
+ create_pgd_entry x25, x0, x3, x5, x6
+- create_block_map x0, x7, x3, x5, x5, idmap=1
++ ldr x6, =KERNEL_END
++ mov x5, x3 // __pa(KERNEL_START)
++ add x6, x6, x28 // __pa(KERNEL_END)
++ create_block_map x0, x7, x3, x5, x6
+
+ /*
+ * Map the kernel image (starting with PHYS_OFFSET).
+@@ -445,7 +449,7 @@
+ add x0, x26, #PAGE_SIZE // section table address
+ mov x5, #PAGE_OFFSET
+ create_pgd_entry x26, x0, x5, x3, x6
+- ldr x6, =KERNEL_END - 1
++ ldr x6, =KERNEL_END
+ mov x3, x24 // phys offset
+ create_block_map x0, x7, x3, x5, x6
+
+@@ -474,6 +478,17 @@
+ add x0, x26, #2 * PAGE_SIZE // section table address
+ create_pgd_entry x26, x0, x5, x6, x7
+ #endif
++
++ /*
++ * Since the page tables have been populated with non-cacheable
++ * accesses (MMU disabled), invalidate the idmap and swapper page
++ * tables again to remove any speculatively loaded cache lines.
++ */
++ mov x0, x25
++ add x1, x26, #SWAPPER_DIR_SIZE
++ bl __inval_cache_range
++
++ mov lr, x27
+ ret
+ ENDPROC(__create_page_tables)
+ .ltorg
+@@ -483,7 +498,7 @@
+ __switch_data:
+ .quad __mmap_switched
+ .quad __bss_start // x6
+- .quad _end // x7
++ .quad __bss_stop // x7
+ .quad processor_id // x4
+ .quad __fdt_pointer // x5
+ .quad memstart_addr // x6
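Editor's note: the simplified create_block_map macro above now always walks from a start index to an end index. Its loop corresponds to roughly the following C, offered only as a rendering of the assembly; the constants shown are the 4KB-page values (64KB pages use 512MB blocks instead).

    #include <stdint.h>

    #define BLOCK_SHIFT   21                      /* 2MB sections with 4KB pages */
    #define BLOCK_SIZE    (1UL << BLOCK_SHIFT)
    #define PTRS_PER_PTE  512

    static void create_block_map(uint64_t *tbl, uint64_t flags,
                                 uint64_t phys, uint64_t start, uint64_t end)
    {
            uint64_t idx   = (start >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
            uint64_t last  = (end   >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
            uint64_t entry = flags | ((phys >> BLOCK_SHIFT) << BLOCK_SHIFT);

            do {
                    tbl[idx] = entry;        /* str \phys, [\tbl, \start, lsl #3] */
                    entry += BLOCK_SIZE;     /* next block */
            } while (idx++ < last);          /* b.ls: loop while index <= end index */
    }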
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/hw_breakpoint.c linux-3.14.40/arch/arm64/kernel/hw_breakpoint.c
+--- linux-3.14.40.orig/arch/arm64/kernel/hw_breakpoint.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/hw_breakpoint.c 2015-05-01 14:57:58.219427001 -0500
+@@ -20,6 +20,7 @@
+
+ #define pr_fmt(fmt) "hw-breakpoint: " fmt
+
++#include <linux/compat.h>
+ #include <linux/cpu_pm.h>
+ #include <linux/errno.h>
+ #include <linux/hw_breakpoint.h>
+@@ -27,7 +28,6 @@
+ #include <linux/ptrace.h>
+ #include <linux/smp.h>
+
+-#include <asm/compat.h>
+ #include <asm/current.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/hw_breakpoint.h>
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/kgdb.c linux-3.14.40/arch/arm64/kernel/kgdb.c
+--- linux-3.14.40.orig/arch/arm64/kernel/kgdb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/kernel/kgdb.c 2015-05-01 14:57:58.219427001 -0500
+@@ -0,0 +1,336 @@
++/*
++ * AArch64 KGDB support
++ *
++ * Based on arch/arm/kernel/kgdb.c
++ *
++ * Copyright (C) 2013 Cavium Inc.
++ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/irq.h>
++#include <linux/kdebug.h>
++#include <linux/kgdb.h>
++#include <asm/traps.h>
++
++struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
++ { "x0", 8, offsetof(struct pt_regs, regs[0])},
++ { "x1", 8, offsetof(struct pt_regs, regs[1])},
++ { "x2", 8, offsetof(struct pt_regs, regs[2])},
++ { "x3", 8, offsetof(struct pt_regs, regs[3])},
++ { "x4", 8, offsetof(struct pt_regs, regs[4])},
++ { "x5", 8, offsetof(struct pt_regs, regs[5])},
++ { "x6", 8, offsetof(struct pt_regs, regs[6])},
++ { "x7", 8, offsetof(struct pt_regs, regs[7])},
++ { "x8", 8, offsetof(struct pt_regs, regs[8])},
++ { "x9", 8, offsetof(struct pt_regs, regs[9])},
++ { "x10", 8, offsetof(struct pt_regs, regs[10])},
++ { "x11", 8, offsetof(struct pt_regs, regs[11])},
++ { "x12", 8, offsetof(struct pt_regs, regs[12])},
++ { "x13", 8, offsetof(struct pt_regs, regs[13])},
++ { "x14", 8, offsetof(struct pt_regs, regs[14])},
++ { "x15", 8, offsetof(struct pt_regs, regs[15])},
++ { "x16", 8, offsetof(struct pt_regs, regs[16])},
++ { "x17", 8, offsetof(struct pt_regs, regs[17])},
++ { "x18", 8, offsetof(struct pt_regs, regs[18])},
++ { "x19", 8, offsetof(struct pt_regs, regs[19])},
++ { "x20", 8, offsetof(struct pt_regs, regs[20])},
++ { "x21", 8, offsetof(struct pt_regs, regs[21])},
++ { "x22", 8, offsetof(struct pt_regs, regs[22])},
++ { "x23", 8, offsetof(struct pt_regs, regs[23])},
++ { "x24", 8, offsetof(struct pt_regs, regs[24])},
++ { "x25", 8, offsetof(struct pt_regs, regs[25])},
++ { "x26", 8, offsetof(struct pt_regs, regs[26])},
++ { "x27", 8, offsetof(struct pt_regs, regs[27])},
++ { "x28", 8, offsetof(struct pt_regs, regs[28])},
++ { "x29", 8, offsetof(struct pt_regs, regs[29])},
++ { "x30", 8, offsetof(struct pt_regs, regs[30])},
++ { "sp", 8, offsetof(struct pt_regs, sp)},
++ { "pc", 8, offsetof(struct pt_regs, pc)},
++ { "pstate", 8, offsetof(struct pt_regs, pstate)},
++ { "v0", 16, -1 },
++ { "v1", 16, -1 },
++ { "v2", 16, -1 },
++ { "v3", 16, -1 },
++ { "v4", 16, -1 },
++ { "v5", 16, -1 },
++ { "v6", 16, -1 },
++ { "v7", 16, -1 },
++ { "v8", 16, -1 },
++ { "v9", 16, -1 },
++ { "v10", 16, -1 },
++ { "v11", 16, -1 },
++ { "v12", 16, -1 },
++ { "v13", 16, -1 },
++ { "v14", 16, -1 },
++ { "v15", 16, -1 },
++ { "v16", 16, -1 },
++ { "v17", 16, -1 },
++ { "v18", 16, -1 },
++ { "v19", 16, -1 },
++ { "v20", 16, -1 },
++ { "v21", 16, -1 },
++ { "v22", 16, -1 },
++ { "v23", 16, -1 },
++ { "v24", 16, -1 },
++ { "v25", 16, -1 },
++ { "v26", 16, -1 },
++ { "v27", 16, -1 },
++ { "v28", 16, -1 },
++ { "v29", 16, -1 },
++ { "v30", 16, -1 },
++ { "v31", 16, -1 },
++ { "fpsr", 4, -1 },
++ { "fpcr", 4, -1 },
++};
++
++char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
++{
++ if (regno >= DBG_MAX_REG_NUM || regno < 0)
++ return NULL;
++
++ if (dbg_reg_def[regno].offset != -1)
++ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
++ dbg_reg_def[regno].size);
++ else
++ memset(mem, 0, dbg_reg_def[regno].size);
++ return dbg_reg_def[regno].name;
++}
++
++int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
++{
++ if (regno >= DBG_MAX_REG_NUM || regno < 0)
++ return -EINVAL;
++
++ if (dbg_reg_def[regno].offset != -1)
++ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
++ dbg_reg_def[regno].size);
++ return 0;
++}
++
++void
++sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
++{
++ struct pt_regs *thread_regs;
++
++ /* Initialize to zero */
++ memset((char *)gdb_regs, 0, NUMREGBYTES);
++ thread_regs = task_pt_regs(task);
++ memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
++}
++
++void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
++{
++ regs->pc = pc;
++}
++
++static int compiled_break;
++
++static void kgdb_arch_update_addr(struct pt_regs *regs,
++ char *remcom_in_buffer)
++{
++ unsigned long addr;
++ char *ptr;
++
++ ptr = &remcom_in_buffer[1];
++ if (kgdb_hex2long(&ptr, &addr))
++ kgdb_arch_set_pc(regs, addr);
++ else if (compiled_break == 1)
++ kgdb_arch_set_pc(regs, regs->pc + 4);
++
++ compiled_break = 0;
++}
++
++int kgdb_arch_handle_exception(int exception_vector, int signo,
++ int err_code, char *remcom_in_buffer,
++ char *remcom_out_buffer,
++ struct pt_regs *linux_regs)
++{
++ int err;
++
++ switch (remcom_in_buffer[0]) {
++ case 'D':
++ case 'k':
++ /*
++ * Packet D (Detach), k (kill). No special handling
++ * is required here. Handle same as c packet.
++ */
++ case 'c':
++ /*
++ * Packet c (Continue) to continue executing.
++ * Set pc to required address.
++ * Try to read optional parameter and set pc.
++ * If this was a compiled breakpoint, we need to move
++ * to the next instruction else we will just breakpoint
++ * over and over again.
++ */
++ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
++ atomic_set(&kgdb_cpu_doing_single_step, -1);
++ kgdb_single_step = 0;
++
++ /*
++ * Received continue command, disable single step
++ */
++ if (kernel_active_single_step())
++ kernel_disable_single_step();
++
++ err = 0;
++ break;
++ case 's':
++ /*
++ * Update step address value with address passed
++ * with step packet.
++ * On debug exception return PC is copied to ELR
++ * So just update PC.
++ * If no step address is passed, resume from the address
++ * pointed by PC. Do not update PC
++ */
++ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
++ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
++ kgdb_single_step = 1;
++
++ /*
++ * Enable single step handling
++ */
++ if (!kernel_active_single_step())
++ kernel_enable_single_step(linux_regs);
++ err = 0;
++ break;
++ default:
++ err = -1;
++ }
++ return err;
++}
++
++static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
++{
++ kgdb_handle_exception(1, SIGTRAP, 0, regs);
++ return 0;
++}
++
++static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
++{
++ compiled_break = 1;
++ kgdb_handle_exception(1, SIGTRAP, 0, regs);
++
++ return 0;
++}
++
++static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
++{
++ kgdb_handle_exception(1, SIGTRAP, 0, regs);
++ return 0;
++}
++
++static struct break_hook kgdb_brkpt_hook = {
++ .esr_mask = 0xffffffff,
++ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
++ .fn = kgdb_brk_fn
++};
++
++static struct break_hook kgdb_compiled_brkpt_hook = {
++ .esr_mask = 0xffffffff,
++ .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
++ .fn = kgdb_compiled_brk_fn
++};
++
++static struct step_hook kgdb_step_hook = {
++ .fn = kgdb_step_brk_fn
++};
++
++static void kgdb_call_nmi_hook(void *ignored)
++{
++ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
++}
++
++void kgdb_roundup_cpus(unsigned long flags)
++{
++ local_irq_enable();
++ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
++ local_irq_disable();
++}
++
++static int __kgdb_notify(struct die_args *args, unsigned long cmd)
++{
++ struct pt_regs *regs = args->regs;
++
++ if (kgdb_handle_exception(1, args->signr, cmd, regs))
++ return NOTIFY_DONE;
++ return NOTIFY_STOP;
++}
++
++static int
++kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
++{
++ unsigned long flags;
++ int ret;
++
++ local_irq_save(flags);
++ ret = __kgdb_notify(ptr, cmd);
++ local_irq_restore(flags);
++
++ return ret;
++}
++
++static struct notifier_block kgdb_notifier = {
++ .notifier_call = kgdb_notify,
++ /*
++ * Want to be lowest priority
++ */
++ .priority = -INT_MAX,
++};
++
++/*
++ * kgdb_arch_init - Perform any architecture specific initialization.
++ * This function will handle the initialization of any architecture
++ * specific callbacks.
++ */
++int kgdb_arch_init(void)
++{
++ int ret = register_die_notifier(&kgdb_notifier);
++
++ if (ret != 0)
++ return ret;
++
++ register_break_hook(&kgdb_brkpt_hook);
++ register_break_hook(&kgdb_compiled_brkpt_hook);
++ register_step_hook(&kgdb_step_hook);
++ return 0;
++}
++
++/*
++ * kgdb_arch_exit - Perform any architecture specific uninitialization.
++ * This function will handle the uninitialization of any architecture
++ * specific callbacks, for dynamic registration and unregistration.
++ */
++void kgdb_arch_exit(void)
++{
++ unregister_break_hook(&kgdb_brkpt_hook);
++ unregister_break_hook(&kgdb_compiled_brkpt_hook);
++ unregister_step_hook(&kgdb_step_hook);
++ unregister_die_notifier(&kgdb_notifier);
++}
++
++/*
++ * ARM instructions are always in LE.
++ * Break instruction is encoded in LE format
++ */
++struct kgdb_arch arch_kgdb_ops = {
++ .gdb_bpt_instr = {
++ KGDB_DYN_BRK_INS_BYTE0,
++ KGDB_DYN_BRK_INS_BYTE1,
++ KGDB_DYN_BRK_INS_BYTE2,
++ KGDB_DYN_BRK_INS_BYTE3,
++ }
++};
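Editor's note: dbg_reg_def[] above is purely descriptive; the generic stub in kernel/debug/gdbstub.c walks it when building gdb's 'g' packet. A simplified sketch of that consumer, to show how the size/offset fields and the -1 entries (zero-filled FP/SIMD registers) are used.

    /* Roughly what the arch-independent stub does with the table above. */
    static void regs_to_gdb(unsigned long *gdb_regs, struct pt_regs *regs)
    {
            char *ptr = (char *)gdb_regs;
            int i;

            for (i = 0; i < DBG_MAX_REG_NUM; i++) {
                    dbg_get_reg(i, ptr, regs);   /* offset == -1 regs come back zeroed */
                    ptr += dbg_reg_def[i].size;
            }
    }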
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/Makefile linux-3.14.40/arch/arm64/kernel/Makefile
+--- linux-3.14.40.orig/arch/arm64/kernel/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/Makefile 2015-05-01 14:57:58.219427001 -0500
+@@ -5,21 +5,29 @@
+ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
+ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
++CFLAGS_REMOVE_ftrace.o = -pg
++CFLAGS_REMOVE_insn.o = -pg
++CFLAGS_REMOVE_return_address.o = -pg
++
+ # Object file lists.
+ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
+ entry-fpsimd.o process.o ptrace.o setup.o signal.o \
+ sys.o stacktrace.o time.o traps.o io.o vdso.o \
+- hyp-stub.o psci.o cpu_ops.o insn.o
++ hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+
+ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
+ sys_compat.o
++arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
+ arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
++arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
+ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
++arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+ arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
++arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+ arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
++arm64-obj-$(CONFIG_KGDB) += kgdb.o
+
+ obj-y += $(arm64-obj-y) vdso/
+ obj-m += $(arm64-obj-m)
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/perf_event.c linux-3.14.40/arch/arm64/kernel/perf_event.c
+--- linux-3.14.40.orig/arch/arm64/kernel/perf_event.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/perf_event.c 2015-05-01 14:57:58.219427001 -0500
+@@ -1348,8 +1348,8 @@
+ * Callchain handling code.
+ */
+ struct frame_tail {
+- struct frame_tail __user *fp;
+- unsigned long lr;
++ struct frame_tail __user *fp;
++ unsigned long lr;
+ } __attribute__((packed));
+
+ /*
+@@ -1386,22 +1386,84 @@
+ return buftail.fp;
+ }
+
++#ifdef CONFIG_COMPAT
++/*
++ * The registers we're interested in are at the end of the variable
++ * length saved register structure. The fp points at the end of this
++ * structure so the address of this struct is:
++ * (struct compat_frame_tail *)(xxx->fp)-1
++ *
++ * This code has been adapted from the ARM OProfile support.
++ */
++struct compat_frame_tail {
++ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
++ u32 sp;
++ u32 lr;
++} __attribute__((packed));
++
++static struct compat_frame_tail __user *
++compat_user_backtrace(struct compat_frame_tail __user *tail,
++ struct perf_callchain_entry *entry)
++{
++ struct compat_frame_tail buftail;
++ unsigned long err;
++
++ /* Also check accessibility of one struct frame_tail beyond */
++ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
++ return NULL;
++
++ pagefault_disable();
++ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
++ pagefault_enable();
++
++ if (err)
++ return NULL;
++
++ perf_callchain_store(entry, buftail.lr);
++
++ /*
++ * Frame pointers should strictly progress back up the stack
++ * (towards higher addresses).
++ */
++ if (tail + 1 >= (struct compat_frame_tail __user *)
++ compat_ptr(buftail.fp))
++ return NULL;
++
++ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
++}
++#endif /* CONFIG_COMPAT */
++
+ void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+ {
+- struct frame_tail __user *tail;
+-
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->pc);
+- tail = (struct frame_tail __user *)regs->regs[29];
+
+- while (entry->nr < PERF_MAX_STACK_DEPTH &&
+- tail && !((unsigned long)tail & 0xf))
+- tail = user_backtrace(tail, entry);
++ if (!compat_user_mode(regs)) {
++ /* AARCH64 mode */
++ struct frame_tail __user *tail;
++
++ tail = (struct frame_tail __user *)regs->regs[29];
++
++ while (entry->nr < PERF_MAX_STACK_DEPTH &&
++ tail && !((unsigned long)tail & 0xf))
++ tail = user_backtrace(tail, entry);
++ } else {
++#ifdef CONFIG_COMPAT
++ /* AARCH32 compat mode */
++ struct compat_frame_tail __user *tail;
++
++ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
++
++ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
++ tail && !((unsigned long)tail & 0x3))
++ tail = compat_user_backtrace(tail, entry);
++#endif
++ }
+ }
+
+ /*
+@@ -1429,6 +1491,7 @@
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
++
+ walk_stackframe(&frame, callchain_trace, entry);
+ }
+
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/perf_regs.c linux-3.14.40/arch/arm64/kernel/perf_regs.c
+--- linux-3.14.40.orig/arch/arm64/kernel/perf_regs.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/kernel/perf_regs.c 2015-05-01 14:57:58.219427001 -0500
+@@ -0,0 +1,46 @@
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/perf_event.h>
++#include <linux/bug.h>
++
++#include <asm/compat.h>
++#include <asm/perf_regs.h>
++#include <asm/ptrace.h>
++
++u64 perf_reg_value(struct pt_regs *regs, int idx)
++{
++ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
++ return 0;
++
++ /*
++ * Compat (i.e. 32 bit) mode:
++ * - PC has been set in the pt_regs struct in kernel_entry,
++ * - Handle SP and LR here.
++ */
++ if (compat_user_mode(regs)) {
++ if ((u32)idx == PERF_REG_ARM64_SP)
++ return regs->compat_sp;
++ if ((u32)idx == PERF_REG_ARM64_LR)
++ return regs->compat_lr;
++ }
++
++ return regs->regs[idx];
++}
++
++#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
++
++int perf_reg_validate(u64 mask)
++{
++ if (!mask || mask & REG_RESERVED)
++ return -EINVAL;
++
++ return 0;
++}
++
++u64 perf_reg_abi(struct task_struct *task)
++{
++ if (is_compat_thread(task_thread_info(task)))
++ return PERF_SAMPLE_REGS_ABI_32;
++ else
++ return PERF_SAMPLE_REGS_ABI_64;
++}
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/process.c linux-3.14.40/arch/arm64/kernel/process.c
+--- linux-3.14.40.orig/arch/arm64/kernel/process.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/process.c 2015-05-01 14:57:58.223427001 -0500
+@@ -20,6 +20,7 @@
+
+ #include <stdarg.h>
+
++#include <linux/compat.h>
+ #include <linux/export.h>
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/ptrace.c linux-3.14.40/arch/arm64/kernel/ptrace.c
+--- linux-3.14.40.orig/arch/arm64/kernel/ptrace.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/ptrace.c 2015-05-01 14:57:58.223427001 -0500
+@@ -19,6 +19,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/compat.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
+@@ -41,6 +42,9 @@
+ #include <asm/traps.h>
+ #include <asm/system_misc.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/syscalls.h>
++
+ /*
+ * TODO: does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+@@ -1073,35 +1077,49 @@
+ return ptrace_request(child, request, addr, data);
+ }
+
+-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
++enum ptrace_syscall_dir {
++ PTRACE_SYSCALL_ENTER = 0,
++ PTRACE_SYSCALL_EXIT,
++};
++
++static void tracehook_report_syscall(struct pt_regs *regs,
++ enum ptrace_syscall_dir dir)
+ {
++ int regno;
+ unsigned long saved_reg;
+
+- if (!test_thread_flag(TIF_SYSCALL_TRACE))
+- return regs->syscallno;
+-
+- if (is_compat_task()) {
+- /* AArch32 uses ip (r12) for scratch */
+- saved_reg = regs->regs[12];
+- regs->regs[12] = dir;
+- } else {
+- /*
+- * Save X7. X7 is used to denote syscall entry/exit:
+- * X7 = 0 -> entry, = 1 -> exit
+- */
+- saved_reg = regs->regs[7];
+- regs->regs[7] = dir;
+- }
++ /*
++ * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
++ * used to denote syscall entry/exit:
++ */
++ regno = (is_compat_task() ? 12 : 7);
++ saved_reg = regs->regs[regno];
++ regs->regs[regno] = dir;
+
+- if (dir)
++ if (dir == PTRACE_SYSCALL_EXIT)
+ tracehook_report_syscall_exit(regs, 0);
+ else if (tracehook_report_syscall_entry(regs))
+ regs->syscallno = ~0UL;
+
+- if (is_compat_task())
+- regs->regs[12] = saved_reg;
+- else
+- regs->regs[7] = saved_reg;
++ regs->regs[regno] = saved_reg;
++}
++
++asmlinkage int syscall_trace_enter(struct pt_regs *regs)
++{
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
++
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++ trace_sys_enter(regs, regs->syscallno);
+
+ return regs->syscallno;
+ }
++
++asmlinkage void syscall_trace_exit(struct pt_regs *regs)
++{
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
++ trace_sys_exit(regs, regs_return_value(regs));
++
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
++ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
++}
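Editor's note: with the change above, x7 (or r12 for a compat task) is temporarily overwritten with 0 on syscall entry and 1 on exit while the tracer is notified. A hedged userspace sketch of how an arm64 tracer can observe that marker at a syscall stop; report_stop is a made-up helper and error handling is omitted, but the PTRACE_GETREGSET/NT_PRSTATUS interface and register layout are the standard ones.

    #include <elf.h>            /* NT_PRSTATUS */
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>     /* struct user_pt_regs */

    static void report_stop(pid_t pid)
    {
            struct user_pt_regs regs;
            struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

            ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);

            /* x8 carries the syscall number; x7 carries the entry(0)/exit(1)
             * marker set by tracehook_report_syscall() above. */
            printf("syscall %llu: %s\n", (unsigned long long)regs.regs[8],
                   regs.regs[7] ? "exit" : "entry");
    }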
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/return_address.c linux-3.14.40/arch/arm64/kernel/return_address.c
+--- linux-3.14.40.orig/arch/arm64/kernel/return_address.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/kernel/return_address.c 2015-05-01 14:57:58.223427001 -0500
+@@ -0,0 +1,55 @@
++/*
++ * arch/arm64/kernel/return_address.c
++ *
++ * Copyright (C) 2013 Linaro Limited
++ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/export.h>
++#include <linux/ftrace.h>
++
++#include <asm/stacktrace.h>
++
++struct return_address_data {
++ unsigned int level;
++ void *addr;
++};
++
++static int save_return_addr(struct stackframe *frame, void *d)
++{
++ struct return_address_data *data = d;
++
++ if (!data->level) {
++ data->addr = (void *)frame->pc;
++ return 1;
++ } else {
++ --data->level;
++ return 0;
++ }
++}
++
++void *return_address(unsigned int level)
++{
++ struct return_address_data data;
++ struct stackframe frame;
++ register unsigned long current_sp asm ("sp");
++
++ data.level = level + 2;
++ data.addr = NULL;
++
++ frame.fp = (unsigned long)__builtin_frame_address(0);
++ frame.sp = current_sp;
++ frame.pc = (unsigned long)return_address; /* dummy */
++
++ walk_stackframe(&frame, save_return_addr, &data);
++
++ if (!data.level)
++ return data.addr;
++ else
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(return_address);
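Editor's note: return_address() is what the CALLER_ADDRn machinery (and tracers such as irqsoff) rely on. A trivial, hypothetical in-kernel usage sketch, assuming the declaration is visible via the headers added elsewhere in this series.

    #include <linux/ftrace.h>
    #include <linux/printk.h>

    static noinline void who_called_me(void)
    {
            /* level 0 = our caller, level 1 = its caller, and so on */
            pr_info("called from %pS (grandparent %pS)\n",
                    (void *)return_address(0), (void *)return_address(1));
    }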
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/setup.c linux-3.14.40/arch/arm64/kernel/setup.c
+--- linux-3.14.40.orig/arch/arm64/kernel/setup.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/setup.c 2015-05-01 14:57:58.223427001 -0500
+@@ -71,6 +71,7 @@
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
+ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
++unsigned int compat_elf_hwcap2 __read_mostly;
+ #endif
+
+ static const char *cpu_name;
+@@ -258,6 +259,38 @@
+ block = (features >> 16) & 0xf;
+ if (block && !(block & 0x8))
+ elf_hwcap |= HWCAP_CRC32;
++
++#ifdef CONFIG_COMPAT
++ /*
++ * ID_ISAR5_EL1 carries similar information as above, but pertaining to
++ * the Aarch32 32-bit execution state.
++ */
++ features = read_cpuid(ID_ISAR5_EL1);
++ block = (features >> 4) & 0xf;
++ if (!(block & 0x8)) {
++ switch (block) {
++ default:
++ case 2:
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
++ case 1:
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
++ case 0:
++ break;
++ }
++ }
++
++ block = (features >> 8) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
++
++ block = (features >> 12) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
++
++ block = (features >> 16) & 0xf;
++ if (block && !(block & 0x8))
++ compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
++#endif
+ }
+
+ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+@@ -374,7 +407,7 @@
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+ }
+-arch_initcall(arm64_device_init);
++arch_initcall_sync(arm64_device_init);
+
+ static int __init topology_init(void)
+ {
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/signal.c linux-3.14.40/arch/arm64/kernel/signal.c
+--- linux-3.14.40.orig/arch/arm64/kernel/signal.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/signal.c 2015-05-01 14:57:58.223427001 -0500
+@@ -17,6 +17,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/signal.h>
+ #include <linux/personality.h>
+@@ -25,7 +26,6 @@
+ #include <linux/tracehook.h>
+ #include <linux/ratelimit.h>
+
+-#include <asm/compat.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/elf.h>
+ #include <asm/cacheflush.h>
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/smp.c linux-3.14.40/arch/arm64/kernel/smp.c
+--- linux-3.14.40.orig/arch/arm64/kernel/smp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/smp.c 2015-05-01 14:57:58.223427001 -0500
+@@ -114,6 +114,11 @@
+ return ret;
+ }
+
++static void smp_store_cpu_info(unsigned int cpuid)
++{
++ store_cpu_topology(cpuid);
++}
++
+ /*
+ * This is the secondary CPU boot entry. We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+@@ -157,6 +162,8 @@
+ */
+ notify_cpu_starting(cpu);
+
++ smp_store_cpu_info(cpu);
++
+ /*
+ * OK, now it's safe to let the boot CPU continue. Wait for
+ * the CPU migration code to notice that the CPU is online
+@@ -395,6 +402,10 @@
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
++ init_cpu_topology();
++
++ smp_store_cpu_info(smp_processor_id());
++
+ /*
+ * are we trying to boot more cores than exist?
+ */
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/stacktrace.c linux-3.14.40/arch/arm64/kernel/stacktrace.c
+--- linux-3.14.40.orig/arch/arm64/kernel/stacktrace.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/stacktrace.c 2015-05-01 14:57:58.223427001 -0500
+@@ -35,7 +35,7 @@
+ * ldp x29, x30, [sp]
+ * add sp, sp, #0x10
+ */
+-int unwind_frame(struct stackframe *frame)
++int notrace unwind_frame(struct stackframe *frame)
+ {
+ unsigned long high, low;
+ unsigned long fp = frame->fp;
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/topology.c linux-3.14.40/arch/arm64/kernel/topology.c
+--- linux-3.14.40.orig/arch/arm64/kernel/topology.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/arch/arm64/kernel/topology.c 2015-05-01 14:57:58.223427001 -0500
+@@ -0,0 +1,558 @@
++/*
++ * arch/arm64/kernel/topology.c
++ *
++ * Copyright (C) 2011,2013,2014 Linaro Limited.
++ *
++ * Based on the arm32 version written by Vincent Guittot in turn based on
++ * arch/sh/kernel/topology.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ */
++
++#include <linux/cpu.h>
++#include <linux/cpumask.h>
++#include <linux/init.h>
++#include <linux/percpu.h>
++#include <linux/node.h>
++#include <linux/nodemask.h>
++#include <linux/of.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++
++#include <asm/topology.h>
++
++/*
++ * cpu power table
++ * This per cpu data structure describes the relative capacity of each core.
++ * On a heterogeneous system, cores don't have the same computation capacity
++ * and we reflect that difference in the cpu_power field so the scheduler can
++ * take this difference into account during load balance. A per cpu structure
++ * is preferred because each CPU updates its own cpu_power field during the
++ * load balance except for idle cores. One idle core is selected to run the
++ * rebalance_domains for all idle cores and the cpu_power can be updated
++ * during this sequence.
++ */
++static DEFINE_PER_CPU(unsigned long, cpu_scale);
++
++unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
++{
++ return per_cpu(cpu_scale, cpu);
++}
++
++static void set_power_scale(unsigned int cpu, unsigned long power)
++{
++ per_cpu(cpu_scale, cpu) = power;
++}
++
++static int __init get_cpu_for_node(struct device_node *node)
++{
++ struct device_node *cpu_node;
++ int cpu;
++
++ cpu_node = of_parse_phandle(node, "cpu", 0);
++ if (!cpu_node)
++ return -1;
++
++ for_each_possible_cpu(cpu) {
++ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
++ of_node_put(cpu_node);
++ return cpu;
++ }
++ }
++
++ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
++
++ of_node_put(cpu_node);
++ return -1;
++}
++
++static int __init parse_core(struct device_node *core, int cluster_id,
++ int core_id)
++{
++ char name[10];
++ bool leaf = true;
++ int i = 0;
++ int cpu;
++ struct device_node *t;
++
++ do {
++ snprintf(name, sizeof(name), "thread%d", i);
++ t = of_get_child_by_name(core, name);
++ if (t) {
++ leaf = false;
++ cpu = get_cpu_for_node(t);
++ if (cpu >= 0) {
++ cpu_topology[cpu].cluster_id = cluster_id;
++ cpu_topology[cpu].core_id = core_id;
++ cpu_topology[cpu].thread_id = i;
++ } else {
++ pr_err("%s: Can't get CPU for thread\n",
++ t->full_name);
++ of_node_put(t);
++ return -EINVAL;
++ }
++ of_node_put(t);
++ }
++ i++;
++ } while (t);
++
++ cpu = get_cpu_for_node(core);
++ if (cpu >= 0) {
++ if (!leaf) {
++ pr_err("%s: Core has both threads and CPU\n",
++ core->full_name);
++ return -EINVAL;
++ }
++
++ cpu_topology[cpu].cluster_id = cluster_id;
++ cpu_topology[cpu].core_id = core_id;
++ } else if (leaf) {
++ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int __init parse_cluster(struct device_node *cluster, int depth)
++{
++ char name[10];
++ bool leaf = true;
++ bool has_cores = false;
++ struct device_node *c;
++ static int cluster_id __initdata;
++ int core_id = 0;
++ int i, ret;
++
++ /*
++ * First check for child clusters; we currently ignore any
++ * information about the nesting of clusters and present the
++ * scheduler with a flat list of them.
++ */
++ i = 0;
++ do {
++ snprintf(name, sizeof(name), "cluster%d", i);
++ c = of_get_child_by_name(cluster, name);
++ if (c) {
++ leaf = false;
++ ret = parse_cluster(c, depth + 1);
++ of_node_put(c);
++ if (ret != 0)
++ return ret;
++ }
++ i++;
++ } while (c);
++
++ /* Now check for cores */
++ i = 0;
++ do {
++ snprintf(name, sizeof(name), "core%d", i);
++ c = of_get_child_by_name(cluster, name);
++ if (c) {
++ has_cores = true;
++
++ if (depth == 0) {
++ pr_err("%s: cpu-map children should be clusters\n",
++ c->full_name);
++ of_node_put(c);
++ return -EINVAL;
++ }
++
++ if (leaf) {
++ ret = parse_core(c, cluster_id, core_id++);
++ } else {
++ pr_err("%s: Non-leaf cluster with core %s\n",
++ cluster->full_name, name);
++ ret = -EINVAL;
++ }
++
++ of_node_put(c);
++ if (ret != 0)
++ return ret;
++ }
++ i++;
++ } while (c);
++
++ if (leaf && !has_cores)
++ pr_warn("%s: empty cluster\n", cluster->full_name);
++
++ if (leaf)
++ cluster_id++;
++
++ return 0;
++}
++
++struct cpu_efficiency {
++ const char *compatible;
++ unsigned long efficiency;
++};
++
++/*
++ * Table of relative efficiency of each processor
++ * The efficiency value must fit in 20bit and the final
++ * cpu_scale value must be in the range
++ * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
++ * in order to return at most 1 when DIV_ROUND_CLOSEST
++ * is used to compute the capacity of a CPU.
++ * Processors that are not defined in the table,
++ * use the default SCHED_POWER_SCALE value for cpu_scale.
++ */
++static const struct cpu_efficiency table_efficiency[] = {
++ { "arm,cortex-a57", 3891 },
++ { "arm,cortex-a53", 2048 },
++ { NULL, },
++};
++
++static unsigned long *__cpu_capacity;
++#define cpu_capacity(cpu) __cpu_capacity[cpu]
++
++static unsigned long middle_capacity = 1;
++
++/*
++ * Iterate all CPUs' descriptor in DT and compute the efficiency
++ * (as per table_efficiency). Also calculate a middle efficiency
++ * as close as possible to (max{eff_i} - min{eff_i}) / 2
++ * This is later used to scale the cpu_power field such that an
++ * 'average' CPU is of middle power. Also see the comments near
++ * table_efficiency[] and update_cpu_power().
++ */
++static int __init parse_dt_topology(void)
++{
++ struct device_node *cn, *map;
++ int ret = 0;
++ int cpu;
++
++ cn = of_find_node_by_path("/cpus");
++ if (!cn) {
++ pr_err("No CPU information found in DT\n");
++ return 0;
++ }
++
++ /*
++ * When topology is provided cpu-map is essentially a root
++ * cluster with restricted subnodes.
++ */
++ map = of_get_child_by_name(cn, "cpu-map");
++ if (!map)
++ goto out;
++
++ ret = parse_cluster(map, 0);
++ if (ret != 0)
++ goto out_map;
++
++ /*
++ * Check that all cores are in the topology; the SMP code will
++ * only mark cores described in the DT as possible.
++ */
++ for_each_possible_cpu(cpu) {
++ if (cpu_topology[cpu].cluster_id == -1) {
++ pr_err("CPU%d: No topology information specified\n",
++ cpu);
++ ret = -EINVAL;
++ }
++ }
++
++out_map:
++ of_node_put(map);
++out:
++ of_node_put(cn);
++ return ret;
++}
++
++static void __init parse_dt_cpu_power(void)
++{
++ const struct cpu_efficiency *cpu_eff;
++ struct device_node *cn;
++ unsigned long min_capacity = ULONG_MAX;
++ unsigned long max_capacity = 0;
++ unsigned long capacity = 0;
++ int cpu;
++
++ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
++ GFP_NOWAIT);
++
++ for_each_possible_cpu(cpu) {
++ const u32 *rate;
++ int len;
++
++ /* Too early to use cpu->of_node */
++ cn = of_get_cpu_node(cpu, NULL);
++ if (!cn) {
++ pr_err("Missing device node for CPU %d\n", cpu);
++ continue;
++ }
++
++ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
++ if (of_device_is_compatible(cn, cpu_eff->compatible))
++ break;
++
++ if (cpu_eff->compatible == NULL) {
++ pr_warn("%s: Unknown CPU type\n", cn->full_name);
++ continue;
++ }
++
++ rate = of_get_property(cn, "clock-frequency", &len);
++ if (!rate || len != 4) {
++ pr_err("%s: Missing clock-frequency property\n",
++ cn->full_name);
++ continue;
++ }
++
++ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
++
++ /* Save min capacity of the system */
++ if (capacity < min_capacity)
++ min_capacity = capacity;
++
++ /* Save max capacity of the system */
++ if (capacity > max_capacity)
++ max_capacity = capacity;
++
++ cpu_capacity(cpu) = capacity;
++ }
++
++ /* If min and max capacities are equal we bypass the update of the
++ * cpu_scale because all CPUs have the same capacity. Otherwise, we
++ * compute a middle_capacity factor that will ensure that the capacity
++ * of an 'average' CPU of the system will be as close as possible to
++ * SCHED_POWER_SCALE, which is the default value, but with the
++ * constraint explained near table_efficiency[].
++ */
++ if (min_capacity == max_capacity)
++ return;
++ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
++ middle_capacity = (min_capacity + max_capacity)
++ >> (SCHED_POWER_SHIFT+1);
++ else
++ middle_capacity = ((max_capacity / 3)
++ >> (SCHED_POWER_SHIFT-1)) + 1;
++}
++
++/*
++ * Look for a custom capacity of a CPU in the cpu_topo_data table during
++ * boot. The update of all CPUs is in O(n^2) for a heterogeneous system but
++ * the function returns directly for an SMP system.
++ */
++static void update_cpu_power(unsigned int cpu)
++{
++ if (!cpu_capacity(cpu))
++ return;
++
++ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
++
++ pr_info("CPU%u: update cpu_power %lu\n",
++ cpu, arch_scale_freq_power(NULL, cpu));
++}
++
++/*
++ * cpu topology table
++ */
++struct cpu_topology cpu_topology[NR_CPUS];
++EXPORT_SYMBOL_GPL(cpu_topology);
++
++const struct cpumask *cpu_coregroup_mask(int cpu)
++{
++ return &cpu_topology[cpu].core_sibling;
++}
++
++static void update_siblings_masks(unsigned int cpuid)
++{
++ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
++ int cpu;
++
++ if (cpuid_topo->cluster_id == -1) {
++ /*
++ * DT does not contain topology information for this cpu.
++ */
++ pr_debug("CPU%u: No topology information configured\n", cpuid);
++ return;
++ }
++
++ /* update core and thread sibling masks */
++ for_each_possible_cpu(cpu) {
++ cpu_topo = &cpu_topology[cpu];
++
++ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
++ continue;
++
++ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
++ if (cpu != cpuid)
++ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
++
++ if (cpuid_topo->core_id != cpu_topo->core_id)
++ continue;
++
++ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
++ if (cpu != cpuid)
++ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
++ }
++}
++
++void store_cpu_topology(unsigned int cpuid)
++{
++ update_siblings_masks(cpuid);
++ update_cpu_power(cpuid);
++}
++
++#ifdef CONFIG_SCHED_HMP
++
++/*
++ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
++ * - mpidr: MPIDR[23:0] to be used for the look-up
++ *
++ * Returns the cpu logical index or -EINVAL on look-up error
++ */
++static inline int get_logical_index(u32 mpidr)
++{
++ int cpu;
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
++ if (cpu_logical_map(cpu) == mpidr)
++ return cpu;
++ return -EINVAL;
++}
++
++static const char * const little_cores[] = {
++ "arm,cortex-a53",
++ NULL,
++};
++
++static bool is_little_cpu(struct device_node *cn)
++{
++ const char * const *lc;
++ for (lc = little_cores; *lc; lc++)
++ if (of_device_is_compatible(cn, *lc))
++ return true;
++ return false;
++}
++
++void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
++ struct cpumask *slow)
++{
++ struct device_node *cn = NULL;
++ int cpu;
++
++ cpumask_clear(fast);
++ cpumask_clear(slow);
++
++ /*
++ * Use the config options if they are given. This helps testing
++ * HMP scheduling on systems without a big.LITTLE architecture.
++ */
++ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
++ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
++ WARN(1, "Failed to parse HMP fast cpu mask!\n");
++ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
++ WARN(1, "Failed to parse HMP slow cpu mask!\n");
++ return;
++ }
++
++ /*
++ * Else, parse device tree for little cores.
++ */
++ while ((cn = of_find_node_by_type(cn, "cpu"))) {
++
++ const u32 *mpidr;
++ int len;
++
++ mpidr = of_get_property(cn, "reg", &len);
++ if (!mpidr || len != 8) {
++ pr_err("%s missing reg property\n", cn->full_name);
++ continue;
++ }
++
++ cpu = get_logical_index(be32_to_cpup(mpidr+1));
++ if (cpu == -EINVAL) {
++ pr_err("couldn't get logical index for mpidr %x\n",
++ be32_to_cpup(mpidr+1));
++ break;
++ }
++
++ if (is_little_cpu(cn))
++ cpumask_set_cpu(cpu, slow);
++ else
++ cpumask_set_cpu(cpu, fast);
++ }
++
++ if (!cpumask_empty(fast) && !cpumask_empty(slow))
++ return;
++
++ /*
++ * We didn't find both big and little cores so let's call all cores
++ * fast as this will keep the system running, with all cores being
++ * treated equal.
++ */
++ cpumask_setall(fast);
++ cpumask_clear(slow);
++}
++
++struct cpumask hmp_slow_cpu_mask;
++
++void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
++{
++ struct cpumask hmp_fast_cpu_mask;
++ struct hmp_domain *domain;
++
++ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
++
++ /*
++ * Initialize hmp_domains
++ * Must be ordered with respect to compute capacity.
++ * Fastest domain at head of list.
++ */
++ if(!cpumask_empty(&hmp_slow_cpu_mask)) {
++ domain = (struct hmp_domain *)
++ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
++ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++ list_add(&domain->hmp_domains, hmp_domains_list);
++ }
++ domain = (struct hmp_domain *)
++ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
++ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++ list_add(&domain->hmp_domains, hmp_domains_list);
++}
++#endif /* CONFIG_SCHED_HMP */
++
++static void __init reset_cpu_topology(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
++
++ cpu_topo->thread_id = -1;
++ cpu_topo->core_id = 0;
++ cpu_topo->cluster_id = -1;
++
++ cpumask_clear(&cpu_topo->core_sibling);
++ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
++ cpumask_clear(&cpu_topo->thread_sibling);
++ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
++ }
++}
++
++static void __init reset_cpu_power(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu)
++ set_power_scale(cpu, SCHED_POWER_SCALE);
++}
++
++void __init init_cpu_topology(void)
++{
++ reset_cpu_topology();
++
++ /*
++ * Discard anything that was parsed if we hit an error so we
++ * don't use partial information.
++ */
++ if (parse_dt_topology())
++ reset_cpu_topology();
++
++ reset_cpu_power();
++ parse_dt_cpu_power();
++}
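Editor's note: to make the scaling in parse_dt_cpu_power()/update_cpu_power() concrete, a worked example with invented clock rates; the numbers are illustrative only, the efficiencies are the table_efficiency values above, and SCHED_POWER_SHIFT is 10 (SCHED_POWER_SCALE = 1024).

    /* Hypothetical big.LITTLE DT: Cortex-A57 @ 1.8 GHz, Cortex-A53 @ 1.3 GHz */
    unsigned long a57_cap = (1800000000UL >> 20) * 3891;  /* 1716 * 3891 = 6676956 */
    unsigned long a53_cap = (1300000000UL >> 20) * 2048;  /* 1239 * 2048 = 2537472 */

    /* 4*max < 3*(max+min) here, so the first branch sets middle_capacity */
    unsigned long middle  = (a53_cap + a57_cap) >> (10 + 1);  /* 9214428 >> 11 = 4499 */

    /* update_cpu_power() then leaves cpu_scale straddling SCHED_POWER_SCALE */
    unsigned long a57_scale = a57_cap / middle;   /* ~1484 */
    unsigned long a53_scale = a53_cap / middle;   /* ~564  */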
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/vdso/Makefile linux-3.14.40/arch/arm64/kernel/vdso/Makefile
+--- linux-3.14.40.orig/arch/arm64/kernel/vdso/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/vdso/Makefile 2015-05-01 14:57:58.223427001 -0500
+@@ -47,9 +47,9 @@
+ $(call if_changed_dep,vdsoas)
+
+ # Actual build commands
+-quiet_cmd_vdsold = VDSOL $@
++quiet_cmd_vdsold = VDSOL $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+-quiet_cmd_vdsoas = VDSOA $@
++quiet_cmd_vdsoas = VDSOA $@
+ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+
+ # Install commands for the unstripped file
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/vdso.c linux-3.14.40/arch/arm64/kernel/vdso.c
+--- linux-3.14.40.orig/arch/arm64/kernel/vdso.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/vdso.c 2015-05-01 14:57:58.223427001 -0500
+@@ -156,11 +156,12 @@
+ int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- unsigned long vdso_base, vdso_mapping_len;
++ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ int ret;
+
++ vdso_text_len = vdso_pages << PAGE_SHIFT;
+ /* Be sure to map the data page */
+- vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
++ vdso_mapping_len = vdso_text_len + PAGE_SIZE;
+
+ down_write(&mm->mmap_sem);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+@@ -170,35 +171,52 @@
+ }
+ mm->context.vdso = (void *)vdso_base;
+
+- ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
++ ret = install_special_mapping(mm, vdso_base, vdso_text_len,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ vdso_pagelist);
+- if (ret) {
+- mm->context.vdso = NULL;
++ if (ret)
++ goto up_fail;
++
++ vdso_base += vdso_text_len;
++ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
++ VM_READ|VM_MAYREAD,
++ vdso_pagelist + vdso_pages);
++ if (ret)
+ goto up_fail;
+- }
+
+-up_fail:
+ up_write(&mm->mmap_sem);
++ return 0;
+
++up_fail:
++ mm->context.vdso = NULL;
++ up_write(&mm->mmap_sem);
+ return ret;
+ }
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
++ unsigned long vdso_text;
++
++ if (!vma->vm_mm)
++ return NULL;
++
++ vdso_text = (unsigned long)vma->vm_mm->context.vdso;
++
+ /*
+ * We can re-use the vdso pointer in mm_context_t for identifying
+ * the vectors page for compat applications. The vDSO will always
+ * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
+ * it conflicting with the vectors base.
+ */
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
++ if (vma->vm_start == vdso_text) {
+ #ifdef CONFIG_COMPAT
+ if (vma->vm_start == AARCH32_VECTORS_BASE)
+ return "[vectors]";
+ #endif
+ return "[vdso]";
++ } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
++ return "[vvar]";
+ }
+
+ return NULL;
+diff -Nur linux-3.14.40.orig/arch/arm64/kernel/vmlinux.lds.S linux-3.14.40/arch/arm64/kernel/vmlinux.lds.S
+--- linux-3.14.40.orig/arch/arm64/kernel/vmlinux.lds.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/kernel/vmlinux.lds.S 2015-05-01 14:57:58.227427001 -0500
+@@ -104,6 +104,13 @@
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
++
++ . = ALIGN(PAGE_SIZE);
++ idmap_pg_dir = .;
++ . += IDMAP_DIR_SIZE;
++ swapper_pg_dir = .;
++ . += SWAPPER_DIR_SIZE;
++
+ _end = .;
+
+ STABS_DEBUG
+diff -Nur linux-3.14.40.orig/arch/arm64/Makefile linux-3.14.40/arch/arm64/Makefile
+--- linux-3.14.40.orig/arch/arm64/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/Makefile 2015-05-01 14:57:58.231427001 -0500
+@@ -45,6 +45,7 @@
+ core-y += arch/arm64/kernel/ arch/arm64/mm/
+ core-$(CONFIG_KVM) += arch/arm64/kvm/
+ core-$(CONFIG_XEN) += arch/arm64/xen/
++core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
+ libs-y := arch/arm64/lib/ $(libs-y)
+ libs-y += $(LIBGCC)
+
+diff -Nur linux-3.14.40.orig/arch/arm64/mm/cache.S linux-3.14.40/arch/arm64/mm/cache.S
+--- linux-3.14.40.orig/arch/arm64/mm/cache.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/mm/cache.S 2015-05-01 14:57:58.231427001 -0500
+@@ -30,7 +30,7 @@
+ *
+ * Corrupted registers: x0-x7, x9-x11
+ */
+-ENTRY(__flush_dcache_all)
++__flush_dcache_all:
+ dsb sy // ensure ordering with previous memory accesses
+ mrs x0, clidr_el1 // read clidr
+ and x3, x0, #0x7000000 // extract loc from clidr
+@@ -166,3 +166,97 @@
+ dsb sy
+ ret
+ ENDPROC(__flush_dcache_area)
++
++/*
++ * __inval_cache_range(start, end)
++ * - start - start address of region
++ * - end - end address of region
++ */
++ENTRY(__inval_cache_range)
++ /* FALLTHROUGH */
++
++/*
++ * __dma_inv_range(start, end)
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++__dma_inv_range:
++ dcache_line_size x2, x3
++ sub x3, x2, #1
++ tst x1, x3 // end cache line aligned?
++ bic x1, x1, x3
++ b.eq 1f
++ dc civac, x1 // clean & invalidate D / U line
++1: tst x0, x3 // start cache line aligned?
++ bic x0, x0, x3
++ b.eq 2f
++ dc civac, x0 // clean & invalidate D / U line
++ b 3f
++2: dc ivac, x0 // invalidate D / U line
++3: add x0, x0, x2
++ cmp x0, x1
++ b.lo 2b
++ dsb sy
++ ret
++ENDPROC(__inval_cache_range)
++ENDPROC(__dma_inv_range)
++
++/*
++ * __dma_clean_range(start, end)
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++__dma_clean_range:
++ dcache_line_size x2, x3
++ sub x3, x2, #1
++ bic x0, x0, x3
++1: dc cvac, x0 // clean D / U line
++ add x0, x0, x2
++ cmp x0, x1
++ b.lo 1b
++ dsb sy
++ ret
++ENDPROC(__dma_clean_range)
++
++/*
++ * __dma_flush_range(start, end)
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++ENTRY(__dma_flush_range)
++ dcache_line_size x2, x3
++ sub x3, x2, #1
++ bic x0, x0, x3
++1: dc civac, x0 // clean & invalidate D / U line
++ add x0, x0, x2
++ cmp x0, x1
++ b.lo 1b
++ dsb sy
++ ret
++ENDPROC(__dma_flush_range)
++
++/*
++ * __dma_map_area(start, size, dir)
++ * - start - kernel virtual start address
++ * - size - size of region
++ * - dir - DMA direction
++ */
++ENTRY(__dma_map_area)
++ add x1, x1, x0
++ cmp w2, #DMA_FROM_DEVICE
++ b.eq __dma_inv_range
++ b __dma_clean_range
++ENDPROC(__dma_map_area)
++
++/*
++ * __dma_unmap_area(start, size, dir)
++ * - start - kernel virtual start address
++ * - size - size of region
++ * - dir - DMA direction
++ */
++ENTRY(__dma_unmap_area)
++ add x1, x1, x0
++ cmp w2, #DMA_TO_DEVICE
++ b.ne __dma_inv_range
++ ret
++ENDPROC(__dma_unmap_area)
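Editor's note: the two DMA hooks added above simply pick a cache-maintenance primitive based on the transfer direction. Written out in C, the policy is the following; this is semantically equivalent to the assembly, and the C function names are just a rendering, not kernel symbols.

    /* Before the device touches the buffer (map): */
    static void dma_map_area(char *start, size_t size, int dir)
    {
            if (dir == DMA_FROM_DEVICE)
                    __dma_inv_range(start, start + size);    /* device writes: drop stale lines */
            else
                    __dma_clean_range(start, start + size);  /* device reads: push dirty lines out */
    }

    /* After the device is done (unmap): */
    static void dma_unmap_area(char *start, size_t size, int dir)
    {
            if (dir != DMA_TO_DEVICE)
                    __dma_inv_range(start, start + size);    /* discard lines speculated in meanwhile */
    }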
+diff -Nur linux-3.14.40.orig/arch/arm64/mm/copypage.c linux-3.14.40/arch/arm64/mm/copypage.c
+--- linux-3.14.40.orig/arch/arm64/mm/copypage.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/mm/copypage.c 2015-05-01 14:57:58.231427001 -0500
+@@ -27,8 +27,10 @@
+ copy_page(kto, kfrom);
+ __flush_dcache_area(kto, PAGE_SIZE);
+ }
++EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
+
+ void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+ {
+ clear_page(kaddr);
+ }
++EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
+diff -Nur linux-3.14.40.orig/arch/arm64/mm/dma-mapping.c linux-3.14.40/arch/arm64/mm/dma-mapping.c
+--- linux-3.14.40.orig/arch/arm64/mm/dma-mapping.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/mm/dma-mapping.c 2015-05-01 14:57:58.239427001 -0500
+@@ -22,26 +22,39 @@
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dma-contiguous.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/vmalloc.h>
+ #include <linux/swiotlb.h>
++#include <linux/amba/bus.h>
+
+ #include <asm/cacheflush.h>
+
+ struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flags,
+- struct dma_attrs *attrs)
++static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
++ bool coherent)
++{
++ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
++ return pgprot_writecombine(prot);
++ else if (!coherent)
++ return pgprot_dmacoherent(prot);
++ return prot;
++}
++
++static void *__dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags,
++ struct dma_attrs *attrs)
+ {
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return NULL;
+ }
+
+- if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
++ if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+ dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+- flags |= GFP_DMA32;
++ flags |= GFP_DMA;
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+ void *addr;
+@@ -62,9 +75,9 @@
+ }
+ }
+
+-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle,
+- struct dma_attrs *attrs)
++static void __dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle,
++ struct dma_attrs *attrs)
+ {
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+@@ -82,9 +95,212 @@
+ }
+ }
+
+-static struct dma_map_ops arm64_swiotlb_dma_ops = {
+- .alloc = arm64_swiotlb_alloc_coherent,
+- .free = arm64_swiotlb_free_coherent,
++static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags,
++ struct dma_attrs *attrs)
++{
++ struct page *page, **map;
++ void *ptr, *coherent_ptr;
++ int order, i;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
++ if (!ptr)
++ goto no_mem;
++ map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
++ if (!map)
++ goto no_map;
++
++ /* remove any dirty cache lines on the kernel alias */
++ __dma_flush_range(ptr, ptr + size);
++
++ /* create a coherent mapping */
++ page = virt_to_page(ptr);
++ for (i = 0; i < (size >> PAGE_SHIFT); i++)
++ map[i] = page + i;
++ coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
++ __get_dma_pgprot(attrs, pgprot_default, false));
++ kfree(map);
++ if (!coherent_ptr)
++ goto no_map;
++
++ return coherent_ptr;
++
++no_map:
++ __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
++no_mem:
++ *dma_handle = ~0;
++ return NULL;
++}
++
++static void __dma_free_noncoherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle,
++ struct dma_attrs *attrs)
++{
++ void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
++
++ vunmap(vaddr);
++ __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
++}
++
++static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ dma_addr_t dev_addr;
++
++ dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++
++ return dev_addr;
++}
++
++
++static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++ swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
++}
++
++static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
++ int nelems, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i, ret;
++
++ ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
++ for_each_sg(sgl, sg, ret, i)
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++
++ return ret;
++}
++
++static void __swiotlb_unmap_sg_attrs(struct device *dev,
++ struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sgl, sg, nelems, i)
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++ swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
++}
++
++static void __swiotlb_sync_single_for_cpu(struct device *dev,
++ dma_addr_t dev_addr, size_t size,
++ enum dma_data_direction dir)
++{
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++ swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
++}
++
++static void __swiotlb_sync_single_for_device(struct device *dev,
++ dma_addr_t dev_addr, size_t size,
++ enum dma_data_direction dir)
++{
++ swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
++}
++
++static void __swiotlb_sync_sg_for_cpu(struct device *dev,
++ struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sgl, sg, nelems, i)
++ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++ swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
++}
++
++static void __swiotlb_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sgl, int nelems,
++ enum dma_data_direction dir)
++{
++ struct scatterlist *sg;
++ int i;
++
++ swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
++ for_each_sg(sgl, sg, nelems, i)
++ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
++ sg->length, dir);
++}
++
++/* vma->vm_page_prot must be set appropriately before calling this function */
++static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size)
++{
++ int ret = -ENXIO;
++ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
++ PAGE_SHIFT;
++ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
++ unsigned long off = vma->vm_pgoff;
++
++ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
++ return ret;
++
++ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
++ ret = remap_pfn_range(vma, vma->vm_start,
++ pfn + off,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ }
++
++ return ret;
++}
++
++static int __swiotlb_mmap_noncoherent(struct device *dev,
++ struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
++ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++static int __swiotlb_mmap_coherent(struct device *dev,
++ struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ /* Just use whatever page_prot attributes were specified */
++ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++struct dma_map_ops noncoherent_swiotlb_dma_ops = {
++ .alloc = __dma_alloc_noncoherent,
++ .free = __dma_free_noncoherent,
++ .mmap = __swiotlb_mmap_noncoherent,
++ .map_page = __swiotlb_map_page,
++ .unmap_page = __swiotlb_unmap_page,
++ .map_sg = __swiotlb_map_sg_attrs,
++ .unmap_sg = __swiotlb_unmap_sg_attrs,
++ .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = __swiotlb_sync_single_for_device,
++ .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = __swiotlb_sync_sg_for_device,
++ .dma_supported = swiotlb_dma_supported,
++ .mapping_error = swiotlb_dma_mapping_error,
++};
++EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
++
++struct dma_map_ops coherent_swiotlb_dma_ops = {
++ .alloc = __dma_alloc_coherent,
++ .free = __dma_free_coherent,
++ .mmap = __swiotlb_mmap_coherent,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+@@ -96,12 +312,47 @@
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+ };
++EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+
+-void __init arm64_swiotlb_init(void)
++static int dma_bus_notifier(struct notifier_block *nb,
++ unsigned long event, void *_dev)
+ {
+- dma_ops = &arm64_swiotlb_dma_ops;
+- swiotlb_init(1);
++ struct device *dev = _dev;
++
++ if (event != BUS_NOTIFY_ADD_DEVICE)
++ return NOTIFY_DONE;
++
++ if (of_property_read_bool(dev->of_node, "dma-coherent"))
++ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block platform_bus_nb = {
++ .notifier_call = dma_bus_notifier,
++};
++
++static struct notifier_block amba_bus_nb = {
++ .notifier_call = dma_bus_notifier,
++};
++
++extern int swiotlb_late_init_with_default_size(size_t default_size);
++
++static int __init swiotlb_late_init(void)
++{
++ size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
++
++ /*
++ * These must be registered before of_platform_populate().
++ */
++ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
++ bus_register_notifier(&amba_bustype, &amba_bus_nb);
++
++ dma_ops = &noncoherent_swiotlb_dma_ops;
++
++ return swiotlb_late_init_with_default_size(swiotlb_size);
+ }
++arch_initcall(swiotlb_late_init);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
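The late swiotlb initialisation above sizes the bounce buffer as min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT). As a worked example, assuming the common configuration of 4 KiB pages and MAX_ORDER = 11 (both are configuration dependent, so the numbers below are illustrative only):

/* Standalone arithmetic sketch, not kernel code. */
#define EX_PAGE_SHIFT		12				/* 4 KiB pages (assumed) */
#define EX_MAX_ORDER		11				/* default order limit (assumed) */
#define EX_MAX_ORDER_NR_PAGES	(1UL << (EX_MAX_ORDER - 1))	/* 1024 pages */
#define EX_SZ_64M		(64UL << 20)

/* min(64 MiB, 1024 * 4 KiB) = 4 MiB of bounce buffer for this configuration */
static const unsigned long ex_swiotlb_size =
	EX_SZ_64M < (EX_MAX_ORDER_NR_PAGES << EX_PAGE_SHIFT) ?
		EX_SZ_64M : (EX_MAX_ORDER_NR_PAGES << EX_PAGE_SHIFT);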
+diff -Nur linux-3.14.40.orig/arch/arm64/mm/init.c linux-3.14.40/arch/arm64/mm/init.c
+--- linux-3.14.40.orig/arch/arm64/mm/init.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/mm/init.c 2015-05-01 14:57:58.243427001 -0500
+@@ -30,6 +30,7 @@
+ #include <linux/memblock.h>
+ #include <linux/sort.h>
+ #include <linux/of_fdt.h>
++#include <linux/dma-mapping.h>
+ #include <linux/dma-contiguous.h>
+
+ #include <asm/sections.h>
+@@ -59,22 +60,22 @@
+ early_param("initrd", early_initrd);
+ #endif
+
+-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+-
+ static void __init zone_sizes_init(unsigned long min, unsigned long max)
+ {
+ struct memblock_region *reg;
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+- unsigned long max_dma32 = min;
++ unsigned long max_dma = min;
+
+ memset(zone_size, 0, sizeof(zone_size));
+
+-#ifdef CONFIG_ZONE_DMA32
+ /* 4GB maximum for 32-bit only capable devices */
+- max_dma32 = max(min, min(max, MAX_DMA32_PFN));
+- zone_size[ZONE_DMA32] = max_dma32 - min;
+-#endif
+- zone_size[ZONE_NORMAL] = max - max_dma32;
++ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
++ unsigned long max_dma_phys =
++ (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
++ max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
++ zone_size[ZONE_DMA] = max_dma - min;
++ }
++ zone_size[ZONE_NORMAL] = max - max_dma;
+
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+
+@@ -84,15 +85,15 @@
+
+ if (start >= max)
+ continue;
+-#ifdef CONFIG_ZONE_DMA32
+- if (start < max_dma32) {
+- unsigned long dma_end = min(end, max_dma32);
+- zhole_size[ZONE_DMA32] -= dma_end - start;
++
++ if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
++ unsigned long dma_end = min(end, max_dma);
++ zhole_size[ZONE_DMA] -= dma_end - start;
+ }
+-#endif
+- if (end > max_dma32) {
++
++ if (end > max_dma) {
+ unsigned long normal_end = min(end, max);
+- unsigned long normal_start = max(start, max_dma32);
++ unsigned long normal_start = max(start, max_dma);
+ zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+ }
+ }
+@@ -127,20 +128,16 @@
+ {
+ u64 *reserve_map, base, size;
+
+- /* Register the kernel text, kernel data and initrd with memblock */
++ /*
++ * Register the kernel text, kernel data, initrd, and initial
++ * pagetables with memblock.
++ */
+ memblock_reserve(__pa(_text), _end - _text);
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
+ #endif
+
+- /*
+- * Reserve the page tables. These are already in use,
+- * and can only be in node 0.
+- */
+- memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
+- memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
+-
+ /* Reserve the dtb region */
+ memblock_reserve(virt_to_phys(initial_boot_params),
+ be32_to_cpu(initial_boot_params->totalsize));
+@@ -261,8 +258,6 @@
+ */
+ void __init mem_init(void)
+ {
+- arm64_swiotlb_init();
+-
+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+ #ifndef CONFIG_SPARSEMEM_VMEMMAP
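The init.c hunk above drops the hard-coded MAX_DMA32_PFN/ZONE_DMA32 split and instead derives the DMA zone boundary from dma_to_phys(NULL, DMA_BIT_MASK(32) + 1). A standalone arithmetic sketch of what that evaluates to on a platform with a zero physical DMA offset and 4 KiB pages (both assumptions for this example):

/* (DMA_BIT_MASK(32) + 1) >> PAGE_SHIFT with a zero offset and 4 KiB pages */
static const unsigned long ex_zone_dma_boundary_pfn =
	(unsigned long)((1ULL << 32) >> 12);	/* 0x100000: the 4 GiB mark as a pfn */
/*
 * zone_size[ZONE_DMA]    = ex_zone_dma_boundary_pfn - min  (pages below 4 GiB)
 * zone_size[ZONE_NORMAL] = max - ex_zone_dma_boundary_pfn  (everything above)
 */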
+diff -Nur linux-3.14.40.orig/arch/arm64/mm/proc.S linux-3.14.40/arch/arm64/mm/proc.S
+--- linux-3.14.40.orig/arch/arm64/mm/proc.S 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/arm64/mm/proc.S 2015-05-01 14:57:58.243427001 -0500
+@@ -173,12 +173,6 @@
+ * value of the SCTLR_EL1 register.
+ */
+ ENTRY(__cpu_setup)
+- /*
+- * Preserve the link register across the function call.
+- */
+- mov x28, lr
+- bl __flush_dcache_all
+- mov lr, x28
+ ic iallu // I+BTB cache invalidate
+ tlbi vmalle1is // invalidate I + D TLBs
+ dsb sy
+diff -Nur linux-3.14.40.orig/arch/avr32/kernel/cpu.c linux-3.14.40/arch/avr32/kernel/cpu.c
+--- linux-3.14.40.orig/arch/avr32/kernel/cpu.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/avr32/kernel/cpu.c 2015-05-01 14:57:58.275427001 -0500
+@@ -39,10 +39,12 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf || val > 0x3f)
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
++ if (val > 0x3f)
+ return -EINVAL;
+ val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
+ sysreg_write(PCCR, val);
+@@ -61,11 +63,11 @@
+ const char *buf, size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ sysreg_write(PCNT0, val);
+
+ return count;
+@@ -84,10 +86,12 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf || val > 0x3f)
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
++ if (val > 0x3f)
+ return -EINVAL;
+ val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
+ sysreg_write(PCCR, val);
+@@ -106,11 +110,11 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ sysreg_write(PCNT1, val);
+
+ return count;
+@@ -129,11 +133,11 @@
+ size_t count)
+ {
+ unsigned long val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ sysreg_write(PCCNT, val);
+
+ return count;
+@@ -152,11 +156,11 @@
+ size_t count)
+ {
+ unsigned long pccr, val;
+- char *endp;
++ int ret;
+
+- val = simple_strtoul(buf, &endp, 0);
+- if (endp == buf)
+- return -EINVAL;
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
+ if (val)
+ val = 1;
+
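The avr32 hunks above convert every sysfs store hook from open-coded simple_strtoul() parsing to kstrtoul(), which rejects trailing garbage and propagates the parse error to the caller. The resulting idiom in isolation; sysfs_store_example and the 0x3f bound are only illustrative:

#include <linux/kernel.h>	/* kstrtoul() */
#include <linux/types.h>
#include <linux/errno.h>

static ssize_t sysfs_store_example(const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* base 0: auto-detect, reject trailing junk */
	if (ret)
		return ret;		/* -EINVAL or -ERANGE straight from the parser */
	if (val > 0x3f)			/* device-specific range check */
		return -EINVAL;

	/* ... program the hardware register with val ... */
	return count;
}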
+diff -Nur linux-3.14.40.orig/arch/blackfin/include/asm/ftrace.h linux-3.14.40/arch/blackfin/include/asm/ftrace.h
+--- linux-3.14.40.orig/arch/blackfin/include/asm/ftrace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/blackfin/include/asm/ftrace.h 2015-05-01 14:57:58.291427001 -0500
+@@ -66,16 +66,7 @@
+
+ #endif /* CONFIG_FRAME_POINTER */
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-/* inline function or macro may lead to unexpected result */
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 ((unsigned long)return_address(1))
+-#define CALLER_ADDR2 ((unsigned long)return_address(2))
+-#define CALLER_ADDR3 ((unsigned long)return_address(3))
+-#define CALLER_ADDR4 ((unsigned long)return_address(4))
+-#define CALLER_ADDR5 ((unsigned long)return_address(5))
+-#define CALLER_ADDR6 ((unsigned long)return_address(6))
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* __ASSEMBLY__ */
+
+diff -Nur linux-3.14.40.orig/arch/hexagon/include/asm/elf.h linux-3.14.40/arch/hexagon/include/asm/elf.h
+--- linux-3.14.40.orig/arch/hexagon/include/asm/elf.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/hexagon/include/asm/elf.h 2015-05-01 14:57:58.299427001 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * ELF definitions for the Hexagon architecture
+ *
+- * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+diff -Nur linux-3.14.40.orig/arch/parisc/include/asm/ftrace.h linux-3.14.40/arch/parisc/include/asm/ftrace.h
+--- linux-3.14.40.orig/arch/parisc/include/asm/ftrace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/parisc/include/asm/ftrace.h 2015-05-01 14:57:58.299427001 -0500
+@@ -24,15 +24,7 @@
+
+ extern unsigned long return_address(unsigned int);
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 return_address(1)
+-#define CALLER_ADDR2 return_address(2)
+-#define CALLER_ADDR3 return_address(3)
+-#define CALLER_ADDR4 return_address(4)
+-#define CALLER_ADDR5 return_address(5)
+-#define CALLER_ADDR6 return_address(6)
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* __ASSEMBLY__ */
+
+diff -Nur linux-3.14.40.orig/arch/s390/include/asm/cio.h linux-3.14.40/arch/s390/include/asm/cio.h
+--- linux-3.14.40.orig/arch/s390/include/asm/cio.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/s390/include/asm/cio.h 2015-05-01 14:57:58.311427001 -0500
+@@ -199,7 +199,7 @@
+ /**
+ * struct irb - interruption response block
+ * @scsw: subchannel status word
+- * @esw: extened status word
++ * @esw: extended status word
+ * @ecw: extended control word
+ *
+ * The irb that is handed to the device driver when an interrupt occurs. For
+diff -Nur linux-3.14.40.orig/arch/sh/include/asm/ftrace.h linux-3.14.40/arch/sh/include/asm/ftrace.h
+--- linux-3.14.40.orig/arch/sh/include/asm/ftrace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/sh/include/asm/ftrace.h 2015-05-01 14:57:58.339427001 -0500
+@@ -40,15 +40,7 @@
+ /* arch/sh/kernel/return_address.c */
+ extern void *return_address(unsigned int);
+
+-#define HAVE_ARCH_CALLER_ADDR
+-
+-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-#define CALLER_ADDR1 ((unsigned long)return_address(1))
+-#define CALLER_ADDR2 ((unsigned long)return_address(2))
+-#define CALLER_ADDR3 ((unsigned long)return_address(3))
+-#define CALLER_ADDR4 ((unsigned long)return_address(4))
+-#define CALLER_ADDR5 ((unsigned long)return_address(5))
+-#define CALLER_ADDR6 ((unsigned long)return_address(6))
++#define ftrace_return_address(n) return_address(n)
+
+ #endif /* __ASSEMBLY__ */
+
+diff -Nur linux-3.14.40.orig/arch/x86/kernel/setup.c linux-3.14.40/arch/x86/kernel/setup.c
+--- linux-3.14.40.orig/arch/x86/kernel/setup.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/arch/x86/kernel/setup.c 2015-05-01 14:57:58.351427001 -0500
+@@ -1120,7 +1120,7 @@
+ setup_real_mode();
+
+ memblock_set_current_limit(get_max_mapped());
+- dma_contiguous_reserve(0);
++ dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
+
+ /*
+ * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
+diff -Nur linux-3.14.40.orig/block/bfq-cgroup.c linux-3.14.40/block/bfq-cgroup.c
+--- linux-3.14.40.orig/block/bfq-cgroup.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/block/bfq-cgroup.c 2015-05-01 14:57:58.351427001 -0500
+@@ -0,0 +1,932 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++
++static DEFINE_MUTEX(bfqio_mutex);
++
++static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
++{
++ return bgrp ? !bgrp->online : false;
++}
++
++static struct bfqio_cgroup bfqio_root_cgroup = {
++ .weight = BFQ_DEFAULT_GRP_WEIGHT,
++ .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
++ .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
++};
++
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
++}
++
++/*
++ * Search for the bfq_group of bfqd in the hash table (for now only a list)
++ * of bgrp. Must be called under rcu_read_lock().
++ */
++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
++ struct bfq_data *bfqd)
++{
++ struct bfq_group *bfqg;
++ void *key;
++
++ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
++ key = rcu_dereference(bfqg->bfqd);
++ if (key == bfqd)
++ return bfqg;
++ }
++
++ return NULL;
++}
++
++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
++ struct bfq_group *bfqg)
++{
++ struct bfq_entity *entity = &bfqg->entity;
++
++ /*
++ * If the weight of the entity has never been set via the sysfs
++ * interface, then bgrp->weight == 0. In this case we initialize
++ * the weight from the current ioprio value. Otherwise, the group
++ * weight, if set, has priority over the ioprio value.
++ */
++ if (bgrp->weight == 0) {
++ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
++ entity->new_ioprio = bgrp->ioprio;
++ } else {
++ entity->new_weight = bgrp->weight;
++ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
++ }
++ entity->orig_weight = entity->weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
++ entity->my_sched_data = &bfqg->sched_data;
++ bfqg->active_entities = 0;
++}
++
++static inline void bfq_group_set_parent(struct bfq_group *bfqg,
++ struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(parent == NULL);
++ BUG_ON(bfqg == NULL);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
++/**
++ * bfq_group_chain_alloc - allocate a chain of groups.
++ * @bfqd: queue descriptor.
++ * @css: the leaf cgroup_subsys_state this chain starts from.
++ *
++ * Allocate a chain of groups starting from the one belonging to
++ * @cgroup up to the root cgroup. Stop if a cgroup on the chain
++ * to the root already has an allocated group on @bfqd.
++ */
++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
++
++ for (; css != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL) {
++ /*
++ * All the cgroups in the path from there to the
++ * root must have a bfq_group for bfqd, so we don't
++ * need any more allocations.
++ */
++ break;
++ }
++
++ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
++ if (bfqg == NULL)
++ goto cleanup;
++
++ bfq_group_init_entity(bgrp, bfqg);
++ bfqg->my_entity = &bfqg->entity;
++
++ if (leaf == NULL) {
++ leaf = bfqg;
++ prev = leaf;
++ } else {
++ bfq_group_set_parent(prev, bfqg);
++ /*
++ * Build a list of allocated nodes using the bfqd
++ * filed, that is still unused and will be
++			 * field, which is still unused and will be
++			 * initialized only after the node is
++ */
++ prev->bfqd = bfqg;
++ prev = bfqg;
++ }
++ }
++
++ return leaf;
++
++cleanup:
++ while (leaf != NULL) {
++ prev = leaf;
++ leaf = leaf->bfqd;
++ kfree(prev);
++ }
++
++ return NULL;
++}
++
++/**
++ * bfq_group_chain_link - link an allocated group chain to a cgroup
++ * hierarchy.
++ * @bfqd: the queue descriptor.
++ * @css: the leaf cgroup_subsys_state to start from.
++ * @leaf: the leaf group (to be associated to @cgroup).
++ *
++ * Try to link a chain of groups to a cgroup hierarchy, connecting the
++ * nodes bottom-up, so we can be sure that when we find a cgroup in the
++ * hierarchy that already has a group associated to @bfqd, all the nodes
++ * in the path to the root cgroup have one too.
++ *
++ * On locking: the queue lock protects the hierarchy (there is a hierarchy
++ * per device) while the bfqio_cgroup lock protects the list of groups
++ * belonging to the same cgroup.
++ */
++static void bfq_group_chain_link(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css,
++ struct bfq_group *leaf)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *next, *prev = NULL;
++ unsigned long flags;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ for (; css != NULL && leaf != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
++ next = leaf->bfqd;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ BUG_ON(bfqg != NULL);
++
++ spin_lock_irqsave(&bgrp->lock, flags);
++
++ rcu_assign_pointer(leaf->bfqd, bfqd);
++ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
++ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
++
++ spin_unlock_irqrestore(&bgrp->lock, flags);
++
++ prev = leaf;
++ leaf = next;
++ }
++
++ BUG_ON(css == NULL && leaf != NULL);
++ if (css != NULL && prev != NULL) {
++ bgrp = css_to_bfqio(css);
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ bfq_group_set_parent(prev, bfqg);
++ }
++}
++
++/**
++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
++ * @bfqd: queue descriptor.
++ * @cgroup: cgroup being searched for.
++ *
++ * Return a group associated to @bfqd in @cgroup, allocating one if
++ * necessary. When a group is returned all the cgroups in the path
++ * to the root have a group associated to @bfqd.
++ *
++ * If the allocation fails, return the root group: this breaks guarantees
++ * but is a safe fallback. If this loss becomes a problem it can be
++ * mitigated using the equivalent weight (given by the product of the
++ * weights of the groups in the path from @group to the root) in the
++ * root scheduler.
++ *
++ * We allocate all the missing nodes in the path from the leaf cgroup
++ * to the root and we connect the nodes only after all the allocations
++ * have been successful.
++ */
++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++ struct bfq_group *bfqg;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL)
++ return bfqg;
++
++ bfqg = bfq_group_chain_alloc(bfqd, css);
++ if (bfqg != NULL)
++ bfq_group_chain_link(bfqd, css, bfqg);
++ else
++ bfqg = bfqd->root_group;
++
++ return bfqg;
++}
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @entity: @bfqq's entity.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one. Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (by now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_entity *entity, struct bfq_group *bfqg)
++{
++ int busy, resume;
++
++ busy = bfq_bfqq_busy(bfqq);
++ resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
++
++ BUG_ON(resume && !entity->on_st);
++ BUG_ON(busy && !resume && entity->on_st &&
++ bfqq != bfqd->in_service_queue);
++
++ if (busy) {
++ BUG_ON(atomic_read(&bfqq->ref) < 2);
++
++ if (!resume)
++ bfq_del_bfqq_busy(bfqd, bfqq, 0);
++ else
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++ } else if (entity->on_st)
++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++
++ /*
++ * Here we use a reference to bfqg. We don't need a refcounter
++ * as the cgroup reference will not be dropped, so that its
++ * destroy() callback will not be invoked.
++ */
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++
++ if (busy && resume)
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @cgroup: the cgroup to move to.
++ *
++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and get a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
++{
++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++ struct bfq_entity *entity;
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++
++ bgrp = css_to_bfqio(css);
++
++ bfqg = bfq_find_alloc_group(bfqd, css);
++ if (async_bfqq != NULL) {
++ entity = &async_bfqq->entity;
++
++ if (entity->sched_data != &bfqg->sched_data) {
++ bic_set_bfqq(bic, NULL, 0);
++ bfq_log_bfqq(bfqd, async_bfqq,
++ "bic_change_group: %p %d",
++ async_bfqq, atomic_read(&async_bfqq->ref));
++ bfq_put_queue(async_bfqq);
++ }
++ }
++
++ if (sync_bfqq != NULL) {
++ entity = &sync_bfqq->entity;
++ if (entity->sched_data != &bfqg->sched_data)
++ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
++ }
++
++ return bfqg;
++}
++
++/**
++ * bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bic: the bic being migrated.
++ * @cgroup: the destination cgroup.
++ *
++ * When the task owning @bic is moved to @cgroup, @bic is immediately
++ * moved into its new parent group.
++ */
++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
++{
++ struct bfq_data *bfqd;
++ unsigned long uninitialized_var(flags);
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ if (bfqd != NULL) {
++ __bfq_bic_change_cgroup(bfqd, bic, css);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++}
++
++/**
++ * bfq_bic_update_cgroup - update the cgroup of @bic.
++ * @bic: the @bic to update.
++ *
++ * Make sure that @bic is enqueued in the cgroup of the current task.
++ * We need this in addition to moving bics during the cgroup attach
++ * phase because the task owning @bic could be at its first disk
++ * access or we may end up in the root cgroup as the result of a
++ * memory allocation failure and here we try to move to the right
++ * group.
++ *
++ * Must be called under the queue lock. It is safe to use the returned
++ * value even after the rcu_read_unlock() as the migration/destruction
++ * paths act under the queue lock too. IOW it is impossible to race with
++ * group migration/destruction and end up with an invalid group as:
++ * a) here cgroup has not yet been destroyed, nor its destroy callback
++ * has started execution, as current holds a reference to it,
++ * b) if it is destroyed after rcu_read_unlock() [after current is
++ * migrated to a different cgroup] its attach() callback will have
++ *    taken care of removing all the references to the old cgroup data.
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_group *bfqg;
++ struct cgroup_subsys_state *css;
++
++ BUG_ON(bfqd == NULL);
++
++ rcu_read_lock();
++ css = task_css(current, bfqio_subsys_id);
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
++ rcu_read_unlock();
++
++ return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entity = st->first_idle;
++
++ for (; entity != NULL; entity = st->first_idle)
++ __bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(bfqq == NULL);
++ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++ return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ * entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ struct bfq_service_tree *st)
++{
++ struct rb_root *active = &st->active;
++ struct bfq_entity *entity = NULL;
++
++ if (!RB_EMPTY_ROOT(&st->active))
++ entity = bfq_entity_of(rb_first(active));
++
++ for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
++ bfq_reparent_leaf_entity(bfqd, entity);
++
++ if (bfqg->sched_data.in_service_entity != NULL)
++ bfq_reparent_leaf_entity(bfqd,
++ bfqg->sched_data.in_service_entity);
++
++ return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++ struct bfq_data *bfqd;
++ struct bfq_service_tree *st;
++ struct bfq_entity *entity = bfqg->my_entity;
++ unsigned long uninitialized_var(flags);
++ int i;
++
++ hlist_del(&bfqg->group_node);
++
++ /*
++ * Empty all service_trees belonging to this group before
++ * deactivating the group itself.
++ */
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ st = bfqg->sched_data.service_tree + i;
++
++ /*
++ * The idle tree may still contain bfq_queues belonging
++		 * to exited tasks because they never migrated to a different
++ * cgroup from the one being destroyed now. No one else
++ * can access them so it's safe to act without any lock.
++ */
++ bfq_flush_idle_tree(st);
++
++ /*
++ * It may happen that some queues are still active
++ * (busy) upon group destruction (if the corresponding
++ * processes have been forced to terminate). We move
++ * all the leaf entities corresponding to these queues
++ * to the root_group.
++ * Also, it may happen that the group has an entity
++ * in service, which is disconnected from the active
++ * tree: it must be moved, too.
++ * There is no need to put the sync queues, as the
++ * scheduler has taken no reference.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ bfq_reparent_active_entities(bfqd, bfqg, st);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(!RB_EMPTY_ROOT(&st->active));
++ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++ }
++ BUG_ON(bfqg->sched_data.next_in_service != NULL);
++ BUG_ON(bfqg->sched_data.in_service_entity != NULL);
++
++ /*
++ * We may race with device destruction, take extra care when
++ * dereferencing bfqg->bfqd.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ hlist_del(&bfqg->bfqd_node);
++ __bfq_deactivate_entity(entity, 0);
++ bfq_put_async_queues(bfqd, bfqg);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(entity->tree != NULL);
++
++ /*
++ * No need to defer the kfree() to the end of the RCU grace
++ * period: we are called from the destroy() callback of our
++ * cgroup, so we can be sure that no one is a) still using
++ * this cgroup or b) doing lookups in it.
++ */
++ kfree(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++ bfq_end_wr_async_queues(bfqd, bfqg);
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures. They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ bfq_log(bfqd, "disconnect_groups beginning");
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++ hlist_del(&bfqg->bfqd_node);
++
++ __bfq_deactivate_entity(bfqg->my_entity, 0);
++
++ /*
++ * Don't remove from the group hash, just set an
++ * invalid key. No lookups can race with the
++ * assignment as bfqd is being destroyed; this
++ * implies also that new elements cannot be added
++ * to the list.
++ */
++ rcu_assign_pointer(bfqg->bfqd, NULL);
++
++ bfq_log(bfqd, "disconnect_groups: put async for group %p",
++ bfqg);
++ bfq_put_async_queues(bfqd, bfqg);
++ }
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++ struct bfq_group *bfqg = bfqd->root_group;
++
++ bfq_put_async_queues(bfqd, bfqg);
++
++ spin_lock_irq(&bgrp->lock);
++ hlist_del_rcu(&bfqg->group_node);
++ spin_unlock_irq(&bgrp->lock);
++
++ /*
++ * No need to synchronize_rcu() here: since the device is gone
++ * there cannot be any read-side access to its root_group.
++ */
++ kfree(bfqg);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++ int i;
++
++ bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ bfqg->entity.parent = NULL;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ bgrp = &bfqio_root_cgroup;
++ spin_lock_irq(&bgrp->lock);
++ rcu_assign_pointer(bfqg->bfqd, bfqd);
++ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
++ spin_unlock_irq(&bgrp->lock);
++
++ return bfqg;
++}
++
++#define SHOW_FUNCTION(__VAR) \
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
++ struct cftype *cftype) \
++{ \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
++ u64 ret = -ENODEV; \
++ \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(bgrp)) \
++ goto out_unlock; \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ ret = bgrp->__VAR; \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++SHOW_FUNCTION(weight);
++SHOW_FUNCTION(ioprio);
++SHOW_FUNCTION(ioprio_class);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__VAR, __MIN, __MAX) \
++static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
++ struct cftype *cftype, \
++ u64 val) \
++{ \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
++ struct bfq_group *bfqg; \
++ int ret = -EINVAL; \
++ \
++ if (val < (__MIN) || val > (__MAX)) \
++ return ret; \
++ \
++ ret = -ENODEV; \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(bgrp)) \
++ goto out_unlock; \
++ ret = 0; \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ bgrp->__VAR = (unsigned short)val; \
++ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
++ /* \
++ * Setting the ioprio_changed flag of the entity \
++ * to 1 with new_##__VAR == ##__VAR would re-set \
++ * the value of the weight to its ioprio mapping. \
++ * Set the flag only if necessary. \
++ */ \
++ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \
++ bfqg->entity.new_##__VAR = (unsigned short)val; \
++ /* \
++ * Make sure that the above new value has been \
++ * stored in bfqg->entity.new_##__VAR before \
++ * setting the ioprio_changed flag. In fact, \
++ * this flag may be read asynchronously (in \
++ * critical sections protected by a different \
++ * lock than that held here), and finding this \
++ * flag set may cause the execution of the code \
++ * for updating parameters whose value may \
++ * depend also on bfqg->entity.new_##__VAR (in \
++ * __bfq_entity_update_weight_prio). \
++ * This barrier makes sure that the new value \
++ * of bfqg->entity.new_##__VAR is correctly \
++ * seen in that code. \
++ */ \
++ smp_wmb(); \
++ bfqg->entity.ioprio_changed = 1; \
++ } \
++ } \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
++#undef STORE_FUNCTION
++
++static struct cftype bfqio_files[] = {
++ {
++ .name = "weight",
++ .read_u64 = bfqio_cgroup_weight_read,
++ .write_u64 = bfqio_cgroup_weight_write,
++ },
++ {
++ .name = "ioprio",
++ .read_u64 = bfqio_cgroup_ioprio_read,
++ .write_u64 = bfqio_cgroup_ioprio_write,
++ },
++ {
++ .name = "ioprio_class",
++ .read_u64 = bfqio_cgroup_ioprio_class_read,
++ .write_u64 = bfqio_cgroup_ioprio_class_write,
++ },
++ { }, /* terminate */
++};
++
++static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
++ *parent_css)
++{
++ struct bfqio_cgroup *bgrp;
++
++ if (parent_css != NULL) {
++ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
++ if (bgrp == NULL)
++ return ERR_PTR(-ENOMEM);
++ } else
++ bgrp = &bfqio_root_cgroup;
++
++ spin_lock_init(&bgrp->lock);
++ INIT_HLIST_HEAD(&bgrp->group_data);
++ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
++ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
++
++ return &bgrp->css;
++}
++
++/*
++ * We cannot support shared io contexts, as we have no means to support
++ * two tasks with the same ioc in two different groups without major rework
++ * of the main bic/bfqq data structures. By now we allow a task to change
++ * its cgroup only if it's the only owner of its ioc; the drawback of this
++ * behavior is that a group containing a task that forked using CLONE_IO
++ * will not be destroyed until the tasks sharing the ioc die.
++ */
++static int bfqio_can_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ int ret = 0;
++
++ cgroup_taskset_for_each(task, css, tset) {
++ /*
++ * task_lock() is needed to avoid races with
++ * exit_io_context()
++ */
++ task_lock(task);
++ ioc = task->io_context;
++ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
++ /*
++ * ioc == NULL means that the task is either too
++			 * young or exiting: if it still has no ioc the
++ * ioc can't be shared, if the task is exiting the
++ * attach will fail anyway, no matter what we
++ * return here.
++ */
++ ret = -EINVAL;
++ task_unlock(task);
++ if (ret)
++ break;
++ }
++
++ return ret;
++}
++
++static void bfqio_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ struct io_cq *icq;
++
++ /*
++ * IMPORTANT NOTE: The move of more than one process at a time to a
++ * new group has not yet been tested.
++ */
++ cgroup_taskset_for_each(task, css, tset) {
++ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++ if (ioc) {
++ /*
++ * Handle cgroup change here.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
++ if (!strncmp(
++ icq->q->elevator->type->elevator_name,
++ "bfq", ELV_NAME_MAX))
++ bfq_bic_change_cgroup(icq_to_bic(icq),
++ css);
++ rcu_read_unlock();
++ put_io_context(ioc);
++ }
++ }
++}
++
++static void bfqio_destroy(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ /*
++ * Since we are destroying the cgroup, there are no more tasks
++ * referencing it, and all the RCU grace periods that may have
++ * referenced it are ended (as the destruction of the parent
++ * cgroup is RCU-safe); bgrp->group_data will not be accessed by
++ * anything else and we don't need any synchronization.
++ */
++ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
++ bfq_destroy_group(bgrp, bfqg);
++
++ BUG_ON(!hlist_empty(&bgrp->group_data));
++
++ kfree(bgrp);
++}
++
++static int bfqio_css_online(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = true;
++ mutex_unlock(&bfqio_mutex);
++
++ return 0;
++}
++
++static void bfqio_css_offline(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = false;
++ mutex_unlock(&bfqio_mutex);
++}
++
++struct cgroup_subsys bfqio_subsys = {
++ .name = "bfqio",
++ .css_alloc = bfqio_create,
++ .css_online = bfqio_css_online,
++ .css_offline = bfqio_css_offline,
++ .can_attach = bfqio_can_attach,
++ .attach = bfqio_attach,
++ .css_free = bfqio_destroy,
++ .subsys_id = bfqio_subsys_id,
++ .base_cftypes = bfqio_files,
++};
++#else
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static inline struct bfq_group *
++bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ return bfqd->root_group;
++}
++
++static inline void bfq_bfqq_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ kfree(bfqd->root_group);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ return bfqg;
++}
++#endif
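The SHOW_FUNCTION()/STORE_FUNCTION() macros above stamp out one cgroup read and one write handler per attribute. As a reading aid, here is roughly what SHOW_FUNCTION(weight) expands to; this is a hand expansion relying on the bfqio_cgroup definitions earlier in this file, not extra patch content:

/* Hand-expanded sketch of bfqio_cgroup_weight_read() as generated above. */
static u64 bfqio_cgroup_weight_read(struct cgroup_subsys_state *css,
				    struct cftype *cftype)
{
	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
	u64 ret = -ENODEV;

	mutex_lock(&bfqio_mutex);
	if (!bfqio_is_removed(bgrp)) {
		spin_lock_irq(&bgrp->lock);
		ret = bgrp->weight;	/* value exposed through the bfqio.weight file */
		spin_unlock_irq(&bgrp->lock);
	}
	mutex_unlock(&bfqio_mutex);
	return ret;
}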
+diff -Nur linux-3.14.40.orig/block/bfq.h linux-3.14.40/block/bfq.h
+--- linux-3.14.40.orig/block/bfq.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/block/bfq.h 2015-05-01 14:57:58.351427001 -0500
+@@ -0,0 +1,770 @@
++/*
++ * BFQ-v7r5 for 3.14.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/ioprio.h>
++#include <linux/rbtree.h>
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++
++#define BFQ_DEFAULT_GRP_WEIGHT 10
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ * @active: tree for active entities (i.e., those backlogged).
++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
++ * @first_idle: idle entity with minimum F_i.
++ * @last_idle: idle entity with maximum F_i.
++ * @vtime: scheduler virtual time.
++ * @wsum: scheduler weight sum; active and idle entities contribute to it.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ struct rb_root active;
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle;
++ struct bfq_entity *last_idle;
++
++ u64 vtime;
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ * @in_service_entity: entity in service.
++ * @next_in_service: head-of-the-line entity in the scheduler.
++ * @service_tree: array of service trees, one per ioprio_class.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as
++ * an intermediate queue on a hierarchical setup.
++ * @next_in_service points to the active entity of the sched_data
++ * service trees that will be scheduled next.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity;
++ struct bfq_entity *next_in_service;
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ * @weight: weight of the entities that this counter refers to.
++ * @num_active: number of active entities with this weight.
++ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
++ * and @group_weights_tree).
++ */
++struct bfq_weight_counter {
++ short int weight;
++ unsigned int num_active;
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ * @rb_node: service_tree member.
++ * @weight_counter: pointer to the weight counter associated with this entity.
++ * @on_st: flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree).
++ * @finish: B-WF2Q+ finish timestamp (aka F_i).
++ * @start: B-WF2Q+ start timestamp (aka S_i).
++ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
++ * @min_start: minimum start time of the (active) subtree rooted at
++ * this entity; used for O(log N) lookups into active trees.
++ * @service: service received during the last round of service.
++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
++ * @weight: weight of the queue
++ * @parent: parent entity, for hierarchical scheduling.
++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
++ * associated scheduler queue, %NULL on leaf nodes.
++ * @sched_data: the scheduler queue this entity belongs to.
++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value.
++ * @orig_weight: original weight, used to implement weight boosting
++ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
++ * @ioprio_class: the ioprio_class in use.
++ * @new_ioprio_class: when an ioprio_class change is requested, the new
++ * ioprio_class value.
++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
++ * ioprio_class change.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace by now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @ioprio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node;
++ struct bfq_weight_counter *weight_counter;
++
++ int on_st;
++
++ u64 finish;
++ u64 start;
++
++ struct rb_root *tree;
++
++ u64 min_start;
++
++ unsigned long service, budget;
++ unsigned short weight, new_weight;
++ unsigned short orig_weight;
++
++ struct bfq_entity *parent;
++
++ struct bfq_sched_data *my_sched_data;
++ struct bfq_sched_data *sched_data;
++
++ unsigned short ioprio, new_ioprio;
++ unsigned short ioprio_class, new_ioprio_class;
++
++ int ioprio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ * @ref: reference counter.
++ * @bfqd: parent bfq_data.
++ * @new_bfqq: shared bfq_queue if queue is cooperating with
++ * one or more other queues.
++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
++ * @sort_list: sorted list of pending requests.
++ * @next_rq: if fifo isn't expired, next request to serve.
++ * @queued: nr of requests queued in @sort_list.
++ * @allocated: currently allocated requests.
++ * @meta_pending: pending metadata requests.
++ * @fifo: fifo list of requests in sort_list.
++ * @entity: entity representing this queue in the scheduler.
++ * @max_budget: maximum budget allowed from the feedback mechanism.
++ * @budget_timeout: budget expiration (in jiffies).
++ * @dispatched: number of requests on the dispatch list or inside driver.
++ * @flags: status flags.
++ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
++ * @seek_samples: number of seeks sampled
++ * @seek_total: sum of the distances of the seeks sampled
++ * @seek_mean: mean seek distance
++ * @last_request_pos: position of the last request enqueued
++ * @requests_within_timer: number of consecutive pairs of request completion
++ * and arrival, such that the queue becomes idle
++ * after the completion, but the next request arrives
++ * within an idle time slice; used only if the queue's
++ * IO_bound has been cleared.
++ * @pid: pid of the process owning the queue, used for logging purposes.
++ * @last_wr_start_finish: start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period
++ * @wr_cur_max_time: current max raising time for this queue
++ * @soft_rt_next_start: minimum time instant such that, only if a new
++ * request is enqueued after this time instant in an
++ * idle @bfq_queue with no outstanding requests, then
++ *                      the task associated with the queue is deemed as
++ * soft real-time (see the comments to the function
++ * bfq_bfqq_softrt_next_start())
++ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
++ * idle to backlogged
++ * @service_from_backlogged: cumulative service received from the @bfq_queue
++ * since the last transition from idle to
++ * backlogged
++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
++ * queue is shared
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ atomic_t ref;
++ struct bfq_data *bfqd;
++
++ /* fields for cooperating queues handling */
++ struct bfq_queue *new_bfqq;
++ struct rb_node pos_node;
++ struct rb_root *pos_root;
++
++ struct rb_root sort_list;
++ struct request *next_rq;
++ int queued[2];
++ int allocated[2];
++ int meta_pending;
++ struct list_head fifo;
++
++ struct bfq_entity entity;
++
++ unsigned long max_budget;
++ unsigned long budget_timeout;
++
++ int dispatched;
++
++ unsigned int flags;
++
++ struct list_head bfqq_list;
++
++ unsigned int seek_samples;
++ u64 seek_total;
++ sector_t seek_mean;
++ sector_t last_request_pos;
++
++ unsigned int requests_within_timer;
++
++ pid_t pid;
++ struct bfq_io_cq *bic;
++
++ /* weight-raising fields */
++ unsigned long wr_cur_max_time;
++ unsigned long soft_rt_next_start;
++ unsigned long last_wr_start_finish;
++ unsigned int wr_coeff;
++ unsigned long last_idle_bklogged;
++ unsigned long service_from_backlogged;
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ * @last_end_request: completion time (in jiffies) of the last request
++ * @ttime_total: total process thinktime
++ * @ttime_samples: number of thinktime samples
++ * @ttime_mean: average process thinktime
++ */
++struct bfq_ttime {
++ unsigned long last_end_request;
++
++ unsigned long ttime_total;
++ unsigned long ttime_samples;
++ unsigned long ttime_mean;
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ * @icq: associated io_cq structure
++ * @bfqq: array of two process queues, the sync and the async
++ * @ttime: associated @bfq_ttime struct
++ * @wr_time_left: snapshot of the time left before weight raising ends
++ * for the sync queue associated to this process; this
++ * snapshot is taken to remember this value while the weight
++ * raising is suspended because the queue is merged with a
++ * shared queue, and is used to set @wr_cur_max_time
++ * when the queue is split from the shared queue and its
++ * weight is raised again
++ * @saved_idle_window: same purpose as the previous field for the idle
++ * window
++ * @saved_IO_bound: same purpose as the previous two fields for the I/O
++ * bound classification of a queue
++ * @cooperations: counter of consecutive successful queue merges undergone
++ * by any of the process' @bfq_queues
++ * @failed_cooperations: counter of consecutive failed queue merges of any
++ * of the process' @bfq_queues
++ */
++struct bfq_io_cq {
++ struct io_cq icq; /* must be the first member */
++ struct bfq_queue *bfqq[2];
++ struct bfq_ttime ttime;
++ int ioprio;
++
++ unsigned int wr_time_left;
++ unsigned int saved_idle_window;
++ unsigned int saved_IO_bound;
++
++ unsigned int cooperations;
++ unsigned int failed_cooperations;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per device data structure.
++ * @queue: request queue for the managed device.
++ * @root_group: root bfq_group for the device.
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_close_cooperator()).
++ * @active_numerous_groups: number of bfq_groups containing more than one
++ * active @bfq_entity.
++ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues
++ * have the same weight. The tree contains one counter
++ * for each distinct weight associated to some active
++ * and not weight-raised @bfq_queue (see the comments to
++ * the functions bfq_weights_tree_[add|remove] for
++ * further details).
++ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
++ * by weight. Used to keep track of whether all
++ * @bfq_groups have the same weight. The tree contains
++ * one counter for each distinct weight associated to
++ * some active @bfq_group (see the comments to the
++ * functions bfq_weights_tree_[add|remove] for further
++ * details).
++ * @busy_queues: number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ * @busy_in_flight_queues: number of @bfq_queues containing pending or
++ * in-flight requests, plus the @bfq_queue in
++ * service, even if idle but waiting for the
++ * possible arrival of its next sync request. This
++ * field is updated only if the device is rotational,
++ * but used only if the device is also NCQ-capable.
++ * The reason why the field is updated also for non-
++ * NCQ-capable rotational devices is related to the
++ * fact that the value of @hw_tag may be set also
++ * later than when busy_in_flight_queues may need to
++ * be incremented for the first time(s). Taking also
++ * this possibility into account, to avoid unbalanced
++ * increments/decrements, would imply more overhead
++ * than just updating busy_in_flight_queues
++ * regardless of the value of @hw_tag.
++ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
++ * (that is, seeky queues that expired
++ * for budget timeout at least once)
++ * containing pending or in-flight
++ * requests, including the in-service
++ * @bfq_queue if constantly seeky. This
++ * field is updated only if the device
++ * is rotational, but used only if the
++ * device is also NCQ-capable (see the
++ * comments to @busy_in_flight_queues).
++ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
++ * @queued: number of queued requests.
++ * @rq_in_driver: number of requests dispatched and waiting for completion.
++ * @sync_flight: number of sync requests in the driver.
++ * @max_rq_in_driver: max number of reqs in driver in the last
++ * @hw_tag_samples completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag.
++ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
++ * @budgets_assigned: number of budgets assigned.
++ * @idle_slice_timer: timer set when idling for the next sequential request
++ * from the queue in service.
++ * @unplug_work: delayed work to restart dispatching on the request queue.
++ * @in_service_queue: bfq_queue in service.
++ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
++ * @last_position: on-disk position of the last served request.
++ * @last_budget_start: beginning of the last budget.
++ * @last_idling_start: beginning of the last idle slice.
++ * @peak_rate: peak transfer rate observed for a budget.
++ * @peak_rate_samples: number of samples used to calculate @peak_rate.
++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
++ * rescheduling.
++ * @group_list: list of all the bfq_groups active on the device.
++ * @active_list: list of all the bfq_queues active on the device.
++ * @idle_list: list of all the bfq_queues idle on the device.
++ * @bfq_quantum: max number of requests dispatched per dispatch round.
++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
++ * requests are served in fifo order.
++ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
++ * @bfq_back_max: maximum allowed backward seek.
++ * @bfq_slice_idle: maximum idling time.
++ * @bfq_user_max_budget: user-configured max budget value
++ * (0 for auto-tuning).
++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
++ * async queues.
++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used to
++ * prevent seeky queues from imposing long latencies on
++ * well-behaved ones (this also implies that seeky queues cannot
++ * receive guarantees in the service domain; after a timeout
++ * they are charged for the whole allocated budget, to try
++ * to preserve a behavior reasonably fair among them, but
++ * without service-domain guarantees).
++ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
++ * no longer granted any weight-raising.
++ * @bfq_failed_cooperations: number of consecutive failed cooperation
++ * chances after which weight-raising is restored
++ * to a queue subject to more than bfq_coop_thresh
++ * queue merges.
++ * @bfq_requests_within_timer: number of consecutive requests that must be
++ * issued within the idle time slice to re-enable
++ * idling for a queue that was marked as
++ * non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ * @bfq_wr_coeff: Maximum factor by which the weight of a weight-raised
++ * queue is multiplied
++ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies)
++ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes
++ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
++ * may be reactivated for a queue (in jiffies)
++ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
++ * after which weight-raising may be
++ * reactivated for an already busy queue
++ * (in jiffies)
++ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
++ * in sectors per second
++ * @RT_prod: cached value of the product R*T used for computing the maximum
++ * duration of the weight raising automatically
++ * @device_speed: device-speed class for the low-latency heuristic
++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ struct request_queue *queue;
++
++ struct bfq_group *root_group;
++ struct rb_root rq_pos_tree;
++
++#ifdef CONFIG_CGROUP_BFQIO
++ int active_numerous_groups;
++#endif
++
++ struct rb_root queue_weights_tree;
++ struct rb_root group_weights_tree;
++
++ int busy_queues;
++ int busy_in_flight_queues;
++ int const_seeky_busy_in_flight_queues;
++ int wr_busy_queues;
++ int queued;
++ int rq_in_driver;
++ int sync_flight;
++
++ int max_rq_in_driver;
++ int hw_tag_samples;
++ int hw_tag;
++
++ int budgets_assigned;
++
++ struct timer_list idle_slice_timer;
++ struct work_struct unplug_work;
++
++ struct bfq_queue *in_service_queue;
++ struct bfq_io_cq *in_service_bic;
++
++ sector_t last_position;
++
++ ktime_t last_budget_start;
++ ktime_t last_idling_start;
++ int peak_rate_samples;
++ u64 peak_rate;
++ unsigned long bfq_max_budget;
++
++ struct hlist_head group_list;
++ struct list_head active_list;
++ struct list_head idle_list;
++
++ unsigned int bfq_quantum;
++ unsigned int bfq_fifo_expire[2];
++ unsigned int bfq_back_penalty;
++ unsigned int bfq_back_max;
++ unsigned int bfq_slice_idle;
++ u64 bfq_class_idle_last_service;
++
++ unsigned int bfq_user_max_budget;
++ unsigned int bfq_max_budget_async_rq;
++ unsigned int bfq_timeout[2];
++
++ unsigned int bfq_coop_thresh;
++ unsigned int bfq_failed_cooperations;
++ unsigned int bfq_requests_within_timer;
++
++ bool low_latency;
++
++ /* parameters of the low_latency heuristics */
++ unsigned int bfq_wr_coeff;
++ unsigned int bfq_wr_max_time;
++ unsigned int bfq_wr_rt_max_time;
++ unsigned int bfq_wr_min_idle_time;
++ unsigned long bfq_wr_min_inter_arr_async;
++ unsigned int bfq_wr_max_softrt_rate;
++ u64 RT_prod;
++ enum bfq_device_speed device_speed;
++
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_constantly_seeky, /*
++ * bfqq has proved to be slow and
++ * seeky until budget timeout
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
++ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(prio_changed);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(budget_new);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(constantly_seeky);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(just_split);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
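++
++/*
++ * For reference, BFQ_BFQQ_FNS(busy) above expands (roughly) to the three
++ * helpers below, which set, clear and test bit BFQ_BFQQ_FLAG_busy in
++ * bfqq->flags; the same naming scheme is used throughout this file:
++ *
++ * static inline void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
++ * { bfqq->flags |= (1 << BFQ_BFQQ_FLAG_busy); }
++ * static inline void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
++ * { bfqq->flags &= ~(1 << BFQ_BFQQ_FLAG_busy); }
++ * static inline int bfq_bfqq_busy(const struct bfq_queue *bfqq)
++ * { return (bfqq->flags & (1 << BFQ_BFQQ_FLAG_busy)) != 0; }
++ */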
++
++/* Logging facilities. */
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++};
++
++#ifdef CONFIG_CGROUP_BFQIO
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @group_node: node to be inserted into the bfqio_cgroup->group_data
++ * list of the containing cgroup's bfqio_cgroup.
++ * @bfqd_node: node to be inserted into the @bfqd->group_list list
++ * of the groups active on the same device; used for cleanup.
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_must_not_expire()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @group_node is protected by the bfqio_cgroup lock, and is accessed
++ * via RCU from its readers.
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ struct hlist_node group_node;
++ struct hlist_node bfqd_node;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++};
++
++/**
++ * struct bfqio_cgroup - bfq cgroup data structure.
++ * @css: subsystem state for bfq in the containing cgroup.
++ * @online: flag marked when the subsystem is inserted.
++ * @weight: cgroup weight.
++ * @ioprio: cgroup ioprio.
++ * @ioprio_class: cgroup ioprio_class.
++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
++ * @group_data: list containing the bfq_group belonging to this cgroup.
++ *
++ * @group_data is accessed using RCU, with @lock protecting the updates,
++ * @ioprio and @ioprio_class are protected by @lock.
++ */
++struct bfqio_cgroup {
++ struct cgroup_subsys_state css;
++ bool online;
++
++ unsigned short weight, ioprio, ioprio_class;
++
++ spinlock_t lock;
++ struct hlist_head group_data;
++};
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++};
++#endif
++
++static inline struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ unsigned int idx = entity->ioprio_class - 1;
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ return sched_data->service_tree + idx;
++}
++
++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
++ int is_sync)
++{
++ return bic->bfqq[!!is_sync];
++}
++
++static inline void bic_set_bfqq(struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, int is_sync)
++{
++ bic->bfqq[!!is_sync] = bfqq;
++}
++
++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++/**
++ * bfq_get_bfqd_locked - get a lock to a bfqd using a RCU protected pointer.
++ * bfq_get_bfqd_locked - get a lock on a bfqd using an RCU-protected pointer.
++ * @flags: storage for the flags to be saved.
++ *
++ * This function allows bfqg->bfqd to be protected by the
++ * queue lock of the bfqd it references; the pointer is dereferenced
++ * under RCU, so the storage for bfqd is assured to be safe as long
++ * as the RCU read side critical section does not end. After the
++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
++ * sure that no other writer accessed it. If we raced with a writer,
++ * the function returns NULL, with the queue unlocked, otherwise it
++ * returns the dereferenced pointer, with the queue locked.
++ */
++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
++ unsigned long *flags)
++{
++ struct bfq_data *bfqd;
++
++ rcu_read_lock();
++ bfqd = rcu_dereference(*(struct bfq_data **)ptr);
++
++ if (bfqd != NULL) {
++ spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
++ if (*ptr == bfqd)
++ goto out;
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++ }
++
++ bfqd = NULL;
++out:
++ rcu_read_unlock();
++ return bfqd;
++}
++
++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
++ unsigned long *flags)
++{
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++}
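++
++/*
++ * Illustrative use of the two helpers above (hypothetical caller, only
++ * meant to show the intended pattern): code that merely holds an
++ * RCU-protected bfqg->bfqd pointer would do something like
++ *
++ * unsigned long flags;
++ * struct bfq_data *bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ *
++ * if (bfqd != NULL) {
++ * (work on bfqd with the queue lock held)
++ * bfq_put_bfqd_unlock(bfqd, &flags);
++ * }
++ *
++ * where a NULL return means the race with a writer was lost and no lock
++ * is held.
++ */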
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+diff -Nur linux-3.14.40.orig/block/bfq-ioc.c linux-3.14.40/block/bfq-ioc.c
+--- linux-3.14.40.orig/block/bfq-ioc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/block/bfq-ioc.c 2015-05-01 14:57:58.351427001 -0500
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search @ioc for a bic associated with @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc)
++{
++ if (ioc)
++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++ return NULL;
++}
+diff -Nur linux-3.14.40.orig/block/bfq-iosched.c linux-3.14.40/block/bfq-iosched.c
+--- linux-3.14.40.orig/block/bfq-iosched.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/block/bfq-iosched.c 2015-05-01 14:57:58.355427001 -0500
+@@ -0,0 +1,3919 @@
++/*
++ * Budget Fair Queueing (BFQ) disk scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
++ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
++ * measured in number of sectors, to processes instead of time slices. The
++ * device is not granted to the in-service process for a given time slice,
++ * but until it has exhausted its assigned budget. This change from the time
++ * to the service domain allows BFQ to distribute the device throughput
++ * among processes as desired, without any distortion due to ZBR, workload
++ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
++ * called B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated to processes. Thanks to the
++ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
++ * I/O-bound processes issuing sequential requests (to boost the
++ * throughput), and yet guarantee a low latency to interactive and soft
++ * real-time applications.
++ *
++ * BFQ is described in [1], which also contains a reference to the initial,
++ * more theoretical paper on BFQ. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
++ * with the BFQ Disk I/O Scheduler'',
++ * Proceedings of the 5th Annual International Systems and Storage
++ * Conference (SYSTOR '12), June 2012.
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "bfq.h"
++#include "blk.h"
++
++/* Max number of dispatches in one round of service. */
++static const int bfq_quantum = 4;
++
++/* Expiration time of sync (0) and async (1) requests, in jiffies. */
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT 2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES 32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
++ * are the reference values for a slow/fast rotational device, whereas
++ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
++ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes.
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1536, 10752};
++static int R_fast[2] = {17415, 34791};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * We regard a request as SYNC, if either it's a read or has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++ if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++ return 1;
++
++ return 0;
++}
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (rq1 == NULL || rq1 == rq2)
++ return rq2;
++ if (rq2 == NULL)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++ else {
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
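++
++/*
++ * Worked example of the distance computation above (numbers only
++ * illustrative): with the default bfq_back_max of 16384 KiB, back_max is
++ * 32768 sectors; with bfq_back_penalty = 2, a request 1000 sectors ahead
++ * of the head gets d = 1000, a request 1000 sectors behind it gets
++ * d = 2000, and a request more than 32768 sectors behind the head is
++ * marked as wrapping and loses to any non-wrapping request.
++ */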
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (long long unsigned)sector,
++ bfqq != NULL ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfqd->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (__bfqq == NULL) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ BUG_ON(!bfqd->hw_tag);
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_CGROUP_BFQIO
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if:
++ * - the device does not support queueing;
++ * - the entity is already associated with a counter, which happens if:
++ * 1) the entity is associated with a queue, 2) a request arrival
++ * has caused the queue to become both non-weight-raised, and hence
++ * change its weight, and backlogged; in this respect, each
++ * of the two events causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the second
++ * event. This second invocation is actually useless, and we handle
++ * this fact by exiting immediately. More efficient or clearer
++ * solutions might possibly be adopted.
++ */
++ if (!bfqd->hw_tag || entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++ GFP_ATOMIC);
++ entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
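++
++/*
++ * Small example of the resulting tree (illustrative weights): three
++ * active queues with weights 100, 100 and 200 produce two counters,
++ * (weight 100, num_active 2) and (weight 200, num_active 1); since the
++ * tree then has more than one node, bfq_differentiated_weights() above
++ * returns true.
++ */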
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ /*
++ * Check whether the entity is actually associated with a counter.
++ * In fact, the device may not be considered NCQ-capable for a while,
++ * which implies that no insertion in the weight trees is performed,
++ * after which the device may start to be deemed NCQ-capable, and hence
++ * this function may start to be invoked. This may cause the function
++ * to be invoked for entities that are not associated with any counter.
++ */
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next = NULL, *prev = NULL;
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev != NULL)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext != NULL)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static inline unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ return blk_rq_sectors(rq) *
++ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
++ bfq_async_charge_factor));
++}
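++
++/*
++ * For instance (numbers only illustrative), with the default
++ * bfq_async_charge_factor of 10 a 512-sector sync request is charged
++ * 512 sectors, while a 512-sector async request from a non-weight-raised
++ * queue (wr_coeff == 1) is charged 512 * 11 = 5632 sectors; weight-raised
++ * async queues are charged the plain request size.
++ */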
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue does not have enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (next_rq == NULL)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_activate_bfqq(bfqd, bfqq);
++ }
++}
++
++static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ return dur;
++}
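++
++/*
++ * Rough numerical sketch of the duration = (R / r) * T rule implemented
++ * above: RT_prod caches R * T, so a device whose estimated peak rate
++ * equals the reference rate R is granted exactly the reference duration
++ * T, while a device running at half that rate is granted 2 * T; slower
++ * devices thus enjoy proportionally longer weight-raising periods.
++ */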
++
++static inline unsigned
++bfq_bfqq_cooperations(struct bfq_queue *bfqq)
++{
++ return bfqq->bic ? bfqq->bic->cooperations : 0;
++}
++
++static inline void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ if (bic->saved_idle_window)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++ if (bic->wr_time_left && bfqq->bfqd->low_latency &&
++ bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
++ /*
++ * Start a weight raising period with the duration given by
++ * the raising_time_left snapshot.
++ */
++ if (bfq_bfqq_busy(bfqq))
++ bfqq->bfqd->wr_busy_queues++;
++ bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bic->wr_time_left;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->entity.ioprio_changed = 1;
++ }
++ /*
++ * Clear wr_time_left to prevent bfq_bfqq_save_state() from
++ * getting confused about the queue's need of a weight-raising
++ * period.
++ */
++ bic->wr_time_left = 0;
++}
++
++/*
++ * Must be called with the queue_lock held.
++ */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned long old_wr_coeff = bfqq->wr_coeff;
++ int idle_for_long_time = 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) {
++ int soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ bfq_bfqq_cooperations(bfqq) < bfqd->bfq_coop_thresh &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ idle_for_long_time = bfq_bfqq_cooperations(bfqq) <
++ bfqd->bfq_coop_thresh &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (time_before(jiffies,
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle)) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ }
++
++ if (!bfqd->low_latency)
++ goto add_bfqq_busy;
++
++ if (bfq_bfqq_just_split(bfqq))
++ goto set_ioprio_changed;
++
++ /*
++ * If the queue:
++ * - is not being boosted,
++ * - has been idle for enough time,
++ * - is not a sync queue or is linked to a bfq_io_cq (it is
++ * shared "for its nature" or it is not shared and its
++ * requests have not been redirected to a shared queue)
++ * start a weight-raising period.
++ */
++ if (old_wr_coeff == 1 && (idle_for_long_time || soft_rt) &&
++ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ if (idle_for_long_time)
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ else
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (idle_for_long_time)
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ else if (bfq_bfqq_cooperations(bfqq) >=
++ bfqd->bfq_coop_thresh ||
++ (bfqq->wr_cur_max_time ==
++ bfqd->bfq_wr_rt_max_time &&
++ !soft_rt)) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (time_before(
++ bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time,
++ jiffies +
++ bfqd->bfq_wr_rt_max_time) &&
++ soft_rt) {
++ /*
++ *
++ * The remaining weight-raising time is lower
++ * than bfqd->bfq_wr_rt_max_time, which means
++ * that the application is enjoying weight
++ * raising either because deemed soft-rt in
++ * the near past, or because deemed interactive
++ * long ago.
++ * In both cases, resetting now the current
++ * remaining weight-raising time for the
++ * application to the weight-raising duration
++ * for soft rt applications would not cause any
++ * latency increase for the application (as the
++ * new duration would be higher than the
++ * remaining time).
++ *
++ * In addition, the application is now meeting
++ * the requirements for being deemed soft rt.
++ * In the end we can correctly and safely
++ * (re)charge the weight-raising duration for
++ * the application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ }
++set_ioprio_changed:
++ if (old_wr_coeff != bfqq->wr_coeff)
++ entity->ioprio_changed = 1;
++add_bfqq_busy:
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++ bfq_add_bfqq_busy(bfqd, bfqq);
++ } else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ entity->ioprio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 ||
++ idle_for_long_time))
++ bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ if (bfqq != NULL)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ bfqd->rq_in_driver++;
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
++ (long long unsigned)bfqd->last_position);
++}
++
++static inline void bfq_deactivate_request(struct request_queue *q,
++ struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++}
++
++static int bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ int type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++ }
++}
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * Reposition in fifo if next is older than rq.
++ */
++ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
++ list_move(&rq->queuelist, &next->queuelist);
++ rq_set_fifo_time(rq, rq_fifo_time(next));
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++}
++
++/* Must be called with bfqq != NULL */
++static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq == NULL);
++ if (bfq_bfqq_busy(bfqq))
++ bfqq->bfqd->wr_busy_queues--;
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ /* Trigger a weight change on the next activation of the queue */
++ bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j] != NULL)
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq != NULL)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static inline sector_t bfq_dist_from(sector_t pos1,
++ sector_t pos2)
++{
++ if (pos1 >= pos2)
++ return pos1 - pos2;
++ else
++ return pos2 - pos1;
++}
++
++static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
++ BFQQ_SEEK_THR;
++}
++
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
++{
++ struct rb_root *root = &bfqd->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq != NULL)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (node == NULL)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++/*
++ * bfqd - obvious
++ * cur_bfqq - passed in so that we don't decide that the current queue
++ * is closely cooperating with itself
++ * sector - used as a reference point to search for a close queue
++ */
++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ if (bfq_class_idle(cur_bfqq))
++ return NULL;
++ if (!bfq_bfqq_sync(cur_bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(cur_bfqq))
++ return NULL;
++
++ /* If device has only one backlogged bfq_queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ /*
++ * We should notice if some of the queues are cooperating, e.g.
++ * working closely on the same area of the disk. In that case,
++ * we can group them together and not waste time idling.
++ */
++ bfqq = bfqq_close(bfqd, sector);
++ if (bfqq == NULL || bfqq == cur_bfqq)
++ return NULL;
++
++ /*
++ * Do not merge queues from different bfq_groups.
++ */
++ if (bfqq->entity.parent != cur_bfqq->entity.parent)
++ return NULL;
++
++ /*
++ * It only makes sense to merge sync queues.
++ */
++ if (!bfq_bfqq_sync(bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(bfqq))
++ return NULL;
++
++ /*
++ * Do not merge queues of different priority classes.
++ */
++ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ atomic_add(process_refs, &new_bfqq->ref);
++ return new_bfqq;
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service queue
++ * or with a close queue among the scheduled queues.
++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (!io_struct)
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic)
++ goto check_scheduled;
++
++ if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
++ goto check_scheduled;
++
++ if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
++ goto check_scheduled;
++
++ if (in_service_bfqq->entity.parent != bfqq->entity.parent)
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq != NULL)
++ return new_bfqq; /* Merge with in-service queue */
++ }
++
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++ if (new_bfqq)
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static inline void
++bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic == NULL, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (bfqq->bic == NULL)
++ return;
++ if (bfqq->bic->wr_time_left)
++ /*
++ * This is the queue of a just-started process, and would
++ * deserve weight raising: we set wr_time_left to the full
++ * weight-raising duration to trigger weight-raising when
++ * and if the queue is split and the first request of the
++ * queue is enqueued.
++ */
++ bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
++ else if (bfqq->wr_coeff > 1) {
++ unsigned long wr_duration =
++ jiffies - bfqq->last_wr_start_finish;
++ /*
++ * It may happen that a queue's weight raising period lasts
++ * longer than its wr_cur_max_time, as weight raising is
++ * handled only when a request is enqueued or dispatched (it
++ * does not use any timer). If the weight raising period is
++ * about to end, don't save it.
++ */
++ if (bfqq->wr_cur_max_time <= wr_duration)
++ bfqq->bic->wr_time_left = 0;
++ else
++ bfqq->bic->wr_time_left =
++ bfqq->wr_cur_max_time - wr_duration;
++ /*
++ * The bfq_queue is becoming shared or the requests of the
++ * process owning the queue are being redirected to a shared
++ * queue. Stop the weight raising period of the queue, as in
++ * both cases it should not be owned by an interactive or
++ * soft real-time application.
++ */
++ bfq_bfqq_end_wr(bfqq);
++ } else
++ bfqq->bic->wr_time_left = 0;
++ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bfqq->bic->cooperations++;
++ bfqq->bic->failed_cooperations = 0;
++}
++
++static inline void
++bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (long unsigned)new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ bfq_put_queue(bfqq);
++}
++
++static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
++ bic->failed_cooperations++;
++ if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
++ bic->cooperations = 0;
++ }
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++ return 0;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (bic == NULL)
++ return 0;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq != NULL) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq != NULL) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ } else
++ bfq_bfqq_increase_failed_cooperations(bfqq);
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq != NULL) {
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_mark_bfqq_budget_new(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
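++		/*
++		 * budgets_assigned is a fixed-point decaying counter: each
++		 * assignment below moves it 1/8 of the way towards its
++		 * ceiling of 256. bfq_max_budget() and bfq_min_budget()
++		 * treat values >= 194 (roughly 3/4 of the ceiling) as
++		 * "enough samples" to trust the auto-tuned max budget.
++		 */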
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %lu",
++ bfqq->entity.budget);
++ }
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ unsigned long sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++	 * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++	 * BFQ_MIN_TT. In practice, this helps reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised, grant only minimum idle
++ * time if the queue either has been seeky for long enough or has
++ * already proved to be constantly seeky.
++ */
++ if (bfq_sample_valid(bfqq->seek_samples) &&
++ ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
++ bfq_max_budget(bfqq->bfqd) / 8) ||
++ bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
++ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
++ else if (bfqq->wr_coeff > 1)
++ sl = sl * 3;
++ bfqd->last_idling_start = ktime_get();
++ mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
++ bfq_log(bfqd, "arm idle: %u/%u ms",
++ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
++}
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the disk
++ * throughput (always guaranteed with a time slice scheme as in CFQ).
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ unsigned int timeout_coeff;
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
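++	/*
++	 * For example, a queue whose weight has been raised to three times
++	 * its original weight gets timeout_coeff = 3, i.e., three times the
++	 * base budget timeout, whereas a queue in its soft real-time
++	 * weight-raising period keeps the base timeout.
++	 */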
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfq_clear_bfqq_budget_new(bfqq);
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
++ timeout_coeff));
++}
++
++/*
++ * Move request from internal lists to the request queue dispatch list.
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute instead this instruction before bfq_remove_request()
++ * (and hence introduce a temporary inconsistency), for efficiency.
++	 * In fact, in a forced_dispatch, this prevents the two counters
++	 * related to bfqq->dispatched from being uselessly decremented if
++	 * bfqq is not in service, and then incremented again after
++	 * incrementing bfqq->dispatched.
++ */
++ bfqq->dispatched++;
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++
++ if (bfq_bfqq_sync(bfqq))
++ bfqd->sync_flight++;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
++{
++ struct request *rq = NULL;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ if (list_empty(&bfqq->fifo))
++ return NULL;
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (time_before(jiffies, rq_fifo_time(rq)))
++ return NULL;
++
++ return rq;
++}
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ __bfq_bfqd_reset_in_service(bfqd);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * Overloading budget_timeout field to store the time
++ * at which the queue remains with no backlog; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ } else {
++ bfq_activate_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget. See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ unsigned long budget, min_budget;
++
++ budget = bfqq->max_budget;
++ min_budget = bfq_min_budget(bfqd);
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq)) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++ * smaller budget. Hence, betting that
++			 * the process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because: 1) it
++ * gives the chance to boost the throughput if
++ * this is not a seeky process (which may have
++ * bumped into this timeout because of, e.g.,
++ * ZBR), 2) together with charge_full_budget
++ * it helps give seeky processes higher
++ * timestamps, and hence be served less
++ * frequently.
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * Leave the budget unchanged.
++ */
++ default:
++ return;
++ }
++ } else /* async queue */
++		/* async queues always get the maximum possible budget
++ * (their ability to dispatch is limited by
++ * @bfqd->bfq_max_budget_async_rq).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++ bfqq->max_budget > bfqd->bfq_max_budget)
++ bfqq->max_budget = bfqd->bfq_max_budget;
++
++ /*
++ * Make sure that we have enough budget for the next request.
++ * Since the finish time of the bfqq must be kept in sync with
++ * the budget, be sure to call __bfq_bfqq_expire() after the
++ * update.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq != NULL)
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ else
++ bfqq->entity.budget = bfqq->max_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++ next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++ unsigned long max_budget;
++
++ /*
++ * The max_budget calculated when autotuning is equal to the
++	 * number of sectors transferred in timeout_sync at the
++ * estimated peak rate.
++ */
++ max_budget = (unsigned long)(peak_rate * 1000 *
++ timeout >> BFQ_RATE_SHIFT);
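++
++	/*
++	 * For example, with an estimated peak rate of about 100 MB/s
++	 * (roughly 0.2 sectors per usec) and a sync timeout of 125 ms,
++	 * this yields about 0.2 * 125000 = 25000 sectors, i.e., a budget
++	 * of roughly 12 MB.
++	 */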
++
++ return max_budget;
++}
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances to lower the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int compensate, enum bfqq_expiration reason)
++{
++ u64 bw, usecs, expected, timeout;
++ ktime_t delta;
++ int update = 0;
++
++ if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++ return 0;
++
++ if (compensate)
++ delta = bfqd->last_idling_start;
++ else
++ delta = ktime_get();
++ delta = ktime_sub(delta, bfqd->last_budget_start);
++ usecs = ktime_to_us(delta);
++
++ /* Don't trust short/unrealistic values. */
++ if (usecs < 100 || usecs >= LONG_MAX)
++ return 0;
++
++ /*
++ * Calculate the bandwidth for the last slice. We use a 64 bit
++ * value to store the peak rate, in sectors per usec in fixed
++ * point math. We do so to have enough precision in the estimate
++ * and to avoid overflows.
++ */
++ bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++ do_div(bw, (unsigned long)usecs);
++
++ timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ /*
++ * Use only long (> 20ms) intervals to filter out spikes for
++ * the peak rate estimation.
++ */
++ if (usecs > 20000) {
++ if (bw > bfqd->peak_rate ||
++ (!BFQQ_SEEKY(bfqq) &&
++ reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++ bfq_log(bfqd, "measured bw =%llu", bw);
++ /*
++ * To smooth oscillations use a low-pass filter with
++ * alpha=7/8, i.e.,
++ * new_rate = (7/8) * old_rate + (1/8) * bw
++ */
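++			/*
++			 * E.g., with old peak_rate = 800 and measured
++			 * bw = 1600 (same fixed-point units), the new
++			 * peak_rate = 800 * 7/8 + 1600 / 8 = 700 + 200 = 900.
++			 */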
++ do_div(bw, 8);
++ if (bw == 0)
++ return 0;
++ bfqd->peak_rate *= 7;
++ do_div(bfqd->peak_rate, 8);
++ bfqd->peak_rate += bw;
++ update = 1;
++ bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++ }
++
++ update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++ if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++ bfqd->peak_rate_samples++;
++
++ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++ update) {
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd->peak_rate,
++ timeout);
++ bfq_log(bfqd, "new max_budget=%lu",
++ bfqd->bfq_max_budget);
++ }
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++ }
++ }
++
++ /*
++	 * If the process has been served for too short a time interval
++	 * to let its possible sequential accesses prevail over the
++	 * initial seek time needed to move the disk head to the first
++	 * sector it requested, then give the process a chance and, for
++	 * the moment, return false.
++ */
++ if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++ return 0;
++
++ /*
++ * A process is considered ``slow'' (i.e., seeky, so that we
++	 * cannot treat it fairly in the service domain, as it would
++	 * slow down the other processes too much) if, when a slice
++ * ends for whatever reason, it has received service at a
++ * rate that would not be high enough to complete the budget
++ * before the budget timeout expiration.
++ */
++ expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++ /*
++ * Caveat: processes doing IO in the slower disk zones will
++ * tend to be slow(er) even if not seeky. And the estimated
++ * peak rate will actually be an average over the disk
++ * surface. Hence, to not be too harsh with unlucky processes,
++ * we keep a budget/3 margin of safety before declaring a
++ * process slow.
++ */
++ return expected > (4 * bfqq->entity.budget) / 3;
++}
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to playback or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed soft real-time, a further rule is used in the computation
++ * of soft_rt_next_start: soft_rt_next_start must be higher than the
++ * current time plus the maximum time for which BFQ waits for the arrival
++ * of a request after a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
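++	/*
++	 * Example with illustrative numbers: if the queue has received 2000
++	 * sectors of service since it last became backlogged and
++	 * bfq_wr_max_softrt_rate is 7000 sectors/sec, the next batch must
++	 * not start earlier than last_idle_bklogged + 2000/7000 of a second
++	 * (converted to jiffies by the HZ factor), and in any case not
++	 * before jiffies + bfq_slice_idle + 4.
++	 */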
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + bfqq->bfqd->bfq_slice_idle + 4);
++}
++
++/*
++ * Return the largest-possible time instant such that, for as long as possible,
++ * the current time will be lower than this time instant according to the macro
++ * time_is_before_jiffies().
++ */
++static inline unsigned long bfq_infinity_from_now(unsigned long now)
++{
++ return now + ULONG_MAX / 2;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ *
++ * If the process associated to the queue is slow (i.e., seeky), or in
++ * case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than what it actually received. In the end, this class of processes
++ * will receive less service in proportion to how slowly they consume
++ * their budgets (and hence how seriously they tend to lower the
++ * throughput).
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too much or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int compensate,
++ enum bfqq_expiration reason)
++{
++ int slow;
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /* Update disk peak rate for autotuning and check whether the
++ * process is slow (see bfq_update_peak_rate).
++ */
++ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++ /*
++	 * As explained above, 'punish' slow (i.e., seeky), timed-out
++ * and async queues, to favor sequential sync workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to be
++ * slow(er) even if not seeky. Hence, since the estimated peak
++ * rate is actually an average over the disk surface, these
++	 * processes may time out just out of bad luck. To avoid punishing
++ * them we do not charge a full budget to a process that
++ * succeeded in consuming at least 2/3 of its budget.
++ */
++ if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++ bfq_bfqq_charge_full_budget(bfqq);
++
++ bfqq->service_from_backlogged += bfqq->entity.service;
++
++ if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ !bfq_bfqq_constantly_seeky(bfqq)) {
++ bfq_mark_bfqq_constantly_seeky(bfqq);
++ if (!blk_queue_nonrot(bfqd->queue))
++ bfqd->const_seeky_busy_in_flight_queues++;
++ }
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++	    bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding requests,
++ * then the request pattern is isochronous (see the comments
++ * to the function bfq_bfqq_softrt_next_start()). Hence we
++ * can compute soft_rt_next_start. If, instead, the queue
++ * still has outstanding requests, then we have to wait
++ * for the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ if (bfqq->dispatched == 0)
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_infinity_from_now(jiffies);
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
++ slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ __bfq_bfqq_expire(bfqd, bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_budget_new(bfqq) ||
++ time_before(jiffies, bfqq->budget_timeout))
++ return 0;
++ return 1;
++}
++
++/*
++ * If we expire a queue that is waiting for the arrival of a new
++ * request, we may prevent the fictitious timestamp back-shifting that
++ * allows the guarantees of the queue to be preserved (see [1] for
++ * this tricky aspect). Hence we return true only if this condition
++ * does not hold, or if the queue is slow enough to deserve only to be
++ * kicked off for preserving a high throughput.
++*/
++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * Device idling is allowed only for the queues for which this function
++ * returns true. For this reason, the return value of this function plays a
++ * critical role for both throughput boosting and service guarantees. The
++ * return value is computed through a logical expression. In this rather
++ * long comment, we try to briefly describe all the details and motivations
++ * behind the components of this logical expression.
++ *
++ * First, the expression may be true only for sync queues. Besides, if
++ * bfqq is also being weight-raised, then the expression always evaluates
++ * to true, as device idling is instrumental for preserving low-latency
++ * guarantees (see [1]). Otherwise, the expression evaluates to true only
++ * if bfqq has a non-null idle window and at least one of the following
++ * two conditions holds. The first condition is that the device is not
++ * performing NCQ, because idling the device most certainly boosts the
++ * throughput if this condition holds and bfqq has been granted a non-null
++ * idle window. The second compound condition is made of the logical AND of
++ * two components.
++ *
++ * The first component is true only if there is no weight-raised busy
++ * queue. This guarantees that the device is not idled for a sync non-
++ * weight-raised queue when there are busy weight-raised queues. The former
++ * is then expired immediately if empty. Combined with the timestamping
++ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
++ * queues to get a lower number of requests served, and hence to ask for a
++ * lower number of requests from the request pool, before the busy weight-
++ * raised queues get served again.
++ *
++ * This is beneficial for the processes associated with weight-raised
++ * queues, when the request pool is saturated (e.g., in the presence of
++ * write hogs). In fact, if the processes associated with the other queues
++ * ask for requests at a lower rate, then weight-raised processes have a
++ * higher probability to get a request from the pool immediately (or at
++ * least soon) when they need one. Hence they have a higher probability to
++ * actually get a fraction of the disk throughput proportional to their
++ * high weight. This is especially true with NCQ-capable drives, which
++ * enqueue several requests in advance and further reorder internally-
++ * queued requests.
++ *
++ * In the end, mistreating non-weight-raised queues when there are busy
++ * weight-raised queues seems to mitigate starvation problems in the
++ * presence of heavy write workloads and NCQ, and hence to guarantee a
++ * higher application and system responsiveness in these hostile scenarios.
++ *
++ * If the first component of the compound condition is instead true, i.e.,
++ * there is no weight-raised busy queue, then the second component of the
++ * compound condition takes into account service-guarantee and throughput
++ * issues related to NCQ (recall that the compound condition is evaluated
++ * only if the device is detected as supporting NCQ).
++ *
++ * As for service guarantees, allowing the drive to enqueue more than one
++ * request at a time, and hence delegating de facto final scheduling
++ * decisions to the drive's internal scheduler, causes loss of control on
++ * the actual request service order. In this respect, when the drive is
++ * allowed to enqueue more than one request at a time, the service
++ * distribution enforced by the drive's internal scheduler is likely to
++ * coincide with the desired device-throughput distribution only in the
++ * following, perfectly symmetric, scenario:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Even in such a scenario, sequential I/O may still receive a preferential
++ * treatment, but this is not likely to be a big issue with flash-based
++ * devices, because of their non-dramatic loss of throughput with random
++ * I/O. Things do differ with HDDs, for which additional care is taken, as
++ * explained after completing the discussion for flash-based devices.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore BFQ evaluates instead the following stronger sub-conditions,
++ * for which it is much easier to maintain the needed state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, hence no state needs
++ * to be maintained in this case.
++ *
++ * According to the above considerations, the second component of the
++ * compound condition evaluates to true if any of the above symmetry
++ * sub-condition does not hold, or the device is not flash-based. Therefore,
++ * if also the first component is true, then idling is allowed for a sync
++ * queue. These are the only sub-conditions considered if the device is
++ * flash-based, as, for such a device, it is sensible to force idling only
++ * for service-guarantee issues. In fact, as for throughput, idling
++ * NCQ-capable flash-based devices would not boost the throughput even
++ * with sequential I/O; rather it would lower the throughput in proportion
++ * to how fast the device is. In the end, (only) if all the three
++ * sub-conditions hold and the device is flash-based, the compound
++ * condition evaluates to false and therefore no idling is performed.
++ *
++ * As already said, things change with a rotational device, where idling
++ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
++ * such a device the second component of the compound condition evaluates
++ * to true also if the following additional sub-condition does not hold:
++ * the queue is constantly seeky. Unfortunately, this different behavior
++ * with respect to flash-based devices causes an additional asymmetry: if
++ * some sync queues enjoy idling and some other sync queues do not, then
++ * the latter get a low share of the device throughput, simply because the
++ * former get many requests served after being set as in service, whereas
++ * the latter do not. As a consequence, to guarantee the desired throughput
++ * distribution, on HDDs the compound expression evaluates to true (and
++ * hence device idling is performed) also if the following last symmetry
++ * condition does not hold: no other queue is benefiting from idling. Also
++ * this last condition is actually replaced with a simpler-to-maintain and
++ * stronger condition: there is no busy queue which is not constantly seeky
++ * (and hence may also benefit from idling).
++ *
++ * To sum up, when all the required symmetry and throughput-boosting
++ * sub-conditions hold, the second component of the compound condition
++ * evaluates to false, and hence no idling is performed. This helps to
++ * keep the drives' internal queues full on NCQ-capable devices, and hence
++ * to boost the throughput, without causing 'almost' any loss of service
++ * guarantees. The 'almost' follows from the fact that, if the internal
++ * queue of one such device is filled while all the sub-conditions hold,
++ * but at some point in time some sub-condition ceases to hold, then it may
++ * become impossible to let requests be served in the new desired order
++ * until all the requests already queued in the device have been served.
++ */
++static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++#define symmetric_scenario (!bfqd->active_numerous_groups && \
++ !bfq_differentiated_weights(bfqd))
++#else
++#define symmetric_scenario (!bfq_differentiated_weights(bfqd))
++#endif
++#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
++ bfqd->busy_in_flight_queues == \
++ bfqd->const_seeky_busy_in_flight_queues)
++/*
++ * Condition for expiring a non-weight-raised queue (and hence not idling
++ * the device).
++ */
++#define cond_for_expiring_non_wr (bfqd->hw_tag && \
++ (bfqd->wr_busy_queues > 0 || \
++ (symmetric_scenario && \
++ (blk_queue_nonrot(bfqd->queue) || \
++ cond_for_seeky_on_ncq_hdd))))
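++
++/*
++ * Putting the pieces together: idling is allowed for a sync queue that
++ * either is being weight-raised, or is IO-bound, has a non-null idle
++ * window and does not satisfy cond_for_expiring_non_wr above.
++ */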
++
++ return bfq_bfqq_sync(bfqq) &&
++ (bfq_bfqq_IO_bound(bfqq) || bfqq->wr_coeff > 1) &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_idle_window(bfqq) &&
++ !cond_for_expiring_non_wr)
++ );
++}
++
++/*
++ * If the in-service queue is empty but sync, and the function
++ * bfq_bfqq_must_not_expire returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the disk must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
++ * returns true.
++ */
++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++ bfq_bfqq_must_not_expire(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq == NULL)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !timer_pending(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq != NULL) {
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (timer_pending(&bfqd->idle_slice_timer)) {
++ /*
++				 * If we get here: 1) at least one new request
++ * has arrived but we have not disabled the
++ * timer because the request was too small,
++ * 2) then the block layer has unplugged
++ * the device, causing the dispatch to be
++ * invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. If the in-service queue still has requests
++ * in flight (possibly waiting for a completion) or is idling for a
++ * new request, then keep it.
++ */
++ if (timer_pending(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ bfq_log(bfqd, "select_queue: new queue %d returned",
++ bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++ return bfqq;
++}
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->ioprio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++		 * If too much time has elapsed since the beginning
++ * of this weight-raising period, or the queue has
++ * exceeded the acceptable number of cooperations,
++ * stop it.
++ */
++ if (bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ bfqq->last_wr_start_finish = jiffies;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_bfqq_end_wr(bfqq);
++ }
++ }
++ /* Update weight both if it must be raised and if it must be lowered */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(
++ bfq_entity_service_tree(entity),
++ entity);
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Follow expired path, else get first next available. */
++ rq = bfq_check_fifo(bfqq);
++ if (rq == NULL)
++ rq = bfqq->next_rq;
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * This may happen if the next rq is chosen in fifo order
++ * instead of sector order. The budget is properly
++ * dimensioned to be always sufficient to serve the next
++ * request only if it is chosen in sector order. The reason
++		 * is that it would be quite inefficient and of little use
++ * to always make sure that the budget is large enough to
++ * serve even the possible next rq in fifo order.
++ * In fact, requests are seldom served in fifo order.
++ *
++ * Expire the queue for budget exhaustion, and make sure
++ * that the next act_budget is enough to serve the next
++ * request, even if it comes from the fifo expired path.
++ */
++ bfqq->next_rq = rq;
++ /*
++		 * Since this dispatch failed, make sure that
++ * a new one will be performed
++ */
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++ goto expire;
++ }
++
++ /* Finally, insert request into driver dispatch list. */
++ bfq_bfqq_served(bfqq, service_to_charge);
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %lu",
++ blk_rq_sectors(rq),
++ (long long unsigned)blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (bfqd->in_service_bic == NULL) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
++ dispatched >= bfqd->bfq_max_budget_async_rq) ||
++ bfq_class_idle(bfqq)))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq != NULL) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq != NULL)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++ bfqq->max_budget = bfq_max_budget(bfqd);
++
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++ int max_dispatch;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ bfqq = bfq_select_queue(bfqd);
++ if (bfqq == NULL)
++ return 0;
++
++ max_dispatch = bfqd->bfq_quantum;
++ if (bfq_class_idle(bfqq))
++ max_dispatch = 1;
++
++ if (!bfq_bfqq_sync(bfqq))
++ max_dispatch = bfqd->bfq_max_budget_async_rq;
++
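++	/*
++	 * A queue that has already reached max_dispatch is fed further only
++	 * if it is the only busy queue, and even then never beyond
++	 * 4 * max_dispatch requests in flight.
++	 */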
++ if (bfqq->dispatched >= max_dispatch) {
++ if (bfqd->busy_queues > 1)
++ return 0;
++ if (bfqq->dispatched >= 4 * max_dispatch)
++ return 0;
++ }
++
++ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
++ return 0;
++
++ bfq_clear_bfqq_wait_request(bfqq);
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
++ bfqq->pid, max_dispatch);
++
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ BUG_ON(atomic_read(&bfqq->ref) <= 0);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
++ atomic_read(&bfqq->ref));
++ if (!atomic_dec_and_test(&bfqq->ref))
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list) != NULL);
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree != NULL);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqd->in_service_queue == bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++}
++
++static inline void bfq_init_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ bic->ttime.last_end_request = jiffies;
++ /*
++ * A newly created bic indicates that the process has just
++ * started doing I/O, and is probably mapping into memory its
++ * executable and libraries: it definitely needs weight raising.
++ * There is however the possibility that the process performs,
++ * for a while, I/O close to some other process. EQM intercepts
++ * this behavior and may merge the queue corresponding to the
++ * process with some other queue, BEFORE the weight of the queue
++ * is raised. Merged queues are not weight-raised (they are assumed
++ * to belong to processes that benefit only from high throughput).
++ * If the merge is basically the consequence of an accident, then
++ * the queue will be split soon and will get back its old weight.
++ * It is then important to write down somewhere that this queue
++	 * does need weight raising, even if it did not manage to have its
++	 * weight raised before being merged. To this purpose, we overload
++	 * the field wr_time_left and assign 1 to it, to mark the queue
++ * as needing weight raising.
++ */
++ bic->wr_time_left = 1;
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic->bfqq[BLK_RW_ASYNC]) {
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
++ bic->bfqq[BLK_RW_ASYNC] = NULL;
++ }
++
++ if (bic->bfqq[BLK_RW_SYNC]) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
++ bic->bfqq[BLK_RW_SYNC] = NULL;
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ if (!bfq_bfqq_prio_changed(bfqq))
++ return;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
++ "bfq: bad prio %x\n", ioprio_class);
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
++ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->entity.new_ioprio = 7;
++ bfq_clear_bfqq_idle_window(bfqq);
++ break;
++ }
++
++ bfqq->entity.ioprio_changed = 1;
++
++ bfq_clear_bfqq_prio_changed(bfqq);
++}
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd;
++ struct bfq_queue *bfqq, *new_bfqq;
++ struct bfq_group *bfqg;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ /*
++ * This condition may trigger on a newly created bic, be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
++ goto out;
++
++ bfqq = bic->bfqq[BLK_RW_ASYNC];
++ if (bfqq != NULL) {
++ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
++ sched_data);
++ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
++ GFP_ATOMIC);
++ if (new_bfqq != NULL) {
++ bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
++ bfq_log_bfqq(bfqd, bfqq,
++ "changed_ioprio: bfqq %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++ }
++
++ bfqq = bic->bfqq[BLK_RW_SYNC];
++ if (bfqq != NULL)
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ bic->ioprio = ioprio;
++
++out:
++ bfq_put_bfqd_unlock(bfqd, &flags);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++
++ atomic_set(&bfqq->ref, 0);
++ bfqq->bfqd = bfqd;
++
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ if (is_sync) {
++ if (!bfq_class_idle(bfqq))
++ bfq_mark_bfqq_idle_window(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ }
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++ /* Tentative initial value to trade off between thr and lat */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = 0;
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
++}
++
++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int is_sync,
++ struct bfq_io_cq *bic,
++ gfp_t gfp_mask)
++{
++ struct bfq_queue *bfqq, *new_bfqq = NULL;
++
++retry:
++ /* bic always exists here */
++ bfqq = bic_to_bfqq(bic, is_sync);
++
++ /*
++ * Always try a new alloc if we fall back to the OOM bfqq
++ * originally, since it should just be a temporary situation.
++ */
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = NULL;
++ if (new_bfqq != NULL) {
++ bfqq = new_bfqq;
++ new_bfqq = NULL;
++ } else if (gfp_mask & __GFP_WAIT) {
++ spin_unlock_irq(bfqd->queue->queue_lock);
++ new_bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ spin_lock_irq(bfqd->queue->queue_lock);
++ if (new_bfqq != NULL)
++ goto retry;
++ } else {
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ }
++
++ if (bfqq != NULL) {
++ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ }
++
++ bfq_init_prio_data(bfqq, bic);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ }
++
++ if (new_bfqq != NULL)
++ kmem_cache_free(bfq_pool, new_bfqq);
++
++ return bfqq;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq = NULL;
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ }
++
++ if (bfqq == NULL)
++ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++
++ /*
++ * Pin the queue now that it's allocated, scheduler exit will
++ * prune it.
++ */
++ if (!is_sync && *async_bfqq == NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ *async_bfqq = bfqq;
++ }
++
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ unsigned long elapsed = jiffies - bic->ttime.last_end_request;
++ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
++
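++ /*
++ * Decaying (weight 7/8) fixed-point statistics, scaled by 256:
++ * ttime_samples saturates towards 256, while ttime_total tracks an
++ * exponentially weighted mean of the recent think times.
++ */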
++ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
++ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
++ bic->ttime.ttime_samples;
++}
++
++static void bfq_update_io_seektime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ sector_t sdist;
++ u64 total;
++
++ if (bfqq->last_request_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
++ else
++ sdist = bfqq->last_request_pos - blk_rq_pos(rq);
++
++ /*
++ * Don't allow the seek distance to get too large from the
++ * odd fragment, pagein, etc.
++ */
++ if (bfqq->seek_samples == 0) /* first request, not really a seek */
++ sdist = 0;
++ else if (bfqq->seek_samples <= 60) /* second & third seek */
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
++ else
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
++
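++ /* Same decaying fixed-point scheme as for the thinktime statistics. */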
++ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
++ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
++ total = bfqq->seek_total + (bfqq->seek_samples/2);
++ do_div(total, bfqq->seek_samples);
++ bfqq->seek_mean = (sector_t)total;
++
++ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
++ (u64)bfqq->seek_mean);
++}
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ int enable_idle;
++
++ /* Don't idle for async or idle io prio class. */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (bfq_bfqq_just_split(bfqq))
++ return;
++
++ enable_idle = bfq_bfqq_idle_window(bfqq);
++
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ bfqd->bfq_slice_idle == 0 ||
++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++ bfqq->wr_coeff == 1))
++ enable_idle = 0;
++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++ bfqq->wr_coeff == 1)
++ enable_idle = 0;
++ else
++ enable_idle = 1;
++ }
++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++ enable_idle);
++
++ if (enable_idle)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++ if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
++ bfq_clear_bfqq_constantly_seeky(bfqq);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++ !BFQQ_SEEKY(bfqq))
++ bfq_update_idle_window(bfqd, bfqq, bic);
++ bfq_clear_bfqq_just_split(bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
++ (long long unsigned)bfqq->seek_mean);
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the disk is being idled to wait for
++ * a new request from the in-service queue, we avoid
++ * unplugging the device and committing the disk to serve
++ * just a small request. Instead, we wait for
++ * the block layer to decide when to unplug the device:
++ * hopefully, new requests will be merged to this one
++ * quickly, then the device will be unplugged and
++ * larger requests will be dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq != NULL) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ atomic_inc(&new_bfqq->ref);
++ bfq_put_queue(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ } else
++ bfq_bfqq_increase_failed_cooperations(bfqq);
++ }
++
++ bfq_init_prio_data(bfqq, RQ_BIC(rq));
++
++ bfq_add_request(rq);
++
++ /*
++ * Here a newly-created bfq_queue has already started a weight-raising
++ * period: clear raising_time_left to prevent bfq_bfqq_save_state()
++ * from assigning it a full weight-raising period. See the detailed
++ * comments about this field in bfq_init_icq().
++ */
++ if (bfqq->bic != NULL)
++ bfqq->bic->wr_time_left = 0;
++ rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool sync = bfq_bfqq_sync(bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
++ blk_rq_sectors(rq), sync);
++
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->busy_in_flight_queues);
++ bfqd->busy_in_flight_queues--;
++ if (bfq_bfqq_constantly_seeky(bfqq)) {
++ BUG_ON(!bfqd->
++ const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ }
++
++ if (sync) {
++ bfqd->sync_flight--;
++ RQ_BIC(rq)->ttime.last_end_request = jiffies;
++ }
++
++ /*
++ * If we are waiting to discover whether the request pattern of the
++ * task associated with the queue is actually isochronous, and
++ * both requisites for this condition to hold are satisfied, then
++ * compute soft_rt_next_start (see the comments to the function
++ * bfq_bfqq_softrt_next_start()).
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfq_bfqq_budget_new(bfqq))
++ bfq_set_budget_timeout(bfqd);
++
++ if (bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_must_not_expire(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, 0,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static inline int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, int rw)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++ * queued. So just look up a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
++ if (bfqq != NULL) {
++ bfq_init_prio_data(bfqq, bic);
++
++ return __bfq_may_queue(bfqq);
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq != NULL) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to said bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++ unsigned long flags;
++ bool split = false;
++
++ might_sleep_if(gfp_mask & __GFP_WAIT);
++
++ bfq_changed_ioprio(bic);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (bic == NULL)
++ goto queue_fail;
++
++ bfqg = bfq_bic_update_cgroup(bic);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++ bic_set_bfqq(bic, bfqq, is_sync);
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ bfq_mark_bfqq_just_split(bfqq);
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bic);
++ }
++ }
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static void bfq_idle_slice_timer(unsigned long data)
++{
++ struct bfq_data *bfqd = (struct bfq_data *)data;
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++ * different from the queue that was idling if the timer handler
++ * spins on the queue_lock and a new request arrives for the
++ * current queue and there is a full dispatch cycle that changes
++ * the in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ del_timer_sync(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq != NULL) {
++ bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue != NULL);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++ bfq_disconnect_groups(bfqd);
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ synchronize_rcu();
++
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ bfq_free_root_group(bfqd);
++ kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (eq == NULL)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (bfqd == NULL) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
++ atomic_inc(&bfqd->oom_bfqq.ref);
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqg = bfq_alloc_root_group(bfqd, q->node);
++ if (bfqg == NULL) {
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++
++ bfqd->root_group = bfqg;
++#ifdef CONFIG_CGROUP_BFQIO
++ bfqd->active_numerous_groups = 0;
++#endif
++
++ init_timer(&bfqd->idle_slice_timer);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++ bfqd->idle_slice_timer.data = (unsigned long)bfqd;
++
++ bfqd->rq_pos_tree = RB_ROOT;
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++
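++ /* -1 means the queueing capability of the drive is still unknown */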
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_quantum = bfq_quantum;
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_class_idle_last_service = 0;
++ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
++ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
++ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
++
++ bfqd->bfq_coop_thresh = 2;
++ bfqd->bfq_failed_cooperations = 7000;
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->low_latency = true;
++
++ bfqd->bfq_wr_coeff = 20;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to play back or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++ bfqd->busy_in_flight_queues = 0;
++ bfqd->const_seeky_busy_in_flight_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device peak rate is
++ * equal to the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++}
++
++static void bfq_slab_kill(void)
++{
++ if (bfq_pool != NULL)
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (bfq_pool == NULL)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%d\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1],
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned int __data = __VAR; \
++ if (__CONV) \
++ __data = jiffies_to_msecs(__data); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_max_budget_async_rq_show,
++ bfqd->bfq_max_budget_async_rq, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
++ 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
++{
++ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
++ return bfq_calc_max_budget(bfqd->peak_rate, timeout);
++ else
++ return bfq_default_max_budget;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(quantum),
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(max_budget_async_rq),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(timeout_async),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++ .elevator_allow_merge_fn = bfq_allow_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq",
++ .elevator_owner = THIS_MODULE,
++};
++
++static int __init bfq_init(void)
++{
++ /*
++ * Can be 0 on HZ < 1000 setups.
++ */
++ if (bfq_slice_idle == 0)
++ bfq_slice_idle = 1;
++
++ if (bfq_timeout_async == 0)
++ bfq_timeout_async = 1;
++
++ if (bfq_slab_setup())
++ return -ENOMEM;
++
++ /*
++ * Times to load large popular applications for the typical systems
++ * installed on the reference devices (see the comments before the
++ * definitions of the two arrays).
++ */
++ T_slow[0] = msecs_to_jiffies(2600);
++ T_slow[1] = msecs_to_jiffies(1000);
++ T_fast[0] = msecs_to_jiffies(5500);
++ T_fast[1] = msecs_to_jiffies(2000);
++
++ /*
++ * Thresholds that determine the switch between speed classes (see
++ * the comments before the definition of the array).
++ */
++ device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
++ device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
++
++ elv_register(&iosched_bfq);
++ pr_info("BFQ I/O-scheduler version: v7r5");
++
++ return 0;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/block/bfq-sched.c linux-3.14.40/block/bfq-sched.c
+--- linux-3.14.40.orig/block/bfq-sched.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/block/bfq-sched.c 2015-05-01 14:57:58.355427001 -0500
+@@ -0,0 +1,1179 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = entity->parent)
++
++#define for_each_entity_safe(entity, parent) \
++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd);
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++ struct bfq_entity *bfqg_entity;
++ struct bfq_group *bfqg;
++ struct bfq_sched_data *group_sd;
++
++ BUG_ON(next_in_service == NULL);
++
++ group_sd = next_in_service->sched_data;
++
++ bfqg = container_of(group_sd, struct bfq_group, sched_data);
++ /*
++ * bfq_group's my_entity field is not NULL only if the group
++ * is not the root group. We must not touch the root entity
++ * as it must never become an in-service entity.
++ */
++ bfqg_entity = bfqg->my_entity;
++ if (bfqg_entity != NULL)
++ bfqg_entity->budget = next_in_service->budget;
++}
++
++static int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++ struct bfq_entity *next_in_service;
++
++ if (sd->in_service_entity != NULL)
++ /* will update/requeue at the end of service */
++ return 0;
++
++ /*
++ * NOTE: this can be improved in many ways, such as returning
++ * 1 (and thus propagating upwards the update) only when the
++ * budget changes, or caching the bfqq that will be scheduled
++ * next from this subtree. By now we worry more about
++ * correctness than about performance...
++ */
++ next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
++ sd->next_in_service = next_in_service;
++
++ if (next_in_service != NULL)
++ bfq_update_budget(next_in_service);
++
++ return 1;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++ BUG_ON(sd->next_in_service != entity);
++}
++#else
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++ for (parent = NULL; entity != NULL; entity = parent)
++
++static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++ return 0;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++}
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++}
++#endif
++
++/*
++ * Shift for timestamp calculations. This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT 22
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
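++ * The comparison is done on the signed difference, so the result is
++ * correct across wraparound as long as the two timestamps differ by
++ * less than 2^63.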
++ */
++static inline int bfq_gt(u64 a, u64 b)
++{
++ return (s64)(a - b) > 0;
++}
++
++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = NULL;
++
++ BUG_ON(entity == NULL);
++
++ if (entity->my_sched_data == NULL)
++ bfqq = container_of(entity, struct bfq_queue, entity);
++
++ return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static inline u64 bfq_delta(unsigned long service,
++ unsigned long weight)
++{
++ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++ do_div(d, weight);
++ return d;
++}
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static inline void bfq_calc_finish(struct bfq_entity *entity,
++ unsigned long service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(entity->weight == 0);
++
++ entity->finish = entity->start +
++ bfq_delta(service, entity->weight);
++
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: start %llu, finish %llu, delta %llu",
++ entity->start, entity->finish,
++ bfq_delta(service, entity->weight));
++ }
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the relative entity. This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++ struct bfq_entity *entity = NULL;
++
++ if (node != NULL)
++ entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static inline void bfq_extract(struct rb_root *root,
++ struct bfq_entity *entity)
++{
++ BUG_ON(entity->tree != root);
++
++ entity->tree = NULL;
++ rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *next;
++
++ BUG_ON(entity->tree != &st->idle);
++
++ if (entity == st->first_idle) {
++ next = rb_next(&entity->rb_node);
++ st->first_idle = bfq_entity_of(next);
++ }
++
++ if (entity == st->last_idle) {
++ next = rb_prev(&entity->rb_node);
++ st->last_idle = bfq_entity_of(next);
++ }
++
++ bfq_extract(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++ struct bfq_entity *entry;
++ struct rb_node **node = &root->rb_node;
++ struct rb_node *parent = NULL;
++
++ BUG_ON(entity->tree != NULL);
++
++ while (*node != NULL) {
++ parent = *node;
++ entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++ if (bfq_gt(entry->finish, entity->finish))
++ node = &parent->rb_left;
++ else
++ node = &parent->rb_right;
++ }
++
++ rb_link_node(&entity->rb_node, parent, node);
++ rb_insert_color(&entity->rb_node, root);
++
++ entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of an entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree. The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static inline void bfq_update_min(struct bfq_entity *entity,
++ struct rb_node *node)
++{
++ struct bfq_entity *child;
++
++ if (node != NULL) {
++ child = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entity->min_start, child->min_start))
++ entity->min_start = child->min_start;
++ }
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved;
++ * this function updates its min_start value. The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static inline void bfq_update_active_node(struct rb_node *node)
++{
++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ entity->min_start = entity->start;
++ bfq_update_min(entity, node->rb_right);
++ bfq_update_min(entity, node->rb_left);
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update. This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root. The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++ struct rb_node *parent;
++
++up:
++ bfq_update_active_node(node);
++
++ parent = rb_parent(node);
++ if (parent == NULL)
++ return;
++
++ if (node == parent->rb_left && parent->rb_right != NULL)
++ bfq_update_active_node(parent->rb_right);
++ else if (parent->rb_left != NULL)
++ bfq_update_active_node(parent->rb_left);
++
++ node = parent;
++ goto up;
++}
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ * group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * per each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ bfq_insert(&st->active, entity);
++
++ if (node->rb_left != NULL)
++ node = node->rb_left;
++ else if (node->rb_right != NULL)
++ node = node->rb_right;
++
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_CGROUP_BFQIO
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ bfqg->active_entities++;
++ if (bfqg->active_entities == 2)
++ bfqd->active_numerous_groups++;
++ }
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
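++ *
++ * Lower ioprio values denote higher priority, so the mapping is
++ * reversed: ioprio 0 yields the maximum weight IOPRIO_BE_NR, while
++ * ioprio IOPRIO_BE_NR - 1 yields weight 1.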
++ */
++static inline unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++ return IOPRIO_BE_NR - ioprio;
++}
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as much as possible the old only-ioprio user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal
++ * to or larger than IOPRIO_BE_NR.
++ */
++static inline unsigned short bfq_weight_to_ioprio(int weight)
++{
++ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
++}
++
++static inline void bfq_get_entity(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ if (bfqq != NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ }
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch. If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++ struct rb_node *deepest;
++
++ if (node->rb_right == NULL && node->rb_left == NULL)
++ deepest = rb_parent(node);
++ else if (node->rb_right == NULL)
++ deepest = node->rb_left;
++ else if (node->rb_left == NULL)
++ deepest = node->rb_right;
++ else {
++ deepest = rb_next(node);
++ if (deepest->rb_right != NULL)
++ deepest = deepest->rb_right;
++ else if (rb_parent(deepest) != node)
++ deepest = rb_parent(deepest);
++ }
++
++ return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ node = bfq_find_deepest(&entity->rb_node);
++ bfq_extract(&st->active, entity);
++
++ if (node != NULL)
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_CGROUP_BFQIO
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_remove(bfqd, entity,
++ &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ BUG_ON(!bfqg->active_entities);
++ bfqg->active_entities--;
++ if (bfqg->active_entities == 1) {
++ BUG_ON(!bfqd->active_numerous_groups);
++ bfqd->active_numerous_groups--;
++ }
++ }
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
++ st->first_idle = entity;
++ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
++ st->last_idle = entity;
++
++ bfq_insert(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - remove an entity from the wfq trees.
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ *
++ * Update the device status and forget everything about @entity, dropping
++ * the device's reference to it if it is a queue. Entities belonging to
++ * groups are not refcounted.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd;
++
++ BUG_ON(!entity->on_st);
++
++ entity->on_st = 0;
++ st->wsum -= entity->weight;
++ if (bfqq != NULL) {
++ sd = entity->sched_data;
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ bfq_idle_extract(st, entity);
++ bfq_forget_entity(st, entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
++ !bfq_gt(last_idle->finish, st->vtime)) {
++ /*
++ * Forget the whole idle tree, increasing the vtime past
++ * the last finish time of idle entities.
++ */
++ st->vtime = last_idle->finish;
++ }
++
++ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
++ bfq_put_idle_entity(st, first_idle);
++}
++
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++ struct bfq_entity *entity)
++{
++ struct bfq_service_tree *new_st = old_st;
++
++ if (entity->ioprio_changed) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned short prev_weight, new_weight;
++ struct bfq_data *bfqd = NULL;
++ struct rb_root *root;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd;
++ struct bfq_group *bfqg;
++#endif
++
++ if (bfqq != NULL)
++ bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++ else {
++ sd = entity->my_sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++ BUG_ON(!bfqd);
++ }
++#endif
++
++ BUG_ON(old_st->wsum < entity->weight);
++ old_st->wsum -= entity->weight;
++
++ if (entity->new_weight != entity->orig_weight) {
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio =
++ bfq_weight_to_ioprio(entity->orig_weight);
++ } else if (entity->new_ioprio != entity->ioprio) {
++ entity->ioprio = entity->new_ioprio;
++ entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++ } else
++ entity->new_weight = entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->ioprio_changed = 0;
++
++ /*
++ * NOTE: here we may be changing the weight too early,
++ * this will cause unfairness. The correct approach
++ * would have required additional complexity to defer
++ * weight changes to the proper time instants (i.e.,
++ * when entity->finish <= old_st->vtime).
++ */
++ new_st = bfq_entity_service_tree(entity);
++
++ prev_weight = entity->weight;
++ new_weight = entity->orig_weight *
++ (bfqq != NULL ? bfqq->wr_coeff : 1);
++ /*
++ * If the weight of the entity changes, remove the entity
++ * from its old weight counter (if there is a counter
++ * associated with the entity), and add it to the counter
++ * associated with its new weight.
++ */
++ if (prev_weight != new_weight) {
++ root = bfqq ? &bfqd->queue_weights_tree :
++ &bfqd->group_weights_tree;
++ bfq_weights_tree_remove(bfqd, entity, root);
++ }
++ entity->weight = new_weight;
++ /*
++ * Add the entity to its weights tree only if it is
++ * not associated with a weight-raised queue.
++ */
++ if (prev_weight != new_weight &&
++ (bfqq ? bfqq->wr_coeff == 1 : 1))
++ /* If we get here, root has been initialized. */
++ bfq_weights_tree_add(bfqd, entity, root);
++
++ new_st->wsum += entity->weight;
++
++ if (new_st != old_st)
++ entity->start = new_st->vtime;
++ }
++
++ return new_st;
++}
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ * service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service. By now,
++ * we keep it to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st;
++
++ for_each_entity(entity) {
++ st = bfq_entity_service_tree(entity);
++
++ entity->service += served;
++ BUG_ON(entity->service > entity->budget);
++ BUG_ON(st->wsum == 0);
++
++ st->vtime += bfq_delta(served, st->wsum);
++ bfq_forget_idle(st);
++ }
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
++}
++
++/**
++ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
++ * @bfqq: the queue that needs a service update.
++ *
++ * When it's not possible to be fair in the service domain, because
++ * a queue is not consuming its budget fast enough (the meaning of
++ * fast depends on the timeout parameter), we charge it a full
++ * budget. In this way we should obtain a sort of time-domain
++ * fairness among all the seeky/slow queues.
++ */
++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
++
++ bfq_bfqq_served(bfqq, entity->budget - entity->service);
++}
++
++/**
++ * __bfq_activate_entity - activate an entity.
++ * @entity: the entity being activated.
++ *
++ * Called whenever an entity is activated, i.e., it is not active and one
++ * of its children receives a new request, or has to be reactivated due to
++ * budget exhaustion. It uses the current budget of the entity (and the
++ * service received if @entity is active) of the queue to calculate its
++ * timestamps.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ if (entity == sd->in_service_entity) {
++ BUG_ON(entity->tree != NULL);
++ /*
++ * If we are requeueing the current entity we have
++ * to take care of not charging to it service it has
++ * not received.
++ */
++ bfq_calc_finish(entity, entity->service);
++ entity->start = entity->finish;
++ sd->in_service_entity = NULL;
++ } else if (entity->tree == &st->active) {
++ /*
++ * Requeueing an entity due to a change of some
++ * next_in_service entity below it. We reuse the
++ * old start time.
++ */
++ bfq_active_extract(st, entity);
++ } else if (entity->tree == &st->idle) {
++ /*
++ * Must be on the idle tree, bfq_idle_extract() will
++ * check for that.
++ */
++ bfq_idle_extract(st, entity);
++ entity->start = bfq_gt(st->vtime, entity->finish) ?
++ st->vtime : entity->finish;
++ } else {
++ /*
++ * The finish time of the entity may be invalid, and
++ * it is in the past for sure, otherwise the queue
++ * would have been on the idle tree.
++ */
++ entity->start = st->vtime;
++ st->wsum += entity->weight;
++ bfq_get_entity(entity);
++
++ BUG_ON(entity->on_st);
++ entity->on_st = 1;
++ }
++
++ st = __bfq_entity_update_weight_prio(st, entity);
++ bfq_calc_finish(entity, entity->budget);
++ bfq_active_insert(st, entity);
++}
++
++/**
++ * bfq_activate_entity - activate an entity and its ancestors if necessary.
++ * @entity: the entity to activate.
++ *
++ * Activate @entity and all the entities on the path from it to the root.
++ */
++static void bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd;
++
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_in_service(sd))
++ /*
++ * No need to propagate the activation to the
++ * upper entities, as they will be updated when
++ * the in-service entity is rescheduled.
++ */
++ break;
++ }
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @requeue: if false, the entity will not be put into the idle tree.
++ *
++ * Deactivate an entity, independently from its previous state. If the
++ * entity was not on a service tree just return, otherwise if it is on
++ * any scheduler tree, extract it from that tree, and if necessary
++ * and if the caller did not specify @requeue, put it on the idle tree.
++ *
++ * Return %1 if the caller should update the entity hierarchy, i.e.,
++ * if the entity was in service or if it was the next_in_service for
++ * its sched_data; return %0 otherwise.
++ */
++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ int was_in_service = entity == sd->in_service_entity;
++ int ret = 0;
++
++ if (!entity->on_st)
++ return 0;
++
++ BUG_ON(was_in_service && entity->tree != NULL);
++
++ if (was_in_service) {
++ bfq_calc_finish(entity, entity->service);
++ sd->in_service_entity = NULL;
++ } else if (entity->tree == &st->active)
++ bfq_active_extract(st, entity);
++ else if (entity->tree == &st->idle)
++ bfq_idle_extract(st, entity);
++ else if (entity->tree != NULL)
++ BUG();
++
++ if (was_in_service || sd->next_in_service == entity)
++ ret = bfq_update_next_in_service(sd);
++
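++	/*
++	 * Park the entity on the idle tree only if the caller asked for a
++	 * requeue and its finish time is still ahead of the tree's virtual
++	 * time; otherwise drop the reference taken at activation.
++	 */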
++ if (!requeue || !bfq_gt(entity->finish, st->vtime))
++ bfq_forget_entity(st, entity);
++ else
++ bfq_idle_insert(st, entity);
++
++ BUG_ON(sd->in_service_entity == entity);
++ BUG_ON(sd->next_in_service == entity);
++
++ return ret;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity.
++ * @entity: the entity to deactivate.
++ * @requeue: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd;
++ struct bfq_entity *parent;
++
++ for_each_entity_safe(entity, parent) {
++ sd = entity->sched_data;
++
++ if (!__bfq_deactivate_entity(entity, requeue))
++ /*
++ * The parent entity is still backlogged, and
++ * we don't need to update it as it is still
++ * in service.
++ */
++ break;
++
++ if (sd->next_in_service != NULL)
++ /*
++ * The parent entity is still backlogged and
++ * the budgets on the path towards the root
++ * need to be updated.
++ */
++ goto update;
++
++ /*
++		 * If we reach this point the parent is no longer backlogged
++		 * and we want to propagate the dequeue upwards.
++ */
++ requeue = 1;
++ }
++
++ return;
++
++update:
++ entity = parent;
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_in_service(sd))
++ break;
++ }
++}
++
++/**
++ * bfq_update_vtime - update vtime if necessary.
++ * @st: the service tree to act upon.
++ *
++ * If necessary update the service tree vtime to have at least one
++ * eligible entity, skipping to its start time. Assumes that the
++ * active tree of the device is not empty.
++ *
++ * NOTE: this hierarchical implementation updates vtimes quite often,
++ * we may end up with reactivated processes getting timestamps after a
++ * vtime skip done because we needed a ->first_active entity on some
++ * intermediate node.
++ */
++static void bfq_update_vtime(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry;
++ struct rb_node *node = st->active.rb_node;
++
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entry->min_start, st->vtime)) {
++ st->vtime = entry->min_start;
++ bfq_forget_idle(st);
++ }
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ * the smallest finish time
++ * @st: the service tree to select from.
++ *
++ * This function searches the first schedulable entity, starting from the
++ * root of the tree and going on the left every time on this side there is
++ * a subtree with at least one eligible (start <= vtime) entity. The path on
++ * the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry, *first = NULL;
++ struct rb_node *node = st->active.rb_node;
++
++ while (node != NULL) {
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++ if (!bfq_gt(entry->start, st->vtime))
++ first = entry;
++
++ BUG_ON(bfq_gt(entry->min_start, st->vtime));
++
++ if (node->rb_left != NULL) {
++ entry = rb_entry(node->rb_left,
++ struct bfq_entity, rb_node);
++ if (!bfq_gt(entry->min_start, st->vtime)) {
++ node = node->rb_left;
++ goto left;
++ }
++ }
++ if (first != NULL)
++ break;
++ node = node->rb_right;
++ }
++
++ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
++ return first;
++}
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * Update the virtual time in @st and return the first eligible entity
++ * it contains.
++ */
++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
++ bool force)
++{
++ struct bfq_entity *entity, *new_next_in_service = NULL;
++
++ if (RB_EMPTY_ROOT(&st->active))
++ return NULL;
++
++ bfq_update_vtime(st);
++ entity = bfq_first_active_entity(st);
++ BUG_ON(bfq_gt(entity->start, st->vtime));
++
++ /*
++ * If the chosen entity does not match with the sched_data's
++ * next_in_service and we are forcedly serving the IDLE priority
++ * class tree, bubble up budget update.
++ */
++ if (unlikely(force && entity != entity->sched_data->next_in_service)) {
++ new_next_in_service = entity;
++ for_each_entity(new_next_in_service)
++ bfq_update_budget(new_next_in_service);
++ }
++
++ return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ * @extract: if true the returned entity will be also extracted from @sd.
++ *
++ * NOTE: since we cache the next_in_service entity at each level of the
++ * hierarchy, the complexity of the lookup can be decreased with
++ * absolutely no effort just returning the cached next_in_service value;
++ * we prefer to do full lookups to test the consistency of the data
++ * structures.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd)
++{
++ struct bfq_service_tree *st = sd->service_tree;
++ struct bfq_entity *entity;
++ int i = 0;
++
++ BUG_ON(sd->in_service_entity != NULL);
++
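++	/*
++	 * If the idle class has not been served for longer than
++	 * BFQ_CL_IDLE_TIMEOUT, serve it now (even though higher-priority
++	 * classes may be backlogged) so that idle-class queues are not
++	 * starved forever.
++	 */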
++ if (bfqd != NULL &&
++ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
++ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
++ true);
++ if (entity != NULL) {
++ i = BFQ_IOPRIO_CLASSES - 1;
++ bfqd->bfq_class_idle_last_service = jiffies;
++ sd->next_in_service = entity;
++ }
++ }
++ for (; i < BFQ_IOPRIO_CLASSES; i++) {
++ entity = __bfq_lookup_next_entity(st + i, false);
++ if (entity != NULL) {
++ if (extract) {
++ bfq_check_next_in_service(sd, entity);
++ bfq_active_extract(st + i, entity);
++ sd->in_service_entity = entity;
++ sd->next_in_service = NULL;
++ }
++ break;
++ }
++ }
++
++ return entity;
++}
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++ struct bfq_entity *entity = NULL;
++ struct bfq_sched_data *sd;
++ struct bfq_queue *bfqq;
++
++ BUG_ON(bfqd->in_service_queue != NULL);
++
++ if (bfqd->busy_queues == 0)
++ return NULL;
++
++ sd = &bfqd->root_group->sched_data;
++ for (; sd != NULL; sd = entity->my_sched_data) {
++ entity = bfq_lookup_next_entity(sd, 1, bfqd);
++ BUG_ON(entity == NULL);
++ entity->service = 0;
++ }
++
++ bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(bfqq == NULL);
++
++ return bfqq;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++ if (bfqd->in_service_bic != NULL) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++
++ bfqd->in_service_queue = NULL;
++ del_timer(&bfqd->idle_slice_timer);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq == bfqd->in_service_queue)
++ __bfq_bfqd_reset_in_service(bfqd);
++
++ bfq_deactivate_entity(entity, requeue);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_activate_entity(entity);
++}
++
++/*
++ * Called when the bfqq no longer has requests pending, remove it from
++ * the service tree.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++ bfq_clear_bfqq_busy(bfqq);
++
++ BUG_ON(bfqd->busy_queues == 0);
++ bfqd->busy_queues--;
++
++ if (!bfqq->dispatched) {
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->busy_in_flight_queues);
++ bfqd->busy_in_flight_queues--;
++ if (bfq_bfqq_constantly_seeky(bfqq)) {
++ BUG_ON(!bfqd->
++ const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ }
++ if (bfqq->wr_coeff > 1)
++ bfqd->wr_busy_queues--;
++
++ bfq_deactivate_bfqq(bfqd, bfqq, requeue);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqq == bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ bfq_mark_bfqq_busy(bfqq);
++ bfqd->busy_queues++;
++
++ if (!bfqq->dispatched) {
++ if (bfqq->wr_coeff == 1)
++ bfq_weights_tree_add(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ bfqd->busy_in_flight_queues++;
++ if (bfq_bfqq_constantly_seeky(bfqq))
++ bfqd->const_seeky_busy_in_flight_queues++;
++ }
++ }
++ if (bfqq->wr_coeff > 1)
++ bfqd->wr_busy_queues++;
++}
+diff -Nur linux-3.14.40.orig/block/blk-core.c linux-3.14.40/block/blk-core.c
+--- linux-3.14.40.orig/block/blk-core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/block/blk-core.c 2015-05-01 14:57:58.359427001 -0500
+@@ -1928,7 +1928,7 @@
+ * in some cases below, so export this function.
+ * Request stacking drivers like request-based dm may change the queue
+ * limits while requests are in the queue (e.g. dm's table swapping).
+- * Such request stacking drivers should check those requests agaist
++ * Such request stacking drivers should check those requests against
+ * the new queue limits again when they dispatch those requests,
+ * although such checkings are also done against the old queue limits
+ * when submitting requests.
+diff -Nur linux-3.14.40.orig/block/blk-map.c linux-3.14.40/block/blk-map.c
+--- linux-3.14.40.orig/block/blk-map.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/block/blk-map.c 2015-05-01 14:57:58.367427001 -0500
+@@ -285,7 +285,7 @@
+ *
+ * Description:
+ * Data will be mapped directly if possible. Otherwise a bounce
+- * buffer is used. Can be called multple times to append multple
++ * buffer is used. Can be called multiple times to append multiple
+ * buffers.
+ */
+ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+diff -Nur linux-3.14.40.orig/block/Kconfig.iosched linux-3.14.40/block/Kconfig.iosched
+--- linux-3.14.40.orig/block/Kconfig.iosched 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/block/Kconfig.iosched 2015-05-01 14:57:58.367427001 -0500
+@@ -39,6 +39,27 @@
+ ---help---
+ Enable group IO scheduling in CFQ.
+
++config IOSCHED_BFQ
++ tristate "BFQ I/O scheduler"
++ default n
++ ---help---
++ The BFQ I/O scheduler tries to distribute bandwidth among
++ all processes according to their weights.
++ It aims at distributing the bandwidth as desired, independently of
++ the disk parameters and with any workload. It also tries to
++ guarantee low latency to interactive and soft real-time
++ applications. If compiled built-in (saying Y here), BFQ can
++ be configured to support hierarchical scheduling.
++
++config CGROUP_BFQIO
++ bool "BFQ hierarchical scheduling support"
++ depends on CGROUPS && IOSCHED_BFQ=y
++ default n
++ ---help---
++ Enable hierarchical scheduling in BFQ, using the cgroups
++ filesystem interface. The name of the subsystem will be
++ bfqio.
++
+ choice
+ prompt "Default I/O scheduler"
+ default DEFAULT_CFQ
+@@ -52,6 +73,16 @@
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
++ config DEFAULT_BFQ
++ bool "BFQ" if IOSCHED_BFQ=y
++ help
++ Selects BFQ as the default I/O scheduler which will be
++ used by default for all block devices.
++ The BFQ I/O scheduler aims at distributing the bandwidth
++ as desired, independently of the disk parameters and with
++ any workload. It also tries to guarantee low latency to
++ interactive and soft real-time applications.
++
+ config DEFAULT_NOOP
+ bool "No-op"
+
+@@ -61,6 +92,7 @@
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
++ default "bfq" if DEFAULT_BFQ
+ default "noop" if DEFAULT_NOOP
+
+ endmenu
+diff -Nur linux-3.14.40.orig/block/Makefile linux-3.14.40/block/Makefile
+--- linux-3.14.40.orig/block/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/block/Makefile 2015-05-01 14:57:58.367427001 -0500
+@@ -16,6 +16,7 @@
+ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
++obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
+diff -Nur linux-3.14.40.orig/crypto/blkcipher.c linux-3.14.40/crypto/blkcipher.c
+--- linux-3.14.40.orig/crypto/blkcipher.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/crypto/blkcipher.c 2015-05-01 14:57:58.383427001 -0500
+@@ -70,14 +70,12 @@
+ return max(start, end_page);
+ }
+
+-static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
+- struct blkcipher_walk *walk,
++static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
+ unsigned int bsize)
+ {
+ u8 *addr;
+- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+
+- addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
++ addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ addr = blkcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize, 1);
+ return bsize;
+@@ -105,7 +103,6 @@
+ int blkcipher_walk_done(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk, int err)
+ {
+- struct crypto_blkcipher *tfm = desc->tfm;
+ unsigned int nbytes = 0;
+
+ if (likely(err >= 0)) {
+@@ -117,7 +114,7 @@
+ err = -EINVAL;
+ goto err;
+ } else
+- n = blkcipher_done_slow(tfm, walk, n);
++ n = blkcipher_done_slow(walk, n);
+
+ nbytes = walk->total - n;
+ err = 0;
+@@ -136,7 +133,7 @@
+ }
+
+ if (walk->iv != desc->info)
+- memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
++ memcpy(desc->info, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+@@ -226,22 +223,20 @@
+ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk)
+ {
+- struct crypto_blkcipher *tfm = desc->tfm;
+- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+ unsigned int bsize;
+ unsigned int n;
+ int err;
+
+ n = walk->total;
+- if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
++ if (unlikely(n < walk->cipher_blocksize)) {
+ desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+ return blkcipher_walk_done(desc, walk, -EINVAL);
+ }
+
+ walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+ BLKCIPHER_WALK_DIFF);
+- if (!scatterwalk_aligned(&walk->in, alignmask) ||
+- !scatterwalk_aligned(&walk->out, alignmask)) {
++ if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
++ !scatterwalk_aligned(&walk->out, walk->alignmask)) {
+ walk->flags |= BLKCIPHER_WALK_COPY;
+ if (!walk->page) {
+ walk->page = (void *)__get_free_page(GFP_ATOMIC);
+@@ -250,12 +245,12 @@
+ }
+ }
+
+- bsize = min(walk->blocksize, n);
++ bsize = min(walk->walk_blocksize, n);
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+- err = blkcipher_next_slow(desc, walk, bsize, alignmask);
++ err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
+ goto set_phys_lowmem;
+ }
+
+@@ -277,28 +272,26 @@
+ return err;
+ }
+
+-static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
+- struct crypto_blkcipher *tfm,
+- unsigned int alignmask)
+-{
+- unsigned bs = walk->blocksize;
+- unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
+- unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+- unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+- (alignmask + 1);
++static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
++{
++ unsigned bs = walk->walk_blocksize;
++ unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
++ unsigned int size = aligned_bs * 2 +
++ walk->ivsize + max(aligned_bs, walk->ivsize) -
++ (walk->alignmask + 1);
+ u8 *iv;
+
+- size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
++ size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+ walk->buffer = kmalloc(size, GFP_ATOMIC);
+ if (!walk->buffer)
+ return -ENOMEM;
+
+- iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
++ iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+- iv = blkcipher_get_spot(iv, ivsize);
++ iv = blkcipher_get_spot(iv, walk->ivsize);
+
+- walk->iv = memcpy(iv, walk->iv, ivsize);
++ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+ }
+
+@@ -306,7 +299,10 @@
+ struct blkcipher_walk *walk)
+ {
+ walk->flags &= ~BLKCIPHER_WALK_PHYS;
+- walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->cipher_blocksize = walk->walk_blocksize;
++ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
++ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
+ return blkcipher_walk_first(desc, walk);
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
+@@ -315,7 +311,10 @@
+ struct blkcipher_walk *walk)
+ {
+ walk->flags |= BLKCIPHER_WALK_PHYS;
+- walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->cipher_blocksize = walk->walk_blocksize;
++ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
++ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
+ return blkcipher_walk_first(desc, walk);
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
+@@ -323,9 +322,6 @@
+ static int blkcipher_walk_first(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk)
+ {
+- struct crypto_blkcipher *tfm = desc->tfm;
+- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+-
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+@@ -335,8 +331,8 @@
+
+ walk->buffer = NULL;
+ walk->iv = desc->info;
+- if (unlikely(((unsigned long)walk->iv & alignmask))) {
+- int err = blkcipher_copy_iv(walk, tfm, alignmask);
++ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
++ int err = blkcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+@@ -353,11 +349,28 @@
+ unsigned int blocksize)
+ {
+ walk->flags &= ~BLKCIPHER_WALK_PHYS;
+- walk->blocksize = blocksize;
++ walk->walk_blocksize = blocksize;
++ walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
++ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
++ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
+ return blkcipher_walk_first(desc, walk);
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
+
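++/*
++ * Like blkcipher_walk_virt_block(), but the cipher block size, IV size and
++ * alignment mask are taken from the given AEAD transform instead of
++ * desc->tfm, so AEAD implementations can reuse the blkcipher walk helpers.
++ */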
++int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
++ struct blkcipher_walk *walk,
++ struct crypto_aead *tfm,
++ unsigned int blocksize)
++{
++ walk->flags &= ~BLKCIPHER_WALK_PHYS;
++ walk->walk_blocksize = blocksize;
++ walk->cipher_blocksize = crypto_aead_blocksize(tfm);
++ walk->ivsize = crypto_aead_ivsize(tfm);
++ walk->alignmask = crypto_aead_alignmask(tfm);
++ return blkcipher_walk_first(desc, walk);
++}
++EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
++
+ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+ {
+diff -Nur linux-3.14.40.orig/crypto/tcrypt.c linux-3.14.40/crypto/tcrypt.c
+--- linux-3.14.40.orig/crypto/tcrypt.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/crypto/tcrypt.c 2015-05-01 14:57:58.395427001 -0500
+@@ -33,6 +33,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/timex.h>
+ #include <linux/interrupt.h>
++#include <linux/sched.h>
+ #include "tcrypt.h"
+ #include "internal.h"
+
+@@ -447,6 +448,7 @@
+ goto out;
+ }
+
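++	/*
++	 * Give other tasks a chance to run between the long-running
++	 * synchronous speed tests.
++	 */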
++ schedule();
+ printk("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+@@ -713,6 +715,7 @@
+ if (speed[i].klen)
+ crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
++ schedule();
+ printk(KERN_INFO "test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+@@ -953,6 +956,7 @@
+ break;
+ }
+
++ schedule();
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+@@ -1118,6 +1122,7 @@
+ goto out_free_req;
+ }
+
++ schedule();
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+@@ -1199,6 +1204,7 @@
+ printk("alg %s ", *name);
+ printk(crypto_has_alg(*name, 0, 0) ?
+ "found\n" : "not found\n");
++ schedule();
+ name++;
+ }
+ }
+diff -Nur linux-3.14.40.orig/Documentation/ABI/testing/sysfs-class-net-statistics linux-3.14.40/Documentation/ABI/testing/sysfs-class-net-statistics
+--- linux-3.14.40.orig/Documentation/ABI/testing/sysfs-class-net-statistics 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/ABI/testing/sysfs-class-net-statistics 2015-05-01 14:57:58.395427001 -0500
+@@ -0,0 +1,201 @@
++What: /sys/class/<iface>/statistics/collisions
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of collisions seen by this network device.
++ This value might not be relevant with all MAC layers.
++
++What: /sys/class/<iface>/statistics/multicast
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of multicast packets received by this
++ network device.
++
++What: /sys/class/<iface>/statistics/rx_bytes
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of bytes received by this network device.
++ See the network driver for the exact meaning of when this
++ value is incremented.
++
++What: /sys/class/<iface>/statistics/rx_compressed
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of compressed packets received by this
++ network device. This value might only be relevant for interfaces
++ that support packet compression (e.g: PPP).
++
++What: /sys/class/<iface>/statistics/rx_crc_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets received with a CRC (FCS) error
++ by this network device. Note that the specific meaning might
++ depend on the MAC layer used by the interface.
++
++What: /sys/class/<iface>/statistics/rx_dropped
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets received by the network device
++ but dropped, that are not forwarded to the upper layers for
++ packet processing. See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_fifo_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of receive FIFO errors seen by this
++ network device. See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_frame_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received frames with error, such as
++ alignment errors. Note that the specific meaning depends on
++		the MAC layer protocol used. See the network driver for
++ the exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_length_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++		Indicates the number of received packets with a length
++ error, oversized or undersized. See the network driver for the
++ exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_missed_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received packets that have been missed
++ due to lack of capacity in the receive side. See the network
++ driver for the exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_over_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of received packets that are oversized
++ compared to what the network device is configured to accept
++ (e.g: larger than MTU). See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/rx_packets
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the total number of good packets received by this
++ network device.
++
++What: /sys/class/<iface>/statistics/tx_aborted_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets that have been aborted
++ during transmission by a network device (e.g: because of
++ a medium collision). See the network driver for the exact
++ meaning of this value.
++
++What: /sys/class/<iface>/statistics/tx_bytes
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of bytes transmitted by a network
++ device. See the network driver for the exact meaning of this
++ value, in particular whether this accounts for all successfully
++ transmitted packets or all packets that have been queued for
++ transmission.
++
++What: /sys/class/<iface>/statistics/tx_carrier_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets that could not be transmitted
++ because of carrier errors (e.g: physical link down). See the
++ network driver for the exact meaning of this value.
++
++What: /sys/class/<iface>/statistics/tx_compressed
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of transmitted compressed packets. Note
++ this might only be relevant for devices that support
++ compression (e.g: PPP).
++
++What: /sys/class/<iface>/statistics/tx_dropped
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets dropped during transmission.
++ See the driver for the exact reasons as to why the packets were
++ dropped.
++
++What: /sys/class/<iface>/statistics/tx_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets in error during transmission by
++ a network device. See the driver for the exact reasons as to
++ why the packets were dropped.
++
++What: /sys/class/<iface>/statistics/tx_fifo_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets having caused a transmit
++ FIFO error. See the driver for the exact reasons as to why the
++ packets were dropped.
++
++What: /sys/class/<iface>/statistics/tx_heartbeat_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets transmitted that have been
++ reported as heartbeat errors. See the driver for the exact
++ reasons as to why the packets were dropped.
++
++What: /sys/class/<iface>/statistics/tx_packets
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets transmitted by a network
++ device. See the driver for whether this reports the number of all
++ attempted or successful transmissions.
++
++What: /sys/class/<iface>/statistics/tx_window_errors
++Date: April 2005
++KernelVersion: 2.6.12
++Contact: netdev@vger.kernel.org
++Description:
++ Indicates the number of packets not successfully transmitted
++ due to a window collision. The specific meaning depends on the
++ MAC layer used. On Ethernet this is usually used to report
++		late collision errors.
+diff -Nur linux-3.14.40.orig/Documentation/arm64/booting.txt linux-3.14.40/Documentation/arm64/booting.txt
+--- linux-3.14.40.orig/Documentation/arm64/booting.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/arm64/booting.txt 2015-05-01 14:57:58.407427001 -0500
+@@ -111,8 +111,14 @@
+ - Caches, MMUs
+ The MMU must be off.
+ Instruction cache may be on or off.
+- Data cache must be off and invalidated.
+- External caches (if present) must be configured and disabled.
++ The address range corresponding to the loaded kernel image must be
++ cleaned to the PoC. In the presence of a system cache or other
++ coherent masters with caches enabled, this will typically require
++ cache maintenance by VA rather than set/way operations.
++ System caches which respect the architected cache maintenance by VA
++ operations must be configured and may be enabled.
++ System caches which do not respect architected cache maintenance by VA
++ operations (not recommended) must be configured and disabled.
+
+ - Architected timers
+ CNTFRQ must be programmed with the timer frequency and CNTVOFF must
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt linux-3.14.40/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/arm/imx/busfreq-imx6.txt 2015-05-01 14:57:58.419427001 -0500
+@@ -0,0 +1,64 @@
++Freescale Busfreq driver
++
++It is a generic driver that manages the frequency of the DDR, AHB and AXI buses in the iMX6x architecture.
++It works for both SMP and UP systems and for both DDR3 and LPDDR2 memory types.
++
++Required properties are listed below:
++- compatible: should be "fsl,imx6_busfreq"
++- clocks: Lists the various clocks used by the busfreq driver
++- interrupts: Lists the interrupts used by the busfreq driver. This is needed only on SMP architectures.
++- fsl,max_ddr_freq: The max ddr freq for this chip.
++
++Examples:
++For SOC imx6q.dtsi:
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc";
++ interrupts = <0 107 0x04>, <0 112 0x4>, <0 113 0x4>, <0 114 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1", "irq_busfreq_2", "irq_busfreq_3";
++ fsl,max_ddr_freq = <528000000>;
++ };
++
++The Freescale Busfreq driver supports the following setpoints for the DDR freq:
++enum bus_freq_mode {
++ BUS_FREQ_HIGH, -> The max freq the SOC supports
++ BUS_FREQ_MED, -> Medium setpoint (ex 400MHz for DDR3 when the max is 528MHz)
++ BUS_FREQ_AUDIO, -> Audio playback freq (50MHz)
++ BUS_FREQ_LOW, -> Low power IDLE freq (24MHz)
++};
++
++Currently the Freescale Busfreq driver implementation requires drivers to call the following APIs:
++1. request_bus_freq(enum bus_freq_mode):
++ The driver is requesting the system and ddr freq to be set to the requested value. The driver should call this
++ API before it even enables its clocks.
++
++2. release_bus_freq(enum bus_freq_mode):
++ The driver no longer needs the system and ddr freq at the required value. The driver should call this API after
++ its work is done and it has disabled its clocks.
++
++Examples:
++In the IPU driver, the requesting and releasing of the required bus frequency is tied into the runtime PM implementation:
++
++int ipu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ dev_dbg(dev, "ipu busfreq high release.\n");
++
++ return 0;
++}
++
++int ipu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++	dev_dbg(dev, "ipu busfreq high request.\n");
++
++ return 0;
++}
++
++static const struct dev_pm_ops ipu_pm_ops = {
++ SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(ipu_suspend, ipu_resume)
++};
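++
++For drivers that do not use runtime PM, the same ordering can be sketched as
++below (illustrative only: struct foo_dev, its clk member and the function
++names are made up for this example and are not from an in-tree driver):
++
++struct foo_dev {
++	struct clk *clk;
++};
++
++static int foo_start(struct foo_dev *foo)
++{
++	/* Request the high setpoint before enabling any clocks. */
++	request_bus_freq(BUS_FREQ_HIGH);
++
++	return clk_prepare_enable(foo->clk);
++}
++
++static void foo_stop(struct foo_dev *foo)
++{
++	clk_disable_unprepare(foo->clk);
++
++	/* Work is done and the clocks are off: release the bus frequency. */
++	release_bus_freq(BUS_FREQ_HIGH);
++}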
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/arm/imx/gpc.txt linux-3.14.40/Documentation/devicetree/bindings/arm/imx/gpc.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/arm/imx/gpc.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/arm/imx/gpc.txt 2015-05-01 14:57:58.419427001 -0500
+@@ -0,0 +1,20 @@
++Freescale imx GPC bindings
++
++Optional properties:
++- fsl,cpu_pupscr_sw2iso: for powering up the CPU, the number of 32K clock cycles the PGC will wait before negating the isolation signal.
++- fsl,cpu_pupscr_sw: for powering up the CPU, the number of 32K clock cycles the PGC will wait before asserting the isolation signal.
++- fsl,cpu_pdnscr_iso2sw: for powering down the CPU, the number of ipg clock cycles the PGC will wait before negating the isolation signal.
++- fsl,cpu_pdnscr_iso: for powering down the CPU, the number of ipg clock cycles the PGC will wait before asserting the isolation signal.
++
++These properties adjust the GPC PGC CPU power up/down settings. If a property is not present in the dts, the default
++value in the GPC PGC registers will be used.
++
++
++Example:
++
++ &gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/arm/pmu.txt linux-3.14.40/Documentation/devicetree/bindings/arm/pmu.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/arm/pmu.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/arm/pmu.txt 2015-05-01 14:57:58.435427001 -0500
+@@ -17,6 +17,9 @@
+ "arm,arm1176-pmu"
+ "arm,arm1136-pmu"
+ - interrupts : 1 combined interrupt or 1 per core.
++- cluster : a phandle to the cluster to which it belongs
++           If there is more than one cluster with the same CPU type,
++           there should be a separate PMU node per cluster.
+
+ Example:
+
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/ata/ahci-platform.txt linux-3.14.40/Documentation/devicetree/bindings/ata/ahci-platform.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/ata/ahci-platform.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/ata/ahci-platform.txt 2015-05-01 14:57:58.443427001 -0500
+@@ -4,12 +4,19 @@
+ Each SATA controller should have its own node.
+
+ Required properties:
+-- compatible : compatible list, contains "snps,spear-ahci"
++- compatible : compatible list, contains "snps,spear-ahci",
++ "fsl,imx53-ahci" or "fsl,imx6q-ahci"
+ - interrupts : <interrupt mapping for SATA IRQ>
+ - reg : <registers mapping>
+
+ Optional properties:
+ - dma-coherent : Present if dma operations are coherent
++- clocks : a list of phandle + clock specifier pairs
++- target-supply : regulator for SATA target power
++
++"fsl,imx53-ahci", "fsl,imx6q-ahci" required properties:
++- clocks : must contain the sata, sata_ref and ahb clocks
++- clock-names : must contain "ahb" for the ahb clock
+
+ Example:
+ sata@ffe08000 {
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/clock/imx6q-clock.txt linux-3.14.40/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/clock/imx6q-clock.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/clock/imx6q-clock.txt 2015-05-01 14:57:58.451427001 -0500
+@@ -89,8 +89,6 @@
+ gpu3d_shader 74
+ ipu1_podf 75
+ ipu2_podf 76
+- ldb_di0_podf 77
+- ldb_di1_podf 78
+ ipu1_di0_pre 79
+ ipu1_di1_pre 80
+ ipu2_di0_pre 81
+@@ -220,6 +218,20 @@
+ lvds2_sel 205
+ lvds1_gate 206
+ lvds2_gate 207
++ gpt_3m 208
++ video_27m 209
++ ldb_di0_div_7 210
++ ldb_di1_div_7 211
++ ldb_di0_div_sel 212
++ ldb_di1_div_sel 213
++ caam_mem 214
++ caam_aclk 215
++ caam_ipg 216
++ epit1 217
++ epit2 218
++ tzasc2 219
++ lvds1_in 220
++ lvds1_out 221
+
+ Examples:
+
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt linux-3.14.40/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt 2015-05-01 14:57:58.455427001 -0500
+@@ -47,6 +47,7 @@
+ 20 ASRC
+ 21 ESAI
+ 22 SSI Dual FIFO (needs firmware ver >= 2)
++ 23 HDMI Audio
+
+ The third cell specifies the transfer priority as below.
+
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt linux-3.14.40/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt 2015-05-01 14:57:58.455427001 -0500
+@@ -0,0 +1,146 @@
++* FSL IPUv3 Display/FB
++
++The FSL IPUv3 is the Image Processing Unit version 3, part of the video and
++graphics subsystem in an application processor. The goal of the IPU is to
++provide comprehensive support for the flow of data from an image sensor
++and/or to a display device.
++
++Two IPU units are on the imx6q SOC while only one IPU unit on the imx6dl SOC.
++Each IPU unit has two display interfaces.
++
++For an LDB/LVDS panel there are two LVDS channels (LVDS0 and LVDS1) which can
++transfer video data; these two channels can be used in
++split/dual/single/separate mode.
++-split mode means display data from DI0 or DI1 will be sent to both channels,
++ LVDS0+LVDS1.
++-dual mode means display data from DI0 or DI1 will be duplicated on LVDS0
++ and LVDS1, that is, LVDS0 and LVDS1 carry the same content.
++-single mode means only one channel is used: DI0/DI1->LVDS0 or DI0/DI1->LVDS1.
++-separate mode means DI0/DI1->LVDS0 and DI0/DI1->LVDS1 can work
++ at the same time.
++ "ldb=spl0/1" -- split mode on DI0/1
++ "ldb=dul0/1" -- dual mode on DI0/1
++ "ldb=sin0/1" -- single mode on LVDS0/1
++ "ldb=sep0/1" -- separate mode begin from LVDS0/1
++
++Required properties for IPU:
++- bypass_reset : bypass reset to avoid the display channel being
++  stopped by probe, since it may already have been started by the bootloader: 0 or 1.
++- compatible : should be "fsl,imx6q-ipu".
++- reg : the register address range.
++- interrupts : the error and sync interrupts request.
++- clocks : the clock sources that it depends on.
++- clock-names: the related clock names.
++- resets : IPU reset specifier. See reset.txt and fsl,imx-src.txt in
++ Documentation/devicetree/bindings/reset/ for details.
++
++Required properties for fb:
++- compatible : should be "fsl,mxc_sdc_fb".
++- disp_dev : display device: "ldb", "lcd", "hdmi", "mipi_dsi".
++- mode_str : video mode string: "LDB-XGA" or "LDB-1080P60" for ldb,
++ "CLAA-WVGA" for lcd, "TRULY-WVGA" for TRULY mipi_dsi lcd panel,
++ "1920x1080M@60" for hdmi.
++- default_bpp : default bits per pixel: 8/16/24/32
++- int_clk : use internal clock as pixel clock: 0 or 1
++- late_init : to avoid the display channel being re-initialized,
++  as we've probably set up the channel in the bootloader: 0 or 1
++- interface_pix_fmt : display interface pixel format as below:
++ RGB666 IPU_PIX_FMT_RGB666
++ RGB565 IPU_PIX_FMT_RGB565
++ RGB24 IPU_PIX_FMT_RGB24
++ BGR24 IPU_PIX_FMT_BGR24
++ GBR24 IPU_PIX_FMT_GBR24
++ YUV444 IPU_PIX_FMT_YUV444
++ LVDS666 IPU_PIX_FMT_LVDS666
++ YUYV IPU_PIX_FMT_YUYV
++ UYVY IPU_PIX_FMT_UYVY
++ YVYV IPU_PIX_FMT_YVYU
++ VYUY IPU_PIX_FMT_VYUY
++
++Required properties for display:
++- compatible : should be "fsl,lcd" for lcd panel, "fsl,imx6q-ldb" for ldb
++- reg : the register address range, if the device has one.
++- interrupts : the error and sync interrupts, if the device has any.
++- clocks : the clock sources that it depends on, if any.
++- clock-names: the related clock names, if any.
++- ipu_id : ipu id for the first display device: 0 or 1
++- disp_id : display interface id for the first display interface: 0 or 1
++- default_ifmt : same as the display interface pixel format above, for lcd
++- pinctrl-names : should be "default"
++- pinctrl-0 : should be pinctrl_ipu1_1 or pinctrl_ipu2_1, which depends on the
++ IPU connected.
++- sec_ipu_id : secondary ipu id for the second display device(ldb only): 0 or 1
++- sec_disp_id : secondary display interface id for the second display
++ device(ldb only): 0 or 1
++- ext_ref : reference resistor select for ldb only: 0 or 1
++- mode : ldb mode as below:
++ spl0 LDB_SPL_DI0
++ spl1 LDB_SPL_DI1
++ dul0 LDB_DUL_DI0
++ dul1 LDB_DUL_DI1
++ sin0 LDB_SIN0
++ sin1 LDB_SIN1
++ sep0 LDB_SEP0
++ sep1 LDB_SEP1
++- gpr : the mux controller for the display engine's display interfaces and the display encoder
++ (only valid for mipi dsi now).
++- disp-power-on-supply : the regulator to control display panel's power.
++ (only valid for mipi dsi now).
++- resets : the gpio pin to reset the display device(only valid for mipi display panel now).
++- lcd_panel : the video mode name for the display device(only valid for mipi display panel now).
++- dev_id : the display engine's identity within the system, which is intended to replace ipu_id
++ (only valid for mipi dsi now).
++
++Example for IPU:
++ ipu1: ipu@02400000 {
++ compatible = "fsl,imx6q-ipu";
++ reg = <0x02400000 0x400000>;
++ interrupts = <0 6 0x4 0 5 0x4>;
++ clocks = <&clks 130>, <&clks 131>, <&clks 132>,
++ <&clks 39>, <&clks 40>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
++ resets = <&src 2>;
++ bypass_reset = <0>;
++ };
++
++Example for fb:
++ fb0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "ldb";
++ interface_pix_fmt = "RGB666";
++ mode_str ="LDB-XGA";
++ default_bpp = <16>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++
++Example for ldb display:
++ ldb@020e0000 {
++ ipu_id = <1>;
++ disp_id = <0>;
++ ext_ref = <1>;
++ mode = "sep0";
++ sec_ipu_id = <1>;
++ sec_disp_id = <1>;
++ status = "okay";
++ };
++
++Example for mipi dsi display:
++ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6q-mipi-dsi";
++ reg = <0x021e0000 0x4000>;
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks 138>, <&clks 204>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
++ dev_id = <0>;
++ disp_id = <0>;
++ lcd_panel = "TRULY-WVGA";
++ disp-power-on-supply = <&reg_mipi_dsi_pwr_on>
++ resets = <&mipi_dsi_reset>;
++ status = "okay";
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt linux-3.14.40/Documentation/devicetree/bindings/leds/leds-pwm.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/leds/leds-pwm.txt 2015-05-01 14:57:58.459427001 -0500
+@@ -13,6 +13,8 @@
+ For the pwms and pwm-names property please refer to:
+ Documentation/devicetree/bindings/pwm/pwm.txt
+ - max-brightness : Maximum brightness possible for the LED
++- active-low : (optional) For PWMs where the LED is wired to supply
++ rather than ground.
+ - label : (optional)
+ see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger : (optional)
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/mailbox/mailbox.txt linux-3.14.40/Documentation/devicetree/bindings/mailbox/mailbox.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/mailbox/mailbox.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/mailbox/mailbox.txt 2015-05-01 14:57:58.459427001 -0500
+@@ -0,0 +1,33 @@
++* Generic Mailbox Controller and client driver bindings
++
++Generic binding to provide a way for Mailbox controller drivers to
++assign appropriate mailbox channel to client drivers.
++
++* Mailbox Controller
++
++Required property:
++- #mbox-cells: Must be at least 1. Number of cells in a mailbox
++ specifier.
++
++Example:
++ mailbox: mailbox {
++ ...
++ #mbox-cells = <1>;
++ };
++
++
++* Mailbox Client
++
++Required property:
++- mbox: List of phandle and mailbox channel specifier.
++
++- mbox-names: List of identifier strings for each mailbox channel
++ required by the client.
++
++Example:
++ pwr_cntrl: power {
++ ...
++ mbox-names = "pwr-ctrl", "rpc";
++ mbox = <&mailbox 0
++ &mailbox 1>;
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/mlb/mlb150.txt linux-3.14.40/Documentation/devicetree/bindings/mlb/mlb150.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/mlb/mlb150.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/mlb/mlb150.txt 2015-05-01 14:57:58.463427001 -0500
+@@ -0,0 +1,22 @@
++* Freescale Media Local Bus Host Controller (MLB) for i.MX6Q/DL
++
++The Media Local Bus Host Controller on the Freescale i.MX family
++provides an interface to a MOST network.
++
++Required properties:
++- compatible : Should be "fsl,<chip>-mlb150"
++- reg : Should contain mlb registers location and length
++- interrupts : Should contain mlb interrupt
++- clocks: Should contain the mlb clock sources
++- clock-names: Should be the names of mlb clock sources
++- iram : phandle pointing to the SRAM device node
++
++Examples:
++mlb@0218c000 {
++ compatible = "fsl,imx6q-mlb150";
++ reg = <0x0218c000 0x4000>;
++ interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
++ clocks = <&clks 139>, <&clks 175>;
++ clock-names = "mlb", "pll8_mlb";
++ iram = <&ocram>;
++};
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/mmc/mmc.txt linux-3.14.40/Documentation/devicetree/bindings/mmc/mmc.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/mmc/mmc.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/mmc/mmc.txt 2015-05-01 14:57:58.471427001 -0500
+@@ -5,6 +5,8 @@
+ Interpreted by the OF core:
+ - reg: Registers location and length.
+ - interrupts: Interrupts used by the MMC controller.
++- clocks: Clocks needed for the host controller, if any.
++- clock-names: Goes with clocks above.
+
+ Card detection:
+ If no property below is supplied, host native card detect is used.
+@@ -30,6 +32,15 @@
+ - cap-sdio-irq: enable SDIO IRQ signalling on this interface
+ - full-pwr-cycle: full power cycle of the card is supported
+
++Card power and reset control:
++The following properties can be specified for cases where the MMC
++peripheral needs additional reset, regulator and clock lines. It is for
++example common for WiFi/BT adapters to have these separate from the main
++MMC bus:
++ - card-reset-gpios: Specify GPIOs for card reset (reset active low)
++ - card-external-vcc-supply: Regulator to drive (independent) card VCC
++ - clock with name "card_ext_clock": External clock provided to the card
++
+ *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
+ polarity properties, we have to fix the meaning of the "normal" and "inverted"
+ line levels. We choose to follow the SDHCI standard, which specifies both those
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt linux-3.14.40/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -71,6 +71,13 @@
+ name for integer state ID 0, list entry 1 for state ID 1, and
+ so on.
+
++pinctrl-assert-gpios:
++ List of phandles, each pointing at a GPIO which is used by some
++ board design to steer pins between two peripherals on the board.
++	It acts like a board-level pin multiplexer, choosing different
++	functions for the given pins by pulling the GPIOs up or down. See
++	bindings/gpio/gpio.txt for details of how to specify a GPIO.
++
+ For example:
+
+ /* For a client device requiring named states */
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/reset/gpio-reset.txt linux-3.14.40/Documentation/devicetree/bindings/reset/gpio-reset.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/reset/gpio-reset.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/reset/gpio-reset.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -0,0 +1,35 @@
++GPIO reset controller
++=====================
++
++A GPIO reset controller controls a single GPIO that is connected to the reset
++pin of a peripheral IC. Please also refer to reset.txt in this directory for
++common reset controller binding usage.
++
++Required properties:
++- compatible: Should be "gpio-reset"
++- reset-gpios: A gpio used as reset line. The gpio specifier for this property
++ depends on the gpio controller that provides the gpio.
++- #reset-cells: 0, see below
++
++Optional properties:
++- reset-delay-us: delay in microseconds. The gpio reset line will be asserted for
++ this duration to reset.
++- initially-in-reset: boolean. If not set, the initial state should be a
++ deasserted reset line. If this property exists, the
++ reset line should be kept in reset.
++
++example:
++
++sii902x_reset: gpio-reset {
++ compatible = "gpio-reset";
++ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
++ reset-delay-us = <10000>;
++ initially-in-reset;
++ #reset-cells = <0>;
++};
++
++/* Device with nRESET pin connected to GPIO5_0 */
++sii902x@39 {
++ /* ... */
++ resets = <&sii902x_reset>; /* active-low GPIO5_0, 10 ms delay */
++};
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/sound/cs42888.txt linux-3.14.40/Documentation/devicetree/bindings/sound/cs42888.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/sound/cs42888.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/sound/cs42888.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -0,0 +1,29 @@
++CS42888 audio CODEC
++
++This device supports I2C only.
++
++Required properties:
++
++ - compatible: "cirrus,cs42888"
++ - reg: the I2C address of the device.
++ - clocks: Phandle to the clock node.
++ - clock-names: Contains name for each entry in clocks.
++ "codec_osc" : the external oscillator.
++ "esai" : the hckt clock from esai.
++ - <name>-supply: Phandle to the regulator <name>.
++
++Note: cs42888 needs a regulators node and a clocks node.
++
++Example:
++In this case, the clock is external oscillator.
++
++codec: cs42888@48 {
++ compatible = "cirrus,cs42888";
++ reg = <0x048>;
++ clocks = <&codec_osc 0>;
++ clock-names = "codec_osc";
++ VA-supply = <&reg_audio>;
++ VD-supply = <&reg_audio>;
++ VLS-supply = <&reg_audio>;
++ VLC-supply = <&reg_audio>;
++};
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt linux-3.14.40/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/sound/fsl-asrc-p2p.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -0,0 +1,23 @@
++* Freescale Asynchronous Sample Rate Converter (ASRC)
++
++This document describes the asrc p2p node. p2p is one of the asrc modes. asrc p2p depends on
++MXC_ASRC.
++
++Required properties:
++ - compatible: Should be "fsl,<chip>-asrc-p2p".
++ - fsl,output-rate: the output rate of asrc p2p, which can be <32000> to <192000>.
++ - fsl,output-width: the output width of asrc p2p, which can be <16> or <24>.
++ - fsl,asrc-dma-rx-events: the rx dma events of the asrc, <a b c> corresponding
++   to the 3 pairs of asrc.
++ - fsl,asrc-dma-tx-events: the tx dma events of the asrc, <a b c> corresponding
++   to the 3 pairs of asrc.
++
++Example:
++asrc_p2p: asrc_p2p {
++ compatible = "fsl,imx6q-asrc-p2p";
++ fsl,output-rate = <48000>;
++ fsl,output-width = <16>;
++ fsl,asrc-dma-rx-events = <17 18 19>;
++ fsl,asrc-dma-tx-events = <20 21 22>;
++ status = "okay";
++};
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt linux-3.14.40/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/sound/imx-audio-cs42888.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -0,0 +1,25 @@
++Freescale i.MX audio complex with CS42888 codec
++
++Required properties:
++- compatible : "fsl,imx-audio-cs42888"
++- model : The user-visible name of this sound complex
++- esai-controller : The phandle of the i.MX ESAI controller
++- audio-codec : The phandle of the CS42888 audio codec
++
++Optional properties:
++- asrc-controller : The phandle of the i.MX ASRC controller
++- audio-routing : A list of the connections between audio components.
++ Each entry is a pair of strings, the first being the connection's sink,
++ the second being the connection's source. Valid names could be power
++ supplies, CS42888 pins, and the jacks on the board:
++
++Example:
++
++sound {
++	compatible = "fsl,imx-audio-cs42888";
++ model = "cs42888-audio";
++ esai-controller = <&esai>;
++ asrc-controller = <&asrc_p2p>;
++ audio-codec = <&codec>;
++};
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt linux-3.14.40/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -24,6 +24,12 @@
+ Note: The AUDMUX port numbering should start at 1, which is consistent with
+ hardware manual.
+
++Optional properties:
++- hp-det-gpios : The gpio pin to detect plug in/out event that happens to
++ Headphone jack.
++- mic-det-gpios: The gpio pin to detect plug in/out event that happens to
++ Microphone jack.
++
+ Example:
+
+ sound {
+@@ -43,4 +49,6 @@
+ "DMICDAT", "DMIC";
+ mux-int-port = <2>;
+ mux-ext-port = <3>;
++ hp-det-gpios = <&gpio7 8 1>;
++ mic-det-gpios = <&gpio1 9 1>;
+ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/sound/wm8962.txt linux-3.14.40/Documentation/devicetree/bindings/sound/wm8962.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/sound/wm8962.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/sound/wm8962.txt 2015-05-01 14:57:58.479427001 -0500
+@@ -13,6 +13,14 @@
+ of R51 (Class D Control 2) gets set, indicating that the speaker is
+ in mono mode.
+
++  - amic-mono: This is a boolean property. If present, it indicates that the
++    analog microphone is a hardware mono input, and the driver will enable the
++    mono mix for it.
++
++  - dmic-mono: This is a boolean property. If present, it indicates that the
++    digital microphone is a hardware mono input, and the driver will enable the
++    mono mix for it.
++
+ - mic-cfg : Default register value for R48 (Additional Control 4).
+ If absent, the default should be the register default.
+
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/usb/mxs-phy.txt linux-3.14.40/Documentation/devicetree/bindings/usb/mxs-phy.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/usb/mxs-phy.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/devicetree/bindings/usb/mxs-phy.txt 2015-05-01 14:57:58.487427001 -0500
+@@ -1,13 +1,16 @@
+ * Freescale MXS USB Phy Device
+
+ Required properties:
+-- compatible: Should be "fsl,imx23-usbphy"
++- compatible: "fsl,imx23-usbphy" for imx23 and imx28, "fsl,imx6q-usbphy"
++for imx6dq and imx6dl, "fsl,imx6sl-usbphy" for imx6sl
+ - reg: Should contain registers location and length
+ - interrupts: Should contain phy interrupt
++- fsl,anatop: phandle for the anatop registers; only needed for the imx6 SoC series
+
+ Example:
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+ reg = <0x020c9000 0x1000>;
+ interrupts = <0 44 0x04>;
++ fsl,anatop = <&anatop>;
+ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt linux-3.14.40/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/video/fsl,csi-v4l2-capture.txt 2015-05-01 14:57:58.487427001 -0500
+@@ -0,0 +1,61 @@
++* Freescale CMOS Sensor Interface (CSI) V4L2 Capture
++
++Required properties for CSI
++- compatible: "fsl,<soc>-csi". Supported chips include imx6sl
++- reg: Address and length of the register set for CSI
++- interrupts: Should contain CSI interrupts
++
++Required properties for v4l2_capture
++- compatible: should be "fsl,<soc>-csi-v4l2", supported SoCs include imx6sl
++
++Required properties for sensor
++- compatible: "<vendor>,<sensor>"
++ please check the supported sensor in the Supported Sensor fields.
++- reg: sensor I2C slave address
++- pinctrl-names: should be "default" for parallel sensor
++- pinctrl-0: should depend on the connection between the sensor and the i.MX;
++  on i.MX6SL the connection can only be legacy parallel
++- clocks: should be the clock source provided to sensor.
++- clock-names: should be "csi_mclk"
++- AVDD-supply: set according to the board.
++- DVDD-supply: set according to the board.
++- pwn-gpios: set according to the board.
++- rst-gpios: set according to the board.
++- csi_id: csi id for v4l2 capture device
++ should be 0 for i.MX6SL
++- mclk: the frequency of the mclk clock sent out to the sensor. The unit is Hz.
++- mclk_source: should be 0 for i.MX6SL
++
++Supported Sensor
++- ovti, ov5640
++
++Example for CSI:
++ csi: csi@020e4000 {
++ compatible = "fsl,imx6sl-csi";
++ reg = <0x020e4000 0x4000>;
++ interrupts = <0 7 0x04>;
++ status = "disabled";
++ };
++
++Examples for v4l2_capture:
++ csi_v4l2_cap {
++ compatible = "fsl,imx6q-v4l2-capture";
++ status = "okay";
++ };
++
++Examples for sensors:
++ ov564x: ov564x@3c {
++ compatible = "ovti,ov564x";
++ reg = <0x3c>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_csi_0>;
++ clocks = <&clks IMX6SL_CLK_CSI>;
++ clock-names = "csi_mclk";
++ AVDD-supply = <&vgen6_reg>; /* 2.8v */
++ DVDD-supply = <&vgen2_reg>; /* 1.5v*/
++ pwn-gpios = <&gpio1 25 1>;
++ rst-gpios = <&gpio1 26 0>;
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt linux-3.14.40/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/video/fsl,mipi-csi2.txt 2015-05-01 14:57:58.487427001 -0500
+@@ -0,0 +1,42 @@
++* Freescale MIPI CSI2 Controller for i.MX6DQ/i.MX6SDL
++
++Required properties for mipi csi2 controller:
++- compatible: should be "fsl,imx6q-mipi-csi2"
++- reg: <base addr, range> contains mipi csi2 register base address and range
++- interrupts: <type num flag> where type is an interrupt type, num is the
++  interrupt number and flag is a field that encodes level/trigger information for
++ the interrupt.
++- clocks: the clock sources that mipi csi2 depends on.
++- clock-names: one name for each clock source, matching the clocks entries one by one.
++- status: should be set to "disabled".
++
++Required properties for mipi csi2 on specified board:
++- ipu_id: ipu id which the mipi csi2 controller is connected to.
++  should be 0 or 1 for i.MX6DQ; should be 0 for i.MX6SDL
++- csi_id: csi id which the mipi csi2 controller is connected to.
++  should be 0 or 1 for i.MX6DQ/i.MX6SDL
++- v_channel: virtual channel sent to the MIPI CSI2 controller
++  should be consistent with the input MIPI signal.
++- lanes: number of data lanes of the input MIPI signal. The maximum is 4.
++  should be consistent with the input MIPI signal.
++- status: should be set to "okay".
++
++Examples:
++for SOC imx6qdl.dtsi:
++ mipi_csi@021dc000 {
++ compatible = "fsl,imx6q-mipi-csi2";
++ reg = <0x021dc000 0x4000>;
++ interrupts = <0 100 0x04>, <0 101 0x04>;
++ clocks = <&clks 138>, <&clks 53>, <&clks 204>;
++ clock-names = "dphy_clk", "pixel_clk", "cfg_clk";
++ status = "disabled";
++ };
++
++for board imx6qdl-sabresd.dtsi:
++ mipi_csi@021dc000 {
++ status = "okay";
++ ipu_id = <0>;
++ csi_id = <1>;
++ v_channel = <0>;
++ lanes = <2>;
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,pxp.txt linux-3.14.40/Documentation/devicetree/bindings/video/fsl,pxp.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,pxp.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/video/fsl,pxp.txt 2015-05-01 14:57:58.487427001 -0500
+@@ -0,0 +1,30 @@
++* Freescale PxP Controller for i.MX6DL, i.MX6SL
++
++Required properties for PxP controller:
++- compatible: should be "fsl,<soc>-pxp-dma"
++- reg: <base addr, range> contains pxp register base address and range
++- interrupts: <type num flag> where type is an interrupt type, num is the
++  interrupt number and flag is a field that encodes level/trigger information for
++ the interrupt.
++- clocks: the clock sources that pxp depends on.
++- clock-names: one name for each clock source, matching the clocks entries one by one.
++
++Required properties for pxp on specified board:
++- status: should be set to "okay" if you want to use PxP
++
++Examples:
++for SOC imx6dl.dtsi:
++ pxp@020f0000 {
++ compatible = "fsl,imx6dl-pxp-dma";
++ reg = <0x020f0000 0x4000>;
++ interrupts = <0 98 0x04>;
++ clocks = <&clks 133>;
++ clock-names = "pxp-axi";
++ status = "disabled";
++ };
++
++
++for board imx6dl-sabresd.dts:
++ &pxp {
++ status = "okay";
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt linux-3.14.40/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/video/fsl,v4l2-capture.txt 2015-05-01 14:57:58.487427001 -0500
+@@ -0,0 +1,102 @@
++* Freescale V4L2 Capture for i.MX6DQ/i.MX6SDL
++
++Required board properties for IPUv3 capture:
++- clocks: should include the clock provided by i.MX6 to sensor
++- clock-names: sensor clock's name should be "ipux_csiy"
++ x should be 1 or 2 for i.MX6DQ; should be 1 for i.MX6SDL
++ y is 0 or 1 for i.MX6DQ/i.MX6SDL
++Note: for other detailed information on IPUv3, please refer to
++Documentation/devicetree/bindings/fb/fsl_ipuv3_fb.txt
++
++Required properties for v4l2_capture
++- compatible: should be "fsl,imx6q-v4l2-capture"
++- ipu_id: ipu id for v4l2 capture device
++ should be 0 or 1 for i.MX6DQ; should be 0 for i.MX6SDL
++- csi_id: csi id for v4l2 capture device
++ should be 0 or 1 for i.MX6DQ/i.MX6SDL
++- mclk_source: should be 0 or 1; at most two mclk sources are supported for now
++- status: should be set to "okay" to enable this device
++
++Required properties for sensor
++- compatible: "<vendor>,<sensor>"
++ please check the supported sensor in the Supported Sensor fields.
++- reg: sensor I2C slave address
++- pinctrl-names: should be "default" for parallel sensor
++- pinctrl-0: should depend on the connection between sensor and i.MX
++ connection between sensor and i.MX could be MIPI-CSI2 or legacy parallel
++- clocks: should be the clock source provided to sensor.
++- clock-names: should be "csi_mclk"
++- DOVDD-supply: set according to the board.
++- AVDD-supply: set according to the board.
++- DVDD-supply: set according to the board.
++- pwn-gpios: set according to the board.
++- rst-gpios: set according to the board.
++- csi_id: csi id for v4l2 capture device
++ should be 0 or 1 for i.MX6DQ/i.MX6SDL.
++- mclk: the frequency of the mclk clock sent out to the sensor. The unit is Hz.
++- mclk_source: should be 0 or 1 and should be the same as the setting in
++ v4l2_capture.
++- cvbs: 1 for CVBS input, 0 for YPbPr input. This property is only needed for
++  the adv7180 tv decoder.
++
++Supported Sensor
++- ov5640
++- ov5642
++- ov5640_mipi
++- adv7180
++
++
++Example for IPUv3 including capture settings on imx6q-sabresd.dts:
++ ipu1: ipu@02400000 { /* IPU1 */
++ compatible = "fsl,imx6q-ipuv3";
++ reg = <0x02400000 0x400000>;
++ interrupts = <0 5 0x04>, < 0 6 0x04>;
++ clocks = <&clks 130>, <&clks 131>, <&clks 132>, <&clks 39>, <&clks 40>, <&clks 169>;
++ clock-names = "ipu1", "ipu1_di0", "ipu1_di1", "ipu1_di0_sel", "ipu1_di1_sel", "ipu1_csi0";
++ status = "disabled";
++ };
++
++Examples for v4l2_capture:
++ v4l2_cap {
++ compatible = "fsl,imx6q-v4l2-capture";
++ ipu_id = <0>;
++ csi_id = <0>;
++ mclk_source = <0>;
++ status = "okay";
++ };
++
++Examples for sensors:
++ ov5642: ov5642@3c {
++ compatible = "ovti,ov5642";
++ reg = <0x3c>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1_2>;
++ clocks = <&clks 201>;
++ clock-names = "csi_mclk";
++ DOVDD-supply = <&vgen4_reg>; /* 1.8v */
++ AVDD-supply = <&vgen3_reg>; /* 2.8v, on rev C board is VGEN3 */
++ DVDD-supply = <&vgen2_reg>; /* 1.5v*/
++ pwn-gpios = <&gpio1 16 1>; /* active low: SD1_DAT0 */
++ rst-gpios = <&gpio1 17 0>; /* active high: SD1_DAT1 */
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ };
++
++ adv7180: adv7180@21 {
++ compatible = "adv,adv7180";
++ reg = <0x21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_ipu1_3>;
++ clocks = <&clks 201>;
++ clock-names = "csi_mclk";
++ DOVDD-supply = <&reg_3p3v>; /* 3.3v, enabled via 2.8 VGEN6 */
++ AVDD-supply = <&reg_3p3v>; /* 1.8v */
++ DVDD-supply = <&reg_3p3v>; /* 1.8v */
++ PVDD-supply = <&reg_3p3v>; /* 1.8v */
++ pwn-gpios = <&max7310_b 2 0>;
++ csi_id = <0>;
++ mclk = <24000000>;
++ mclk_source = <0>;
++ cvbs = <1>;
++ };
+diff -Nur linux-3.14.40.orig/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt linux-3.14.40/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt
+--- linux-3.14.40.orig/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/Documentation/devicetree/bindings/video/mxc_hdmi_video.txt 2015-05-01 14:57:58.491427001 -0500
+@@ -0,0 +1,20 @@
++Device-Tree bindings for hdmi video driver
++
++Required properties:
++- compatible: value should be "fsl,imx6q-hdmi-video".
++- fsl,hdcp: if this property is defined in the dts, the hdmi driver will
++  initialize for hdcp; otherwise the hdcp function will not be supported.
++- fsl,phy_reg_vlev: hdmi phy register, Voltage Level Control Register offset 0x0e,
++  adjusts the hdmi phy signal voltage level.
++- fsl,phy_reg_cksymtx: hdmi phy register, clock symbol and transmitter control
++  register offset 0x09, adjusts hdmi signal pre-emphasis.
++
++Example:
++
++ hdmi_video {
++ compatible = "fsl,imx6q-hdmi-video";
++ fsl,hdcp;
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ };
++
+diff -Nur linux-3.14.40.orig/Documentation/filesystems/hfsplus.txt linux-3.14.40/Documentation/filesystems/hfsplus.txt
+--- linux-3.14.40.orig/Documentation/filesystems/hfsplus.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/filesystems/hfsplus.txt 2015-05-01 14:57:58.499427001 -0500
+@@ -56,4 +56,4 @@
+
+ kernel source: <file:fs/hfsplus>
+
+-Apple Technote 1150 http://developer.apple.com/technotes/tn/tn1150.html
++Apple Technote 1150 https://developer.apple.com/legacy/library/technotes/tn/tn1150.html
+diff -Nur linux-3.14.40.orig/Documentation/kernel-parameters.txt linux-3.14.40/Documentation/kernel-parameters.txt
+--- linux-3.14.40.orig/Documentation/kernel-parameters.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/kernel-parameters.txt 2015-05-01 14:57:58.511427001 -0500
+@@ -603,8 +603,11 @@
+ Also note the kernel might malfunction if you disable
+ some critical bits.
+
+- cma=nn[MG] [ARM,KNL]
+- Sets the size of kernel global memory area for contiguous
++ cma=nn[MG]@[start[MG][-end[MG]]]
++ [ARM,X86,KNL]
++ Sets the size of kernel global memory area for
++ contiguous memory allocations and optionally the
++ placement constraint by the physical address range of
+ memory allocations. For more information, see
+ include/linux/dma-contiguous.h
+
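For illustration only, a kernel command line using the extended cma= syntax documented above might look like the snippet below; the size and placement range are placeholders chosen as an example, not values taken from this patch:

    cma=64M@256M-1G

This would reserve a 64 MiB contiguous area constrained to physical addresses between 256 MiB and 1 GiB.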
+diff -Nur linux-3.14.40.orig/Documentation/networking/gianfar.txt linux-3.14.40/Documentation/networking/gianfar.txt
+--- linux-3.14.40.orig/Documentation/networking/gianfar.txt 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/Documentation/networking/gianfar.txt 2015-05-01 14:57:58.523427001 -0500
+@@ -1,38 +1,8 @@
+ The Gianfar Ethernet Driver
+-Sysfs File description
+
+ Author: Andy Fleming <afleming@freescale.com>
+ Updated: 2005-07-28
+
+-SYSFS
+-
+-Several of the features of the gianfar driver are controlled
+-through sysfs files. These are:
+-
+-bd_stash:
+-To stash RX Buffer Descriptors in the L2, echo 'on' or '1' to
+-bd_stash, echo 'off' or '0' to disable
+-
+-rx_stash_len:
+-To stash the first n bytes of the packet in L2, echo the number
+-of bytes to buf_stash_len. echo 0 to disable.
+-
+-WARNING: You could really screw these up if you set them too low or high!
+-fifo_threshold:
+-To change the number of bytes the controller needs in the
+-fifo before it starts transmission, echo the number of bytes to
+-fifo_thresh. Range should be 0-511.
+-
+-fifo_starve:
+-When the FIFO has less than this many bytes during a transmit, it
+-enters starve mode, and increases the priority of TX memory
+-transactions. To change, echo the number of bytes to
+-fifo_starve. Range should be 0-511.
+-
+-fifo_starve_off:
+-Once in starve mode, the FIFO remains there until it has this
+-many bytes. To change, echo the number of bytes to
+-fifo_starve_off. Range should be 0-511.
+
+ CHECKSUM OFFLOADING
+
+diff -Nur linux-3.14.40.orig/drivers/ata/acard-ahci.c linux-3.14.40/drivers/ata/acard-ahci.c
+--- linux-3.14.40.orig/drivers/ata/acard-ahci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/acard-ahci.c 2015-05-01 14:57:58.539427001 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/ahci.c linux-3.14.40/drivers/ata/ahci.c
+--- linux-3.14.40.orig/drivers/ata/ahci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/ahci.c 2015-05-01 14:57:58.551427001 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+@@ -610,6 +609,7 @@
+ unsigned long deadline)
+ {
+ struct ata_port *ap = link->ap;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ bool online;
+ int rc;
+
+@@ -620,7 +620,7 @@
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ deadline, &online, NULL);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+@@ -635,6 +635,7 @@
+ {
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+@@ -650,7 +651,7 @@
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ deadline, &online, NULL);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ /* The pseudo configuration device on SIMG4726 attached to
+ * ASUS P5W-DH Deluxe doesn't send signature FIS after
+@@ -1146,6 +1147,17 @@
+ return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
+ }
+
++static bool ahci_broken_devslp(struct pci_dev *pdev)
++{
++ /* device with broken DEVSLP but still showing SDS capability */
++ static const struct pci_device_id ids[] = {
++ { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
++ {}
++ };
++
++ return pci_match_id(ids, pdev);
++}
++
+ #ifdef CONFIG_ATA_ACPI
+ static void ahci_gtf_filter_workaround(struct ata_host *host)
+ {
+@@ -1397,6 +1409,10 @@
+
+ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+
++ /* must set flag prior to save config in order to take effect */
++ if (ahci_broken_devslp(pdev))
++ hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
++
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
+diff -Nur linux-3.14.40.orig/drivers/ata/ahci.h linux-3.14.40/drivers/ata/ahci.h
+--- linux-3.14.40.orig/drivers/ata/ahci.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/ahci.h 2015-05-01 14:57:58.571427001 -0500
+@@ -37,6 +37,8 @@
+
+ #include <linux/clk.h>
+ #include <linux/libata.h>
++#include <linux/phy/phy.h>
++#include <linux/regulator/consumer.h>
+
+ /* Enclosure Management Control */
+ #define EM_CTRL_MSG_TYPE 0x000f0000
+@@ -51,6 +53,7 @@
+
+ enum {
+ AHCI_MAX_PORTS = 32,
++ AHCI_MAX_CLKS = 3,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+@@ -233,6 +236,8 @@
+ port start (wait until
+ error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
++ AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
++ AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
+
+ /* ap->flags bits */
+
+@@ -322,8 +327,17 @@
+ u32 em_loc; /* enclosure management location */
+ u32 em_buf_sz; /* EM buffer size in byte */
+ u32 em_msg_type; /* EM message type */
+- struct clk *clk; /* Only for platforms supporting clk */
++ bool got_runtime_pm; /* Did we do pm_runtime_get? */
++ struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
++ struct regulator *target_pwr; /* Optional */
++ struct phy *phy; /* If platform uses phy */
+ void *plat_data; /* Other platform data */
++ /*
++ * Optional ahci_start_engine override, if not set this gets set to the
++ * default ahci_start_engine during ahci_save_initial_config, this can
++ * be overridden anytime before the host is activated.
++ */
++ void (*start_engine)(struct ata_port *ap);
+ };
+
+ extern int ahci_ignore_sss;
+diff -Nur linux-3.14.40.orig/drivers/ata/ahci_imx.c linux-3.14.40/drivers/ata/ahci_imx.c
+--- linux-3.14.40.orig/drivers/ata/ahci_imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/ahci_imx.c 2015-05-01 14:57:58.571427001 -0500
+@@ -26,12 +26,29 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+ #include <linux/libata.h>
++#include <linux/busfreq-imx6.h>
+ #include "ahci.h"
+
+ enum {
+- PORT_PHY_CTL = 0x178, /* Port0 PHY Control */
+- PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */
+- HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
++ /* Timer 1-ms Register */
++ IMX_TIMER1MS = 0x00e0,
++ /* Port0 PHY Control Register */
++ IMX_P0PHYCR = 0x0178,
++ IMX_P0PHYCR_TEST_PDDQ = 1 << 20,
++ IMX_P0PHYCR_CR_READ = 1 << 19,
++ IMX_P0PHYCR_CR_WRITE = 1 << 18,
++ IMX_P0PHYCR_CR_CAP_DATA = 1 << 17,
++ IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16,
++ /* Port0 PHY Status Register */
++ IMX_P0PHYSR = 0x017c,
++ IMX_P0PHYSR_CR_ACK = 1 << 18,
++ IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0,
++ /* Lane0 Output Status Register */
++ IMX_LANE0_OUT_STAT = 0x2003,
++ IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1,
++ /* Clock Reset Register */
++ IMX_CLOCK_RESET = 0x7f3f,
++ IMX_CLOCK_RESET_RESET = 1 << 0,
+ };
+
+ enum ahci_imx_type {
+@@ -42,62 +59,230 @@
+ struct imx_ahci_priv {
+ struct platform_device *ahci_pdev;
+ enum ahci_imx_type type;
+-
+- /* i.MX53 clock */
+- struct clk *sata_gate_clk;
+- /* Common clock */
+- struct clk *sata_ref_clk;
+ struct clk *ahb_clk;
+-
+ struct regmap *gpr;
+ bool no_device;
+ bool first_time;
++ u32 phy_params;
+ };
+
+ static int ahci_imx_hotplug;
+ module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+-static int imx_sata_clock_enable(struct device *dev)
++static void ahci_imx_host_stop(struct ata_host *host);
++
++static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
++{
++ int timeout = 10;
++ u32 crval;
++ u32 srval;
++
++ /* Assert or deassert the bit */
++ crval = readl(mmio + IMX_P0PHYCR);
++ if (assert)
++ crval |= bit;
++ else
++ crval &= ~bit;
++ writel(crval, mmio + IMX_P0PHYCR);
++
++ /* Wait for the cr_ack signal */
++ do {
++ srval = readl(mmio + IMX_P0PHYSR);
++ if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
++ break;
++ usleep_range(100, 200);
++ } while (--timeout);
++
++ return timeout ? 0 : -ETIMEDOUT;
++}
++
++static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+ {
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
++ u32 crval = addr;
+ int ret;
+
+- if (imxpriv->type == AHCI_IMX53) {
+- ret = clk_prepare_enable(imxpriv->sata_gate_clk);
+- if (ret < 0) {
+- dev_err(dev, "prepare-enable sata_gate clock err:%d\n",
+- ret);
+- return ret;
+- }
++ /* Supply the address on cr_data_in */
++ writel(crval, mmio + IMX_P0PHYCR);
++
++ /* Assert the cr_cap_addr signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
++ if (ret)
++ return ret;
++
++ /* Deassert cr_cap_addr */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int imx_phy_reg_write(u16 val, void __iomem *mmio)
++{
++ u32 crval = val;
++ int ret;
++
++ /* Supply the data on cr_data_in */
++ writel(crval, mmio + IMX_P0PHYCR);
++
++ /* Assert the cr_cap_data signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
++ if (ret)
++ return ret;
++
++ /* Deassert cr_cap_data */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
++ if (ret)
++ return ret;
++
++ if (val & IMX_CLOCK_RESET_RESET) {
++ /*
++ * In case we're resetting the phy, it's unable to acknowledge,
++ * so we return immediately here.
++ */
++ crval |= IMX_P0PHYCR_CR_WRITE;
++ writel(crval, mmio + IMX_P0PHYCR);
++ goto out;
+ }
+
+- ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+- if (ret < 0) {
+- dev_err(dev, "prepare-enable sata_ref clock err:%d\n",
+- ret);
+- goto clk_err;
++ /* Assert the cr_write signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
++ if (ret)
++ return ret;
++
++ /* Deassert cr_write */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
++ if (ret)
++ return ret;
++
++out:
++ return 0;
++}
++
++static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
++{
++ int ret;
++
++ /* Assert the cr_read signal */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
++ if (ret)
++ return ret;
++
++ /* Capture the data from cr_data_out[] */
++ *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
++
++ /* Deassert cr_read */
++ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
++{
++ void __iomem *mmio = hpriv->mmio;
++ int timeout = 10;
++ u16 val;
++ int ret;
++
++ /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
++ ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
++ if (ret)
++ return ret;
++ ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
++ if (ret)
++ return ret;
++
++ /* Wait for PHY RX_PLL to be stable */
++ do {
++ usleep_range(100, 200);
++ ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
++ if (ret)
++ return ret;
++ ret = imx_phy_reg_read(&val, mmio);
++ if (ret)
++ return ret;
++ if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
++ break;
++ } while (--timeout);
++
++ return timeout ? 0 : -ETIMEDOUT;
++}
++
++static int imx_sata_enable(struct ahci_host_priv *hpriv)
++{
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
++ struct device *dev = &imxpriv->ahci_pdev->dev;
++ int ret;
++
++ if (imxpriv->no_device)
++ return 0;
++
++ if (hpriv->target_pwr) {
++ ret = regulator_enable(hpriv->target_pwr);
++ if (ret)
++ return ret;
+ }
+
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ ret = ahci_platform_enable_clks(hpriv);
++ if (ret < 0)
++ goto disable_regulator;
++
+ if (imxpriv->type == AHCI_IMX6Q) {
++ /*
++ * set PHY Parameters, two steps to configure the GPR13,
++ * one write for rest of parameters, mask of first write
++ * is 0x07ffffff, and the other one write for setting
++ * the mpll_clk_en.
++ */
++ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
++ IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
++ IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
++ IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
++ IMX6Q_GPR13_SATA_SPD_MODE_MASK |
++ IMX6Q_GPR13_SATA_MPLL_SS_EN |
++ IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
++ IMX6Q_GPR13_SATA_TX_BOOST_MASK |
++ IMX6Q_GPR13_SATA_TX_LVL_MASK |
++ IMX6Q_GPR13_SATA_MPLL_CLK_EN |
++ IMX6Q_GPR13_SATA_TX_EDGE_RATE,
++ imxpriv->phy_params);
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
++
++ usleep_range(100, 200);
++
++ ret = imx_sata_phy_reset(hpriv);
++ if (ret) {
++ dev_err(dev, "failed to reset phy: %d\n", ret);
++ goto disable_regulator;
++ }
+ }
+
+ usleep_range(1000, 2000);
+
+ return 0;
+
+-clk_err:
+- if (imxpriv->type == AHCI_IMX53)
+- clk_disable_unprepare(imxpriv->sata_gate_clk);
++disable_regulator:
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
++
+ return ret;
+ }
+
+-static void imx_sata_clock_disable(struct device *dev)
++static void imx_sata_disable(struct ahci_host_priv *hpriv)
+ {
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
++
++ if (imxpriv->no_device)
++ return;
+
+ if (imxpriv->type == AHCI_IMX6Q) {
+ regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+@@ -105,10 +290,12 @@
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ }
+
+- clk_disable_unprepare(imxpriv->sata_ref_clk);
++ ahci_platform_disable_clks(hpriv);
+
+- if (imxpriv->type == AHCI_IMX53)
+- clk_disable_unprepare(imxpriv->sata_gate_clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
+ }
+
+ static void ahci_imx_error_handler(struct ata_port *ap)
+@@ -118,7 +305,7 @@
+ struct ata_host *host = dev_get_drvdata(ap->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+ ahci_error_handler(ap);
+
+@@ -134,17 +321,23 @@
+ * without full reset once the pddq mode is enabled making it
+ * impossible to use as part of libata LPM.
+ */
+- reg_val = readl(mmio + PORT_PHY_CTL);
+- writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+- imx_sata_clock_disable(ap->dev);
++ reg_val = readl(mmio + IMX_P0PHYCR);
++ writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
++ imx_sata_disable(hpriv);
+ imxpriv->no_device = true;
++
++ dev_info(ap->dev, "no device found, disabling link.\n");
++ dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX
++ ".hotplug=1 to enable hotplug\n");
+ }
+
+ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+ {
+ struct ata_port *ap = link->ap;
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
++ struct ata_host *host = dev_get_drvdata(ap->dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+ int ret = -EIO;
+
+ if (imxpriv->type == AHCI_IMX53)
+@@ -156,7 +349,8 @@
+ }
+
+ static struct ata_port_operations ahci_imx_ops = {
+- .inherits = &ahci_platform_ops,
++ .inherits = &ahci_ops,
++ .host_stop = ahci_imx_host_stop,
+ .error_handler = ahci_imx_error_handler,
+ .softreset = ahci_imx_softreset,
+ };
+@@ -168,234 +362,306 @@
+ .port_ops = &ahci_imx_ops,
+ };
+
+-static int imx_sata_init(struct device *dev, void __iomem *mmio)
+-{
+- int ret = 0;
+- unsigned int reg_val;
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+-
+- ret = imx_sata_clock_enable(dev);
+- if (ret < 0)
+- return ret;
++static const struct of_device_id imx_ahci_of_match[] = {
++ { .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
++ { .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
++ {},
++};
++MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
+- /*
+- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+- * and IP vendor specific register HOST_TIMER1MS.
+- * Configure CAP_SSS (support stagered spin up).
+- * Implement the port0.
+- * Get the ahb clock rate, and configure the TIMER1MS register.
+- */
+- reg_val = readl(mmio + HOST_CAP);
+- if (!(reg_val & HOST_CAP_SSS)) {
+- reg_val |= HOST_CAP_SSS;
+- writel(reg_val, mmio + HOST_CAP);
+- }
+- reg_val = readl(mmio + HOST_PORTS_IMPL);
+- if (!(reg_val & 0x1)) {
+- reg_val |= 0x1;
+- writel(reg_val, mmio + HOST_PORTS_IMPL);
+- }
++struct reg_value {
++ u32 of_value;
++ u32 reg_value;
++};
+
+- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+- writel(reg_val, mmio + HOST_TIMER1MS);
++struct reg_property {
++ const char *name;
++ const struct reg_value *values;
++ size_t num_values;
++ u32 def_value;
++ u32 set_value;
++};
+
+- return 0;
+-}
++static const struct reg_value gpr13_tx_level[] = {
++ { 937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
++ { 947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
++ { 957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
++ { 966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
++ { 976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
++ { 986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
++ { 996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
++ { 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
++ { 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
++ { 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
++ { 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
++ { 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
++ { 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
++ { 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
++ { 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
++ { 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
++ { 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
++ { 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
++ { 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
++ { 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
++ { 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
++ { 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
++ { 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
++ { 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
++ { 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
++ { 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
++ { 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
++ { 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
++ { 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
++ { 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
++ { 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
++ { 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
++};
+
+-static void imx_sata_exit(struct device *dev)
+-{
+- imx_sata_clock_disable(dev);
+-}
++static const struct reg_value gpr13_tx_boost[] = {
++ { 0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
++ { 370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
++ { 740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
++ { 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
++ { 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
++ { 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
++ { 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
++ { 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
++ { 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
++ { 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
++ { 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
++ { 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
++ { 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
++ { 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
++ { 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
++ { 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
++};
+
+-static int imx_ahci_suspend(struct device *dev)
+-{
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
++static const struct reg_value gpr13_tx_atten[] = {
++ { 8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
++ { 9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
++ { 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
++ { 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
++ { 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
++ { 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
++};
+
+- /*
+- * If no_device is set, The CLKs had been gated off in the
+- * initialization so don't do it again here.
+- */
+- if (!imxpriv->no_device)
+- imx_sata_clock_disable(dev);
++static const struct reg_value gpr13_rx_eq[] = {
++ { 500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
++ { 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
++ { 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
++ { 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
++ { 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
++ { 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
++ { 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
++ { 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
++};
+
+- return 0;
+-}
++static const struct reg_property gpr13_props[] = {
++ {
++ .name = "fsl,transmit-level-mV",
++ .values = gpr13_tx_level,
++ .num_values = ARRAY_SIZE(gpr13_tx_level),
++ .def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
++ }, {
++ .name = "fsl,transmit-boost-mdB",
++ .values = gpr13_tx_boost,
++ .num_values = ARRAY_SIZE(gpr13_tx_boost),
++ .def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
++ }, {
++ .name = "fsl,transmit-atten-16ths",
++ .values = gpr13_tx_atten,
++ .num_values = ARRAY_SIZE(gpr13_tx_atten),
++ .def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
++ }, {
++ .name = "fsl,receive-eq-mdB",
++ .values = gpr13_rx_eq,
++ .num_values = ARRAY_SIZE(gpr13_rx_eq),
++ .def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
++ }, {
++ .name = "fsl,no-spread-spectrum",
++ .def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
++ .set_value = 0,
++ },
++};
+
+-static int imx_ahci_resume(struct device *dev)
++static u32 imx_ahci_parse_props(struct device *dev,
++ const struct reg_property *prop, size_t num)
+ {
+- struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+- int ret = 0;
+-
+- if (!imxpriv->no_device)
+- ret = imx_sata_clock_enable(dev);
++ struct device_node *np = dev->of_node;
++ u32 reg_value = 0;
++ int i, j;
++
++ for (i = 0; i < num; i++, prop++) {
++ u32 of_val;
++
++ if (prop->num_values == 0) {
++ if (of_property_read_bool(np, prop->name))
++ reg_value |= prop->set_value;
++ else
++ reg_value |= prop->def_value;
++ continue;
++ }
+
+- return ret;
+-}
++ if (of_property_read_u32(np, prop->name, &of_val)) {
++ dev_info(dev, "%s not specified, using %08x\n",
++ prop->name, prop->def_value);
++ reg_value |= prop->def_value;
++ continue;
++ }
+
+-static struct ahci_platform_data imx_sata_pdata = {
+- .init = imx_sata_init,
+- .exit = imx_sata_exit,
+- .ata_port_info = &ahci_imx_port_info,
+- .suspend = imx_ahci_suspend,
+- .resume = imx_ahci_resume,
++ for (j = 0; j < prop->num_values; j++) {
++ if (prop->values[j].of_value == of_val) {
++ dev_info(dev, "%s value %u, using %08x\n",
++ prop->name, of_val, prop->values[j].reg_value);
++ reg_value |= prop->values[j].reg_value;
++ break;
++ }
++ }
+
+-};
++ if (j == prop->num_values) {
++ dev_err(dev, "DT property %s is not a valid value\n",
++ prop->name);
++ reg_value |= prop->def_value;
++ }
++ }
+
+-static const struct of_device_id imx_ahci_of_match[] = {
+- { .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
+- { .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
++ return reg_value;
++}
+
+ static int imx_ahci_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct resource *mem, *irq, res[2];
+ const struct of_device_id *of_id;
+- enum ahci_imx_type type;
+- const struct ahci_platform_data *pdata = NULL;
++ struct ahci_host_priv *hpriv;
+ struct imx_ahci_priv *imxpriv;
+- struct device *ahci_dev;
+- struct platform_device *ahci_pdev;
++ unsigned int reg_val;
+ int ret;
+
+ of_id = of_match_device(imx_ahci_of_match, dev);
+ if (!of_id)
+ return -EINVAL;
+
+- type = (enum ahci_imx_type)of_id->data;
+- pdata = &imx_sata_pdata;
+-
+ imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+- if (!imxpriv) {
+- dev_err(dev, "can't alloc ahci_host_priv\n");
++ if (!imxpriv)
+ return -ENOMEM;
+- }
+-
+- ahci_pdev = platform_device_alloc("ahci", -1);
+- if (!ahci_pdev)
+- return -ENODEV;
+-
+- ahci_dev = &ahci_pdev->dev;
+- ahci_dev->parent = dev;
+
++ imxpriv->ahci_pdev = pdev;
+ imxpriv->no_device = false;
+ imxpriv->first_time = true;
+- imxpriv->type = type;
+-
++ imxpriv->type = (enum ahci_imx_type)of_id->data;
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "can't get ahb clock.\n");
+- ret = PTR_ERR(imxpriv->ahb_clk);
+- goto err_out;
+- }
+-
+- if (type == AHCI_IMX53) {
+- imxpriv->sata_gate_clk = devm_clk_get(dev, "sata_gate");
+- if (IS_ERR(imxpriv->sata_gate_clk)) {
+- dev_err(dev, "can't get sata_gate clock.\n");
+- ret = PTR_ERR(imxpriv->sata_gate_clk);
+- goto err_out;
+- }
+- }
+-
+- imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+- if (IS_ERR(imxpriv->sata_ref_clk)) {
+- dev_err(dev, "can't get sata_ref clock.\n");
+- ret = PTR_ERR(imxpriv->sata_ref_clk);
+- goto err_out;
++ return PTR_ERR(imxpriv->ahb_clk);
+ }
+
+- imxpriv->ahci_pdev = ahci_pdev;
+- platform_set_drvdata(pdev, imxpriv);
+-
+- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+- if (!mem || !irq) {
+- dev_err(dev, "no mmio/irq resource\n");
+- ret = -ENOMEM;
+- goto err_out;
+- }
+-
+- res[0] = *mem;
+- res[1] = *irq;
+-
+- ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+- ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
+- ahci_dev->of_node = dev->of_node;
++ if (imxpriv->type == AHCI_IMX6Q) {
++ u32 reg_value;
+
+- if (type == AHCI_IMX6Q) {
+ imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+ "fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev,
+ "failed to find fsl,imx6q-iomux-gpr regmap\n");
+- ret = PTR_ERR(imxpriv->gpr);
+- goto err_out;
++ return PTR_ERR(imxpriv->gpr);
+ }
+
+- /*
+- * Set PHY Paremeters, two steps to configure the GPR13,
+- * one write for rest of parameters, mask of first write
+- * is 0x07fffffe, and the other one write for setting
+- * the mpll_clk_en happens in imx_sata_clock_enable().
+- */
+- regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+- IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+- IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+- IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+- IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+- IMX6Q_GPR13_SATA_MPLL_SS_EN |
+- IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+- IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+- IMX6Q_GPR13_SATA_TX_LVL_MASK |
+- IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+- IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+- IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
++ reg_value = imx_ahci_parse_props(dev, gpr13_props,
++ ARRAY_SIZE(gpr13_props));
++
++ imxpriv->phy_params =
+ IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+ IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+ IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+- IMX6Q_GPR13_SATA_MPLL_SS_EN |
+- IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
+- IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
+- IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
++ reg_value;
+ }
+
+- ret = platform_device_add_resources(ahci_pdev, res, 2);
++ hpriv = ahci_platform_get_resources(pdev);
++ if (IS_ERR(hpriv))
++ return PTR_ERR(hpriv);
++
++ hpriv->plat_data = imxpriv;
++
++ ret = imx_sata_enable(hpriv);
+ if (ret)
+- goto err_out;
++ return ret;
+
+- ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
++ /*
++ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
++ * and IP vendor specific register IMX_TIMER1MS.
++ * Configure CAP_SSS (support staggered spin up).
++ * Implement the port0.
++ * Get the ahb clock rate, and configure the TIMER1MS register.
++ */
++ reg_val = readl(hpriv->mmio + HOST_CAP);
++ if (!(reg_val & HOST_CAP_SSS)) {
++ reg_val |= HOST_CAP_SSS;
++ writel(reg_val, hpriv->mmio + HOST_CAP);
++ }
++ reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
++ if (!(reg_val & 0x1)) {
++ reg_val |= 0x1;
++ writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
++ }
++
++ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
++ writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
++
++ ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
++ 0, 0, 0);
+ if (ret)
+- goto err_out;
++ imx_sata_disable(hpriv);
++
++ return ret;
++}
+
+- ret = platform_device_add(ahci_pdev);
+- if (ret) {
+-err_out:
+- platform_device_put(ahci_pdev);
++static void ahci_imx_host_stop(struct ata_host *host)
++{
++ struct ahci_host_priv *hpriv = host->private_data;
++
++ imx_sata_disable(hpriv);
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int imx_ahci_suspend(struct device *dev)
++{
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int ret;
++
++ ret = ahci_platform_suspend_host(dev);
++ if (ret)
+ return ret;
+- }
++
++ imx_sata_disable(hpriv);
+
+ return 0;
+ }
+
+-static int imx_ahci_remove(struct platform_device *pdev)
++static int imx_ahci_resume(struct device *dev)
+ {
+- struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
+- struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int ret;
+
+- platform_device_unregister(ahci_pdev);
+- return 0;
++ ret = imx_sata_enable(hpriv);
++ if (ret)
++ return ret;
++
++ return ahci_platform_resume_host(dev);
+ }
++#endif
++
++static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
+
+ static struct platform_driver imx_ahci_driver = {
+ .probe = imx_ahci_probe,
+- .remove = imx_ahci_remove,
++ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "ahci-imx",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_ahci_of_match,
++ .pm = &ahci_imx_pm_ops,
+ },
+ };
+ module_platform_driver(imx_ahci_driver);
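As a sketch only (the node label and the specific values below are illustrative assumptions, not part of this patch), the new optional PHY tuning properties parsed by imx_ahci_parse_props() could be set from a board device tree roughly like this; any property that is omitted falls back to the defaults listed in gpr13_props:

    &sata {
            fsl,transmit-level-mV = <1025>;
            fsl,transmit-boost-mdB = <3330>;
            fsl,transmit-atten-16ths = <9>;
            fsl,receive-eq-mdB = <3000>;
            status = "okay";
    };

The hotplug behaviour mentioned in ahci_imx_error_handler() is controlled separately, via the hotplug=1 module parameter printed there (prefixed with the driver's module parameter prefix).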
+diff -Nur linux-3.14.40.orig/drivers/ata/ahci_platform.c linux-3.14.40/drivers/ata/ahci_platform.c
+--- linux-3.14.40.orig/drivers/ata/ahci_platform.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/ahci_platform.c 2015-05-01 14:57:58.583427001 -0500
+@@ -12,135 +12,36 @@
+ * any later version.
+ */
+
+-#include <linux/clk.h>
+ #include <linux/kernel.h>
+-#include <linux/gfp.h>
+ #include <linux/module.h>
+ #include <linux/pm.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+ #include <linux/device.h>
+ #include <linux/platform_device.h>
+ #include <linux/libata.h>
+ #include <linux/ahci_platform.h>
+ #include "ahci.h"
+
+-static void ahci_host_stop(struct ata_host *host);
+-
+-enum ahci_type {
+- AHCI, /* standard platform ahci */
+- IMX53_AHCI, /* ahci on i.mx53 */
+- STRICT_AHCI, /* delayed DMA engine start */
+-};
+-
+-static struct platform_device_id ahci_devtype[] = {
+- {
+- .name = "ahci",
+- .driver_data = AHCI,
+- }, {
+- .name = "imx53-ahci",
+- .driver_data = IMX53_AHCI,
+- }, {
+- .name = "strict-ahci",
+- .driver_data = STRICT_AHCI,
+- }, {
+- /* sentinel */
+- }
+-};
+-MODULE_DEVICE_TABLE(platform, ahci_devtype);
+-
+-struct ata_port_operations ahci_platform_ops = {
+- .inherits = &ahci_ops,
+- .host_stop = ahci_host_stop,
+-};
+-EXPORT_SYMBOL_GPL(ahci_platform_ops);
+-
+-static struct ata_port_operations ahci_platform_retry_srst_ops = {
+- .inherits = &ahci_pmp_retry_srst_ops,
+- .host_stop = ahci_host_stop,
+-};
+-
+-static const struct ata_port_info ahci_port_info[] = {
+- /* by features */
+- [AHCI] = {
+- .flags = AHCI_FLAG_COMMON,
+- .pio_mask = ATA_PIO4,
+- .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_ops,
+- },
+- [IMX53_AHCI] = {
+- .flags = AHCI_FLAG_COMMON,
+- .pio_mask = ATA_PIO4,
+- .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_retry_srst_ops,
+- },
+- [STRICT_AHCI] = {
+- AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
+- .flags = AHCI_FLAG_COMMON,
+- .pio_mask = ATA_PIO4,
+- .udma_mask = ATA_UDMA6,
+- .port_ops = &ahci_platform_ops,
+- },
+-};
+-
+-static struct scsi_host_template ahci_platform_sht = {
+- AHCI_SHT("ahci_platform"),
++static const struct ata_port_info ahci_port_info = {
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_platform_ops,
+ };
+
+ static int ahci_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- const struct platform_device_id *id = platform_get_device_id(pdev);
+- struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
+- const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ahci_host_priv *hpriv;
+- struct ata_host *host;
+- struct resource *mem;
+- int irq;
+- int n_ports;
+- int i;
+ int rc;
+
+- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- if (!mem) {
+- dev_err(dev, "no mmio space\n");
+- return -EINVAL;
+- }
+-
+- irq = platform_get_irq(pdev, 0);
+- if (irq <= 0) {
+- dev_err(dev, "no irq\n");
+- return -EINVAL;
+- }
+-
+- if (pdata && pdata->ata_port_info)
+- pi = *pdata->ata_port_info;
+-
+- hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+- if (!hpriv) {
+- dev_err(dev, "can't alloc ahci_host_priv\n");
+- return -ENOMEM;
+- }
+-
+- hpriv->flags |= (unsigned long)pi.private_data;
++ hpriv = ahci_platform_get_resources(pdev);
++ if (IS_ERR(hpriv))
++ return PTR_ERR(hpriv);
+
+- hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+- if (!hpriv->mmio) {
+- dev_err(dev, "can't map %pR\n", mem);
+- return -ENOMEM;
+- }
+-
+- hpriv->clk = clk_get(dev, NULL);
+- if (IS_ERR(hpriv->clk)) {
+- dev_err(dev, "can't get clock\n");
+- } else {
+- rc = clk_prepare_enable(hpriv->clk);
+- if (rc) {
+- dev_err(dev, "clock prepare enable failed");
+- goto free_clk;
+- }
+- }
++ rc = ahci_platform_enable_resources(hpriv);
++ if (rc)
++ return rc;
+
+ /*
+ * Some platforms might need to prepare for mmio region access,
+@@ -151,69 +52,10 @@
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev, hpriv->mmio);
+ if (rc)
+- goto disable_unprepare_clk;
+- }
+-
+- ahci_save_initial_config(dev, hpriv,
+- pdata ? pdata->force_port_map : 0,
+- pdata ? pdata->mask_port_map : 0);
+-
+- /* prepare host */
+- if (hpriv->cap & HOST_CAP_NCQ)
+- pi.flags |= ATA_FLAG_NCQ;
+-
+- if (hpriv->cap & HOST_CAP_PMP)
+- pi.flags |= ATA_FLAG_PMP;
+-
+- ahci_set_em_messages(hpriv, &pi);
+-
+- /* CAP.NP sometimes indicate the index of the last enabled
+- * port, at other times, that of the last possible port, so
+- * determining the maximum port number requires looking at
+- * both CAP.NP and port_map.
+- */
+- n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+-
+- host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+- if (!host) {
+- rc = -ENOMEM;
+- goto pdata_exit;
++ goto disable_resources;
+ }
+
+- host->private_data = hpriv;
+-
+- if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+- host->flags |= ATA_HOST_PARALLEL_SCAN;
+- else
+- dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
+-
+- if (pi.flags & ATA_FLAG_EM)
+- ahci_reset_em(host);
+-
+- for (i = 0; i < host->n_ports; i++) {
+- struct ata_port *ap = host->ports[i];
+-
+- ata_port_desc(ap, "mmio %pR", mem);
+- ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+-
+- /* set enclosure management message type */
+- if (ap->flags & ATA_FLAG_EM)
+- ap->em_message_type = hpriv->em_msg_type;
+-
+- /* disabled/not-implemented port */
+- if (!(hpriv->port_map & (1 << i)))
+- ap->ops = &ata_dummy_port_ops;
+- }
+-
+- rc = ahci_reset_controller(host);
+- if (rc)
+- goto pdata_exit;
+-
+- ahci_init_controller(host);
+- ahci_print_info(host, "platform");
+-
+- rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+- &ahci_platform_sht);
++ rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 0, 0, 0);
+ if (rc)
+ goto pdata_exit;
+
+@@ -221,115 +63,19 @@
+ pdata_exit:
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+-disable_unprepare_clk:
+- if (!IS_ERR(hpriv->clk))
+- clk_disable_unprepare(hpriv->clk);
+-free_clk:
+- if (!IS_ERR(hpriv->clk))
+- clk_put(hpriv->clk);
+- return rc;
+-}
+-
+-static void ahci_host_stop(struct ata_host *host)
+-{
+- struct device *dev = host->dev;
+- struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- struct ahci_host_priv *hpriv = host->private_data;
+-
+- if (pdata && pdata->exit)
+- pdata->exit(dev);
+-
+- if (!IS_ERR(hpriv->clk)) {
+- clk_disable_unprepare(hpriv->clk);
+- clk_put(hpriv->clk);
+- }
+-}
+-
+-#ifdef CONFIG_PM_SLEEP
+-static int ahci_suspend(struct device *dev)
+-{
+- struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- struct ata_host *host = dev_get_drvdata(dev);
+- struct ahci_host_priv *hpriv = host->private_data;
+- void __iomem *mmio = hpriv->mmio;
+- u32 ctl;
+- int rc;
+-
+- if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+- dev_err(dev, "firmware update required for suspend/resume\n");
+- return -EIO;
+- }
+-
+- /*
+- * AHCI spec rev1.1 section 8.3.3:
+- * Software must disable interrupts prior to requesting a
+- * transition of the HBA to D3 state.
+- */
+- ctl = readl(mmio + HOST_CTL);
+- ctl &= ~HOST_IRQ_EN;
+- writel(ctl, mmio + HOST_CTL);
+- readl(mmio + HOST_CTL); /* flush */
+-
+- rc = ata_host_suspend(host, PMSG_SUSPEND);
+- if (rc)
+- return rc;
+-
+- if (pdata && pdata->suspend)
+- return pdata->suspend(dev);
+-
+- if (!IS_ERR(hpriv->clk))
+- clk_disable_unprepare(hpriv->clk);
+-
+- return 0;
+-}
+-
+-static int ahci_resume(struct device *dev)
+-{
+- struct ahci_platform_data *pdata = dev_get_platdata(dev);
+- struct ata_host *host = dev_get_drvdata(dev);
+- struct ahci_host_priv *hpriv = host->private_data;
+- int rc;
+-
+- if (!IS_ERR(hpriv->clk)) {
+- rc = clk_prepare_enable(hpriv->clk);
+- if (rc) {
+- dev_err(dev, "clock prepare enable failed");
+- return rc;
+- }
+- }
+-
+- if (pdata && pdata->resume) {
+- rc = pdata->resume(dev);
+- if (rc)
+- goto disable_unprepare_clk;
+- }
+-
+- if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+- rc = ahci_reset_controller(host);
+- if (rc)
+- goto disable_unprepare_clk;
+-
+- ahci_init_controller(host);
+- }
+-
+- ata_host_resume(host);
+-
+- return 0;
+-
+-disable_unprepare_clk:
+- if (!IS_ERR(hpriv->clk))
+- clk_disable_unprepare(hpriv->clk);
+-
++disable_resources:
++ ahci_platform_disable_resources(hpriv);
+ return rc;
+ }
+-#endif
+
+-static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
++static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
++ ahci_platform_resume);
+
+ static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "snps,spear-ahci", },
+ { .compatible = "snps,exynos5440-ahci", },
+ { .compatible = "ibm,476gtr-ahci", },
++ { .compatible = "snps,dwc-ahci", },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ahci_of_match);
+@@ -343,7 +89,6 @@
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_pm_ops,
+ },
+- .id_table = ahci_devtype,
+ };
+ module_platform_driver(ahci_driver);
+
+diff -Nur linux-3.14.40.orig/drivers/ata/ata_generic.c linux-3.14.40/drivers/ata/ata_generic.c
+--- linux-3.14.40.orig/drivers/ata/ata_generic.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/ata_generic.c 2015-05-01 14:57:58.583427001 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/Kconfig linux-3.14.40/drivers/ata/Kconfig
+--- linux-3.14.40.orig/drivers/ata/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/Kconfig 2015-05-01 14:57:58.655427001 -0500
+@@ -99,7 +99,7 @@
+
+ config AHCI_IMX
+ tristate "Freescale i.MX AHCI SATA support"
+- depends on SATA_AHCI_PLATFORM && MFD_SYSCON
++ depends on MFD_SYSCON
+ help
+ This option enables support for the Freescale i.MX SoC's
+ onboard AHCI SATA.
+diff -Nur linux-3.14.40.orig/drivers/ata/libahci.c linux-3.14.40/drivers/ata/libahci.c
+--- linux-3.14.40.orig/drivers/ata/libahci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/libahci.c 2015-05-01 14:57:58.683427001 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/kernel.h>
+ #include <linux/gfp.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+@@ -394,6 +393,9 @@
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
++ * If it is not set already this function sets hpriv->start_engine to
++ * ahci_start_engine.
++ *
+ * LOCKING:
+ * None.
+ */
+@@ -450,11 +452,23 @@
+ cap &= ~HOST_CAP_SNTF;
+ }
+
++ if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
++ dev_info(dev,
++ "controller can't do DEVSLP, turning off\n");
++ cap2 &= ~HOST_CAP2_SDS;
++ cap2 &= ~HOST_CAP2_SADM;
++ }
++
+ if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+ dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
+ cap |= HOST_CAP_FBS;
+ }
+
++ if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
++ dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
++ cap &= ~HOST_CAP_FBS;
++ }
++
+ if (force_port_map && port_map != force_port_map) {
+ dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
+@@ -500,6 +514,9 @@
+ hpriv->cap = cap;
+ hpriv->cap2 = cap2;
+ hpriv->port_map = port_map;
++
++ if (!hpriv->start_engine)
++ hpriv->start_engine = ahci_start_engine;
+ }
+ EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+
+@@ -766,7 +783,7 @@
+
+ /* enable DMA */
+ if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+@@ -1234,7 +1251,7 @@
+
+ /* restart engine */
+ out_restart:
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(ahci_kick_engine);
+@@ -1426,6 +1443,7 @@
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+@@ -1443,7 +1461,7 @@
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+@@ -2007,10 +2025,12 @@
+
+ void ahci_error_handler(struct ata_port *ap)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+@@ -2031,6 +2051,7 @@
+
+ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_device *dev = ap->link.device;
+ u32 devslp, dm, dito, mdat, deto;
+@@ -2094,7 +2115,7 @@
+ PORT_DEVSLP_ADSE);
+ writel(devslp, port_mmio + PORT_DEVSLP);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ /* enable device sleep feature for the drive */
+ err_mask = ata_dev_set_feature(dev,
+@@ -2106,6 +2127,7 @@
+
+ static void ahci_enable_fbs(struct ata_port *ap)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+@@ -2134,11 +2156,12 @@
+ } else
+ dev_err(ap->host->dev, "Failed to enable FBS\n");
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ }
+
+ static void ahci_disable_fbs(struct ata_port *ap)
+ {
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+@@ -2166,7 +2189,7 @@
+ pp->fbs_enabled = false;
+ }
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+ }
+
+ static void ahci_pmp_attach(struct ata_port *ap)
+diff -Nur linux-3.14.40.orig/drivers/ata/libahci_platform.c linux-3.14.40/drivers/ata/libahci_platform.c
+--- linux-3.14.40.orig/drivers/ata/libahci_platform.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/ata/libahci_platform.c 2015-05-01 14:57:58.683427001 -0500
+@@ -0,0 +1,544 @@
++/*
++ * AHCI SATA platform library
++ *
++ * Copyright 2004-2005 Red Hat, Inc.
++ * Jeff Garzik <jgarzik@pobox.com>
++ * Copyright 2010 MontaVista Software, LLC.
++ * Anton Vorontsov <avorontsov@ru.mvista.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ */
++
++#include <linux/clk.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <linux/pm.h>
++#include <linux/interrupt.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/libata.h>
++#include <linux/ahci_platform.h>
++#include <linux/phy/phy.h>
++#include <linux/pm_runtime.h>
++#include "ahci.h"
++
++static void ahci_host_stop(struct ata_host *host);
++
++struct ata_port_operations ahci_platform_ops = {
++ .inherits = &ahci_ops,
++ .host_stop = ahci_host_stop,
++};
++EXPORT_SYMBOL_GPL(ahci_platform_ops);
++
++static struct scsi_host_template ahci_platform_sht = {
++ AHCI_SHT("ahci_platform"),
++};
++
++/**
++ * ahci_platform_enable_clks - Enable platform clocks
++ * @hpriv: host private area to store config values
++ *
++ * This function enables all the clks found in hpriv->clks, starting at
++ * index 0. If any clk fails to enable, it disables all the clks already
++ * enabled in reverse order, and then returns an error.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
++{
++ int c, rc;
++
++ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
++ rc = clk_prepare_enable(hpriv->clks[c]);
++ if (rc)
++ goto disable_unprepare_clk;
++ }
++ return 0;
++
++disable_unprepare_clk:
++ while (--c >= 0)
++ clk_disable_unprepare(hpriv->clks[c]);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
++
++/**
++ * ahci_platform_disable_clks - Disable platform clocks
++ * @hpriv: host private area to store config values
++ *
++ * This function disables all the clks found in hpriv->clks, in reverse
++ * order of ahci_platform_enable_clks (starting at the end of the array).
++ */
++void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
++{
++ int c;
++
++ for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
++ if (hpriv->clks[c])
++ clk_disable_unprepare(hpriv->clks[c]);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
++
++/**
++ * ahci_platform_enable_resources - Enable platform resources
++ * @hpriv: host private area to store config values
++ *
++ * This function enables all ahci_platform managed resources in the
++ * following order:
++ * 1) Regulator
++ * 2) Clocks (through ahci_platform_enable_clks)
++ * 3) Phy
++ *
++ * If resource enabling fails at any point, the previously enabled resources
++ * are disabled in reverse order.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
++{
++ int rc;
++
++ if (hpriv->target_pwr) {
++ rc = regulator_enable(hpriv->target_pwr);
++ if (rc)
++ return rc;
++ }
++
++ rc = ahci_platform_enable_clks(hpriv);
++ if (rc)
++ goto disable_regulator;
++
++ if (hpriv->phy) {
++ rc = phy_init(hpriv->phy);
++ if (rc)
++ goto disable_clks;
++
++ rc = phy_power_on(hpriv->phy);
++ if (rc) {
++ phy_exit(hpriv->phy);
++ goto disable_clks;
++ }
++ }
++
++ return 0;
++
++disable_clks:
++ ahci_platform_disable_clks(hpriv);
++
++disable_regulator:
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
++
++/**
++ * ahci_platform_disable_resources - Disable platform resources
++ * @hpriv: host private area to store config values
++ *
++ * This function disables all ahci_platform managed resources in the
++ * following order:
++ * 1) Phy
++ * 2) Clocks (through ahci_platform_disable_clks)
++ * 3) Regulator
++ */
++void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
++{
++ if (hpriv->phy) {
++ phy_power_off(hpriv->phy);
++ phy_exit(hpriv->phy);
++ }
++
++ ahci_platform_disable_clks(hpriv);
++
++ if (hpriv->target_pwr)
++ regulator_disable(hpriv->target_pwr);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
++
++static void ahci_platform_put_resources(struct device *dev, void *res)
++{
++ struct ahci_host_priv *hpriv = res;
++ int c;
++
++ if (hpriv->got_runtime_pm) {
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++ }
++
++ for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
++ clk_put(hpriv->clks[c]);
++}
++
++/**
++ * ahci_platform_get_resources - Get platform resources
++ * @pdev: platform device to get resources for
++ *
++ * This function allocates an ahci_host_priv struct, and gets the following
++ * resources, storing a reference to them inside the returned struct:
++ *
++ * 1) mmio registers (IORESOURCE_MEM 0, mandatory)
++ * 2) regulator for controlling the target's power (optional)
++ * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the dev's devicetree node,
++ * or for non devicetree enabled platforms a single clock
++ * 4) phy (optional)
++ *
++ * RETURNS:
++ * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
++ */
++struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct ahci_host_priv *hpriv;
++ struct clk *clk;
++ int i, rc = -ENOMEM;
++
++ if (!devres_open_group(dev, NULL, GFP_KERNEL))
++ return ERR_PTR(-ENOMEM);
++
++ hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
++ GFP_KERNEL);
++ if (!hpriv)
++ goto err_out;
++
++ devres_add(dev, hpriv);
++
++ hpriv->mmio = devm_ioremap_resource(dev,
++ platform_get_resource(pdev, IORESOURCE_MEM, 0));
++ if (IS_ERR(hpriv->mmio)) {
++ dev_err(dev, "no mmio space\n");
++ rc = PTR_ERR(hpriv->mmio);
++ goto err_out;
++ }
++
++ hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
++ if (IS_ERR(hpriv->target_pwr)) {
++ rc = PTR_ERR(hpriv->target_pwr);
++ if (rc == -EPROBE_DEFER)
++ goto err_out;
++ hpriv->target_pwr = NULL;
++ }
++
++ for (i = 0; i < AHCI_MAX_CLKS; i++) {
++ /*
++ * For now we must use clk_get(dev, NULL) for the first clock,
++ * because some platforms (da850, spear13xx) are not yet
++ * converted to use devicetree for clocks. For new platforms
++ * this is equivalent to of_clk_get(dev->of_node, 0).
++ */
++ if (i == 0)
++ clk = clk_get(dev, NULL);
++ else
++ clk = of_clk_get(dev->of_node, i);
++
++ if (IS_ERR(clk)) {
++ rc = PTR_ERR(clk);
++ if (rc == -EPROBE_DEFER)
++ goto err_out;
++ break;
++ }
++ hpriv->clks[i] = clk;
++ }
++
++ hpriv->phy = devm_phy_get(dev, "sata-phy");
++ if (IS_ERR(hpriv->phy)) {
++ rc = PTR_ERR(hpriv->phy);
++ switch (rc) {
++ case -ENODEV:
++ case -ENOSYS:
++ /* continue normally */
++ hpriv->phy = NULL;
++ break;
++
++ case -EPROBE_DEFER:
++ goto err_out;
++
++ default:
++ dev_err(dev, "couldn't get sata-phy\n");
++ goto err_out;
++ }
++ }
++
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
++ hpriv->got_runtime_pm = true;
++
++ devres_remove_group(dev, NULL);
++ return hpriv;
++
++err_out:
++ devres_release_group(dev, NULL);
++ return ERR_PTR(rc);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
++
++/**
++ * ahci_platform_init_host - Bring up an ahci-platform host
++ * @pdev: platform device pointer for the host
++ * @hpriv: ahci-host private data for the host
++ * @pi_template: template for the ata_port_info to use
++ * @host_flags: ahci host flags used in ahci_host_priv
++ * @force_port_map: param passed to ahci_save_initial_config
++ * @mask_port_map: param passed to ahci_save_initial_config
++ *
++ * This function does all the usual steps needed to bring up an
++ * ahci-platform host; note that any necessary resources (i.e. clks, phy, etc.)
++ * must be initialized / enabled before calling this.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_init_host(struct platform_device *pdev,
++ struct ahci_host_priv *hpriv,
++ const struct ata_port_info *pi_template,
++ unsigned long host_flags,
++ unsigned int force_port_map,
++ unsigned int mask_port_map)
++{
++ struct device *dev = &pdev->dev;
++ struct ata_port_info pi = *pi_template;
++ const struct ata_port_info *ppi[] = { &pi, NULL };
++ struct ata_host *host;
++ int i, irq, n_ports, rc;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq <= 0) {
++ dev_err(dev, "no irq\n");
++ return -EINVAL;
++ }
++
++ /* prepare host */
++ pi.private_data = (void *)host_flags;
++ hpriv->flags |= host_flags;
++
++ ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
++
++ if (hpriv->cap & HOST_CAP_NCQ)
++ pi.flags |= ATA_FLAG_NCQ;
++
++ if (hpriv->cap & HOST_CAP_PMP)
++ pi.flags |= ATA_FLAG_PMP;
++
++ ahci_set_em_messages(hpriv, &pi);
++
++ /* CAP.NP sometimes indicate the index of the last enabled
++ * port, at other times, that of the last possible port, so
++ * determining the maximum port number requires looking at
++ * both CAP.NP and port_map.
++ */
++ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
++
++ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
++ if (!host)
++ return -ENOMEM;
++
++ host->private_data = hpriv;
++
++ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
++ host->flags |= ATA_HOST_PARALLEL_SCAN;
++ else
++ dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
++
++ if (pi.flags & ATA_FLAG_EM)
++ ahci_reset_em(host);
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ata_port *ap = host->ports[i];
++
++ ata_port_desc(ap, "mmio %pR",
++ platform_get_resource(pdev, IORESOURCE_MEM, 0));
++ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
++
++ /* set enclosure management message type */
++ if (ap->flags & ATA_FLAG_EM)
++ ap->em_message_type = hpriv->em_msg_type;
++
++ /* disabled/not-implemented port */
++ if (!(hpriv->port_map & (1 << i)))
++ ap->ops = &ata_dummy_port_ops;
++ }
++
++ rc = ahci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ ahci_init_controller(host);
++ ahci_print_info(host, "platform");
++
++ return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
++ &ahci_platform_sht);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_init_host);
++
++static void ahci_host_stop(struct ata_host *host)
++{
++ struct device *dev = host->dev;
++ struct ahci_platform_data *pdata = dev_get_platdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++
++ if (pdata && pdata->exit)
++ pdata->exit(dev);
++
++ ahci_platform_disable_resources(hpriv);
++}
++
++#ifdef CONFIG_PM_SLEEP
++/**
++ * ahci_platform_suspend_host - Suspend an ahci-platform host
++ * @dev: device pointer for the host
++ *
++ * This function does all the usual steps needed to suspend an
++ * ahci-platform host; note that any necessary resources (i.e. clks, phy, etc.)
++ * must be disabled after calling this.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_suspend_host(struct device *dev)
++{
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ void __iomem *mmio = hpriv->mmio;
++ u32 ctl;
++
++ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
++ dev_err(dev, "firmware update required for suspend/resume\n");
++ return -EIO;
++ }
++
++ /*
++ * AHCI spec rev1.1 section 8.3.3:
++ * Software must disable interrupts prior to requesting a
++ * transition of the HBA to D3 state.
++ */
++ ctl = readl(mmio + HOST_CTL);
++ ctl &= ~HOST_IRQ_EN;
++ writel(ctl, mmio + HOST_CTL);
++ readl(mmio + HOST_CTL); /* flush */
++
++ return ata_host_suspend(host, PMSG_SUSPEND);
++}
++EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
++
++/**
++ * ahci_platform_resume_host - Resume an ahci-platform host
++ * @dev: device pointer for the host
++ *
++ * This function does all the usual steps needed to resume an ahci-platform
++ * host; note that any necessary resources (i.e. clks, phy, etc.) must be
++ * initialized / enabled before calling this.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_resume_host(struct device *dev)
++{
++ struct ata_host *host = dev_get_drvdata(dev);
++ int rc;
++
++ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
++ rc = ahci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ ahci_init_controller(host);
++ }
++
++ ata_host_resume(host);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
++
++/**
++ * ahci_platform_suspend - Suspend an ahci-platform device
++ * @dev: the platform device to suspend
++ *
++ * This function suspends the host associated with the device, followed by
++ * disabling all the resources of the device.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_suspend(struct device *dev)
++{
++ struct ahci_platform_data *pdata = dev_get_platdata(dev);
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int rc;
++
++ rc = ahci_platform_suspend_host(dev);
++ if (rc)
++ return rc;
++
++ if (pdata && pdata->suspend) {
++ rc = pdata->suspend(dev);
++ if (rc)
++ goto resume_host;
++ }
++
++ ahci_platform_disable_resources(hpriv);
++
++ return 0;
++
++resume_host:
++ ahci_platform_resume_host(dev);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_suspend);
++
++/**
++ * ahci_platform_resume - Resume an ahci-platform device
++ * @dev: the platform device to resume
++ *
++ * This function enables all the resources of the device followed by
++ * resuming the host associated with the device.
++ *
++ * RETURNS:
++ * 0 on success otherwise a negative error code
++ */
++int ahci_platform_resume(struct device *dev)
++{
++ struct ahci_platform_data *pdata = dev_get_platdata(dev);
++ struct ata_host *host = dev_get_drvdata(dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int rc;
++
++ rc = ahci_platform_enable_resources(hpriv);
++ if (rc)
++ return rc;
++
++ if (pdata && pdata->resume) {
++ rc = pdata->resume(dev);
++ if (rc)
++ goto disable_resources;
++ }
++
++ rc = ahci_platform_resume_host(dev);
++ if (rc)
++ goto disable_resources;
++
++ /* We resumed so update PM runtime state */
++ pm_runtime_disable(dev);
++ pm_runtime_set_active(dev);
++ pm_runtime_enable(dev);
++
++ return 0;
++
++disable_resources:
++ ahci_platform_disable_resources(hpriv);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(ahci_platform_resume);
++#endif
++
++MODULE_DESCRIPTION("AHCI SATA platform library");
++MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/ata/libata-core.c linux-3.14.40/drivers/ata/libata-core.c
+--- linux-3.14.40.orig/drivers/ata/libata-core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/libata-core.c 2015-05-01 14:57:58.703427001 -0500
+@@ -1524,7 +1524,7 @@
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+- * @dma_dir: Data tranfer direction of the command
++ * @dma_dir: Data transfer direction of the command
+ * @sgl: sg list for the data buffer of the command
+ * @n_elem: Number of sg entries
+ * @timeout: Timeout in msecs (0 for default)
+@@ -1712,7 +1712,7 @@
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+- * @dma_dir: Data tranfer direction of the command
++ * @dma_dir: Data transfer direction of the command
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
+ * @timeout: Timeout in msecs (0 for default)
+diff -Nur linux-3.14.40.orig/drivers/ata/Makefile linux-3.14.40/drivers/ata/Makefile
+--- linux-3.14.40.orig/drivers/ata/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/Makefile 2015-05-01 14:57:58.703427001 -0500
+@@ -4,13 +4,13 @@
+ # non-SFF interface
+ obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+ obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
+-obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
++obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
+ obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+ obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
+ obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
+-obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
++obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+
+ # SFF w/ custom DMA
+ obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_acpi.c linux-3.14.40/drivers/ata/pata_acpi.c
+--- linux-3.14.40.orig/drivers/ata/pata_acpi.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_acpi.c 2015-05-01 14:57:58.703427001 -0500
+@@ -7,7 +7,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_amd.c linux-3.14.40/drivers/ata/pata_amd.c
+--- linux-3.14.40.orig/drivers/ata/pata_amd.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_amd.c 2015-05-01 14:57:58.703427001 -0500
+@@ -17,7 +17,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_artop.c linux-3.14.40/drivers/ata/pata_artop.c
+--- linux-3.14.40.orig/drivers/ata/pata_artop.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_artop.c 2015-05-01 14:57:58.703427001 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_at91.c linux-3.14.40/drivers/ata/pata_at91.c
+--- linux-3.14.40.orig/drivers/ata/pata_at91.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_at91.c 2015-05-01 14:57:58.703427001 -0500
+@@ -18,7 +18,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/gfp.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_atiixp.c linux-3.14.40/drivers/ata/pata_atiixp.c
+--- linux-3.14.40.orig/drivers/ata/pata_atiixp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_atiixp.c 2015-05-01 14:57:58.707427001 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_atp867x.c linux-3.14.40/drivers/ata/pata_atp867x.c
+--- linux-3.14.40.orig/drivers/ata/pata_atp867x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_atp867x.c 2015-05-01 14:57:58.707427001 -0500
+@@ -29,7 +29,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cmd640.c linux-3.14.40/drivers/ata/pata_cmd640.c
+--- linux-3.14.40.orig/drivers/ata/pata_cmd640.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cmd640.c 2015-05-01 14:57:58.707427001 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/gfp.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cmd64x.c linux-3.14.40/drivers/ata/pata_cmd64x.c
+--- linux-3.14.40.orig/drivers/ata/pata_cmd64x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cmd64x.c 2015-05-01 14:57:58.707427001 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cs5520.c linux-3.14.40/drivers/ata/pata_cs5520.c
+--- linux-3.14.40.orig/drivers/ata/pata_cs5520.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cs5520.c 2015-05-01 14:57:58.707427001 -0500
+@@ -34,7 +34,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cs5530.c linux-3.14.40/drivers/ata/pata_cs5530.c
+--- linux-3.14.40.orig/drivers/ata/pata_cs5530.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cs5530.c 2015-05-01 14:57:58.707427001 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cs5535.c linux-3.14.40/drivers/ata/pata_cs5535.c
+--- linux-3.14.40.orig/drivers/ata/pata_cs5535.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cs5535.c 2015-05-01 14:57:58.707427001 -0500
+@@ -31,7 +31,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cs5536.c linux-3.14.40/drivers/ata/pata_cs5536.c
+--- linux-3.14.40.orig/drivers/ata/pata_cs5536.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cs5536.c 2015-05-01 14:57:58.707427001 -0500
+@@ -33,7 +33,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/libata.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_cypress.c linux-3.14.40/drivers/ata/pata_cypress.c
+--- linux-3.14.40.orig/drivers/ata/pata_cypress.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_cypress.c 2015-05-01 14:57:58.707427001 -0500
+@@ -11,7 +11,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_efar.c linux-3.14.40/drivers/ata/pata_efar.c
+--- linux-3.14.40.orig/drivers/ata/pata_efar.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_efar.c 2015-05-01 14:57:58.707427001 -0500
+@@ -14,7 +14,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_ep93xx.c linux-3.14.40/drivers/ata/pata_ep93xx.c
+--- linux-3.14.40.orig/drivers/ata/pata_ep93xx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_ep93xx.c 2015-05-01 14:57:58.711427001 -0500
+@@ -34,7 +34,6 @@
+ #include <linux/err.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <scsi/scsi_host.h>
+ #include <linux/ata.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_hpt366.c linux-3.14.40/drivers/ata/pata_hpt366.c
+--- linux-3.14.40.orig/drivers/ata/pata_hpt366.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_hpt366.c 2015-05-01 14:57:58.711427001 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_hpt37x.c linux-3.14.40/drivers/ata/pata_hpt37x.c
+--- linux-3.14.40.orig/drivers/ata/pata_hpt37x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_hpt37x.c 2015-05-01 14:57:58.735427001 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_hpt3x2n.c linux-3.14.40/drivers/ata/pata_hpt3x2n.c
+--- linux-3.14.40.orig/drivers/ata/pata_hpt3x2n.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_hpt3x2n.c 2015-05-01 14:57:58.739427001 -0500
+@@ -20,7 +20,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_hpt3x3.c linux-3.14.40/drivers/ata/pata_hpt3x3.c
+--- linux-3.14.40.orig/drivers/ata/pata_hpt3x3.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_hpt3x3.c 2015-05-01 14:57:58.739427001 -0500
+@@ -16,7 +16,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_imx.c linux-3.14.40/drivers/ata/pata_imx.c
+--- linux-3.14.40.orig/drivers/ata/pata_imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_imx.c 2015-05-01 14:57:58.739427001 -0500
+@@ -15,7 +15,6 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <scsi/scsi_host.h>
+ #include <linux/ata.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_it8213.c linux-3.14.40/drivers/ata/pata_it8213.c
+--- linux-3.14.40.orig/drivers/ata/pata_it8213.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_it8213.c 2015-05-01 14:57:58.767427001 -0500
+@@ -10,7 +10,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_it821x.c linux-3.14.40/drivers/ata/pata_it821x.c
+--- linux-3.14.40.orig/drivers/ata/pata_it821x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_it821x.c 2015-05-01 14:57:58.767427001 -0500
+@@ -72,7 +72,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_jmicron.c linux-3.14.40/drivers/ata/pata_jmicron.c
+--- linux-3.14.40.orig/drivers/ata/pata_jmicron.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_jmicron.c 2015-05-01 14:57:58.767427001 -0500
+@@ -10,7 +10,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_marvell.c linux-3.14.40/drivers/ata/pata_marvell.c
+--- linux-3.14.40.orig/drivers/ata/pata_marvell.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_marvell.c 2015-05-01 14:57:58.767427001 -0500
+@@ -11,7 +11,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_mpiix.c linux-3.14.40/drivers/ata/pata_mpiix.c
+--- linux-3.14.40.orig/drivers/ata/pata_mpiix.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_mpiix.c 2015-05-01 14:57:58.767427001 -0500
+@@ -28,7 +28,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_netcell.c linux-3.14.40/drivers/ata/pata_netcell.c
+--- linux-3.14.40.orig/drivers/ata/pata_netcell.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_netcell.c 2015-05-01 14:57:58.767427001 -0500
+@@ -7,7 +7,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_ninja32.c linux-3.14.40/drivers/ata/pata_ninja32.c
+--- linux-3.14.40.orig/drivers/ata/pata_ninja32.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_ninja32.c 2015-05-01 14:57:58.767427001 -0500
+@@ -37,7 +37,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_ns87410.c linux-3.14.40/drivers/ata/pata_ns87410.c
+--- linux-3.14.40.orig/drivers/ata/pata_ns87410.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_ns87410.c 2015-05-01 14:57:58.771427001 -0500
+@@ -20,7 +20,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_ns87415.c linux-3.14.40/drivers/ata/pata_ns87415.c
+--- linux-3.14.40.orig/drivers/ata/pata_ns87415.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_ns87415.c 2015-05-01 14:57:58.771427001 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_oldpiix.c linux-3.14.40/drivers/ata/pata_oldpiix.c
+--- linux-3.14.40.orig/drivers/ata/pata_oldpiix.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_oldpiix.c 2015-05-01 14:57:58.771427001 -0500
+@@ -16,7 +16,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_opti.c linux-3.14.40/drivers/ata/pata_opti.c
+--- linux-3.14.40.orig/drivers/ata/pata_opti.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_opti.c 2015-05-01 14:57:58.771427001 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_optidma.c linux-3.14.40/drivers/ata/pata_optidma.c
+--- linux-3.14.40.orig/drivers/ata/pata_optidma.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_optidma.c 2015-05-01 14:57:58.771427001 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_pcmcia.c linux-3.14.40/drivers/ata/pata_pcmcia.c
+--- linux-3.14.40.orig/drivers/ata/pata_pcmcia.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_pcmcia.c 2015-05-01 14:57:58.771427001 -0500
+@@ -26,7 +26,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_pdc2027x.c linux-3.14.40/drivers/ata/pata_pdc2027x.c
+--- linux-3.14.40.orig/drivers/ata/pata_pdc2027x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_pdc2027x.c 2015-05-01 14:57:58.771427001 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_pdc202xx_old.c linux-3.14.40/drivers/ata/pata_pdc202xx_old.c
+--- linux-3.14.40.orig/drivers/ata/pata_pdc202xx_old.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_pdc202xx_old.c 2015-05-01 14:57:58.771427001 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_piccolo.c linux-3.14.40/drivers/ata/pata_piccolo.c
+--- linux-3.14.40.orig/drivers/ata/pata_piccolo.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_piccolo.c 2015-05-01 14:57:58.771427001 -0500
+@@ -18,7 +18,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_platform.c linux-3.14.40/drivers/ata/pata_platform.c
+--- linux-3.14.40.orig/drivers/ata/pata_platform.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_platform.c 2015-05-01 14:57:58.771427001 -0500
+@@ -13,7 +13,6 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <scsi/scsi_host.h>
+ #include <linux/ata.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_pxa.c linux-3.14.40/drivers/ata/pata_pxa.c
+--- linux-3.14.40.orig/drivers/ata/pata_pxa.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_pxa.c 2015-05-01 14:57:58.771427001 -0500
+@@ -20,7 +20,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/ata.h>
+ #include <linux/libata.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_radisys.c linux-3.14.40/drivers/ata/pata_radisys.c
+--- linux-3.14.40.orig/drivers/ata/pata_radisys.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_radisys.c 2015-05-01 14:57:58.775427001 -0500
+@@ -15,7 +15,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_rdc.c linux-3.14.40/drivers/ata/pata_rdc.c
+--- linux-3.14.40.orig/drivers/ata/pata_rdc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_rdc.c 2015-05-01 14:57:58.775427001 -0500
+@@ -24,7 +24,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_rz1000.c linux-3.14.40/drivers/ata/pata_rz1000.c
+--- linux-3.14.40.orig/drivers/ata/pata_rz1000.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_rz1000.c 2015-05-01 14:57:58.775427001 -0500
+@@ -14,7 +14,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_sc1200.c linux-3.14.40/drivers/ata/pata_sc1200.c
+--- linux-3.14.40.orig/drivers/ata/pata_sc1200.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_sc1200.c 2015-05-01 14:57:58.775427001 -0500
+@@ -32,7 +32,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_scc.c linux-3.14.40/drivers/ata/pata_scc.c
+--- linux-3.14.40.orig/drivers/ata/pata_scc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_scc.c 2015-05-01 14:57:58.775427001 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_sch.c linux-3.14.40/drivers/ata/pata_sch.c
+--- linux-3.14.40.orig/drivers/ata/pata_sch.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_sch.c 2015-05-01 14:57:58.775427001 -0500
+@@ -27,7 +27,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_serverworks.c linux-3.14.40/drivers/ata/pata_serverworks.c
+--- linux-3.14.40.orig/drivers/ata/pata_serverworks.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_serverworks.c 2015-05-01 14:57:58.775427001 -0500
+@@ -34,7 +34,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_sil680.c linux-3.14.40/drivers/ata/pata_sil680.c
+--- linux-3.14.40.orig/drivers/ata/pata_sil680.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_sil680.c 2015-05-01 14:57:58.775427001 -0500
+@@ -25,7 +25,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_sis.c linux-3.14.40/drivers/ata/pata_sis.c
+--- linux-3.14.40.orig/drivers/ata/pata_sis.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_sis.c 2015-05-01 14:57:58.775427001 -0500
+@@ -26,7 +26,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_sl82c105.c linux-3.14.40/drivers/ata/pata_sl82c105.c
+--- linux-3.14.40.orig/drivers/ata/pata_sl82c105.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_sl82c105.c 2015-05-01 14:57:58.775427001 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_triflex.c linux-3.14.40/drivers/ata/pata_triflex.c
+--- linux-3.14.40.orig/drivers/ata/pata_triflex.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_triflex.c 2015-05-01 14:57:58.779427001 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <scsi/scsi_host.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pata_via.c linux-3.14.40/drivers/ata/pata_via.c
+--- linux-3.14.40.orig/drivers/ata/pata_via.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pata_via.c 2015-05-01 14:57:58.779427001 -0500
+@@ -55,7 +55,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/gfp.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/pdc_adma.c linux-3.14.40/drivers/ata/pdc_adma.c
+--- linux-3.14.40.orig/drivers/ata/pdc_adma.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/pdc_adma.c 2015-05-01 14:57:58.779427001 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_dwc_460ex.c linux-3.14.40/drivers/ata/sata_dwc_460ex.c
+--- linux-3.14.40.orig/drivers/ata/sata_dwc_460ex.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_dwc_460ex.c 2015-05-01 14:57:58.779427001 -0500
+@@ -29,7 +29,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/device.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_highbank.c linux-3.14.40/drivers/ata/sata_highbank.c
+--- linux-3.14.40.orig/drivers/ata/sata_highbank.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_highbank.c 2015-05-01 14:57:58.779427001 -0500
+@@ -19,7 +19,6 @@
+ #include <linux/kernel.h>
+ #include <linux/gfp.h>
+ #include <linux/module.h>
+-#include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+@@ -403,6 +402,7 @@
+ static const unsigned long timing[] = { 5, 100, 500};
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+@@ -431,7 +431,7 @@
+ break;
+ } while (!online && retry--);
+
+- ahci_start_engine(ap);
++ hpriv->start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_nv.c linux-3.14.40/drivers/ata/sata_nv.c
+--- linux-3.14.40.orig/drivers/ata/sata_nv.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_nv.c 2015-05-01 14:57:58.779427001 -0500
+@@ -40,7 +40,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_promise.c linux-3.14.40/drivers/ata/sata_promise.c
+--- linux-3.14.40.orig/drivers/ata/sata_promise.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_promise.c 2015-05-01 14:57:58.783427001 -0500
+@@ -35,7 +35,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_qstor.c linux-3.14.40/drivers/ata/sata_qstor.c
+--- linux-3.14.40.orig/drivers/ata/sata_qstor.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_qstor.c 2015-05-01 14:57:58.783427001 -0500
+@@ -31,7 +31,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_sil.c linux-3.14.40/drivers/ata/sata_sil.c
+--- linux-3.14.40.orig/drivers/ata/sata_sil.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_sil.c 2015-05-01 14:57:58.783427001 -0500
+@@ -37,7 +37,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_sis.c linux-3.14.40/drivers/ata/sata_sis.c
+--- linux-3.14.40.orig/drivers/ata/sata_sis.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_sis.c 2015-05-01 14:57:58.783427001 -0500
+@@ -33,7 +33,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_svw.c linux-3.14.40/drivers/ata/sata_svw.c
+--- linux-3.14.40.orig/drivers/ata/sata_svw.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_svw.c 2015-05-01 14:57:58.783427001 -0500
+@@ -39,7 +39,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_sx4.c linux-3.14.40/drivers/ata/sata_sx4.c
+--- linux-3.14.40.orig/drivers/ata/sata_sx4.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_sx4.c 2015-05-01 14:57:58.783427001 -0500
+@@ -82,7 +82,6 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_uli.c linux-3.14.40/drivers/ata/sata_uli.c
+--- linux-3.14.40.orig/drivers/ata/sata_uli.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_uli.c 2015-05-01 14:57:58.783427001 -0500
+@@ -28,7 +28,6 @@
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_via.c linux-3.14.40/drivers/ata/sata_via.c
+--- linux-3.14.40.orig/drivers/ata/sata_via.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_via.c 2015-05-01 14:57:58.783427001 -0500
+@@ -36,7 +36,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+diff -Nur linux-3.14.40.orig/drivers/ata/sata_vsc.c linux-3.14.40/drivers/ata/sata_vsc.c
+--- linux-3.14.40.orig/drivers/ata/sata_vsc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ata/sata_vsc.c 2015-05-01 14:57:58.787427001 -0500
+@@ -37,7 +37,6 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+-#include <linux/init.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+diff -Nur linux-3.14.40.orig/drivers/base/bus.c linux-3.14.40/drivers/base/bus.c
+--- linux-3.14.40.orig/drivers/base/bus.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/base/bus.c 2015-05-01 14:57:58.803427001 -0500
+@@ -1220,7 +1220,7 @@
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+- * number appended. The registered devices are not explicitely named;
++ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new, it exists for compatibility
+diff -Nur linux-3.14.40.orig/drivers/base/cpu.c linux-3.14.40/drivers/base/cpu.c
+--- linux-3.14.40.orig/drivers/base/cpu.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/base/cpu.c 2015-05-01 14:57:58.811427001 -0500
+@@ -15,6 +15,7 @@
+ #include <linux/percpu.h>
+ #include <linux/acpi.h>
+ #include <linux/of.h>
++#include <linux/cpufeature.h>
+
+ #include "base.h"
+
+@@ -286,6 +287,45 @@
+ */
+ }
+
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
++#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
++static ssize_t print_cpu_modalias(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ ssize_t n;
++ u32 i;
++
++ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
++ CPU_FEATURE_TYPEVAL);
++
++ for (i = 0; i < MAX_CPU_FEATURES; i++)
++ if (cpu_have_feature(i)) {
++ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
++ WARN(1, "CPU features overflow page\n");
++ break;
++ }
++ n += sprintf(&buf[n], ",%04X", i);
++ }
++ buf[n++] = '\n';
++ return n;
++}
++#else
++#define print_cpu_modalias arch_print_cpu_modalias
++#endif
++
++static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ if (buf) {
++ print_cpu_modalias(NULL, NULL, buf);
++ add_uevent_var(env, "MODALIAS=%s", buf);
++ kfree(buf);
++ }
++ return 0;
++}
++#endif
++
+ /*
+ * register_cpu - Setup a sysfs device for a CPU.
+ * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
+@@ -306,8 +346,8 @@
+ cpu->dev.offline_disabled = !cpu->hotpluggable;
+ cpu->dev.offline = !cpu_online(num);
+ cpu->dev.of_node = of_get_cpu_node(num, NULL);
+-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+- cpu->dev.bus->uevent = arch_cpu_uevent;
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
++ cpu->dev.bus->uevent = cpu_uevent;
+ #endif
+ cpu->dev.groups = common_cpu_attr_groups;
+ if (cpu->hotpluggable)
+@@ -330,8 +370,8 @@
+ }
+ EXPORT_SYMBOL_GPL(get_cpu_device);
+
+-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
++static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
+ #endif
+
+ static struct attribute *cpu_root_attrs[] = {
+@@ -344,7 +384,7 @@
+ &cpu_attrs[2].attr.attr,
+ &dev_attr_kernel_max.attr,
+ &dev_attr_offline.attr,
+-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
++#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+ &dev_attr_modalias.attr,
+ #endif
+ NULL
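The cpu.c changes above generalize CPU autoprobe: an architecture that selects GENERIC_CPU_AUTOPROBE gets a modalias of the form cpu:type:<arch type>:feature:,0001,0002,... built from cpu_have_feature(), and the same string is attached to CPU uevents as MODALIAS=, so udev can autoload modules keyed on CPU features. A hypothetical module-side match table; the feature index 42 and the foo_ names are made up for illustration:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

/* udev would load this module when the CPU modalias advertises ",002A" */
static const struct cpu_feature foo_cpu_features[] = {
        { .feature = 42 },      /* hypothetical architecture-specific feature index */
        { },
};
MODULE_DEVICE_TABLE(cpu, foo_cpu_features);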
+diff -Nur linux-3.14.40.orig/drivers/base/dma-buf.c linux-3.14.40/drivers/base/dma-buf.c
+--- linux-3.14.40.orig/drivers/base/dma-buf.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/base/dma-buf.c 2015-05-01 14:57:58.811427001 -0500
+@@ -251,9 +251,8 @@
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+- * Returns struct dma_buf_attachment * for this attachment; may return negative
+- * error codes.
+- *
++ * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
++ * error.
+ */
+ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+@@ -319,9 +318,8 @@
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+- * Returns sg_table containing the scatterlist to be returned; may return NULL
+- * or ERR_PTR.
+- *
++ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
++ * on error.
+ */
+ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+@@ -334,6 +332,8 @@
+ return ERR_PTR(-EINVAL);
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
++ if (!sg_table)
++ sg_table = ERR_PTR(-ENOMEM);
+
+ return sg_table;
+ }
+@@ -544,6 +544,8 @@
+ * These calls are optional in drivers. The intended use for them
+ * is for mapping objects linear in kernel space for high use objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
++ *
++ * Returns NULL on error.
+ */
+ void *dma_buf_vmap(struct dma_buf *dmabuf)
+ {
+@@ -566,7 +568,9 @@
+ BUG_ON(dmabuf->vmap_ptr);
+
+ ptr = dmabuf->ops->vmap(dmabuf);
+- if (IS_ERR_OR_NULL(ptr))
++ if (WARN_ON_ONCE(IS_ERR(ptr)))
++ ptr = NULL;
++ if (!ptr)
+ goto out_unlock;
+
+ dmabuf->vmap_ptr = ptr;
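The dma-buf hunks above tighten the return conventions: an exporter's map_dma_buf that returns NULL is now converted to ERR_PTR(-ENOMEM) before the importer sees it, and dma_buf_vmap() documents NULL (never an ERR_PTR) as its failure value, warning if an exporter leaks an ERR_PTR. A short importer-side sketch under those rules; foo_import_buffer and the surrounding driver are hypothetical:

#include <linux/dma-buf.h>
#include <linux/err.h>

static int foo_import_buffer(struct dma_buf *dmabuf,
                             struct dma_buf_attachment *attach)
{
        struct sg_table *sgt;
        void *vaddr;

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))        /* a NULL from the exporter now arrives as ERR_PTR(-ENOMEM) */
                return PTR_ERR(sgt);

        vaddr = dma_buf_vmap(dmabuf);   /* optional mapping; NULL, not ERR_PTR, on failure */
        if (!vaddr) {
                dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
                return -ENOMEM;
        }

        /* ... use sgt for DMA and vaddr for CPU access ... */

        dma_buf_vunmap(dmabuf, vaddr);
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        return 0;
}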
+diff -Nur linux-3.14.40.orig/drivers/base/dma-contiguous.c linux-3.14.40/drivers/base/dma-contiguous.c
+--- linux-3.14.40.orig/drivers/base/dma-contiguous.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/base/dma-contiguous.c 2015-05-01 14:57:58.815427001 -0500
+@@ -24,22 +24,9 @@
+
+ #include <linux/memblock.h>
+ #include <linux/err.h>
+-#include <linux/mm.h>
+-#include <linux/mutex.h>
+-#include <linux/page-isolation.h>
+ #include <linux/sizes.h>
+-#include <linux/slab.h>
+-#include <linux/swap.h>
+-#include <linux/mm_types.h>
+ #include <linux/dma-contiguous.h>
+-
+-struct cma {
+- unsigned long base_pfn;
+- unsigned long count;
+- unsigned long *bitmap;
+-};
+-
+-struct cma *dma_contiguous_default_area;
++#include <linux/cma.h>
+
+ #ifdef CONFIG_CMA_SIZE_MBYTES
+ #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+@@ -47,6 +34,8 @@
+ #define CMA_SIZE_MBYTES 0
+ #endif
+
++struct cma *dma_contiguous_default_area;
++
+ /*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+@@ -59,11 +48,22 @@
+ */
+ static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+ static phys_addr_t size_cmdline = -1;
++static phys_addr_t base_cmdline;
++static phys_addr_t limit_cmdline;
+
+ static int __init early_cma(char *p)
+ {
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
++ if (*p != '@')
++ return 0;
++ base_cmdline = memparse(p + 1, &p);
++ if (*p != '-') {
++ limit_cmdline = base_cmdline + size_cmdline;
++ return 0;
++ }
++ limit_cmdline = memparse(p + 1, &p);
++
+ return 0;
+ }
+ early_param("cma", early_cma);
+@@ -107,11 +107,18 @@
+ void __init dma_contiguous_reserve(phys_addr_t limit)
+ {
+ phys_addr_t selected_size = 0;
++ phys_addr_t selected_base = 0;
++ phys_addr_t selected_limit = limit;
++ bool fixed = false;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
++ selected_base = base_cmdline;
++ selected_limit = min_not_zero(limit_cmdline, limit);
++ if (base_cmdline + size_cmdline == limit_cmdline)
++ fixed = true;
+ } else {
+ #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+@@ -128,68 +135,12 @@
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ (unsigned long)selected_size / SZ_1M);
+
+- dma_contiguous_reserve_area(selected_size, 0, limit,
+- &dma_contiguous_default_area);
+- }
+-};
+-
+-static DEFINE_MUTEX(cma_mutex);
+-
+-static int __init cma_activate_area(struct cma *cma)
+-{
+- int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+- unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+- unsigned i = cma->count >> pageblock_order;
+- struct zone *zone;
+-
+- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+-
+- if (!cma->bitmap)
+- return -ENOMEM;
+-
+- WARN_ON_ONCE(!pfn_valid(pfn));
+- zone = page_zone(pfn_to_page(pfn));
+-
+- do {
+- unsigned j;
+- base_pfn = pfn;
+- for (j = pageblock_nr_pages; j; --j, pfn++) {
+- WARN_ON_ONCE(!pfn_valid(pfn));
+- /*
+- * alloc_contig_range requires the pfn range
+- * specified to be in the same zone. Make this
+- * simple by forcing the entire CMA resv range
+- * to be in the same zone.
+- */
+- if (page_zone(pfn_to_page(pfn)) != zone)
+- goto err;
+- }
+- init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+- } while (--i);
+-
+- return 0;
+-
+-err:
+- kfree(cma->bitmap);
+- return -EINVAL;
+-}
+-
+-static struct cma cma_areas[MAX_CMA_AREAS];
+-static unsigned cma_area_count;
+-
+-static int __init cma_init_reserved_areas(void)
+-{
+- int i;
+-
+- for (i = 0; i < cma_area_count; i++) {
+- int ret = cma_activate_area(&cma_areas[i]);
+- if (ret)
+- return ret;
++ dma_contiguous_reserve_area(selected_size, selected_base,
++ selected_limit,
++ &dma_contiguous_default_area,
++ fixed);
+ }
+-
+- return 0;
+ }
+-core_initcall(cma_init_reserved_areas);
+
+ /**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+@@ -197,78 +148,32 @@
+ * @base: Base address of the reserved area optional, use 0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
++ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
++ *
++ * If @fixed is true, reserve contiguous area at exactly @base. If false,
++ * reserve in range from @base to @limit.
+ */
+ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+- phys_addr_t limit, struct cma **res_cma)
++ phys_addr_t limit, struct cma **res_cma,
++ bool fixed)
+ {
+- struct cma *cma = &cma_areas[cma_area_count];
+- phys_addr_t alignment;
+- int ret = 0;
+-
+- pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+- (unsigned long)size, (unsigned long)base,
+- (unsigned long)limit);
+-
+- /* Sanity checks */
+- if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+- pr_err("Not enough slots for CMA reserved regions!\n");
+- return -ENOSPC;
+- }
+-
+- if (!size)
+- return -EINVAL;
+-
+- /* Sanitise input arguments */
+- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+- base = ALIGN(base, alignment);
+- size = ALIGN(size, alignment);
+- limit &= ~(alignment - 1);
+-
+- /* Reserve memory */
+- if (base) {
+- if (memblock_is_region_reserved(base, size) ||
+- memblock_reserve(base, size) < 0) {
+- ret = -EBUSY;
+- goto err;
+- }
+- } else {
+- /*
+- * Use __memblock_alloc_base() since
+- * memblock_alloc_base() panic()s.
+- */
+- phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+- if (!addr) {
+- ret = -ENOMEM;
+- goto err;
+- } else {
+- base = addr;
+- }
+- }
+-
+- /*
+- * Each reserved area must be initialised later, when more kernel
+- * subsystems (like slab allocator) are available.
+- */
+- cma->base_pfn = PFN_DOWN(base);
+- cma->count = size >> PAGE_SHIFT;
+- *res_cma = cma;
+- cma_area_count++;
++ int ret;
+
+- pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
+- (unsigned long)base);
++ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
++ if (ret)
++ return ret;
+
+ /* Architecture specific contiguous memory fixup. */
+- dma_contiguous_early_fixup(base, size);
++ dma_contiguous_early_fixup(cma_get_base(*res_cma),
++ cma_get_size(*res_cma));
++
+ return 0;
+-err:
+- pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+- return ret;
+ }
+
+ /**
+@@ -279,57 +184,16 @@
+ *
+ * This function allocates memory buffer for specified device. It uses
+ * device specific contiguous memory area if available or the default
+- * global one. Requires architecture specific get_dev_cma_area() helper
++ * global one. Requires architecture specific dev_get_cma_area() helper
+ * function.
+ */
+ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+ {
+- unsigned long mask, pfn, pageno, start = 0;
+- struct cma *cma = dev_get_cma_area(dev);
+- struct page *page = NULL;
+- int ret;
+-
+- if (!cma || !cma->count)
+- return NULL;
+-
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+- pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+- count, align);
+-
+- if (!count)
+- return NULL;
+-
+- mask = (1 << align) - 1;
+-
+- mutex_lock(&cma_mutex);
+-
+- for (;;) {
+- pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+- start, count, mask);
+- if (pageno >= cma->count)
+- break;
+-
+- pfn = cma->base_pfn + pageno;
+- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+- if (ret == 0) {
+- bitmap_set(cma->bitmap, pageno, count);
+- page = pfn_to_page(pfn);
+- break;
+- } else if (ret != -EBUSY) {
+- break;
+- }
+- pr_debug("%s(): memory range at %p is busy, retrying\n",
+- __func__, pfn_to_page(pfn));
+- /* try again with a bit different memory target */
+- start = pageno + mask + 1;
+- }
+-
+- mutex_unlock(&cma_mutex);
+- pr_debug("%s(): returned %p\n", __func__, page);
+- return page;
++ return cma_alloc(dev_get_cma_area(dev), count, align);
+ }
+
+ /**
+@@ -345,25 +209,5 @@
+ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+ {
+- struct cma *cma = dev_get_cma_area(dev);
+- unsigned long pfn;
+-
+- if (!cma || !pages)
+- return false;
+-
+- pr_debug("%s(page %p)\n", __func__, (void *)pages);
+-
+- pfn = page_to_pfn(pages);
+-
+- if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+- return false;
+-
+- VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+-
+- mutex_lock(&cma_mutex);
+- bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+- free_contig_range(pfn, count);
+- mutex_unlock(&cma_mutex);
+-
+- return true;
++ return cma_release(dev_get_cma_area(dev), pages, count);
+ }
+diff -Nur linux-3.14.40.orig/drivers/base/Kconfig linux-3.14.40/drivers/base/Kconfig
+--- linux-3.14.40.orig/drivers/base/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/base/Kconfig 2015-05-01 14:57:58.815427001 -0500
+@@ -185,6 +185,14 @@
+ bool
+ default n
+
++config HAVE_CPU_AUTOPROBE
++ def_bool ARCH_HAS_CPU_AUTOPROBE
++
++config GENERIC_CPU_AUTOPROBE
++ bool
++ depends on !ARCH_HAS_CPU_AUTOPROBE
++ select HAVE_CPU_AUTOPROBE
++
+ config SOC_BUS
+ bool
+
+@@ -266,16 +274,6 @@
+
+ If unsure, leave the default value "8".
+
+-config CMA_AREAS
+- int "Maximum count of the CMA device-private areas"
+- default 7
+- help
+- CMA allows to create CMA areas for particular devices. This parameter
+- sets the maximum number of such device private CMA areas in the
+- system.
+-
+- If unsure, leave the default value "7".
+-
+ endif
+
+ endmenu
+diff -Nur linux-3.14.40.orig/drivers/bus/arm-cci.c linux-3.14.40/drivers/bus/arm-cci.c
+--- linux-3.14.40.orig/drivers/bus/arm-cci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/bus/arm-cci.c 2015-05-01 14:57:58.815427001 -0500
+@@ -26,6 +26,7 @@
+
+ #include <asm/cacheflush.h>
+ #include <asm/irq_regs.h>
++#include <asm/psci.h>
+ #include <asm/pmu.h>
+ #include <asm/smp_plat.h>
+
+@@ -544,6 +545,7 @@
+
+ cci_pmu->plat_device = pdev;
+ cci_pmu->num_events = pmu_get_max_counters();
++ cpumask_setall(&cci_pmu->valid_cpus);
+
+ return armpmu_register(cci_pmu, -1);
+ }
+@@ -969,6 +971,11 @@
+ const char *match_str;
+ bool is_ace;
+
++ if (psci_probe() == 0) {
++ pr_debug("psci found. Aborting cci probe\n");
++ return -ENODEV;
++ }
++
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!np)
+ return -ENODEV;
+diff -Nur linux-3.14.40.orig/drivers/char/fsl_otp.c linux-3.14.40/drivers/char/fsl_otp.c
+--- linux-3.14.40.orig/drivers/char/fsl_otp.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/char/fsl_otp.c 2015-05-01 14:57:58.815427001 -0500
+@@ -0,0 +1,316 @@
++/*
++ * Freescale On-Chip OTP driver
++ *
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/kobject.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/sysfs.h>
++#include <linux/fsl_otp.h>
++
++#define HW_OCOTP_CTRL 0x00000000
++#define HW_OCOTP_CTRL_SET 0x00000004
++#define BP_OCOTP_CTRL_WR_UNLOCK 16
++#define BM_OCOTP_CTRL_WR_UNLOCK 0xFFFF0000
++#define BM_OCOTP_CTRL_RELOAD_SHADOWS 0x00000400
++#define BM_OCOTP_CTRL_ERROR 0x00000200
++#define BM_OCOTP_CTRL_BUSY 0x00000100
++#define BP_OCOTP_CTRL_ADDR 0
++#define BM_OCOTP_CTRL_ADDR 0x0000007F
++
++#define HW_OCOTP_TIMING 0x00000010
++#define BP_OCOTP_TIMING_STROBE_READ 16
++#define BM_OCOTP_TIMING_STROBE_READ 0x003F0000
++#define BP_OCOTP_TIMING_RELAX 12
++#define BM_OCOTP_TIMING_RELAX 0x0000F000
++#define BP_OCOTP_TIMING_STROBE_PROG 0
++#define BM_OCOTP_TIMING_STROBE_PROG 0x00000FFF
++
++#define HW_OCOTP_DATA 0x00000020
++
++#define HW_OCOTP_CUST_N(n) (0x00000400 + (n) * 0x10)
++#define BF(value, field) (((value) << BP_##field) & BM_##field)
++
++#define DEF_RELAX 20 /* > 16.5ns */
++
++#define BANK(a, b, c, d, e, f, g, h) { \
++ "HW_OCOTP_"#a, "HW_OCOTP_"#b, "HW_OCOTP_"#c, "HW_OCOTP_"#d, \
++ "HW_OCOTP_"#e, "HW_OCOTP_"#f, "HW_OCOTP_"#g, "HW_OCOTP_"#h, \
++}
++
++static const char *imx6q_otp_desc[16][8] = {
++ BANK(LOCK, CFG0, CFG1, CFG2, CFG3, CFG4, CFG5, CFG6),
++ BANK(MEM0, MEM1, MEM2, MEM3, MEM4, ANA0, ANA1, ANA2),
++ BANK(OTPMK0, OTPMK1, OTPMK2, OTPMK3, OTPMK4, OTPMK5, OTPMK6, OTPMK7),
++ BANK(SRK0, SRK1, SRK2, SRK3, SRK4, SRK5, SRK6, SRK7),
++ BANK(RESP0, HSJC_RESP1, MAC0, MAC1, HDCP_KSV0, HDCP_KSV1, GP1, GP2),
++ BANK(DTCP_KEY0, DTCP_KEY1, DTCP_KEY2, DTCP_KEY3, DTCP_KEY4, MISC_CONF, FIELD_RETURN, SRK_REVOKE),
++ BANK(HDCP_KEY0, HDCP_KEY1, HDCP_KEY2, HDCP_KEY3, HDCP_KEY4, HDCP_KEY5, HDCP_KEY6, HDCP_KEY7),
++ BANK(HDCP_KEY8, HDCP_KEY9, HDCP_KEY10, HDCP_KEY11, HDCP_KEY12, HDCP_KEY13, HDCP_KEY14, HDCP_KEY15),
++ BANK(HDCP_KEY16, HDCP_KEY17, HDCP_KEY18, HDCP_KEY19, HDCP_KEY20, HDCP_KEY21, HDCP_KEY22, HDCP_KEY23),
++ BANK(HDCP_KEY24, HDCP_KEY25, HDCP_KEY26, HDCP_KEY27, HDCP_KEY28, HDCP_KEY29, HDCP_KEY30, HDCP_KEY31),
++ BANK(HDCP_KEY32, HDCP_KEY33, HDCP_KEY34, HDCP_KEY35, HDCP_KEY36, HDCP_KEY37, HDCP_KEY38, HDCP_KEY39),
++ BANK(HDCP_KEY40, HDCP_KEY41, HDCP_KEY42, HDCP_KEY43, HDCP_KEY44, HDCP_KEY45, HDCP_KEY46, HDCP_KEY47),
++ BANK(HDCP_KEY48, HDCP_KEY49, HDCP_KEY50, HDCP_KEY51, HDCP_KEY52, HDCP_KEY53, HDCP_KEY54, HDCP_KEY55),
++ BANK(HDCP_KEY56, HDCP_KEY57, HDCP_KEY58, HDCP_KEY59, HDCP_KEY60, HDCP_KEY61, HDCP_KEY62, HDCP_KEY63),
++ BANK(HDCP_KEY64, HDCP_KEY65, HDCP_KEY66, HDCP_KEY67, HDCP_KEY68, HDCP_KEY69, HDCP_KEY70, HDCP_KEY71),
++ BANK(CRC0, CRC1, CRC2, CRC3, CRC4, CRC5, CRC6, CRC7),
++};
++
++static DEFINE_MUTEX(otp_mutex);
++static void __iomem *otp_base;
++static struct clk *otp_clk;
++struct kobject *otp_kobj;
++struct kobj_attribute *otp_kattr;
++struct attribute_group *otp_attr_group;
++
++static void set_otp_timing(void)
++{
++ unsigned long clk_rate = 0;
++ unsigned long strobe_read, relex, strobe_prog;
++ u32 timing = 0;
++
++ clk_rate = clk_get_rate(otp_clk);
++
++ /* do optimization for too many zeros */
++ relex = clk_rate / (1000000000 / DEF_RELAX) - 1;
++ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
++ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
++
++ timing = BF(relex, OCOTP_TIMING_RELAX);
++ timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
++ timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
++
++ __raw_writel(timing, otp_base + HW_OCOTP_TIMING);
++}
++
++static int otp_wait_busy(u32 flags)
++{
++ int count;
++ u32 c;
++
++ for (count = 10000; count >= 0; count--) {
++ c = __raw_readl(otp_base + HW_OCOTP_CTRL);
++ if (!(c & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR | flags)))
++ break;
++ cpu_relax();
++ }
++
++ if (count < 0)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++int fsl_otp_readl(unsigned long offset, u32 *value)
++{
++ int ret = 0;
++
++ ret = clk_prepare_enable(otp_clk);
++ if (ret)
++ return ret;
++
++ mutex_lock(&otp_mutex);
++
++ set_otp_timing();
++ ret = otp_wait_busy(0);
++ if (ret)
++ goto out;
++
++ *value = __raw_readl(otp_base + offset);
++
++out:
++ mutex_unlock(&otp_mutex);
++ clk_disable_unprepare(otp_clk);
++ return ret;
++}
++EXPORT_SYMBOL(fsl_otp_readl);
++
++static ssize_t fsl_otp_show(struct kobject *kobj, struct kobj_attribute *attr,
++ char *buf)
++{
++ unsigned int index = attr - otp_kattr;
++ u32 value = 0;
++ int ret;
++
++ ret = fsl_otp_readl(HW_OCOTP_CUST_N(index), &value);
++
++ return ret ? 0 : sprintf(buf, "0x%x\n", value);
++}
++
++#ifdef CONFIG_FSL_OTP_WRITE_ENABLE
++static int otp_write_bits(int addr, u32 data, u32 magic)
++{
++ u32 c; /* for control register */
++
++ /* init the control register */
++ c = __raw_readl(otp_base + HW_OCOTP_CTRL);
++ c &= ~BM_OCOTP_CTRL_ADDR;
++ c |= BF(addr, OCOTP_CTRL_ADDR);
++ c |= BF(magic, OCOTP_CTRL_WR_UNLOCK);
++ __raw_writel(c, otp_base + HW_OCOTP_CTRL);
++
++ /* init the data register */
++ __raw_writel(data, otp_base + HW_OCOTP_DATA);
++ otp_wait_busy(0);
++
++ mdelay(2); /* Write Postamble */
++
++ return 0;
++}
++
++static ssize_t fsl_otp_store(struct kobject *kobj, struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned int index = attr - otp_kattr;
++ u32 value;
++ int ret;
++
++ sscanf(buf, "0x%x", &value);
++
++ ret = clk_prepare_enable(otp_clk);
++ if (ret)
++ return 0;
++
++ mutex_lock(&otp_mutex);
++
++ set_otp_timing();
++ ret = otp_wait_busy(0);
++ if (ret)
++ goto out;
++
++ otp_write_bits(index, value, 0x3e77);
++
++ /* Reload all the shadow registers */
++ __raw_writel(BM_OCOTP_CTRL_RELOAD_SHADOWS,
++ otp_base + HW_OCOTP_CTRL_SET);
++ udelay(1);
++ otp_wait_busy(BM_OCOTP_CTRL_RELOAD_SHADOWS);
++
++out:
++ mutex_unlock(&otp_mutex);
++ clk_disable_unprepare(otp_clk);
++ return ret ? 0 : count;
++}
++#endif
++
++static int fsl_otp_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct attribute **attrs;
++ const char **desc;
++ int i, num;
++ int ret;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ otp_base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(otp_base)) {
++ ret = PTR_ERR(otp_base);
++ dev_err(&pdev->dev, "failed to ioremap resource: %d\n", ret);
++ return ret;
++ }
++
++ otp_clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(otp_clk)) {
++ ret = PTR_ERR(otp_clk);
++ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
++ return ret;
++ }
++
++ desc = (const char **) imx6q_otp_desc;
++ num = sizeof(imx6q_otp_desc) / sizeof(void *);
++
++ /* The last one is NULL, which is used to detect the end */
++ attrs = devm_kzalloc(&pdev->dev, (num + 1) * sizeof(*attrs),
++ GFP_KERNEL);
++ otp_kattr = devm_kzalloc(&pdev->dev, num * sizeof(*otp_kattr),
++ GFP_KERNEL);
++ otp_attr_group = devm_kzalloc(&pdev->dev, sizeof(*otp_attr_group),
++ GFP_KERNEL);
++ if (!attrs || !otp_kattr || !otp_attr_group)
++ return -ENOMEM;
++
++ for (i = 0; i < num; i++) {
++ sysfs_attr_init(&otp_kattr[i].attr);
++ otp_kattr[i].attr.name = desc[i];
++#ifdef CONFIG_FSL_OTP_WRITE_ENABLE
++ otp_kattr[i].attr.mode = 0600;
++ otp_kattr[i].store = fsl_otp_store;
++#else
++ otp_kattr[i].attr.mode = 0400;
++#endif
++ otp_kattr[i].show = fsl_otp_show;
++ attrs[i] = &otp_kattr[i].attr;
++ }
++ otp_attr_group->attrs = attrs;
++
++ otp_kobj = kobject_create_and_add("fsl_otp", NULL);
++ if (!otp_kobj) {
++ dev_err(&pdev->dev, "failed to add kobject\n");
++ return -ENOMEM;
++ }
++
++ ret = sysfs_create_group(otp_kobj, otp_attr_group);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to create sysfs group: %d\n", ret);
++ kobject_put(otp_kobj);
++ return ret;
++ }
++
++ mutex_init(&otp_mutex);
++
++ return 0;
++}
++
++static int fsl_otp_remove(struct platform_device *pdev)
++{
++ sysfs_remove_group(otp_kobj, otp_attr_group);
++ kobject_put(otp_kobj);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_otp_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ocotp", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_otp_dt_ids);
++
++static struct platform_driver fsl_otp_driver = {
++ .driver = {
++ .name = "imx-ocotp",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_otp_dt_ids,
++ },
++ .probe = fsl_otp_probe,
++ .remove = fsl_otp_remove,
++};
++module_platform_driver(fsl_otp_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Huang Shijie <b32955@freescale.com>");
++MODULE_DESCRIPTION("Freescale i.MX OCOTP driver");
+diff -Nur linux-3.14.40.orig/drivers/char/Kconfig linux-3.14.40/drivers/char/Kconfig
+--- linux-3.14.40.orig/drivers/char/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/char/Kconfig 2015-05-01 14:57:58.823427001 -0500
+@@ -82,6 +82,21 @@
+
+ If unsure, say N.
+
++config FSL_OTP
++ tristate "Freescale On-Chip OTP Memory Support"
++ depends on HAS_IOMEM && OF
++ help
++ If you say Y here, you will get support for a character device
++ interface into the One Time Programmable memory pages that are
++ stored on some Freescale i.MX processors. This will not give
++ you access to the secure memory pages, however. You will need to
++ write your own secure code and reader for that.
++
++ To compile this driver as a module, choose M here: the module
++ will be called fsl_otp.
++
++ If unsure, it is safe to say Y.
++
+ config PRINTER
+ tristate "Parallel printer support"
+ depends on PARPORT
+diff -Nur linux-3.14.40.orig/drivers/char/Makefile linux-3.14.40/drivers/char/Makefile
+--- linux-3.14.40.orig/drivers/char/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/char/Makefile 2015-05-01 14:57:58.839427001 -0500
+@@ -16,6 +16,7 @@
+ obj-$(CONFIG_IBM_BSR) += bsr.o
+ obj-$(CONFIG_SGI_MBCS) += mbcs.o
+ obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
++obj-$(CONFIG_FSL_OTP) += fsl_otp.o
+
+ obj-$(CONFIG_PRINTER) += lp.o
+
+diff -Nur linux-3.14.40.orig/drivers/clk/clk.c linux-3.14.40/drivers/clk/clk.c
+--- linux-3.14.40.orig/drivers/clk/clk.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/clk/clk.c 2015-05-01 14:57:58.851427001 -0500
+@@ -1707,6 +1707,7 @@
+ */
+ int clk_set_parent(struct clk *clk, struct clk *parent)
+ {
++ struct clk *child;
+ int ret = 0;
+ int p_index = 0;
+ unsigned long p_rate = 0;
+@@ -1733,6 +1734,18 @@
+ goto out;
+ }
+
++ /* check two consecutive basic mux clocks */
++ if (clk->flags & CLK_IS_BASIC_MUX) {
++ hlist_for_each_entry(child, &clk->children, child_node) {
++ if (child->flags & CLK_IS_BASIC_MUX) {
++ pr_err("%s: failed to switch parent of %s due to child mux %s\n",
++ __func__, clk->name, child->name);
++ ret = -EBUSY;
++ goto out;
++ }
++ }
++ }
++
+ /* try finding the new parent index */
+ if (parent) {
+ p_index = clk_fetch_parent_index(clk, parent);
+diff -Nur linux-3.14.40.orig/drivers/clk/clk-mux.c linux-3.14.40/drivers/clk/clk-mux.c
+--- linux-3.14.40.orig/drivers/clk/clk-mux.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/clk/clk-mux.c 2015-05-01 14:57:58.863427001 -0500
+@@ -143,7 +143,7 @@
+ init.ops = &clk_mux_ro_ops;
+ else
+ init.ops = &clk_mux_ops;
+- init.flags = flags | CLK_IS_BASIC;
++ init.flags = flags | CLK_IS_BASIC | CLK_IS_BASIC_MUX;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/cpufreq_interactive.c linux-3.14.40/drivers/cpufreq/cpufreq_interactive.c
+--- linux-3.14.40.orig/drivers/cpufreq/cpufreq_interactive.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/cpufreq/cpufreq_interactive.c 2015-05-01 14:57:58.863427001 -0500
+@@ -0,0 +1,1349 @@
++/*
++ * drivers/cpufreq/cpufreq_interactive.c
++ *
++ * Copyright (C) 2010 Google, Inc.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Author: Mike Chan (mike@android.com)
++ *
++ */
++
++#include <linux/cpu.h>
++#include <linux/cpumask.h>
++#include <linux/cpufreq.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/tick.h>
++#include <linux/time.h>
++#include <linux/timer.h>
++#include <linux/workqueue.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/cpufreq_interactive.h>
++
++struct cpufreq_interactive_cpuinfo {
++ struct timer_list cpu_timer;
++ struct timer_list cpu_slack_timer;
++ spinlock_t load_lock; /* protects the next 4 fields */
++ u64 time_in_idle;
++ u64 time_in_idle_timestamp;
++ u64 cputime_speedadj;
++ u64 cputime_speedadj_timestamp;
++ struct cpufreq_policy *policy;
++ struct cpufreq_frequency_table *freq_table;
++ unsigned int target_freq;
++ unsigned int floor_freq;
++ u64 floor_validate_time;
++ u64 hispeed_validate_time;
++ struct rw_semaphore enable_sem;
++ int governor_enabled;
++};
++
++static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
++
++/* realtime thread handles frequency scaling */
++static struct task_struct *speedchange_task;
++static cpumask_t speedchange_cpumask;
++static spinlock_t speedchange_cpumask_lock;
++static struct mutex gov_lock;
++
++/* Target load. Lower values result in higher CPU speeds. */
++#define DEFAULT_TARGET_LOAD 90
++static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
++
++#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
++#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
++static unsigned int default_above_hispeed_delay[] = {
++ DEFAULT_ABOVE_HISPEED_DELAY };
++
++struct cpufreq_interactive_tunables {
++ int usage_count;
++ /* Hi speed to bump to from lo speed when load burst (default max) */
++ unsigned int hispeed_freq;
++ /* Go to hi speed when CPU load at or above this value. */
++#define DEFAULT_GO_HISPEED_LOAD 99
++ unsigned long go_hispeed_load;
++ /* Target load. Lower values result in higher CPU speeds. */
++ spinlock_t target_loads_lock;
++ unsigned int *target_loads;
++ int ntarget_loads;
++ /*
++ * The minimum amount of time to spend at a frequency before we can ramp
++ * down.
++ */
++#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
++ unsigned long min_sample_time;
++ /*
++ * The sample rate of the timer used to increase frequency
++ */
++ unsigned long timer_rate;
++ /*
++ * Wait this long before raising speed above hispeed, by default a
++ * single timer interval.
++ */
++ spinlock_t above_hispeed_delay_lock;
++ unsigned int *above_hispeed_delay;
++ int nabove_hispeed_delay;
++ /* Non-zero means indefinite speed boost active */
++ int boost_val;
++ /* Duration of a boost pulse in usecs */
++ int boostpulse_duration_val;
++ /* End time of boost pulse in ktime converted to usecs */
++ u64 boostpulse_endtime;
++ /*
++ * Max additional time to wait in idle, beyond timer_rate, at speeds
++ * above minimum before wakeup to reduce speed, or -1 if unnecessary.
++ */
++#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
++ int timer_slack_val;
++ bool io_is_busy;
++};
++
++/* For cases where we have single governor instance for system */
++struct cpufreq_interactive_tunables *common_tunables;
++
++static struct attribute_group *get_sysfs_attr(void);
++
++static void cpufreq_interactive_timer_resched(
++ struct cpufreq_interactive_cpuinfo *pcpu)
++{
++ struct cpufreq_interactive_tunables *tunables =
++ pcpu->policy->governor_data;
++ unsigned long expires;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcpu->load_lock, flags);
++ pcpu->time_in_idle =
++ get_cpu_idle_time(smp_processor_id(),
++ &pcpu->time_in_idle_timestamp,
++ tunables->io_is_busy);
++ pcpu->cputime_speedadj = 0;
++ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
++ expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
++ mod_timer_pinned(&pcpu->cpu_timer, expires);
++
++ if (tunables->timer_slack_val >= 0 &&
++ pcpu->target_freq > pcpu->policy->min) {
++ expires += usecs_to_jiffies(tunables->timer_slack_val);
++ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
++ }
++
++ spin_unlock_irqrestore(&pcpu->load_lock, flags);
++}
++
++/* The caller shall take enable_sem write semaphore to avoid any timer race.
++ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
++ * function.
++ */
++static void cpufreq_interactive_timer_start(
++ struct cpufreq_interactive_tunables *tunables, int cpu)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
++ unsigned long expires = jiffies +
++ usecs_to_jiffies(tunables->timer_rate);
++ unsigned long flags;
++
++ pcpu->cpu_timer.expires = expires;
++ add_timer_on(&pcpu->cpu_timer, cpu);
++ if (tunables->timer_slack_val >= 0 &&
++ pcpu->target_freq > pcpu->policy->min) {
++ expires += usecs_to_jiffies(tunables->timer_slack_val);
++ pcpu->cpu_slack_timer.expires = expires;
++ add_timer_on(&pcpu->cpu_slack_timer, cpu);
++ }
++
++ spin_lock_irqsave(&pcpu->load_lock, flags);
++ pcpu->time_in_idle =
++ get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
++ tunables->io_is_busy);
++ pcpu->cputime_speedadj = 0;
++ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
++ spin_unlock_irqrestore(&pcpu->load_lock, flags);
++}
++
++static unsigned int freq_to_above_hispeed_delay(
++ struct cpufreq_interactive_tunables *tunables,
++ unsigned int freq)
++{
++ int i;
++ unsigned int ret;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
++
++ for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
++ freq >= tunables->above_hispeed_delay[i+1]; i += 2)
++ ;
++
++ ret = tunables->above_hispeed_delay[i];
++ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
++ return ret;
++}
++
++static unsigned int freq_to_targetload(
++ struct cpufreq_interactive_tunables *tunables, unsigned int freq)
++{
++ int i;
++ unsigned int ret;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->target_loads_lock, flags);
++
++ for (i = 0; i < tunables->ntarget_loads - 1 &&
++ freq >= tunables->target_loads[i+1]; i += 2)
++ ;
++
++ ret = tunables->target_loads[i];
++ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
++ return ret;
++}
++
++/*
++ * If increasing frequencies never map to a lower target load then
++ * choose_freq() will find the minimum frequency that does not exceed its
++ * target load given the current load.
++ */
++static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
++ unsigned int loadadjfreq)
++{
++ unsigned int freq = pcpu->policy->cur;
++ unsigned int prevfreq, freqmin, freqmax;
++ unsigned int tl;
++ int index;
++
++ freqmin = 0;
++ freqmax = UINT_MAX;
++
++ do {
++ prevfreq = freq;
++ tl = freq_to_targetload(pcpu->policy->governor_data, freq);
++
++ /*
++ * Find the lowest frequency where the computed load is less
++ * than or equal to the target load.
++ */
++
++ if (cpufreq_frequency_table_target(
++ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
++ CPUFREQ_RELATION_L, &index))
++ break;
++ freq = pcpu->freq_table[index].frequency;
++
++ if (freq > prevfreq) {
++ /* The previous frequency is too low. */
++ freqmin = prevfreq;
++
++ if (freq >= freqmax) {
++ /*
++ * Find the highest frequency that is less
++ * than freqmax.
++ */
++ if (cpufreq_frequency_table_target(
++ pcpu->policy, pcpu->freq_table,
++ freqmax - 1, CPUFREQ_RELATION_H,
++ &index))
++ break;
++ freq = pcpu->freq_table[index].frequency;
++
++ if (freq == freqmin) {
++ /*
++ * The first frequency below freqmax
++ * has already been found to be too
++ * low. freqmax is the lowest speed
++ * we found that is fast enough.
++ */
++ freq = freqmax;
++ break;
++ }
++ }
++ } else if (freq < prevfreq) {
++ /* The previous frequency is high enough. */
++ freqmax = prevfreq;
++
++ if (freq <= freqmin) {
++ /*
++ * Find the lowest frequency that is higher
++ * than freqmin.
++ */
++ if (cpufreq_frequency_table_target(
++ pcpu->policy, pcpu->freq_table,
++ freqmin + 1, CPUFREQ_RELATION_L,
++ &index))
++ break;
++ freq = pcpu->freq_table[index].frequency;
++
++ /*
++ * If freqmax is the first frequency above
++ * freqmin then we have already found that
++ * this speed is fast enough.
++ */
++ if (freq == freqmax)
++ break;
++ }
++ }
++
++ /* If same frequency chosen as previous then done. */
++ } while (freq != prevfreq);
++
++ return freq;
++}
++
++static u64 update_load(int cpu)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
++ struct cpufreq_interactive_tunables *tunables =
++ pcpu->policy->governor_data;
++ u64 now;
++ u64 now_idle;
++ unsigned int delta_idle;
++ unsigned int delta_time;
++ u64 active_time;
++
++ now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
++ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
++ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
++
++ if (delta_time <= delta_idle)
++ active_time = 0;
++ else
++ active_time = delta_time - delta_idle;
++
++ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
++
++ pcpu->time_in_idle = now_idle;
++ pcpu->time_in_idle_timestamp = now;
++ return now;
++}
++
++static void cpufreq_interactive_timer(unsigned long data)
++{
++ u64 now;
++ unsigned int delta_time;
++ u64 cputime_speedadj;
++ int cpu_load;
++ struct cpufreq_interactive_cpuinfo *pcpu =
++ &per_cpu(cpuinfo, data);
++ struct cpufreq_interactive_tunables *tunables =
++ pcpu->policy->governor_data;
++ unsigned int new_freq;
++ unsigned int loadadjfreq;
++ unsigned int index;
++ unsigned long flags;
++ bool boosted;
++
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return;
++ if (!pcpu->governor_enabled)
++ goto exit;
++
++ spin_lock_irqsave(&pcpu->load_lock, flags);
++ now = update_load(data);
++ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
++ cputime_speedadj = pcpu->cputime_speedadj;
++ spin_unlock_irqrestore(&pcpu->load_lock, flags);
++
++ if (WARN_ON_ONCE(!delta_time))
++ goto rearm;
++
++ do_div(cputime_speedadj, delta_time);
++ loadadjfreq = (unsigned int)cputime_speedadj * 100;
++ cpu_load = loadadjfreq / pcpu->target_freq;
++ boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
++
++ if (cpu_load >= tunables->go_hispeed_load || boosted) {
++ if (pcpu->target_freq < tunables->hispeed_freq) {
++ new_freq = tunables->hispeed_freq;
++ } else {
++ new_freq = choose_freq(pcpu, loadadjfreq);
++
++ if (new_freq < tunables->hispeed_freq)
++ new_freq = tunables->hispeed_freq;
++ }
++ } else {
++ new_freq = choose_freq(pcpu, loadadjfreq);
++ }
++
++ if (pcpu->target_freq >= tunables->hispeed_freq &&
++ new_freq > pcpu->target_freq &&
++ now - pcpu->hispeed_validate_time <
++ freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
++ trace_cpufreq_interactive_notyet(
++ data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++ goto rearm;
++ }
++
++ pcpu->hispeed_validate_time = now;
++
++ if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
++ new_freq, CPUFREQ_RELATION_L,
++ &index))
++ goto rearm;
++
++ new_freq = pcpu->freq_table[index].frequency;
++
++ /*
++ * Do not scale below floor_freq unless we have been at or above the
++ * floor frequency for the minimum sample time since last validated.
++ */
++ if (new_freq < pcpu->floor_freq) {
++ if (now - pcpu->floor_validate_time <
++ tunables->min_sample_time) {
++ trace_cpufreq_interactive_notyet(
++ data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++ goto rearm;
++ }
++ }
++
++ /*
++ * Update the timestamp for checking whether speed has been held at
++ * or above the selected frequency for a minimum of min_sample_time,
++ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
++ * allow the speed to drop as soon as the boostpulse duration expires
++ * (or the indefinite boost is turned off).
++ */
++
++ if (!boosted || new_freq > tunables->hispeed_freq) {
++ pcpu->floor_freq = new_freq;
++ pcpu->floor_validate_time = now;
++ }
++
++ if (pcpu->target_freq == new_freq) {
++ trace_cpufreq_interactive_already(
++ data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++ goto rearm_if_notmax;
++ }
++
++ trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
++ pcpu->policy->cur, new_freq);
++
++ pcpu->target_freq = new_freq;
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++ cpumask_set_cpu(data, &speedchange_cpumask);
++ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
++ wake_up_process(speedchange_task);
++
++rearm_if_notmax:
++ /*
++ * Already set max speed and don't see a need to change that,
++ * wait until next idle to re-evaluate, don't need timer.
++ */
++ if (pcpu->target_freq == pcpu->policy->max)
++ goto exit;
++
++rearm:
++ if (!timer_pending(&pcpu->cpu_timer))
++ cpufreq_interactive_timer_resched(pcpu);
++
++exit:
++ up_read(&pcpu->enable_sem);
++ return;
++}
++
++static void cpufreq_interactive_idle_start(void)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu =
++ &per_cpu(cpuinfo, smp_processor_id());
++ int pending;
++
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ return;
++ }
++
++ pending = timer_pending(&pcpu->cpu_timer);
++
++ if (pcpu->target_freq != pcpu->policy->min) {
++ /*
++ * Entering idle while not at lowest speed. On some
++ * platforms this can hold the other CPU(s) at that speed
++ * even though the CPU is idle. Set a timer to re-evaluate
++ * speed so this idle CPU doesn't hold the other CPUs above
++ * min indefinitely. This should probably be a quirk of
++ * the CPUFreq driver.
++ */
++ if (!pending)
++ cpufreq_interactive_timer_resched(pcpu);
++ }
++
++ up_read(&pcpu->enable_sem);
++}
++
++static void cpufreq_interactive_idle_end(void)
++{
++ struct cpufreq_interactive_cpuinfo *pcpu =
++ &per_cpu(cpuinfo, smp_processor_id());
++
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ return;
++ }
++
++ /* Arm the timer for 1-2 ticks later if not already. */
++ if (!timer_pending(&pcpu->cpu_timer)) {
++ cpufreq_interactive_timer_resched(pcpu);
++ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
++ del_timer(&pcpu->cpu_timer);
++ del_timer(&pcpu->cpu_slack_timer);
++ cpufreq_interactive_timer(smp_processor_id());
++ }
++
++ up_read(&pcpu->enable_sem);
++}
++
++static int cpufreq_interactive_speedchange_task(void *data)
++{
++ unsigned int cpu;
++ cpumask_t tmp_mask;
++ unsigned long flags;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++
++ if (cpumask_empty(&speedchange_cpumask)) {
++ spin_unlock_irqrestore(&speedchange_cpumask_lock,
++ flags);
++ schedule();
++
++ if (kthread_should_stop())
++ break;
++
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++ }
++
++ set_current_state(TASK_RUNNING);
++ tmp_mask = speedchange_cpumask;
++ cpumask_clear(&speedchange_cpumask);
++ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
++
++ for_each_cpu(cpu, &tmp_mask) {
++ unsigned int j;
++ unsigned int max_freq = 0;
++
++ pcpu = &per_cpu(cpuinfo, cpu);
++ if (!down_read_trylock(&pcpu->enable_sem))
++ continue;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ continue;
++ }
++
++ for_each_cpu(j, pcpu->policy->cpus) {
++ struct cpufreq_interactive_cpuinfo *pjcpu =
++ &per_cpu(cpuinfo, j);
++
++ if (pjcpu->target_freq > max_freq)
++ max_freq = pjcpu->target_freq;
++ }
++
++ if (max_freq != pcpu->policy->cur)
++ __cpufreq_driver_target(pcpu->policy,
++ max_freq,
++ CPUFREQ_RELATION_H);
++ trace_cpufreq_interactive_setspeed(cpu,
++ pcpu->target_freq,
++ pcpu->policy->cur);
++
++ up_read(&pcpu->enable_sem);
++ }
++ }
++
++ return 0;
++}
++
++static void cpufreq_interactive_boost(void)
++{
++ int i;
++ int anyboost = 0;
++ unsigned long flags;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ struct cpufreq_interactive_tunables *tunables;
++
++ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
++
++ for_each_online_cpu(i) {
++ pcpu = &per_cpu(cpuinfo, i);
++ tunables = pcpu->policy->governor_data;
++
++ if (pcpu->target_freq < tunables->hispeed_freq) {
++ pcpu->target_freq = tunables->hispeed_freq;
++ cpumask_set_cpu(i, &speedchange_cpumask);
++ pcpu->hispeed_validate_time =
++ ktime_to_us(ktime_get());
++ anyboost = 1;
++ }
++
++ /*
++ * Set floor freq and (re)start timer for when last
++ * validated.
++ */
++
++ pcpu->floor_freq = tunables->hispeed_freq;
++ pcpu->floor_validate_time = ktime_to_us(ktime_get());
++ }
++
++ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
++
++ if (anyboost)
++ wake_up_process(speedchange_task);
++}
++
++static int cpufreq_interactive_notifier(
++ struct notifier_block *nb, unsigned long val, void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ int cpu;
++ unsigned long flags;
++
++ if (val == CPUFREQ_POSTCHANGE) {
++ pcpu = &per_cpu(cpuinfo, freq->cpu);
++ if (!down_read_trylock(&pcpu->enable_sem))
++ return 0;
++ if (!pcpu->governor_enabled) {
++ up_read(&pcpu->enable_sem);
++ return 0;
++ }
++
++ for_each_cpu(cpu, pcpu->policy->cpus) {
++ struct cpufreq_interactive_cpuinfo *pjcpu =
++ &per_cpu(cpuinfo, cpu);
++ if (cpu != freq->cpu) {
++ if (!down_read_trylock(&pjcpu->enable_sem))
++ continue;
++ if (!pjcpu->governor_enabled) {
++ up_read(&pjcpu->enable_sem);
++ continue;
++ }
++ }
++ spin_lock_irqsave(&pjcpu->load_lock, flags);
++ update_load(cpu);
++ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
++ if (cpu != freq->cpu)
++ up_read(&pjcpu->enable_sem);
++ }
++
++ up_read(&pcpu->enable_sem);
++ }
++ return 0;
++}
++
++static struct notifier_block cpufreq_notifier_block = {
++ .notifier_call = cpufreq_interactive_notifier,
++};
++
++static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
++{
++ const char *cp;
++ int i;
++ int ntokens = 1;
++ unsigned int *tokenized_data;
++ int err = -EINVAL;
++
++ cp = buf;
++ while ((cp = strpbrk(cp + 1, " :")))
++ ntokens++;
++
++ if (!(ntokens & 0x1))
++ goto err;
++
++ tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
++ if (!tokenized_data) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ cp = buf;
++ i = 0;
++ while (i < ntokens) {
++ if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
++ goto err_kfree;
++
++ cp = strpbrk(cp, " :");
++ if (!cp)
++ break;
++ cp++;
++ }
++
++ if (i != ntokens)
++ goto err_kfree;
++
++ *num_tokens = ntokens;
++ return tokenized_data;
++
++err_kfree:
++ kfree(tokenized_data);
++err:
++ return ERR_PTR(err);
++}
++
++static ssize_t show_target_loads(
++ struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ int i;
++ ssize_t ret = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->target_loads_lock, flags);
++
++ for (i = 0; i < tunables->ntarget_loads; i++)
++ ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
++ i & 0x1 ? ":" : " ");
++
++ sprintf(buf + ret - 1, "\n");
++ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
++ return ret;
++}
++
++static ssize_t store_target_loads(
++ struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ntokens;
++ unsigned int *new_target_loads = NULL;
++ unsigned long flags;
++
++ new_target_loads = get_tokenized_data(buf, &ntokens);
++ if (IS_ERR(new_target_loads))
++ return PTR_RET(new_target_loads);
++
++ spin_lock_irqsave(&tunables->target_loads_lock, flags);
++ if (tunables->target_loads != default_target_loads)
++ kfree(tunables->target_loads);
++ tunables->target_loads = new_target_loads;
++ tunables->ntarget_loads = ntokens;
++ spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
++ return count;
++}
++
++static ssize_t show_above_hispeed_delay(
++ struct cpufreq_interactive_tunables *tunables, char *buf)
++{
++ int i;
++ ssize_t ret = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
++
++ for (i = 0; i < tunables->nabove_hispeed_delay; i++)
++ ret += sprintf(buf + ret, "%u%s",
++ tunables->above_hispeed_delay[i],
++ i & 0x1 ? ":" : " ");
++
++ sprintf(buf + ret - 1, "\n");
++ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
++ return ret;
++}
++
++static ssize_t store_above_hispeed_delay(
++ struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ntokens;
++ unsigned int *new_above_hispeed_delay = NULL;
++ unsigned long flags;
++
++ new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
++ if (IS_ERR(new_above_hispeed_delay))
++ return PTR_RET(new_above_hispeed_delay);
++
++ spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
++ if (tunables->above_hispeed_delay != default_above_hispeed_delay)
++ kfree(tunables->above_hispeed_delay);
++ tunables->above_hispeed_delay = new_above_hispeed_delay;
++ tunables->nabove_hispeed_delay = ntokens;
++ spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
++ return count;
++
++}
++
++static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%u\n", tunables->hispeed_freq);
++}
++
++static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ long unsigned int val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->hispeed_freq = val;
++ return count;
++}
++
++static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
++ *tunables, char *buf)
++{
++ return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
++}
++
++static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
++ *tunables, const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->go_hispeed_load = val;
++ return count;
++}
++
++static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
++ *tunables, char *buf)
++{
++ return sprintf(buf, "%lu\n", tunables->min_sample_time);
++}
++
++static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
++ *tunables, const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->min_sample_time = val;
++ return count;
++}
++
++static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%lu\n", tunables->timer_rate);
++}
++
++static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = strict_strtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->timer_rate = val;
++ return count;
++}
++
++static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%d\n", tunables->timer_slack_val);
++}
++
++static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtol(buf, 10, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->timer_slack_val = val;
++ return count;
++}
++
++static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%d\n", tunables->boost_val);
++}
++
++static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->boost_val = val;
++
++ if (tunables->boost_val) {
++ trace_cpufreq_interactive_boost("on");
++ cpufreq_interactive_boost();
++ } else {
++ trace_cpufreq_interactive_unboost("off");
++ }
++
++ return count;
++}
++
++static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
++ tunables->boostpulse_duration_val;
++ trace_cpufreq_interactive_boost("pulse");
++ cpufreq_interactive_boost();
++ return count;
++}
++
++static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
++ *tunables, char *buf)
++{
++ return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
++}
++
++static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
++ *tunables, const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++
++ tunables->boostpulse_duration_val = val;
++ return count;
++}
++
++static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
++ char *buf)
++{
++ return sprintf(buf, "%u\n", tunables->io_is_busy);
++}
++
++static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
++ const char *buf, size_t count)
++{
++ int ret;
++ unsigned long val;
++
++ ret = kstrtoul(buf, 0, &val);
++ if (ret < 0)
++ return ret;
++ tunables->io_is_busy = val;
++ return count;
++}
++
++/*
++ * Create show/store routines
++ * - sys: One governor instance for complete SYSTEM
++ * - pol: One governor instance per struct cpufreq_policy
++ */
++#define show_gov_pol_sys(file_name) \
++static ssize_t show_##file_name##_gov_sys \
++(struct kobject *kobj, struct attribute *attr, char *buf) \
++{ \
++ return show_##file_name(common_tunables, buf); \
++} \
++ \
++static ssize_t show_##file_name##_gov_pol \
++(struct cpufreq_policy *policy, char *buf) \
++{ \
++ return show_##file_name(policy->governor_data, buf); \
++}
++
++#define store_gov_pol_sys(file_name) \
++static ssize_t store_##file_name##_gov_sys \
++(struct kobject *kobj, struct attribute *attr, const char *buf, \
++ size_t count) \
++{ \
++ return store_##file_name(common_tunables, buf, count); \
++} \
++ \
++static ssize_t store_##file_name##_gov_pol \
++(struct cpufreq_policy *policy, const char *buf, size_t count) \
++{ \
++ return store_##file_name(policy->governor_data, buf, count); \
++}
++
++#define show_store_gov_pol_sys(file_name) \
++show_gov_pol_sys(file_name); \
++store_gov_pol_sys(file_name)
++
++show_store_gov_pol_sys(target_loads);
++show_store_gov_pol_sys(above_hispeed_delay);
++show_store_gov_pol_sys(hispeed_freq);
++show_store_gov_pol_sys(go_hispeed_load);
++show_store_gov_pol_sys(min_sample_time);
++show_store_gov_pol_sys(timer_rate);
++show_store_gov_pol_sys(timer_slack);
++show_store_gov_pol_sys(boost);
++store_gov_pol_sys(boostpulse);
++show_store_gov_pol_sys(boostpulse_duration);
++show_store_gov_pol_sys(io_is_busy);
++
++#define gov_sys_attr_rw(_name) \
++static struct global_attr _name##_gov_sys = \
++__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
++
++#define gov_pol_attr_rw(_name) \
++static struct freq_attr _name##_gov_pol = \
++__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
++
++#define gov_sys_pol_attr_rw(_name) \
++ gov_sys_attr_rw(_name); \
++ gov_pol_attr_rw(_name)
++
++gov_sys_pol_attr_rw(target_loads);
++gov_sys_pol_attr_rw(above_hispeed_delay);
++gov_sys_pol_attr_rw(hispeed_freq);
++gov_sys_pol_attr_rw(go_hispeed_load);
++gov_sys_pol_attr_rw(min_sample_time);
++gov_sys_pol_attr_rw(timer_rate);
++gov_sys_pol_attr_rw(timer_slack);
++gov_sys_pol_attr_rw(boost);
++gov_sys_pol_attr_rw(boostpulse_duration);
++gov_sys_pol_attr_rw(io_is_busy);
++
++static struct global_attr boostpulse_gov_sys =
++ __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
++
++static struct freq_attr boostpulse_gov_pol =
++ __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
++
++/* One Governor instance for entire system */
++static struct attribute *interactive_attributes_gov_sys[] = {
++ &target_loads_gov_sys.attr,
++ &above_hispeed_delay_gov_sys.attr,
++ &hispeed_freq_gov_sys.attr,
++ &go_hispeed_load_gov_sys.attr,
++ &min_sample_time_gov_sys.attr,
++ &timer_rate_gov_sys.attr,
++ &timer_slack_gov_sys.attr,
++ &boost_gov_sys.attr,
++ &boostpulse_gov_sys.attr,
++ &boostpulse_duration_gov_sys.attr,
++ &io_is_busy_gov_sys.attr,
++ NULL,
++};
++
++static struct attribute_group interactive_attr_group_gov_sys = {
++ .attrs = interactive_attributes_gov_sys,
++ .name = "interactive",
++};
++
++/* Per policy governor instance */
++static struct attribute *interactive_attributes_gov_pol[] = {
++ &target_loads_gov_pol.attr,
++ &above_hispeed_delay_gov_pol.attr,
++ &hispeed_freq_gov_pol.attr,
++ &go_hispeed_load_gov_pol.attr,
++ &min_sample_time_gov_pol.attr,
++ &timer_rate_gov_pol.attr,
++ &timer_slack_gov_pol.attr,
++ &boost_gov_pol.attr,
++ &boostpulse_gov_pol.attr,
++ &boostpulse_duration_gov_pol.attr,
++ &io_is_busy_gov_pol.attr,
++ NULL,
++};
++
++static struct attribute_group interactive_attr_group_gov_pol = {
++ .attrs = interactive_attributes_gov_pol,
++ .name = "interactive",
++};
++
++static struct attribute_group *get_sysfs_attr(void)
++{
++ if (have_governor_per_policy())
++ return &interactive_attr_group_gov_pol;
++ else
++ return &interactive_attr_group_gov_sys;
++}
++
++static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
++ unsigned long val,
++ void *data)
++{
++ switch (val) {
++ case IDLE_START:
++ cpufreq_interactive_idle_start();
++ break;
++ case IDLE_END:
++ cpufreq_interactive_idle_end();
++ break;
++ }
++
++ return 0;
++}
++
++static struct notifier_block cpufreq_interactive_idle_nb = {
++ .notifier_call = cpufreq_interactive_idle_notifier,
++};
++
++static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
++ unsigned int event)
++{
++ int rc;
++ unsigned int j;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ struct cpufreq_frequency_table *freq_table;
++ struct cpufreq_interactive_tunables *tunables;
++
++ if (have_governor_per_policy())
++ tunables = policy->governor_data;
++ else
++ tunables = common_tunables;
++
++ WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
++
++ switch (event) {
++ case CPUFREQ_GOV_POLICY_INIT:
++ if (have_governor_per_policy()) {
++ WARN_ON(tunables);
++ } else if (tunables) {
++ tunables->usage_count++;
++ policy->governor_data = tunables;
++ return 0;
++ }
++
++ tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
++ if (!tunables) {
++ pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
++ return -ENOMEM;
++ }
++
++ tunables->usage_count = 1;
++ tunables->above_hispeed_delay = default_above_hispeed_delay;
++ tunables->nabove_hispeed_delay =
++ ARRAY_SIZE(default_above_hispeed_delay);
++ tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
++ tunables->target_loads = default_target_loads;
++ tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
++ tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
++ tunables->timer_rate = DEFAULT_TIMER_RATE;
++ tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
++ tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
++
++ spin_lock_init(&tunables->target_loads_lock);
++ spin_lock_init(&tunables->above_hispeed_delay_lock);
++
++ policy->governor_data = tunables;
++ if (!have_governor_per_policy()) {
++ common_tunables = tunables;
++ WARN_ON(cpufreq_get_global_kobject());
++ }
++
++ rc = sysfs_create_group(get_governor_parent_kobj(policy),
++ get_sysfs_attr());
++ if (rc) {
++ kfree(tunables);
++ policy->governor_data = NULL;
++ if (!have_governor_per_policy())
++ common_tunables = NULL;
++ return rc;
++ }
++
++ if (!policy->governor->initialized) {
++ idle_notifier_register(&cpufreq_interactive_idle_nb);
++ cpufreq_register_notifier(&cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ }
++
++ break;
++
++ case CPUFREQ_GOV_POLICY_EXIT:
++ if (!--tunables->usage_count) {
++ sysfs_remove_group(get_governor_parent_kobj(policy),
++ get_sysfs_attr());
++
++ if (!have_governor_per_policy())
++ cpufreq_put_global_kobject();
++
++ if (policy->governor->initialized == 1) {
++ cpufreq_unregister_notifier(&cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
++ }
++
++ kfree(tunables);
++ common_tunables = NULL;
++ }
++
++ policy->governor_data = NULL;
++ break;
++
++ case CPUFREQ_GOV_START:
++ mutex_lock(&gov_lock);
++
++ freq_table = cpufreq_frequency_get_table(policy->cpu);
++ if (!tunables->hispeed_freq)
++ tunables->hispeed_freq = policy->max;
++
++ for_each_cpu(j, policy->cpus) {
++ pcpu = &per_cpu(cpuinfo, j);
++ pcpu->policy = policy;
++ pcpu->target_freq = policy->cur;
++ pcpu->freq_table = freq_table;
++ pcpu->floor_freq = pcpu->target_freq;
++ pcpu->floor_validate_time =
++ ktime_to_us(ktime_get());
++ pcpu->hispeed_validate_time =
++ pcpu->floor_validate_time;
++ down_write(&pcpu->enable_sem);
++ del_timer_sync(&pcpu->cpu_timer);
++ del_timer_sync(&pcpu->cpu_slack_timer);
++ cpufreq_interactive_timer_start(tunables, j);
++ pcpu->governor_enabled = 1;
++ up_write(&pcpu->enable_sem);
++ }
++
++ mutex_unlock(&gov_lock);
++ break;
++
++ case CPUFREQ_GOV_STOP:
++ mutex_lock(&gov_lock);
++ for_each_cpu(j, policy->cpus) {
++ pcpu = &per_cpu(cpuinfo, j);
++ down_write(&pcpu->enable_sem);
++ pcpu->governor_enabled = 0;
++ del_timer_sync(&pcpu->cpu_timer);
++ del_timer_sync(&pcpu->cpu_slack_timer);
++ up_write(&pcpu->enable_sem);
++ }
++
++ mutex_unlock(&gov_lock);
++ break;
++
++ case CPUFREQ_GOV_LIMITS:
++ if (policy->max < policy->cur)
++ __cpufreq_driver_target(policy,
++ policy->max, CPUFREQ_RELATION_H);
++ else if (policy->min > policy->cur)
++ __cpufreq_driver_target(policy,
++ policy->min, CPUFREQ_RELATION_L);
++ for_each_cpu(j, policy->cpus) {
++ pcpu = &per_cpu(cpuinfo, j);
++
++ /* hold write semaphore to avoid race */
++ down_write(&pcpu->enable_sem);
++ if (pcpu->governor_enabled == 0) {
++ up_write(&pcpu->enable_sem);
++ continue;
++ }
++
++ /* update target_freq first */
++ if (policy->max < pcpu->target_freq)
++ pcpu->target_freq = policy->max;
++ else if (policy->min > pcpu->target_freq)
++ pcpu->target_freq = policy->min;
++
++ /* Reschedule the timer.
++ * Delete the timers first, otherwise the timer callback
++ * may return without re-arming the timer if it fails to
++ * acquire the semaphore. That race could leave the timer
++ * stopped unexpectedly.
++ */
++ del_timer_sync(&pcpu->cpu_timer);
++ del_timer_sync(&pcpu->cpu_slack_timer);
++ cpufreq_interactive_timer_start(tunables, j);
++ up_write(&pcpu->enable_sem);
++ }
++ break;
++ }
++ return 0;
++}
++
++#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
++static
++#endif
++struct cpufreq_governor cpufreq_gov_interactive = {
++ .name = "interactive",
++ .governor = cpufreq_governor_interactive,
++ .max_transition_latency = 10000000,
++ .owner = THIS_MODULE,
++};
++
++static void cpufreq_interactive_nop_timer(unsigned long data)
++{
++}
++
++static int __init cpufreq_interactive_init(void)
++{
++ unsigned int i;
++ struct cpufreq_interactive_cpuinfo *pcpu;
++ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
++
++ /* Initialize per-cpu timers */
++ for_each_possible_cpu(i) {
++ pcpu = &per_cpu(cpuinfo, i);
++ init_timer_deferrable(&pcpu->cpu_timer);
++ pcpu->cpu_timer.function = cpufreq_interactive_timer;
++ pcpu->cpu_timer.data = i;
++ init_timer(&pcpu->cpu_slack_timer);
++ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
++ spin_lock_init(&pcpu->load_lock);
++ init_rwsem(&pcpu->enable_sem);
++ }
++
++ spin_lock_init(&speedchange_cpumask_lock);
++ mutex_init(&gov_lock);
++ speedchange_task =
++ kthread_create(cpufreq_interactive_speedchange_task, NULL,
++ "cfinteractive");
++ if (IS_ERR(speedchange_task))
++ return PTR_ERR(speedchange_task);
++
++ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
++ get_task_struct(speedchange_task);
++
++ /* NB: wake up so the thread does not look hung to the freezer */
++ wake_up_process(speedchange_task);
++
++ return cpufreq_register_governor(&cpufreq_gov_interactive);
++}
++
++#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
++fs_initcall(cpufreq_interactive_init);
++#else
++module_init(cpufreq_interactive_init);
++#endif
++
++static void __exit cpufreq_interactive_exit(void)
++{
++ cpufreq_unregister_governor(&cpufreq_gov_interactive);
++ kthread_stop(speedchange_task);
++ put_task_struct(speedchange_task);
++}
++
++module_exit(cpufreq_interactive_exit);
++
++MODULE_AUTHOR("Mike Chan <mike@android.com>");
++MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
++ "Latency sensitive workloads");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/highbank-cpufreq.c linux-3.14.40/drivers/cpufreq/highbank-cpufreq.c
+--- linux-3.14.40.orig/drivers/cpufreq/highbank-cpufreq.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/cpufreq/highbank-cpufreq.c 2015-05-01 14:57:58.895427001 -0500
+@@ -19,7 +19,7 @@
+ #include <linux/cpu.h>
+ #include <linux/err.h>
+ #include <linux/of.h>
+-#include <linux/mailbox.h>
++#include <linux/pl320-ipc.h>
+ #include <linux/platform_device.h>
+
+ #define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/imx6-cpufreq.c linux-3.14.40/drivers/cpufreq/imx6-cpufreq.c
+--- linux-3.14.40.orig/drivers/cpufreq/imx6-cpufreq.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/cpufreq/imx6-cpufreq.c 2015-05-01 14:57:58.895427001 -0500
+@@ -0,0 +1,393 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/cpu.h>
++#include <linux/cpufreq.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/pm_opp.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/suspend.h>
++
++#define PU_SOC_VOLTAGE_NORMAL 1250000
++#define PU_SOC_VOLTAGE_HIGH 1275000
++#define FREQ_1P2_GHZ 1200000000
++
++static struct regulator *arm_reg;
++static struct regulator *pu_reg;
++static struct regulator *soc_reg;
++
++static struct clk *arm_clk;
++static struct clk *pll1_sys_clk;
++static struct clk *pll1_sw_clk;
++static struct clk *step_clk;
++static struct clk *pll2_pfd2_396m_clk;
++
++static struct device *cpu_dev;
++static struct cpufreq_frequency_table *freq_table;
++static unsigned int transition_latency;
++static struct mutex set_cpufreq_lock;
++
++static u32 *imx6_soc_volt;
++static u32 soc_opp_count;
++
++static int imx6_set_target(struct cpufreq_policy *policy, unsigned int index)
++{
++ struct dev_pm_opp *opp;
++ unsigned long freq_hz, volt, volt_old;
++ unsigned int old_freq, new_freq;
++ int ret;
++
++ mutex_lock(&set_cpufreq_lock);
++
++ new_freq = freq_table[index].frequency;
++ freq_hz = new_freq * 1000;
++ old_freq = clk_get_rate(arm_clk) / 1000;
++
++ rcu_read_lock();
++ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
++ if (IS_ERR(opp)) {
++ rcu_read_unlock();
++ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
++ ret = PTR_ERR(opp);
++ goto unlock;
++ }
++
++ volt = dev_pm_opp_get_voltage(opp);
++ rcu_read_unlock();
++ volt_old = regulator_get_voltage(arm_reg);
++
++ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
++ old_freq / 1000, volt_old / 1000,
++ new_freq / 1000, volt / 1000);
++
++ /*
++	 * CPU freq is increasing, so we need to ensure
++	 * that the bus frequency is increased too.
++ */
++ if (old_freq == freq_table[0].frequency)
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ /* scaling up? scale voltage before frequency */
++ if (new_freq > old_freq) {
++ if (regulator_is_enabled(pu_reg)) {
++ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
++ goto unlock;
++ }
++ }
++ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
++ goto unlock;
++ }
++ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
++ if (ret) {
++ dev_err(cpu_dev,
++ "failed to scale vddarm up: %d\n", ret);
++ goto unlock;
++ }
++ }
++
++ /*
++	 * The setpoints are selected per PLL/PFD frequencies, so we need to
++ * reprogram PLL for frequency scaling. The procedure of reprogramming
++ * PLL1 is as below.
++ *
++ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
++ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
++ * - Disable pll2_pfd2_396m_clk
++ */
++ clk_set_parent(step_clk, pll2_pfd2_396m_clk);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
++ clk_set_rate(pll1_sys_clk, new_freq * 1000);
++ clk_set_parent(pll1_sw_clk, pll1_sys_clk);
++ }
++
++ /* Ensure the arm clock divider is what we expect */
++ ret = clk_set_rate(arm_clk, new_freq * 1000);
++ if (ret) {
++ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
++ regulator_set_voltage_tol(arm_reg, volt_old, 0);
++ goto unlock;
++ }
++
++ /* scaling down? scale voltage after frequency */
++ if (new_freq < old_freq) {
++ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
++ if (ret) {
++ dev_warn(cpu_dev,
++ "failed to scale vddarm down: %d\n", ret);
++ ret = 0;
++ }
++ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
++ ret = 0;
++ }
++ if (regulator_is_enabled(pu_reg)) {
++ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
++ if (ret) {
++ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
++ ret = 0;
++ }
++ }
++ }
++
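++	/* Back at the lowest setpoint, so the high bus frequency request is no longer needed */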
++ if (policy->cur == freq_table[0].frequency)
++ release_bus_freq(BUS_FREQ_HIGH);
++
++unlock:
++ mutex_unlock(&set_cpufreq_lock);
++ return ret;
++}
++
++static int imx6_cpufreq_init(struct cpufreq_policy *policy)
++{
++ policy->clk = arm_clk;
++
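++	/* If the CPU comes up above the lowest setpoint, hold the bus at high frequency */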
++ if (policy->cur > freq_table[0].frequency)
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ return cpufreq_generic_init(policy, freq_table, transition_latency);
++}
++
++static struct cpufreq_driver imx6_cpufreq_driver = {
++ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
++ .verify = cpufreq_generic_frequency_table_verify,
++ .target_index = imx6_set_target,
++ .get = cpufreq_generic_get,
++ .init = imx6_cpufreq_init,
++ .exit = cpufreq_generic_exit,
++ .name = "imx6-cpufreq",
++ .attr = cpufreq_generic_attr,
++};
++
++static int imx6_cpufreq_pm_notify(struct notifier_block *nb,
++ unsigned long event, void *dummy)
++{
++ struct cpufreq_policy *data = cpufreq_cpu_get(0);
++ static u32 cpufreq_policy_min_pre_suspend;
++
++ /*
++	 * During suspend/resume, when the cpufreq driver tries to increase
++	 * the voltage/frequency, it needs to drive I2C/SPI to communicate
++	 * with the external PMIC to adjust the voltage, but those I2C/SPI
++	 * devices may already be suspended. To avoid that scenario, we
++	 * simply raise cpufreq to the highest setpoint before suspend.
++ */
++ switch (event) {
++ case PM_SUSPEND_PREPARE:
++ cpufreq_policy_min_pre_suspend = data->user_policy.min;
++ data->user_policy.min = data->user_policy.max;
++ break;
++ case PM_POST_SUSPEND:
++ data->user_policy.min = cpufreq_policy_min_pre_suspend;
++ break;
++ default:
++ break;
++ }
++
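++	/* Re-evaluate the policy so the adjusted min limit takes effect immediately */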
++ cpufreq_update_policy(0);
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block imx6_cpufreq_pm_notifier = {
++ .notifier_call = imx6_cpufreq_pm_notify,
++};
++
++static int imx6_cpufreq_probe(struct platform_device *pdev)
++{
++ struct device_node *np;
++ struct dev_pm_opp *opp;
++ unsigned long min_volt, max_volt;
++ int num, ret;
++ const struct property *prop;
++ const __be32 *val;
++ u32 nr, i, j;
++
++ cpu_dev = get_cpu_device(0);
++ if (!cpu_dev) {
++ pr_err("failed to get cpu0 device\n");
++ return -ENODEV;
++ }
++
++ np = of_node_get(cpu_dev->of_node);
++ if (!np) {
++ dev_err(cpu_dev, "failed to find cpu0 node\n");
++ return -ENOENT;
++ }
++
++ arm_clk = devm_clk_get(cpu_dev, "arm");
++ pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
++ pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
++ step_clk = devm_clk_get(cpu_dev, "step");
++ pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
++ if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
++ IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
++ dev_err(cpu_dev, "failed to get clocks\n");
++ ret = -ENOENT;
++ goto put_node;
++ }
++
++ arm_reg = devm_regulator_get(cpu_dev, "arm");
++ pu_reg = devm_regulator_get(cpu_dev, "pu");
++ soc_reg = devm_regulator_get(cpu_dev, "soc");
++ if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
++ dev_err(cpu_dev, "failed to get regulators\n");
++ ret = -ENOENT;
++ goto put_node;
++ }
++
++ /*
++	 * We expect an OPP table supplied by the platform.
++	 * In case the platform did not supply one, try to
++	 * initialise the OPP table from the device tree.
++ */
++ num = dev_pm_opp_get_opp_count(cpu_dev);
++ if (num < 0) {
++ ret = of_init_opp_table(cpu_dev);
++ if (ret < 0) {
++ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
++ goto put_node;
++ }
++
++ num = dev_pm_opp_get_opp_count(cpu_dev);
++ if (num < 0) {
++ ret = num;
++ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
++ goto put_node;
++ }
++ }
++
++ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
++ if (ret) {
++ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
++ goto put_node;
++ }
++
++ /* Make imx6_soc_volt array's size same as arm opp number */
++ imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
++ if (imx6_soc_volt == NULL) {
++ ret = -ENOMEM;
++ goto free_freq_table;
++ }
++
++ prop = of_find_property(np, "fsl,soc-operating-points", NULL);
++ if (!prop || !prop->value)
++ goto soc_opp_out;
++
++ /*
++ * Each OPP is a set of tuples consisting of frequency and
++ * voltage like <freq-kHz vol-uV>.
++ */
++ nr = prop->length / sizeof(u32);
++ if (nr % 2 || (nr / 2) < num)
++ goto soc_opp_out;
++
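++	/* For each cpufreq table entry, pick the matching SoC/PU voltage from the DT tuples */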
++ for (j = 0; j < num; j++) {
++ val = prop->value;
++ for (i = 0; i < nr / 2; i++) {
++ unsigned long freq = be32_to_cpup(val++);
++ unsigned long volt = be32_to_cpup(val++);
++ if (freq_table[j].frequency == freq) {
++ imx6_soc_volt[soc_opp_count++] = volt;
++ break;
++ }
++ }
++ }
++
++soc_opp_out:
++ /* use fixed soc opp volt if no valid soc opp info found in dtb */
++ if (soc_opp_count != num) {
++ dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
++ for (j = 0; j < num; j++)
++ imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
++ if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
++ imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
++ }
++
++ if (of_property_read_u32(np, "clock-latency", &transition_latency))
++ transition_latency = CPUFREQ_ETERNAL;
++
++ /*
++ * Calculate the ramp time for max voltage change in the
++ * VDDSOC and VDDPU regulators.
++ */
++ ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
++ if (ret > 0)
++ transition_latency += ret * 1000;
++ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
++ if (ret > 0)
++ transition_latency += ret * 1000;
++
++ /*
++ * OPP is maintained in order of increasing frequency, and
++ * freq_table initialised from OPP is therefore sorted in the
++ * same order.
++ */
++ rcu_read_lock();
++ opp = dev_pm_opp_find_freq_exact(cpu_dev,
++ freq_table[0].frequency * 1000, true);
++ min_volt = dev_pm_opp_get_voltage(opp);
++ opp = dev_pm_opp_find_freq_exact(cpu_dev,
++ freq_table[--num].frequency * 1000, true);
++ max_volt = dev_pm_opp_get_voltage(opp);
++ rcu_read_unlock();
++ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
++ if (ret > 0)
++ transition_latency += ret * 1000;
++
++ mutex_init(&set_cpufreq_lock);
++ register_pm_notifier(&imx6_cpufreq_pm_notifier);
++
++ ret = cpufreq_register_driver(&imx6_cpufreq_driver);
++ if (ret) {
++ dev_err(cpu_dev, "failed register driver: %d\n", ret);
++ goto free_freq_table;
++ }
++
++ of_node_put(np);
++ return 0;
++
++free_freq_table:
++ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
++put_node:
++ of_node_put(np);
++ return ret;
++}
++
++static int imx6_cpufreq_remove(struct platform_device *pdev)
++{
++ cpufreq_unregister_driver(&imx6_cpufreq_driver);
++ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
++
++ return 0;
++}
++
++static struct platform_driver imx6_cpufreq_platdrv = {
++ .driver = {
++ .name = "imx6-cpufreq",
++ .owner = THIS_MODULE,
++ },
++ .probe = imx6_cpufreq_probe,
++ .remove = imx6_cpufreq_remove,
++};
++module_platform_driver(imx6_cpufreq_platdrv);
++
++MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
++MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/imx6q-cpufreq.c linux-3.14.40/drivers/cpufreq/imx6q-cpufreq.c
+--- linux-3.14.40.orig/drivers/cpufreq/imx6q-cpufreq.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/cpufreq/imx6q-cpufreq.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,330 +0,0 @@
+-/*
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#include <linux/clk.h>
+-#include <linux/cpu.h>
+-#include <linux/cpufreq.h>
+-#include <linux/delay.h>
+-#include <linux/err.h>
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/pm_opp.h>
+-#include <linux/platform_device.h>
+-#include <linux/regulator/consumer.h>
+-
+-#define PU_SOC_VOLTAGE_NORMAL 1250000
+-#define PU_SOC_VOLTAGE_HIGH 1275000
+-#define FREQ_1P2_GHZ 1200000000
+-
+-static struct regulator *arm_reg;
+-static struct regulator *pu_reg;
+-static struct regulator *soc_reg;
+-
+-static struct clk *arm_clk;
+-static struct clk *pll1_sys_clk;
+-static struct clk *pll1_sw_clk;
+-static struct clk *step_clk;
+-static struct clk *pll2_pfd2_396m_clk;
+-
+-static struct device *cpu_dev;
+-static struct cpufreq_frequency_table *freq_table;
+-static unsigned int transition_latency;
+-
+-static u32 *imx6_soc_volt;
+-static u32 soc_opp_count;
+-
+-static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
+-{
+- struct dev_pm_opp *opp;
+- unsigned long freq_hz, volt, volt_old;
+- unsigned int old_freq, new_freq;
+- int ret;
+-
+- new_freq = freq_table[index].frequency;
+- freq_hz = new_freq * 1000;
+- old_freq = clk_get_rate(arm_clk) / 1000;
+-
+- rcu_read_lock();
+- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+- if (IS_ERR(opp)) {
+- rcu_read_unlock();
+- dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+- return PTR_ERR(opp);
+- }
+-
+- volt = dev_pm_opp_get_voltage(opp);
+- rcu_read_unlock();
+- volt_old = regulator_get_voltage(arm_reg);
+-
+- dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+- old_freq / 1000, volt_old / 1000,
+- new_freq / 1000, volt / 1000);
+-
+- /* scaling up? scale voltage before frequency */
+- if (new_freq > old_freq) {
+- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+- return ret;
+- }
+- ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
+- return ret;
+- }
+- ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+- if (ret) {
+- dev_err(cpu_dev,
+- "failed to scale vddarm up: %d\n", ret);
+- return ret;
+- }
+- }
+-
+- /*
+- * The setpoints are selected per PLL/PDF frequencies, so we need to
+- * reprogram PLL for frequency scaling. The procedure of reprogramming
+- * PLL1 is as below.
+- *
+- * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+- * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+- * - Disable pll2_pfd2_396m_clk
+- */
+- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+- clk_set_parent(pll1_sw_clk, step_clk);
+- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+- clk_set_rate(pll1_sys_clk, new_freq * 1000);
+- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+- }
+-
+- /* Ensure the arm clock divider is what we expect */
+- ret = clk_set_rate(arm_clk, new_freq * 1000);
+- if (ret) {
+- dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+- regulator_set_voltage_tol(arm_reg, volt_old, 0);
+- return ret;
+- }
+-
+- /* scaling down? scale voltage after frequency */
+- if (new_freq < old_freq) {
+- ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+- if (ret) {
+- dev_warn(cpu_dev,
+- "failed to scale vddarm down: %d\n", ret);
+- ret = 0;
+- }
+- ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
+- ret = 0;
+- }
+- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+- if (ret) {
+- dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+- ret = 0;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+-{
+- policy->clk = arm_clk;
+- return cpufreq_generic_init(policy, freq_table, transition_latency);
+-}
+-
+-static struct cpufreq_driver imx6q_cpufreq_driver = {
+- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+- .verify = cpufreq_generic_frequency_table_verify,
+- .target_index = imx6q_set_target,
+- .get = cpufreq_generic_get,
+- .init = imx6q_cpufreq_init,
+- .exit = cpufreq_generic_exit,
+- .name = "imx6q-cpufreq",
+- .attr = cpufreq_generic_attr,
+-};
+-
+-static int imx6q_cpufreq_probe(struct platform_device *pdev)
+-{
+- struct device_node *np;
+- struct dev_pm_opp *opp;
+- unsigned long min_volt, max_volt;
+- int num, ret;
+- const struct property *prop;
+- const __be32 *val;
+- u32 nr, i, j;
+-
+- cpu_dev = get_cpu_device(0);
+- if (!cpu_dev) {
+- pr_err("failed to get cpu0 device\n");
+- return -ENODEV;
+- }
+-
+- np = of_node_get(cpu_dev->of_node);
+- if (!np) {
+- dev_err(cpu_dev, "failed to find cpu0 node\n");
+- return -ENOENT;
+- }
+-
+- arm_clk = devm_clk_get(cpu_dev, "arm");
+- pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
+- pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
+- step_clk = devm_clk_get(cpu_dev, "step");
+- pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
+- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
+- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
+- dev_err(cpu_dev, "failed to get clocks\n");
+- ret = -ENOENT;
+- goto put_node;
+- }
+-
+- arm_reg = devm_regulator_get(cpu_dev, "arm");
+- pu_reg = devm_regulator_get(cpu_dev, "pu");
+- soc_reg = devm_regulator_get(cpu_dev, "soc");
+- if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+- dev_err(cpu_dev, "failed to get regulators\n");
+- ret = -ENOENT;
+- goto put_node;
+- }
+-
+- /*
+- * We expect an OPP table supplied by platform.
+- * Just, incase the platform did not supply the OPP
+- * table, it will try to get it.
+- */
+- num = dev_pm_opp_get_opp_count(cpu_dev);
+- if (num < 0) {
+- ret = of_init_opp_table(cpu_dev);
+- if (ret < 0) {
+- dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+- goto put_node;
+- }
+-
+- num = dev_pm_opp_get_opp_count(cpu_dev);
+- if (num < 0) {
+- ret = num;
+- dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+- goto put_node;
+- }
+- }
+-
+- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+- if (ret) {
+- dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+- goto put_node;
+- }
+-
+- /* Make imx6_soc_volt array's size same as arm opp number */
+- imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
+- if (imx6_soc_volt == NULL) {
+- ret = -ENOMEM;
+- goto free_freq_table;
+- }
+-
+- prop = of_find_property(np, "fsl,soc-operating-points", NULL);
+- if (!prop || !prop->value)
+- goto soc_opp_out;
+-
+- /*
+- * Each OPP is a set of tuples consisting of frequency and
+- * voltage like <freq-kHz vol-uV>.
+- */
+- nr = prop->length / sizeof(u32);
+- if (nr % 2 || (nr / 2) < num)
+- goto soc_opp_out;
+-
+- for (j = 0; j < num; j++) {
+- val = prop->value;
+- for (i = 0; i < nr / 2; i++) {
+- unsigned long freq = be32_to_cpup(val++);
+- unsigned long volt = be32_to_cpup(val++);
+- if (freq_table[j].frequency == freq) {
+- imx6_soc_volt[soc_opp_count++] = volt;
+- break;
+- }
+- }
+- }
+-
+-soc_opp_out:
+- /* use fixed soc opp volt if no valid soc opp info found in dtb */
+- if (soc_opp_count != num) {
+- dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
+- for (j = 0; j < num; j++)
+- imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
+- if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
+- imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
+- }
+-
+- if (of_property_read_u32(np, "clock-latency", &transition_latency))
+- transition_latency = CPUFREQ_ETERNAL;
+-
+- /*
+- * Calculate the ramp time for max voltage change in the
+- * VDDSOC and VDDPU regulators.
+- */
+- ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+- if (ret > 0)
+- transition_latency += ret * 1000;
+- ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+- if (ret > 0)
+- transition_latency += ret * 1000;
+-
+- /*
+- * OPP is maintained in order of increasing frequency, and
+- * freq_table initialised from OPP is therefore sorted in the
+- * same order.
+- */
+- rcu_read_lock();
+- opp = dev_pm_opp_find_freq_exact(cpu_dev,
+- freq_table[0].frequency * 1000, true);
+- min_volt = dev_pm_opp_get_voltage(opp);
+- opp = dev_pm_opp_find_freq_exact(cpu_dev,
+- freq_table[--num].frequency * 1000, true);
+- max_volt = dev_pm_opp_get_voltage(opp);
+- rcu_read_unlock();
+- ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+- if (ret > 0)
+- transition_latency += ret * 1000;
+-
+- ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+- if (ret) {
+- dev_err(cpu_dev, "failed register driver: %d\n", ret);
+- goto free_freq_table;
+- }
+-
+- of_node_put(np);
+- return 0;
+-
+-free_freq_table:
+- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+-put_node:
+- of_node_put(np);
+- return ret;
+-}
+-
+-static int imx6q_cpufreq_remove(struct platform_device *pdev)
+-{
+- cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+-
+- return 0;
+-}
+-
+-static struct platform_driver imx6q_cpufreq_platdrv = {
+- .driver = {
+- .name = "imx6q-cpufreq",
+- .owner = THIS_MODULE,
+- },
+- .probe = imx6q_cpufreq_probe,
+- .remove = imx6q_cpufreq_remove,
+-};
+-module_platform_driver(imx6q_cpufreq_platdrv);
+-
+-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+-MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+-MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/Kconfig linux-3.14.40/drivers/cpufreq/Kconfig
+--- linux-3.14.40.orig/drivers/cpufreq/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/cpufreq/Kconfig 2015-05-01 14:57:58.895427001 -0500
+@@ -91,6 +91,15 @@
+ governor. If unsure have a look at the help section of the
+ driver. Fallback governor will be the performance governor.
+
++config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
++ bool "interactive"
++ select CPU_FREQ_GOV_INTERACTIVE
++ help
++ Use the CPUFreq governor 'interactive' as default. This allows
++ you to get a full dynamic cpu frequency capable system by simply
++ loading your cpufreq low-level hardware driver, using the
++ 'interactive' governor for latency-sensitive workloads.
++
+ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+ bool "conservative"
+ select CPU_FREQ_GOV_CONSERVATIVE
+@@ -157,6 +166,24 @@
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
++ If in doubt, say N.
++
++config CPU_FREQ_GOV_INTERACTIVE
++ tristate "'interactive' cpufreq policy governor"
++ default n
++ help
++ 'interactive' - This driver adds a dynamic cpufreq policy governor
++ designed for latency-sensitive workloads.
++
++ This governor attempts to reduce the latency of clock
++ increases so that the system is more responsive to
++ interactive workloads.
++
++ To compile this driver as a module, choose M here: the
++ module will be called cpufreq_interactive.
++
++ For details, take a look at linux/Documentation/cpu-freq.
++
+ If in doubt, say N.
+
+ config CPU_FREQ_GOV_CONSERVATIVE
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/Kconfig.arm linux-3.14.40/drivers/cpufreq/Kconfig.arm
+--- linux-3.14.40.orig/drivers/cpufreq/Kconfig.arm 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/cpufreq/Kconfig.arm 2015-05-01 14:57:58.911427001 -0500
+@@ -4,7 +4,8 @@
+
+ config ARM_BIG_LITTLE_CPUFREQ
+ tristate "Generic ARM big LITTLE CPUfreq driver"
+- depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
++ depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
++ depends on HAVE_CLK
+ select PM_OPP
+ help
+ This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+@@ -95,7 +96,7 @@
+
+ If in doubt, say N.
+
+-config ARM_IMX6Q_CPUFREQ
++config ARM_IMX6_CPUFREQ
+ tristate "Freescale i.MX6 cpufreq support"
+ depends on ARCH_MXC
+ depends on REGULATOR_ANATOP
+diff -Nur linux-3.14.40.orig/drivers/cpufreq/Makefile linux-3.14.40/drivers/cpufreq/Makefile
+--- linux-3.14.40.orig/drivers/cpufreq/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/cpufreq/Makefile 2015-05-01 14:57:58.951427001 -0500
+@@ -8,6 +8,7 @@
+ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
+ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
+ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
++obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
+ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+
+@@ -55,7 +56,7 @@
+ obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
+ obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
+ obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+-obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
++obj-$(CONFIG_ARM_IMX6_CPUFREQ) += imx6-cpufreq.o
+ obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
+ obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+diff -Nur linux-3.14.40.orig/drivers/crypto/caam/secvio.c linux-3.14.40/drivers/crypto/caam/secvio.c
+--- linux-3.14.40.orig/drivers/crypto/caam/secvio.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/crypto/caam/secvio.c 2015-05-01 14:57:58.951427001 -0500
+@@ -0,0 +1,335 @@
++
++/*
++ * CAAM/SEC 4.x Security Violation Handler
++ * Copyright (C) 2013 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#include "compat.h"
++#include "intern.h"
++#include "secvio.h"
++#include "regs.h"
++
++/*
++ * These names are associated with each violation handler.
++ * The source names were taken from MX6, and are based on recommendations
++ * for most common SoCs.
++ */
++static const u8 *violation_src_name[] = {
++ "CAAM Security Violation",
++ "JTAG Alarm",
++ "Watchdog",
++ "(reserved)",
++ "External Boot",
++ "Tamper Detect",
++};
++
++/* Top-level security violation interrupt */
++static irqreturn_t caam_secvio_interrupt(int irq, void *snvsdev)
++{
++ struct device *dev = snvsdev;
++ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
++ u32 irqstate;
++
++ /* Check the HP secvio status register */
++ irqstate = rd_reg32(&svpriv->svregs->hp.secvio_status) |
++ HP_SECVIOST_SECVIOMASK;
++
++ if (!irqstate)
++ return IRQ_NONE;
++
++ /* Mask out one or more causes for deferred service */
++ clrbits32(&svpriv->svregs->hp.secvio_int_ctl, irqstate);
++
++ /* Now ACK causes */
++ setbits32(&svpriv->svregs->hp.secvio_status, irqstate);
++
++ /* And run deferred service */
++ preempt_disable();
++ tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
++ preempt_enable();
++
++ return IRQ_HANDLED;
++}
++
++/* Deferred service handler. Tasklet arg is simply the SNVS dev */
++static void caam_secvio_dispatch(unsigned long indev)
++{
++ struct device *dev = (struct device *)indev;
++ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
++ unsigned long flags, cause;
++ int i;
++
++
++ /*
++ * Capture the interrupt cause, using masked interrupts as
++ * identification. This only works if all are enabled; if
++ * this changes in the future, a "cause queue" will have to
++ * be built
++ */
++ cause = rd_reg32(&svpriv->svregs->hp.secvio_int_ctl) &
++ (HP_SECVIO_INTEN_SRC5 | HP_SECVIO_INTEN_SRC4 |
++ HP_SECVIO_INTEN_SRC3 | HP_SECVIO_INTEN_SRC2 |
++ HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);
++
++ /* Look through causes, call each handler if exists */
++ for (i = 0; i < MAX_SECVIO_SOURCES; i++)
++ if (cause & (1 << i)) {
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[i].handler(dev, i,
++ svpriv->intsrc[i].ext);
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++		}
++
++ /* Re-enable now-serviced interrupts */
++ setbits32(&svpriv->svregs->hp.secvio_int_ctl, cause);
++}
++
++/*
++ * Default cause handler, used in lieu of an application-defined handler.
++ * All it does at this time is print a console message. It could force a halt.
++ */
++static void caam_secvio_default(struct device *dev, u32 cause, void *ext)
++{
++ struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
++
++ dev_err(dev, "Unhandled Security Violation Interrupt %d = %s\n",
++ cause, svpriv->intsrc[cause].intname);
++}
++
++/*
++ * Install an application-defined handler for a specified cause
++ * Arguments:
++ * - dev points to SNVS-owning device
++ * - cause interrupt source cause
++ * - handler application-defined handler, gets called with dev
++ * source cause, and locally-defined handler argument
++ * - cause_description points to a string to override the default cause
++ * name, this can be used as an alternate for error
++ * messages and such. If left NULL, the default
++ * description string is used.
++ * - ext pointer to any extra data needed by the handler.
++ */
++int caam_secvio_install_handler(struct device *dev, enum secvio_cause cause,
++ void (*handler)(struct device *dev, u32 cause,
++ void *ext),
++ u8 *cause_description, void *ext)
++{
++ unsigned long flags;
++ struct caam_drv_private_secvio *svpriv;
++
++ svpriv = dev_get_drvdata(dev);
++
++ if ((handler == NULL) || (cause > SECVIO_CAUSE_SOURCE_5))
++ return -EINVAL;
++
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[cause].handler = handler;
++ if (cause_description != NULL)
++ svpriv->intsrc[cause].intname = cause_description;
++ if (ext != NULL)
++ svpriv->intsrc[cause].ext = ext;
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(caam_secvio_install_handler);
++
++/*
++ * Remove an application-defined handler for a specified cause (and, by
++ * implication, restore the "default").
++ * Arguments:
++ * - dev points to SNVS-owning device
++ * - cause interrupt source cause
++ */
++int caam_secvio_remove_handler(struct device *dev, enum secvio_cause cause)
++{
++ unsigned long flags;
++ struct caam_drv_private_secvio *svpriv;
++
++ svpriv = dev_get_drvdata(dev);
++
++ if (cause > SECVIO_CAUSE_SOURCE_5)
++ return -EINVAL;
++
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[cause].intname = violation_src_name[cause];
++ svpriv->intsrc[cause].handler = caam_secvio_default;
++ svpriv->intsrc[cause].ext = NULL;
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++ return 0;
++}
++EXPORT_SYMBOL(caam_secvio_remove_handler);
++
++int caam_secvio_startup(struct platform_device *pdev)
++{
++ struct device *ctrldev, *svdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_secvio *svpriv;
++ struct platform_device *svpdev;
++ struct device_node *np;
++ const void *prop;
++ int i, error, secvio_inten_src;
++
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ /*
++	 * Set up the private block for the security violation handler.
++	 * Only one instance is possible.
++ */
++ svpriv = kzalloc(sizeof(struct caam_drv_private_secvio), GFP_KERNEL);
++ if (svpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for secvio\n");
++ return -ENOMEM;
++ }
++ svpriv->parentdev = ctrldev;
++
++ /* Create the security violation dev */
++#ifdef CONFIG_OF
++
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
++ if (!np)
++ return -ENODEV;
++
++ ctrlpriv->secvio_irq = of_irq_to_resource(np, 0, NULL);
++
++ prop = of_get_property(np, "secvio_src", NULL);
++ if (prop)
++ secvio_inten_src = of_read_ulong(prop, 1);
++ else
++ secvio_inten_src = HP_SECVIO_INTEN_ALL;
++
++ printk(KERN_ERR "secvio_inten_src = %x\n", secvio_inten_src);
++
++ svpdev = of_platform_device_create(np, NULL, ctrldev);
++ if (!svpdev)
++ return -ENODEV;
++
++#else
++ svpdev = platform_device_register_data(ctrldev, "caam_secvio", 0,
++ svpriv,
++ sizeof(struct caam_drv_private_secvio));
++
++ secvio_inten_src = HP_SECVIO_INTEN_ALL;
++#endif
++ if (svpdev == NULL) {
++ kfree(svpriv);
++ return -EINVAL;
++ }
++ svdev = &svpdev->dev;
++ dev_set_drvdata(svdev, svpriv);
++ ctrlpriv->secviodev = svdev;
++ svpriv->svregs = ctrlpriv->snvs;
++
++ /*
++ * Now we have all the dev data set up. Init interrupt
++ * source descriptions
++ */
++ for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
++ svpriv->intsrc[i].intname = violation_src_name[i];
++ svpriv->intsrc[i].handler = caam_secvio_default;
++ }
++
++ /* Connect main handler */
++ for_each_possible_cpu(i)
++ tasklet_init(&svpriv->irqtask[i], caam_secvio_dispatch,
++ (unsigned long)svdev);
++
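++	/* Hook the shared SNVS security violation interrupt */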
++ error = request_irq(ctrlpriv->secvio_irq, caam_secvio_interrupt,
++ IRQF_SHARED, "caam_secvio", svdev);
++ if (error) {
++ dev_err(svdev, "can't connect secvio interrupt\n");
++ irq_dispose_mapping(ctrlpriv->secvio_irq);
++ ctrlpriv->secvio_irq = 0;
++ return -EINVAL;
++ }
++
++ /* Enable all sources */
++ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, secvio_inten_src);
++
++ dev_info(svdev, "security violation service handlers armed\n");
++
++ return 0;
++}
++
++void caam_secvio_shutdown(struct platform_device *pdev)
++{
++ struct device *ctrldev, *svdev;
++ struct caam_drv_private *priv;
++ struct caam_drv_private_secvio *svpriv;
++ int i;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++ svdev = priv->secviodev;
++ svpriv = dev_get_drvdata(svdev);
++
++ /* Shut off all sources */
++
++ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, 0);
++
++ /* Remove tasklets and release interrupt */
++ for_each_possible_cpu(i)
++ tasklet_kill(&svpriv->irqtask[i]);
++
++ free_irq(priv->secvio_irq, svdev);
++
++ kfree(svpriv);
++}
++
++
++#ifdef CONFIG_OF
++static void __exit caam_secvio_exit(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_get(dev_node);
++
++ caam_secvio_shutdown(pdev);
++
++}
++
++static int __init caam_secvio_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL,
++ "arm,imx6-caam-secvio");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_put(dev_node);
++
++ return caam_secvio_startup(pdev);
++}
++
++module_init(caam_secvio_init);
++module_exit(caam_secvio_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM/SNVS Security Violation Handler");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-3.14.40.orig/drivers/crypto/caam/secvio.h linux-3.14.40/drivers/crypto/caam/secvio.h
+--- linux-3.14.40.orig/drivers/crypto/caam/secvio.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/crypto/caam/secvio.h 2015-05-01 14:57:58.951427001 -0500
+@@ -0,0 +1,64 @@
++
++/*
++ * CAAM Security Violation Handler
++ * Copyright (C) 2013 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#ifndef SECVIO_H
++#define SECVIO_H
++
++#include "snvsregs.h"
++
++
++/*
++ * Defines the published interfaces to install/remove application-specified
++ * handlers for catching violations
++ */
++
++#define MAX_SECVIO_SOURCES 6
++
++/* these are the untranslated causes */
++enum secvio_cause {
++ SECVIO_CAUSE_SOURCE_0,
++ SECVIO_CAUSE_SOURCE_1,
++ SECVIO_CAUSE_SOURCE_2,
++ SECVIO_CAUSE_SOURCE_3,
++ SECVIO_CAUSE_SOURCE_4,
++ SECVIO_CAUSE_SOURCE_5
++};
++
++/* These are common "recommended" cause definitions for most devices */
++#define SECVIO_CAUSE_CAAM_VIOLATION SECVIO_CAUSE_SOURCE_0
++#define SECVIO_CAUSE_JTAG_ALARM SECVIO_CAUSE_SOURCE_1
++#define SECVIO_CAUSE_WATCHDOG SECVIO_CAUSE_SOURCE_2
++#define SECVIO_CAUSE_EXTERNAL_BOOT SECVIO_CAUSE_SOURCE_4
++#define SECVIO_CAUSE_TAMPER_DETECT SECVIO_CAUSE_SOURCE_5
++
++int caam_secvio_install_handler(struct device *dev, enum secvio_cause cause,
++ void (*handler)(struct device *dev, u32 cause,
++ void *ext),
++ u8 *cause_description, void *ext);
++int caam_secvio_remove_handler(struct device *dev, enum secvio_cause cause);
++
++/*
++ * Private data definitions for the secvio "driver"
++ */
++
++struct secvio_int_src {
++ const u8 *intname; /* Points to a descriptive name for source */
++ void *ext; /* Extended data to pass to the handler */
++ void (*handler)(struct device *dev, u32 cause, void *ext);
++};
++
++struct caam_drv_private_secvio {
++ struct device *parentdev; /* points back to the controller */
++ spinlock_t svlock ____cacheline_aligned;
++ struct tasklet_struct irqtask[NR_CPUS];
++ struct snvs_full __iomem *svregs; /* both HP and LP domains */
++
++ /* Registered handlers for each violation */
++ struct secvio_int_src intsrc[MAX_SECVIO_SOURCES];
++
++};
++
++#endif /* SECVIO_H */
+diff -Nur linux-3.14.40.orig/drivers/crypto/caam/sm.h linux-3.14.40/drivers/crypto/caam/sm.h
+--- linux-3.14.40.orig/drivers/crypto/caam/sm.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/crypto/caam/sm.h 2015-05-01 14:57:58.951427001 -0500
+@@ -0,0 +1,88 @@
++
++/*
++ * CAAM Secure Memory/Keywrap API Definitions
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
++ */
++
++#ifndef SM_H
++#define SM_H
++
++
++/* Storage access permissions */
++#define SM_PERM_READ 0x01
++#define SM_PERM_WRITE 0x02
++#define SM_PERM_BLOB 0x03
++
++
++/* Keystore maintenance functions */
++void sm_init_keystore(struct device *dev);
++u32 sm_detect_keystore_units(struct device *dev);
++int sm_establish_keystore(struct device *dev, u32 unit);
++void sm_release_keystore(struct device *dev, u32 unit);
++void caam_sm_shutdown(struct platform_device *pdev);
++int caam_sm_example_init(struct platform_device *pdev);
++
++/* Keystore accessor functions */
++extern int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size,
++ u32 *slot);
++extern int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot);
++extern int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length);
++extern int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data);
++extern int sm_keystore_slot_encapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++extern int sm_keystore_slot_decapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++
++/* Data structure to hold per-slot information */
++struct keystore_data_slot_info {
++ u8 allocated; /* Track slot assignments */
++ u32 key_length; /* Size of the key */
++};
++
++/* Data structure to hold keystore information */
++struct keystore_data {
++ void *base_address; /* Base of the Secure Partition */
++ u32 slot_count; /* Number of slots in the keystore */
++ struct keystore_data_slot_info *slot; /* Per-slot information */
++};
++
++/* store the detected attributes of a secure memory page */
++struct sm_page_descriptor {
++ u16 phys_pagenum; /* may be discontiguous */
++ u16 own_part; /* Owning partition */
++ void *pg_base; /* Calculated virtual address */
++ struct keystore_data *ksdata;
++};
++
++struct caam_drv_private_sm {
++ struct device *parentdev; /* this ends up as the controller */
++ struct device *smringdev; /* ring that owns this instance */
++ spinlock_t kslock ____cacheline_aligned;
++
++ /* Default parameters for geometry */
++ u32 max_pages; /* maximum pages this instance can support */
++ u32 top_partition; /* highest partition number in this instance */
++ u32 top_page; /* highest page number in this instance */
++ u32 page_size; /* page size */
++ u32 slot_size; /* selected size of each storage block */
++
++ /* Partition/Page Allocation Map */
++ u32 localpages; /* Number of pages we can access */
++ struct sm_page_descriptor *pagedesc; /* Allocated per-page */
++
++ /* Installed handlers for keystore access */
++ int (*data_init)(struct device *dev, u32 unit);
++ void (*data_cleanup)(struct device *dev, u32 unit);
++ int (*slot_alloc)(struct device *dev, u32 unit, u32 size, u32 *slot);
++ int (*slot_dealloc)(struct device *dev, u32 unit, u32 slot);
++ void *(*slot_get_address)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_base)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_offset)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_slot_size)(struct device *dev, u32 unit, u32 handle);
++};
++
++#endif /* SM_H */
+diff -Nur linux-3.14.40.orig/drivers/crypto/caam/sm_store.c linux-3.14.40/drivers/crypto/caam/sm_store.c
+--- linux-3.14.40.orig/drivers/crypto/caam/sm_store.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/crypto/caam/sm_store.c 2015-05-01 14:57:58.951427001 -0500
+@@ -0,0 +1,896 @@
++
++/*
++ * CAAM Secure Memory Storage Interface
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Loosely based on the SHW Keystore API for SCC/SCC2
++ * Experimental implementation and NOT intended for upstream use. Expect
++ * this interface to be amended significantly in the future once it becomes
++ * integrated into live applications.
++ *
++ * Known issues:
++ *
++ * - Executes one instance of a secure memory "driver". This is tied to the
++ * fact that job rings can't run as standalone instances in the present
++ * configuration.
++ *
++ * - It does not expose a userspace interface. The value of a userspace
++ * interface for access to secrets is a point for further architectural
++ * discussion.
++ *
++ * - Partition/permission management is not part of this interface. It
++ * depends on some level of "knowledge" agreed upon between bootloader,
++ * provisioning applications, and OS-hosted software (which uses this
++ * driver).
++ *
++ * - No means of identifying the location or purpose of secrets managed by
++ * this interface exists; "slot location" and format of a given secret
++ * needs to be agreed upon between bootloader, provisioner, and OS-hosted
++ * application.
++ */
++
++#include "compat.h"
++#include "regs.h"
++#include "jr.h"
++#include "desc.h"
++#include "intern.h"
++#include "error.h"
++#include "sm.h"
++
++#ifdef SM_DEBUG_CONT
++void sm_show_page(struct device *dev, struct sm_page_descriptor *pgdesc)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ u32 i, *smdata;
++
++ dev_info(dev, "physical page %d content at 0x%08x\n",
++ pgdesc->phys_pagenum, pgdesc->pg_base);
++ smdata = pgdesc->pg_base;
++ for (i = 0; i < (smpriv->page_size / sizeof(u32)); i += 4)
++ dev_info(dev, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
++ (u32)&smdata[i], smdata[i], smdata[i+1], smdata[i+2],
++ smdata[i+3]);
++}
++#endif
++
++/*
++ * Construct a secure memory blob encapsulation job descriptor
++ *
++ * - desc pointer to hold new (to be allocated) pointer to the generated
++ * descriptor for later use. Calling thread can kfree the
++ * descriptor after execution.
++ * - keymod Physical pointer to key modifier (contiguous piece).
++ * - keymodsz Size of key modifier in bytes (should normally be 8).
++ * - secretbuf Physical pointer (within an accessible secure memory page)
++ * of the secret to be encapsulated.
++ * - outbuf Physical pointer (within an accessible secure memory page)
++ * of the encapsulated output. This will be larger than the
++ * input secret because of the added encapsulation data.
++ * - secretsz Size of input secret, in bytes.
++ * - auth If nonzero, use AES-CCM for encapsulation, else use ECB
++ *
++ * Note: this uses 32-bit pointers at present
++ */
++#define INITIAL_DESCSZ 16 /* size of tmp buffer for descriptor const. */
++static int blob_encap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
++ dma_addr_t secretbuf, dma_addr_t outbuf,
++ u16 secretsz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
++ (keymodsz & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Encapsulate to secure memory */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
++ tmpdesc[idx++] = (u32)secretbuf;
++
++ /* Add space for BKEK and MAC tag */
++	tmpdesc[idx++] = CMD_SEQ_OUT_PTR | (secretsz + (32 + 16));
++
++ tmpdesc[idx++] = (u32)outbuf;
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
++ OP_PCL_BLOB_PTXT_SECMEM;
++ if (auth)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Construct a secure memory blob decapsulation job descriptor
++ *
++ * - desc pointer to hold new (to be allocated) pointer to the generated
++ * descriptor for later use. Calling thread can kfree the
++ * descriptor after execution.
++ * - keymod Physical pointer to key modifier (contiguous piece).
++ * - keymodsz Size of key modifier in bytes (should normally be 16).
++ * - blobbuf Physical pointer (within an accessible secure memory page)
++ * of the blob to be decapsulated.
++ * - outbuf Physical pointer (within an accessible secure memory page)
++ * of the decapsulated output.
++ * - secretsz Size of input blob, in bytes.
++ * - auth If nonzero, assume AES-CCM for decapsulation, else use ECB
++ *
++ * Note: this uses 32-bit pointers at present
++ */
++static int blob_decap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
++ dma_addr_t blobbuf, dma_addr_t outbuf,
++ u16 blobsz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
++ (keymodsz & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Compensate BKEK + MAC tag */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (blobsz + 32 + 16);
++
++ tmpdesc[idx++] = (u32)blobbuf;
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | blobsz;
++ tmpdesc[idx++] = (u32)outbuf;
++
++ /* Decapsulate from secure memory partition to black blob */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB |
++ OP_PCL_BLOB_PTXT_SECMEM | OP_PCL_BLOB_BLACK;
++ if (auth)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Pseudo-synchronous ring access functions for carrying out key
++ * encapsulation and decapsulation
++ */
++
++struct sm_key_job_result {
++ int error;
++ struct completion completion;
++};
++
++void sm_key_job_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct sm_key_job_result *res = context;
++
++ res->error = err; /* save off the error for postprocessing */
++ complete(&res->completion); /* mark us complete */
++}
++
++static int sm_key_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct sm_key_job_result testres;
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
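++	/* Enqueue on the keystore's job ring and wait for the completion callback */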
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, sm_key_job_done,
++ &testres);
++ if (!rtn) {
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++ }
++ return rtn;
++}
++
++/*
++ * Following section establishes the default methods for keystore access
++ * They are NOT intended for use external to this module
++ *
++ * In the present version, these are the only means for the higher-level
++ * interface to deal with the mechanics of accessing the physical keystore
++ */
++
++
++int slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u32 i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): requesting slot for %d bytes\n", size);
++#endif
++
++ if (size > smpriv->slot_size)
++ return -EKEYREJECTED;
++
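++	/* Linear scan for the first unallocated slot */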
++ for (i = 0; i < ksdata->slot_count; i++) {
++ if (ksdata->slot[i].allocated == 0) {
++ ksdata->slot[i].allocated = 1;
++ (*slot) = i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): new slot %d allocated\n",
++ *slot);
++#endif
++ return 0;
++ }
++ }
++
++ return -ENOSPC;
++}
++EXPORT_SYMBOL(slot_alloc);
++
++int slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u8 __iomem *slotdata;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): releasing slot %d\n", slot);
++#endif
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++ slotdata = ksdata->base_address + slot * smpriv->slot_size;
++
++ if (ksdata->slot[slot].allocated == 1) {
++ /* Forcibly overwrite the data from the keystore */
++ memset(ksdata->base_address + slot * smpriv->slot_size, 0,
++ smpriv->slot_size);
++
++ ksdata->slot[slot].allocated = 0;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): slot %d released\n", slot);
++#endif
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(slot_dealloc);
++
++void *slot_get_address(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return NULL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_address(): slot %d is 0x%08x\n", slot,
++ (u32)ksdata->base_address + slot * smpriv->slot_size);
++#endif
++
++ return ksdata->base_address + slot * smpriv->slot_size;
++}
++
++u32 slot_get_base(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ /*
++ * There could potentially be more than one secure partition object
++ * associated with this keystore. For now, there is just one.
++ */
++
++ (void)slot;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_base(): slot %d = 0x%08x\n",
++ slot, (u32)ksdata->base_address);
++#endif
++
++ return (u32)(ksdata->base_address);
++}
++
++u32 slot_get_offset(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_offset(): slot %d = %d\n", slot,
++ slot * smpriv->slot_size);
++#endif
++
++ return slot * smpriv->slot_size;
++}
++
++u32 slot_get_slot_size(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_slot_size(): slot %d = %d\n", slot,
++ smpriv->slot_size);
++#endif
++ /* All slots are the same size in the default implementation */
++ return smpriv->slot_size;
++}
++
++
++
++int kso_init_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ struct keystore_data *keystore_data = NULL;
++ u32 slot_count;
++ u32 keystore_data_size;
++
++ /*
++ * Calculate the required size of the keystore data structure, based
++ * on the number of keys that can fit in the partition.
++ */
++ slot_count = smpriv->page_size / smpriv->slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: %d slots initializing\n", slot_count);
++#endif
++
++ keystore_data_size = sizeof(struct keystore_data) +
++ slot_count *
++ sizeof(struct keystore_data_slot_info);
++
++ keystore_data = kzalloc(keystore_data_size, GFP_KERNEL);
++
++ if (keystore_data == NULL) {
++ retval = -ENOSPC;
++ goto out;
++ }
++
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: keystore data size = %d\n",
++ keystore_data_size);
++#endif
++
++ /*
++ * Place the slot information structure directly after the keystore data
++ * structure.
++ */
++ keystore_data->slot = (struct keystore_data_slot_info *)
++ (keystore_data + 1);
++ keystore_data->slot_count = slot_count;
++
++ smpriv->pagedesc[unit].ksdata = keystore_data;
++ smpriv->pagedesc[unit].ksdata->base_address =
++ smpriv->pagedesc[unit].pg_base;
++
++ retval = 0;
++
++out:
++ if (retval != 0)
++ if (keystore_data != NULL)
++ kfree(keystore_data);
++
++
++ return retval;
++}
++
++void kso_cleanup_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *keystore_data = NULL;
++
++ if (smpriv->pagedesc[unit].ksdata != NULL)
++ keystore_data = smpriv->pagedesc[unit].ksdata;
++
++ /* Release the allocated keystore management data */
++ kfree(smpriv->pagedesc[unit].ksdata);
++
++ return;
++}
++
++
++
++/*
++ * Keystore management section
++ */
++
++void sm_init_keystore(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
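++	/* Install the default slot-management handlers defined above */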
++ smpriv->data_init = kso_init_data;
++ smpriv->data_cleanup = kso_cleanup_data;
++ smpriv->slot_alloc = slot_alloc;
++ smpriv->slot_dealloc = slot_dealloc;
++ smpriv->slot_get_address = slot_get_address;
++ smpriv->slot_get_base = slot_get_base;
++ smpriv->slot_get_offset = slot_get_offset;
++ smpriv->slot_get_slot_size = slot_get_slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_init_keystore(): handlers installed\n");
++#endif
++}
++EXPORT_SYMBOL(sm_init_keystore);
++
++/* Return available pages/units */
++u32 sm_detect_keystore_units(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ return smpriv->localpages;
++}
++EXPORT_SYMBOL(sm_detect_keystore_units);
++
++/*
++ * Do any keystore specific initializations
++ */
++int sm_establish_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d initializing\n", unit);
++#endif
++
++ if (smpriv->data_init == NULL)
++ return -EINVAL;
++
++ /* Call the data_init function for any user setup */
++ return smpriv->data_init(dev, unit);
++}
++EXPORT_SYMBOL(sm_establish_keystore);
++
++void sm_release_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d releasing\n", unit);
++#endif
++ if ((smpriv != NULL) && (smpriv->data_cleanup != NULL))
++ smpriv->data_cleanup(dev, unit);
++
++ return;
++}
++EXPORT_SYMBOL(sm_release_keystore);
++
++/*
++ * The subsequent interface (sm_keystore_*) forms the accessor interface to
++ * the keystore.
++ */
++int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_alloc(dev, unit, size, slot);
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_alloc);
++
++int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_dealloc(dev, unit, slot);
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_dealloc);
++
++int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u32 slot_size;
++ u32 i;
++ u8 __iomem *slot_location;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EFBIG;
++ goto out;
++ }
++
++ slot_location = smpriv->slot_get_address(dev, unit, slot);
++
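++	/* Byte-wise copy of the key data into the secure memory slot */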
++ for (i = 0; i < key_length; i++)
++ slot_location[i] = key_data[i];
++
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_load);
++
++int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u8 __iomem *slot_addr;
++ u32 slot_size;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_addr = smpriv->slot_get_address(dev, unit, slot);
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EKEYREJECTED;
++ goto out;
++ }
++
++ memcpy(key_data, slot_addr, key_length);
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_read);
++
++int sm_keystore_slot_encapsulate(struct device *dev, u32 unit, u32 inslot,
++ u32 outslot, u16 secretlen, u8 *keymod,
++ u16 keymodlen)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u32 slot_length, dsize, jstat;
++ u32 __iomem *encapdesc = NULL;
++ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
++ dma_addr_t keymod_dma;
++
++ /* Ensure that the full blob will fit in the key slot */
++ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
++ if ((secretlen + 48) > slot_length)
++ goto out;
++
++ /* Get the base addresses of both keystore slots */
++ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
++ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
++
++ /* Build the key modifier */
++ lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
++ memcpy(lkeymod, keymod, keymodlen);
++ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++
++ /* Build the encapsulation job descriptor */
++ dsize = blob_encap_desc(&encapdesc, keymod_dma, keymodlen,
++ __pa(inpslotaddr), __pa(outslotaddr),
++ secretlen, 0);
++ if (!dsize) {
++ dev_err(dev, "can't alloc an encap descriptor\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ jstat = sm_key_job(dev, encapdesc);
++
++ dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++ kfree(encapdesc);
++
++out:
++ return retval;
++
++}
++EXPORT_SYMBOL(sm_keystore_slot_encapsulate);
++
++int sm_keystore_slot_decapsulate(struct device *dev, u32 unit, u32 inslot,
++ u32 outslot, u16 secretlen, u8 *keymod,
++ u16 keymodlen)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u32 slot_length, dsize, jstat;
++ u32 __iomem *decapdesc = NULL;
++ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
++ dma_addr_t keymod_dma;
++
++ /* Ensure that the decap data will fit in the key slot */
++ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
++ if (secretlen > slot_length)
++ goto out;
++
++ /* Get the base addresses of both keystore slots */
++ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
++ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
++
++ /* Build the key modifier */
++ lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
++ memcpy(lkeymod, keymod, keymodlen);
++ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++
++ /* Build the decapsulation job descriptor */
++ dsize = blob_decap_desc(&decapdesc, keymod_dma, keymodlen,
++ __pa(inpslotaddr), __pa(outslotaddr),
++ secretlen, 0);
++ if (!dsize) {
++ dev_err(dev, "can't alloc a decap descriptor\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ jstat = sm_key_job(dev, decapdesc);
++
++ dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++ kfree(decapdesc);
++
++out:
++ return retval;
++
++}
++EXPORT_SYMBOL(sm_keystore_slot_decapsulate);
++
++
++/*
++ * Initialization/shutdown subsystem
++ * Assumes statically-invoked startup/shutdown from the controller driver
++ * for the present time, to be reworked when a device tree becomes
++ * available. This code will not modularize in present form.
++ *
++ * Also, simply uses ring 0 for execution at present
++ */
++
++int caam_sm_startup(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *smpriv;
++ struct caam_drv_private_jr *jrpriv; /* need this for reg page */
++ struct platform_device *sm_pdev;
++ struct sm_page_descriptor *lpagedesc;
++ u32 page, pgstat, lpagect, detectedpage;
++
++ struct device_node *np;
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++
++ /*
++ * Set up the private block for secure memory
++ * Only one instance is possible
++ */
++ smpriv = kzalloc(sizeof(struct caam_drv_private_sm), GFP_KERNEL);
++ if (smpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for secure memory\n");
++ return -ENOMEM;
++ }
++ smpriv->parentdev = ctrldev; /* copy of parent dev is handy */
++
++ /* Create the dev */
++#ifdef CONFIG_OF
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
++ sm_pdev = of_platform_device_create(np, "caam_sm", ctrldev);
++#else
++ sm_pdev = platform_device_register_data(ctrldev, "caam_sm", 0,
++ smpriv,
++ sizeof(struct caam_drv_private_sm));
++#endif
++ if (sm_pdev == NULL) {
++ kfree(smpriv);
++ return -EINVAL;
++ }
++ smdev = &sm_pdev->dev;
++ dev_set_drvdata(smdev, smpriv);
++ ctrlpriv->smdev = smdev;
++
++ /*
++ * Collect configuration limit data for reference
++ * This batch comes from the partition data/vid registers in perfmon
++ */
++ smpriv->max_pages = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_NUMPG_MASK) >>
++ SMPART_MAX_NUMPG_SHIFT) + 1;
++ smpriv->top_partition = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_PNUM_MASK) >>
++ SMPART_MAX_PNUM_SHIFT) + 1;
++ smpriv->top_page = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_PG_MASK) >> SMPART_MAX_PG_SHIFT) + 1;
++ smpriv->page_size = 1024 << ((rd_reg32(&ctrlpriv->ctrl->perfmon.smvid)
++ & SMVID_PG_SIZE_MASK) >> SMVID_PG_SIZE_SHIFT);
++ smpriv->slot_size = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;
++
++#ifdef SM_DEBUG
++ dev_info(smdev, "max pages = %d, top partition = %d\n",
++ smpriv->max_pages, smpriv->top_partition);
++ dev_info(smdev, "top page = %d, page size = %d (total = %d)\n",
++ smpriv->top_page, smpriv->page_size,
++ smpriv->top_page * smpriv->page_size);
++ dev_info(smdev, "selected slot size = %d\n", smpriv->slot_size);
++#endif
++
++ /*
++ * Now probe for partitions/pages to which we have access. Note that
++ * these have likely been set up by a bootloader or platform
++ * provisioning application, so we have to assume that we "inherit"
++ * a configuration and work within the constraints of what it might be.
++ *
++ * Assume use of the zeroth ring in the present iteration (until
++ * we can divorce the controller and ring drivers, and then assign
++ * an SM instance to any ring instance).
++ */
++ smpriv->smringdev = ctrlpriv->jrdev[0];
++ jrpriv = dev_get_drvdata(smpriv->smringdev);
++ lpagect = 0;
++ lpagedesc = kzalloc(sizeof(struct sm_page_descriptor)
++ * smpriv->max_pages, GFP_KERNEL);
++ if (lpagedesc == NULL) {
++ kfree(smpriv);
++ return -ENOMEM;
++ }
++
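++	/*
++	 * Probe each possible page with a PAGE_INQUIRY command through the
++	 * ring's secure memory command register, and record the pages owned
++	 * by this partition in a local page descriptor table.
++	 */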
++ for (page = 0; page < smpriv->max_pages; page++) {
++ wr_reg32(&jrpriv->rregs->sm_cmd,
++ ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
++ (SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK));
++ pgstat = rd_reg32(&jrpriv->rregs->sm_status);
++ if (((pgstat & SMCS_PGWON_MASK) >> SMCS_PGOWN_SHIFT)
++ == SMCS_PGOWN_OWNED) { /* our page? */
++ lpagedesc[page].phys_pagenum =
++ (pgstat & SMCS_PAGE_MASK) >> SMCS_PAGE_SHIFT;
++ lpagedesc[page].own_part =
++				(pgstat & SMCS_PART_MASK) >> SMCS_PART_SHIFT;
++ lpagedesc[page].pg_base = ctrlpriv->sm_base +
++ ((smpriv->page_size * page) / sizeof(u32));
++ lpagect++;
++#ifdef SM_DEBUG
++ dev_info(smdev,
++ "physical page %d, owning partition = %d\n",
++ lpagedesc[page].phys_pagenum,
++ lpagedesc[page].own_part);
++#endif
++ }
++ }
++
++ smpriv->pagedesc = kzalloc(sizeof(struct sm_page_descriptor) * lpagect,
++ GFP_KERNEL);
++ if (smpriv->pagedesc == NULL) {
++ kfree(lpagedesc);
++ kfree(smpriv);
++ return -ENOMEM;
++ }
++ smpriv->localpages = lpagect;
++
++ detectedpage = 0;
++ for (page = 0; page < smpriv->max_pages; page++) {
++		if (lpagedesc[page].pg_base != NULL) {	/* i.e. live entry */
++ memcpy(&smpriv->pagedesc[detectedpage],
++ &lpagedesc[page],
++ sizeof(struct sm_page_descriptor));
++#ifdef SM_DEBUG_CONT
++ sm_show_page(smdev, &smpriv->pagedesc[detectedpage]);
++#endif
++ detectedpage++;
++ }
++ }
++
++ kfree(lpagedesc);
++
++ sm_init_keystore(smdev);
++
++ return 0;
++}
++
++void caam_sm_shutdown(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *priv;
++ struct caam_drv_private_sm *smpriv;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++ smdev = priv->smdev;
++ smpriv = dev_get_drvdata(smdev);
++
++ kfree(smpriv->pagedesc);
++ kfree(smpriv);
++}
++EXPORT_SYMBOL(caam_sm_shutdown);
++#ifdef CONFIG_OF
++static void __exit caam_sm_exit(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_put(dev_node);
++
++ caam_sm_shutdown(pdev);
++
++ return;
++}
++
++static int __init caam_sm_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_get(dev_node);
++
++ caam_sm_startup(pdev);
++
++ return 0;
++}
++
++module_init(caam_sm_init);
++module_exit(caam_sm_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Secure Memory / Keystore");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-3.14.40.orig/drivers/crypto/caam/sm_test.c linux-3.14.40/drivers/crypto/caam/sm_test.c
+--- linux-3.14.40.orig/drivers/crypto/caam/sm_test.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/crypto/caam/sm_test.c 2015-05-01 14:57:58.955427001 -0500
+@@ -0,0 +1,844 @@
++/*
++ * Secure Memory / Keystore Exemplification Module
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ *
++ * Serves as a functional example, and as a self-contained unit test for
++ * the functionality contained in sm_store.c.
++ *
++ * The example function, caam_sm_example_init(), runs a thread that:
++ *
++ * - initializes a set of fixed keys
++ * - stores one copy in clear buffers
++ * - stores them again in secure memory
++ * - extracts stored keys back out for use
++ * - initializes 3 data buffers for a test:
++ * (1) containing cleartext
++ * (2) to hold ciphertext encrypted with an extracted black key
++ * (3) to hold extracted cleartext decrypted with an equivalent clear key
++ *
++ * The function then builds simple job descriptors that reference the key
++ * material and buffers as initialized, and executes an encryption job
++ * with a black key, and a decryption job using the same key held in the
++ * clear. The output of the decryption job is compared to the original
++ * cleartext; if they don't compare correctly, one can assume a key problem
++ * exists, where the function will exit with an error.
++ *
++ * This module could use a substantial amount of refactoring, which may occur
++ * after the API gets some mileage. Furthermore, expect this module to
++ * eventually disappear once the API is integrated into "real" software.
++ */
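++
++/*
++ * The keystore calls exercised below, in rough order, are:
++ *   sm_detect_keystore_units(), sm_establish_keystore(),
++ *   sm_keystore_slot_alloc(), sm_keystore_slot_load(),
++ *   sm_keystore_slot_encapsulate(), sm_keystore_slot_decapsulate(),
++ *   sm_keystore_slot_read(), sm_keystore_slot_dealloc() and
++ *   sm_release_keystore().
++ */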
++
++#include "compat.h"
++#include "intern.h"
++#include "desc.h"
++#include "error.h"
++#include "jr.h"
++#include "sm.h"
++
++static u8 skeymod[] = {
++ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
++ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
++};
++static u8 symkey[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
++};
++
++static u8 symdata[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x0f, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
++ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
++ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
++ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
++ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
++ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
++ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
++ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
++ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
++ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
++ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
++ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
++ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
++ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
++ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
++ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
++ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
++ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
++ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
++ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
++ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
++};
++
++static int mk_job_desc(u32 *desc, dma_addr_t key, u16 keysz, dma_addr_t indata,
++ dma_addr_t outdata, u16 sz, u32 cipherdir, u32 keymode)
++{
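++	/*
++	 * Build a minimal 8-word job descriptor:
++	 *   [1]-[2] KEY command and key address (class 1)
++	 *   [3]     OPERATION: class 1 algorithm, ECB, encrypt or decrypt
++	 *   [4]-[5] FIFO LOAD of the input message
++	 *   [6]-[7] FIFO STORE of the output message
++	 *   [0]     header, written last with the descriptor length (8 words)
++	 */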
++ desc[1] = CMD_KEY | CLASS_1 | (keysz & KEY_LENGTH_MASK) | keymode;
++ desc[2] = (u32)key;
++ desc[3] = CMD_OPERATION | OP_TYPE_CLASS1_ALG | OP_ALG_AAI_ECB |
++ cipherdir;
++ desc[4] = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 | sz;
++ desc[5] = (u32)indata;
++ desc[6] = CMD_FIFO_STORE | FIFOST_TYPE_MESSAGE_DATA | sz;
++ desc[7] = (u32)outdata;
++
++ desc[0] = CMD_DESC_HDR | HDR_ONE | (8 & HDR_DESCLEN_MASK);
++ return 8 * sizeof(u32);
++}
++
++struct exec_test_result {
++ int error;
++ struct completion completion;
++};
++
++void exec_test_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct exec_test_result *res = context;
++
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
++
++ res->error = err;
++ complete(&res->completion);
++}
++
++static int exec_test_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct exec_test_result testres;
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, exec_test_done,
++ &testres);
++ if (!rtn) {
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++ }
++ return rtn;
++}
++
++
++int caam_sm_example_init(struct platform_device *pdev)
++{
++ struct device *ctrldev, *ksdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *kspriv;
++ u32 unit, units, jdescsz;
++ int stat, jstat, rtnval = 0;
++ u8 __iomem *syminp, *symint, *symout = NULL;
++ dma_addr_t syminp_dma, symint_dma, symout_dma;
++ u8 __iomem *black_key_des, *black_key_aes128;
++ u8 __iomem *black_key_aes256;
++ dma_addr_t black_key_des_dma, black_key_aes128_dma;
++ dma_addr_t black_key_aes256_dma;
++ u8 __iomem *clear_key_des, *clear_key_aes128, *clear_key_aes256;
++ dma_addr_t clear_key_des_dma, clear_key_aes128_dma;
++ dma_addr_t clear_key_aes256_dma;
++ u32 __iomem *jdesc;
++ u32 keyslot_des, keyslot_aes128, keyslot_aes256 = 0;
++
++ jdesc = NULL;
++ black_key_des = black_key_aes128 = black_key_aes256 = NULL;
++ clear_key_des = clear_key_aes128 = clear_key_aes256 = NULL;
++
++ /* We can lose this cruft once we can get a pdev by name */
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ ksdev = ctrlpriv->smdev;
++ kspriv = dev_get_drvdata(ksdev);
++ if (kspriv == NULL)
++ return -ENODEV;
++
++ /* Now that we have the dev for the single SM instance, connect */
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test_init() running\n");
++#endif
++ /* Probe to see what keystores are available to us */
++ units = sm_detect_keystore_units(ksdev);
++ if (!units)
++ dev_err(ksdev, "caam_sm_test: no keystore units available\n");
++
++ /*
++ * MX6 bootloader stores some stuff in unit 0, so let's
++ * use 1 or above
++ */
++ if (units < 2) {
++ dev_err(ksdev, "caam_sm_test: insufficient keystore units\n");
++ return -ENODEV;
++ }
++ unit = 1;
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: %d keystore units available\n", units);
++#endif
++
++ /* Initialize/Establish Keystore */
++	sm_establish_keystore(ksdev, unit);	/* Initialize store in #1 */
++
++ /*
++ * Top of main test thread
++ */
++
++ /* Allocate test data blocks (input, intermediate, output) */
++ syminp = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ symint = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ symout = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ if ((syminp == NULL) || (symint == NULL) || (symout == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get test data buffers\n");
++ goto freemem;
++ }
++
++ /* Allocate storage for 3 black keys: encapsulated 8, 16, 32 */
++ black_key_des = kmalloc(16, GFP_KERNEL | GFP_DMA); /* padded to 16... */
++ black_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ black_key_aes256 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ if ((black_key_des == NULL) || (black_key_aes128 == NULL) ||
++ (black_key_aes256 == NULL)) {
++ rtnval = -ENOMEM;
++		dev_err(ksdev, "caam_sm_test: can't get black key buffers\n");
++ goto freemem;
++ }
++
++ clear_key_des = kmalloc(8, GFP_KERNEL | GFP_DMA);
++ clear_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ clear_key_aes256 = kmalloc(32, GFP_KERNEL | GFP_DMA);
++ if ((clear_key_des == NULL) || (clear_key_aes128 == NULL) ||
++ (clear_key_aes256 == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get clear key buffers\n");
++ goto freemem;
++ }
++
++ /* Allocate storage for job descriptor */
++ jdesc = kmalloc(8 * sizeof(u32), GFP_KERNEL | GFP_DMA);
++ if (jdesc == NULL) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get descriptor buffers\n");
++ goto freemem;
++ }
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all buffers allocated\n");
++#endif
++
++ /* Load up input data block, clear outputs */
++ memcpy(syminp, symdata, 256);
++ memset(symint, 0, 256);
++ memset(symout, 0, 256);
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++
++ dev_info(ksdev, "caam_sm_test: data buffers initialized\n");
++#endif
++
++ /* Load up clear keys */
++ memcpy(clear_key_des, symkey, 8);
++ memcpy(clear_key_aes128, symkey, 16);
++ memcpy(clear_key_aes256, symkey, 32);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all clear keys loaded\n");
++#endif
++
++ /*
++ * Place clear keys in keystore.
++ * All the interesting stuff happens here.
++ */
++	/* 8 byte DES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 8, &keyslot_des);
++ if (stat)
++ goto freemem;
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 8 byte key slot in %d\n", keyslot_des);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_des, clear_key_des,
++ 8);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 8 byte key in %d\n",
++ keyslot_des);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++	/* 16 byte AES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 16, &keyslot_aes128);
++ if (stat) {
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 16 byte key slot in %d\n",
++ keyslot_aes128);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes128,
++ clear_key_aes128, 16);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 16 byte key in %d\n",
++ keyslot_aes128);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++	/* 32 byte AES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 32, &keyslot_aes256);
++ if (stat) {
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 32 byte key slot in %d\n",
++ keyslot_aes256);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes256,
++ clear_key_aes256, 32);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 32 byte key in %d\n",
++			 keyslot_aes256);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++ /* Encapsulate all keys as SM blobs */
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_des,
++ keyslot_des, 8, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate DES key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes128,
++ keyslot_aes128, 16, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes256,
++ keyslot_aes256, 32, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate AES256 key\n");
++ goto freekeys;
++ }
++
++ /* Now decapsulate as black key blobs */
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_des,
++ keyslot_des, 8, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate DES key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes128,
++ keyslot_aes128, 16, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes256,
++ keyslot_aes256, 32, skeymod, 8);
++ if (stat) {
++		dev_info(ksdev, "caam_sm_test: can't decapsulate AES256 key\n");
++ goto freekeys;
++ }
++
++ /* Extract 8/16/32 byte black keys */
++ sm_keystore_slot_read(ksdev, unit, keyslot_des, 8, black_key_des);
++ sm_keystore_slot_read(ksdev, unit, keyslot_aes128, 16,
++ black_key_aes128);
++ sm_keystore_slot_read(ksdev, unit, keyslot_aes256, 32,
++ black_key_aes256);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all black keys extracted\n");
++#endif
++
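++	/*
++	 * Each encrypt/decrypt cycle below follows the same pattern: map the
++	 * key and data buffers for DMA, build the job descriptor, run the job
++	 * synchronously through exec_test_job(), then unmap and compare.
++	 */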
++ /* DES encrypt using 8 byte black key */
++ black_key_des_dma = dma_map_single(ksdev, black_key_des, 8,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_des_dma, 8, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_DES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 8 byte key\n");
++#endif
++
++ /* DES decrypt using 8 byte clear key */
++ clear_key_des_dma = dma_map_single(ksdev, clear_key_des, 8,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_des_dma, 8, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_DES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 8 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 8-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 8-byte key test match OK\n");
++
++ /* AES-128 encrypt using 16 byte black key */
++ black_key_aes128_dma = dma_map_single(ksdev, black_key_aes128, 16,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_aes128_dma, 16,
++ DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_aes128_dma, 16, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_aes128_dma, 16, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 16 byte key\n");
++#endif
++
++ /* AES-128 decrypt using 16 byte clear key */
++ clear_key_aes128_dma = dma_map_single(ksdev, clear_key_aes128, 16,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_aes128_dma, 16,
++ DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_aes128_dma, 16, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_aes128_dma, 16, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 16 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 16-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 16-byte key test match OK\n");
++
++ /* AES-256 encrypt using 32 byte black key */
++ black_key_aes256_dma = dma_map_single(ksdev, black_key_aes256, 32,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_aes256_dma, 32,
++ DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_aes256_dma, 32, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_aes256_dma, 32, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 32 byte key\n");
++#endif
++
++	/* AES-256 decrypt using 32 byte clear key */
++ clear_key_aes256_dma = dma_map_single(ksdev, clear_key_aes256, 32,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_aes256_dma, 32,
++ DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_aes256_dma, 32, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_aes256_dma, 32, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 32 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 32-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 32-byte key test match OK\n");
++
++
++ /* Remove 8/16/32 byte keys from keystore */
++freekeys:
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_des);
++
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_aes128);
++
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_aes256);
++
++
++ /* Free resources */
++freemem:
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: cleaning up\n");
++#endif
++ kfree(syminp);
++ kfree(symint);
++ kfree(symout);
++ kfree(clear_key_des);
++ kfree(clear_key_aes128);
++ kfree(clear_key_aes256);
++ kfree(black_key_des);
++ kfree(black_key_aes128);
++ kfree(black_key_aes256);
++ kfree(jdesc);
++
++ /* Disconnect from keystore and leave */
++ sm_release_keystore(ksdev, unit);
++
++ return rtnval;
++}
++EXPORT_SYMBOL(caam_sm_example_init);
++
++void caam_sm_example_shutdown(void)
++{
++ /* unused in present version */
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_get(dev_node);
++
++}
++
++static int __init caam_sm_test_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_put(dev_node);
++
++ caam_sm_example_init(pdev);
++
++ return 0;
++}
++
++
++/* Module-based initialization needs to wait for dev tree */
++#ifdef CONFIG_OF
++module_init(caam_sm_test_init);
++module_exit(caam_sm_example_shutdown);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Keystore Usage Example");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-3.14.40.orig/drivers/crypto/caam/snvsregs.h linux-3.14.40/drivers/crypto/caam/snvsregs.h
+--- linux-3.14.40.orig/drivers/crypto/caam/snvsregs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/crypto/caam/snvsregs.h 2015-05-01 14:57:58.955427001 -0500
+@@ -0,0 +1,237 @@
++/*
++ * SNVS hardware register-level view
++ *
++ * Copyright (C) 2013 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#ifndef SNVSREGS_H
++#define SNVSREGS_H
++
++#include <linux/types.h>
++#include <linux/io.h>
++
++/*
++ * SNVS High Power Domain
++ * Includes security violations, HA counter, RTC, alarm
++ */
++struct snvs_hp {
++ u32 lock;
++ u32 cmd;
++ u32 ctl;
++ u32 secvio_int_en; /* Security Violation Interrupt Enable */
++ u32 secvio_int_ctl; /* Security Violation Interrupt Control */
++ u32 status;
++ u32 secvio_status; /* Security Violation Status */
++ u32 ha_counteriv; /* High Assurance Counter IV */
++ u32 ha_counter; /* High Assurance Counter */
++ u32 rtc_msb; /* Real Time Clock/Counter MSB */
++ u32 rtc_lsb; /* Real Time Counter LSB */
++ u32 time_alarm_msb; /* Time Alarm MSB */
++ u32 time_alarm_lsb; /* Time Alarm LSB */
++};
++
++#define HP_LOCK_HAC_LCK 0x00040000
++#define HP_LOCK_HPSICR_LCK 0x00020000
++#define HP_LOCK_HPSVCR_LCK 0x00010000
++#define HP_LOCK_MKEYSEL_LCK 0x00000200
++#define HP_LOCK_TAMPCFG_LCK 0x00000100
++#define HP_LOCK_TAMPFLT_LCK 0x00000080
++#define HP_LOCK_SECVIO_LCK 0x00000040
++#define HP_LOCK_GENP_LCK 0x00000020
++#define HP_LOCK_MONOCTR_LCK 0x00000010
++#define HP_LOCK_CALIB_LCK 0x00000008
++#define HP_LOCK_SRTC_LCK 0x00000004
++#define HP_LOCK_ZMK_RD_LCK 0x00000002
++#define HP_LOCK_ZMK_WT_LCK 0x00000001
++
++#define HP_CMD_NONPRIV_AXS 0x80000000
++#define HP_CMD_HAC_STOP 0x00080000
++#define HP_CMD_HAC_CLEAR 0x00040000
++#define HP_CMD_HAC_LOAD 0x00020000
++#define HP_CMD_HAC_CFG_EN 0x00010000
++#define HP_CMD_SNVS_MSTR_KEY 0x00002000
++#define HP_CMD_PROG_ZMK 0x00001000
++#define HP_CMD_SW_LPSV 0x00000400
++#define HP_CMD_SW_FSV 0x00000200
++#define HP_CMD_SW_SV 0x00000100
++#define HP_CMD_LP_SWR_DIS 0x00000020
++#define HP_CMD_LP_SWR 0x00000010
++#define HP_CMD_SSM_SFNS_DIS 0x00000004
++#define HP_CMD_SSM_ST_DIS 0x00000002
++#define HP_CMD_SMM_ST 0x00000001
++
++#define HP_CTL_TIME_SYNC 0x00010000
++#define HP_CTL_CAL_VAL_SHIFT 10
++#define HP_CTL_CAL_VAL_MASK	(0x1f << HP_CTL_CAL_VAL_SHIFT)
++#define HP_CTL_CALIB_EN 0x00000100
++#define HP_CTL_PI_FREQ_SHIFT 4
++#define HP_CTL_PI_FREQ_MASK (0xf << HP_CTL_PI_FREQ_SHIFT)
++#define HP_CTL_PI_EN 0x00000008
++#define HP_CTL_TIMEALARM_EN 0x00000002
++#define HP_CTL_RTC_EN 0x00000001
++
++#define HP_SECVIO_INTEN_EN 0x10000000
++#define HP_SECVIO_INTEN_SRC5 0x00000020
++#define HP_SECVIO_INTEN_SRC4 0x00000010
++#define HP_SECVIO_INTEN_SRC3 0x00000008
++#define HP_SECVIO_INTEN_SRC2 0x00000004
++#define HP_SECVIO_INTEN_SRC1 0x00000002
++#define HP_SECVIO_INTEN_SRC0 0x00000001
++#define HP_SECVIO_INTEN_ALL 0x8000003f
++
++#define HP_SECVIO_ICTL_CFG_SHIFT 30
++#define HP_SECVIO_ICTL_CFG_MASK (0x3 << HP_SECVIO_ICTL_CFG_SHIFT)
++#define HP_SECVIO_ICTL_CFG5_SHIFT 5
++#define HP_SECVIO_ICTL_CFG5_MASK (0x3 << HP_SECVIO_ICTL_CFG5_SHIFT)
++#define HP_SECVIO_ICTL_CFG_DISABLE 0
++#define HP_SECVIO_ICTL_CFG_NONFATAL 1
++#define HP_SECVIO_ICTL_CFG_FATAL 2
++#define HP_SECVIO_ICTL_CFG4_FATAL 0x00000010
++#define HP_SECVIO_ICTL_CFG3_FATAL 0x00000008
++#define HP_SECVIO_ICTL_CFG2_FATAL 0x00000004
++#define HP_SECVIO_ICTL_CFG1_FATAL 0x00000002
++#define HP_SECVIO_ICTL_CFG0_FATAL 0x00000001
++
++#define HP_STATUS_ZMK_ZERO 0x80000000
++#define HP_STATUS_OTPMK_ZERO 0x08000000
++#define HP_STATUS_OTPMK_SYN_SHIFT 16
++#define HP_STATUS_OTPMK_SYN_MASK (0x1ff << HP_STATUS_OTPMK_SYN_SHIFT)
++#define HP_STATUS_SSM_ST_SHIFT 8
++#define HP_STATUS_SSM_ST_MASK (0xf << HP_STATUS_SSM_ST_SHIFT)
++#define HP_STATUS_SSM_ST_INIT 0
++#define HP_STATUS_SSM_ST_HARDFAIL 1
++#define HP_STATUS_SSM_ST_SOFTFAIL 3
++#define HP_STATUS_SSM_ST_INITINT 8
++#define HP_STATUS_SSM_ST_CHECK 9
++#define HP_STATUS_SSM_ST_NONSECURE 11
++#define HP_STATUS_SSM_ST_TRUSTED 13
++#define HP_STATUS_SSM_ST_SECURE 15
++
++#define HP_SECVIOST_ZMK_ECC_FAIL 0x08000000 /* write to clear */
++#define HP_SECVIOST_ZMK_SYN_SHIFT 16
++#define HP_SECVIOST_ZMK_SYN_MASK (0x1ff << HP_SECVIOST_ZMK_SYN_SHIFT)
++#define HP_SECVIOST_SECVIO5 0x00000020
++#define HP_SECVIOST_SECVIO4 0x00000010
++#define HP_SECVIOST_SECVIO3 0x00000008
++#define HP_SECVIOST_SECVIO2 0x00000004
++#define HP_SECVIOST_SECVIO1 0x00000002
++#define HP_SECVIOST_SECVIO0 0x00000001
++#define HP_SECVIOST_SECVIOMASK 0x0000003f
++
++/*
++ * SNVS Low Power Domain
++ * Includes glitch detector, SRTC, alarm, monotonic counter, ZMK
++ */
++struct snvs_lp {
++ u32 lock;
++ u32 ctl;
++ u32 mstr_key_ctl; /* Master Key Control */
++ u32 secvio_ctl; /* Security Violation Control */
++ u32 tamper_filt_cfg; /* Tamper Glitch Filters Configuration */
++ u32 tamper_det_cfg; /* Tamper Detectors Configuration */
++ u32 status;
++ u32 srtc_msb; /* Secure Real Time Clock/Counter MSB */
++ u32 srtc_lsb; /* Secure Real Time Clock/Counter LSB */
++ u32 time_alarm; /* Time Alarm */
++ u32 smc_msb; /* Secure Monotonic Counter MSB */
++ u32 smc_lsb; /* Secure Monotonic Counter LSB */
++ u32 pwr_glitch_det; /* Power Glitch Detector */
++ u32 gen_purpose;
++ u32 zmk[8]; /* Zeroizable Master Key */
++};
++
++#define LP_LOCK_MKEYSEL_LCK 0x00000200
++#define LP_LOCK_TAMPDET_LCK 0x00000100
++#define LP_LOCK_TAMPFLT_LCK 0x00000080
++#define LP_LOCK_SECVIO_LCK 0x00000040
++#define LP_LOCK_GENP_LCK 0x00000020
++#define LP_LOCK_MONOCTR_LCK 0x00000010
++#define LP_LOCK_CALIB_LCK 0x00000008
++#define LP_LOCK_SRTC_LCK 0x00000004
++#define LP_LOCK_ZMK_RD_LCK 0x00000002
++#define LP_LOCK_ZMK_WT_LCK 0x00000001
++
++#define LP_CTL_CAL_VAL_SHIFT 10
++#define LP_CTL_CAL_VAL_MASK (0x1f << LP_CTL_CAL_VAL_SHIFT)
++#define LP_CTL_CALIB_EN 0x00000100
++#define LP_CTL_SRTC_INVAL_EN 0x00000010
++#define LP_CTL_WAKE_INT_EN 0x00000008
++#define LP_CTL_MONOCTR_EN 0x00000004
++#define LP_CTL_TIMEALARM_EN 0x00000002
++#define LP_CTL_SRTC_EN 0x00000001
++
++#define LP_MKEYCTL_ZMKECC_SHIFT 8
++#define LP_MKEYCTL_ZMKECC_MASK (0xff << LP_MKEYCTL_ZMKECC_SHIFT)
++#define LP_MKEYCTL_ZMKECC_EN 0x00000010
++#define LP_MKEYCTL_ZMKECC_VAL 0x00000008
++#define LP_MKEYCTL_ZMKECC_PROG 0x00000004
++#define LP_MKEYCTL_MKSEL_SHIFT 0
++#define LP_MKEYCTL_MKSEL_MASK (3 << LP_MKEYCTL_MKSEL_SHIFT)
++#define LP_MKEYCTL_MK_OTP 0
++#define LP_MKEYCTL_MK_ZMK 2
++#define LP_MKEYCTL_MK_COMB 3
++
++#define LP_SECVIO_CTL_SRC5 0x20
++#define LP_SECVIO_CTL_SRC4 0x10
++#define LP_SECVIO_CTL_SRC3 0x08
++#define LP_SECVIO_CTL_SRC2 0x04
++#define LP_SECVIO_CTL_SRC1 0x02
++#define LP_SECVIO_CTL_SRC0 0x01
++
++#define LP_TAMPFILT_EXT2_EN 0x80000000
++#define LP_TAMPFILT_EXT2_SHIFT 24
++#define LP_TAMPFILT_EXT2_MASK (0x1f << LP_TAMPFILT_EXT2_SHIFT)
++#define LP_TAMPFILT_EXT1_EN 0x00800000
++#define LP_TAMPFILT_EXT1_SHIFT 16
++#define LP_TAMPFILT_EXT1_MASK (0x1f << LP_TAMPFILT_EXT1_SHIFT)
++#define LP_TAMPFILT_WM_EN 0x00000080
++#define LP_TAMPFILT_WM_SHIFT 0
++#define LP_TAMPFILT_WM_MASK (0x1f << LP_TAMPFILT_WM_SHIFT)
++
++#define LP_TAMPDET_OSC_BPS 0x10000000
++#define LP_TAMPDET_VRC_SHIFT 24
++#define LP_TAMPDET_VRC_MASK		(3 << LP_TAMPDET_VRC_SHIFT)
++#define LP_TAMPDET_HTDC_SHIFT 20
++#define LP_TAMPDET_HTDC_MASK		(3 << LP_TAMPDET_HTDC_SHIFT)
++#define LP_TAMPDET_LTDC_SHIFT 16
++#define LP_TAMPDET_LTDC_MASK		(3 << LP_TAMPDET_LTDC_SHIFT)
++#define LP_TAMPDET_POR_OBS 0x00008000
++#define LP_TAMPDET_PFD_OBS 0x00004000
++#define LP_TAMPDET_ET2_EN 0x00000400
++#define LP_TAMPDET_ET1_EN 0x00000200
++#define LP_TAMPDET_WMT2_EN 0x00000100
++#define LP_TAMPDET_WMT1_EN 0x00000080
++#define LP_TAMPDET_VT_EN 0x00000040
++#define LP_TAMPDET_TT_EN 0x00000020
++#define LP_TAMPDET_CT_EN 0x00000010
++#define LP_TAMPDET_MCR_EN 0x00000004
++#define LP_TAMPDET_SRTCR_EN 0x00000002
++
++#define LP_STATUS_SECURE
++#define LP_STATUS_NONSECURE
++#define LP_STATUS_SCANEXIT	0x00100000	/* write-1-to-clear from here on */
++#define LP_STATUS_EXT_SECVIO 0x00010000
++#define LP_STATUS_ET2 0x00000400
++#define LP_STATUS_ET1 0x00000200
++#define LP_STATUS_WMT2 0x00000100
++#define LP_STATUS_WMT1 0x00000080
++#define LP_STATUS_VTD 0x00000040
++#define LP_STATUS_TTD 0x00000020
++#define LP_STATUS_CTD 0x00000010
++#define LP_STATUS_PGD 0x00000008
++#define LP_STATUS_MCR 0x00000004
++#define LP_STATUS_SRTCR 0x00000002
++#define LP_STATUS_LPTA 0x00000001
++
++/* Full SNVS register page, including version/options */
++struct snvs_full {
++ struct snvs_hp hp;
++ struct snvs_lp lp;
++ u32 rsvd[731]; /* deadspace 0x08c-0xbf7 */
++
++ /* Version / Revision / Option ID space - end of register page */
++ u32 vid; /* 0xbf8 HP Version ID (VID 1) */
++ u32 opt_rev; /* 0xbfc HP Options / Revision (VID 2) */
++};
++
++#endif /* SNVSREGS_H */
+diff -Nur linux-3.14.40.orig/drivers/dma/imx-sdma.c linux-3.14.40/drivers/dma/imx-sdma.c
+--- linux-3.14.40.orig/drivers/dma/imx-sdma.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/dma/imx-sdma.c 2015-05-01 14:57:58.963427001 -0500
+@@ -29,6 +29,7 @@
+ #include <linux/semaphore.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
++#include <linux/genalloc.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/firmware.h>
+ #include <linux/slab.h>
+@@ -232,6 +233,14 @@
+
+ struct sdma_engine;
+
++enum sdma_mode {
++ SDMA_MODE_INVALID = 0,
++ SDMA_MODE_LOOP,
++ SDMA_MODE_NORMAL,
++ SDMA_MODE_P2P,
++ SDMA_MODE_NO_BD,
++};
++
+ /**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+@@ -244,6 +253,7 @@
+ * @word_size peripheral access size
+ * @buf_tail ID of the buffer that was processed
+ * @num_bd max NUM_BD. number of descriptors currently handling
++ * @bd_iram flag indicating the memory location of buffer descriptor
+ */
+ struct sdma_channel {
+ struct sdma_engine *sdma;
+@@ -255,14 +265,19 @@
+ enum dma_slave_buswidth word_size;
+ unsigned int buf_tail;
+ unsigned int num_bd;
++ unsigned int period_len;
+ struct sdma_buffer_descriptor *bd;
+ dma_addr_t bd_phys;
++ bool bd_iram;
+ unsigned int pc_from_device, pc_to_device;
+- unsigned long flags;
+- dma_addr_t per_address;
++ unsigned int device_to_device;
++ unsigned int other_script;
++ enum sdma_mode mode;
++ dma_addr_t per_address, per_address2;
+ unsigned long event_mask[2];
+ unsigned long watermark_level;
+ u32 shp_addr, per_addr;
++ u32 data_addr1, data_addr2;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+@@ -272,8 +287,6 @@
+ struct tasklet_struct tasklet;
+ };
+
+-#define IMX_DMA_SG_LOOP BIT(0)
+-
+ #define MAX_DMA_CHANNELS 32
+ #define MXC_SDMA_DEFAULT_PRIORITY 1
+ #define MXC_SDMA_MIN_PRIORITY 1
+@@ -325,6 +338,7 @@
+ spinlock_t channel_0_lock;
+ u32 script_number;
+ struct sdma_script_start_addrs *script_addrs;
++ struct gen_pool *iram_pool;
+ const struct sdma_driver_data *drvdata;
+ };
+
+@@ -540,12 +554,14 @@
+ dma_addr_t buf_phys;
+ int ret;
+ unsigned long flags;
++ bool use_iram = true;
+
+- buf_virt = dma_alloc_coherent(NULL,
+- size,
+- &buf_phys, GFP_KERNEL);
++ buf_virt = gen_pool_dma_alloc(sdma->iram_pool, size, &buf_phys);
+ if (!buf_virt) {
+- return -ENOMEM;
++ use_iram = false;
++ buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
++ if (!buf_virt)
++ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+@@ -562,7 +578,10 @@
+
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+
+- dma_free_coherent(NULL, size, buf_virt, buf_phys);
++ if (use_iram)
++ gen_pool_free(sdma->iram_pool, (unsigned long)buf_virt, size);
++ else
++ dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+ return ret;
+ }
+@@ -593,6 +612,12 @@
+
+ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+ {
++ if (sdmac->desc.callback)
++ sdmac->desc.callback(sdmac->desc.callback_param);
++}
++
++static void sdma_update_channel_loop(struct sdma_channel *sdmac)
++{
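++	/*
++	 * Walk the ring of buffer descriptors from the interrupt handler and
++	 * re-arm each completed descriptor; the completion callback itself is
++	 * deferred to the tasklet.
++	 */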
+ struct sdma_buffer_descriptor *bd;
+
+ /*
+@@ -607,15 +632,10 @@
+
+ if (bd->mode.status & BD_RROR)
+ sdmac->status = DMA_ERROR;
+- else
+- sdmac->status = DMA_IN_PROGRESS;
+
+ bd->mode.status |= BD_DONE;
+ sdmac->buf_tail++;
+ sdmac->buf_tail %= sdmac->num_bd;
+-
+- if (sdmac->desc.callback)
+- sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+ }
+
+@@ -647,14 +667,31 @@
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+
++static void sdma_handle_other_intr(struct sdma_channel *sdmac)
++{
++ if (sdmac->desc.callback)
++ sdmac->desc.callback(sdmac->desc.callback_param);
++}
++
+ static void sdma_tasklet(unsigned long data)
+ {
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
++ struct sdma_engine *sdma = sdmac->sdma;
+
+- if (sdmac->flags & IMX_DMA_SG_LOOP)
++ switch (sdmac->mode) {
++ case SDMA_MODE_LOOP:
+ sdma_handle_channel_loop(sdmac);
+- else
++ break;
++ case SDMA_MODE_NORMAL:
+ mxc_sdma_handle_channel_normal(sdmac);
++ break;
++ case SDMA_MODE_NO_BD:
++ sdma_handle_other_intr(sdmac);
++ break;
++ default:
++ dev_err(sdma->dev, "invalid SDMA MODE!\n");
++ break;
++ }
+ }
+
+ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+@@ -671,6 +708,9 @@
+ int channel = fls(stat) - 1;
+ struct sdma_channel *sdmac = &sdma->channel[channel];
+
++ if (sdmac->mode & SDMA_MODE_LOOP)
++ sdma_update_channel_loop(sdmac);
++
+ tasklet_schedule(&sdmac->tasklet);
+
+ __clear_bit(channel, &stat);
+@@ -692,9 +732,12 @@
+ * two peripherals or memory-to-memory transfers
+ */
+ int per_2_per = 0, emi_2_emi = 0;
++ int other = 0;
+
+ sdmac->pc_from_device = 0;
+ sdmac->pc_to_device = 0;
++ sdmac->device_to_device = 0;
++ sdmac->other_script = 0;
+
+ switch (peripheral_type) {
+ case IMX_DMATYPE_MEMORY:
+@@ -740,8 +783,8 @@
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ASRC:
+- per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+- emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
++ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
++ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+ break;
+ case IMX_DMATYPE_MSHC:
+@@ -758,12 +801,17 @@
+ case IMX_DMATYPE_IPU_MEMORY:
+ emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+ break;
++ case IMX_DMATYPE_HDMI:
++ other = sdma->script_addrs->hdmi_dma_addr;
++ break;
+ default:
+ break;
+ }
+
+ sdmac->pc_from_device = per_2_emi;
+ sdmac->pc_to_device = emi_2_per;
++ sdmac->device_to_device = per_2_per;
++ sdmac->other_script = other;
+ }
+
+ static int sdma_load_context(struct sdma_channel *sdmac)
+@@ -776,11 +824,14 @@
+ int ret;
+ unsigned long flags;
+
+- if (sdmac->direction == DMA_DEV_TO_MEM) {
++ if (sdmac->direction == DMA_DEV_TO_MEM)
+ load_address = sdmac->pc_from_device;
+- } else {
++ else if (sdmac->direction == DMA_DEV_TO_DEV)
++ load_address = sdmac->device_to_device;
++ else if (sdmac->direction == DMA_MEM_TO_DEV)
+ load_address = sdmac->pc_to_device;
+- }
++ else
++ load_address = sdmac->other_script;
+
+ if (load_address < 0)
+ return load_address;
+@@ -800,11 +851,16 @@
+ /* Send by context the event mask,base address for peripheral
+ * and watermark level
+ */
+- context->gReg[0] = sdmac->event_mask[1];
+- context->gReg[1] = sdmac->event_mask[0];
+- context->gReg[2] = sdmac->per_addr;
+- context->gReg[6] = sdmac->shp_addr;
+- context->gReg[7] = sdmac->watermark_level;
++ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
++ context->gReg[4] = sdmac->data_addr1;
++ context->gReg[6] = sdmac->data_addr2;
++ } else {
++ context->gReg[0] = sdmac->event_mask[1];
++ context->gReg[1] = sdmac->event_mask[0];
++ context->gReg[2] = sdmac->per_addr;
++ context->gReg[6] = sdmac->shp_addr;
++ context->gReg[7] = sdmac->watermark_level;
++ }
+
+ bd0->mode.command = C0_SETDM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+@@ -829,6 +885,7 @@
+
+ static int sdma_config_channel(struct sdma_channel *sdmac)
+ {
++ struct imx_dma_data *data = sdmac->chan.private;
+ int ret;
+
+ sdma_disable_channel(sdmac);
+@@ -837,12 +894,19 @@
+ sdmac->event_mask[1] = 0;
+ sdmac->shp_addr = 0;
+ sdmac->per_addr = 0;
++ sdmac->data_addr1 = 0;
++ sdmac->data_addr2 = 0;
+
+ if (sdmac->event_id0) {
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+ }
++ if (sdmac->event_id1) {
++ if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
++ return -EINVAL;
++ sdma_event_enable(sdmac, sdmac->event_id1);
++ }
+
+ switch (sdmac->peripheral_type) {
+ case IMX_DMATYPE_DSP:
+@@ -862,19 +926,75 @@
+ (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+ /* Handle multiple event channels differently */
+ if (sdmac->event_id1) {
+- sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
+- if (sdmac->event_id1 > 31)
+- __set_bit(31, &sdmac->watermark_level);
+- sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
+- if (sdmac->event_id0 > 31)
+- __set_bit(30, &sdmac->watermark_level);
++ if (sdmac->event_id0 > 31) {
++ sdmac->event_mask[0] |= 0;
++ __set_bit(28, &sdmac->watermark_level);
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id0 % 32);
++ } else {
++ sdmac->event_mask[1] |= 0;
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id0 % 32);
++ }
++ if (sdmac->event_id1 > 31) {
++ sdmac->event_mask[0] |= 0;
++ __set_bit(29, &sdmac->watermark_level);
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id1 % 32);
++ } else {
++ sdmac->event_mask[1] |= 0;
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id1 % 32);
++ }
++ /* BIT 11:
++ * 1 : Source on SPBA
++ * 0 : Source on AIPS
++ */
++ __set_bit(11, &sdmac->watermark_level);
++ /* BIT 12:
++ * 1 : Destination on SPBA
++ * 0 : Destination on AIPS
++ */
++ __set_bit(12, &sdmac->watermark_level);
++ __set_bit(31, &sdmac->watermark_level);
++ /* BIT 31:
++ * 1 : Amount of samples to be transferred is
++ * unknown and script will keep on transferring
++ * samples as long as both events are detected
++ * and script must be manually stopped by the
++ * application.
++			 * 0 : The amount of samples to be transferred is equal to
++ * the count field of mode word
++ * */
++ __set_bit(25, &sdmac->watermark_level);
++ __clear_bit(24, &sdmac->watermark_level);
+ } else {
+- __set_bit(sdmac->event_id0, sdmac->event_mask);
++ if (sdmac->event_id0 > 31) {
++ sdmac->event_mask[0] = 0;
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id0 % 32);
++ } else {
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id0 % 32);
++ sdmac->event_mask[1] = 0;
++ }
+ }
+ /* Watermark Level */
+ sdmac->watermark_level |= sdmac->watermark_level;
+ /* Address */
+- sdmac->shp_addr = sdmac->per_address;
++ if (sdmac->direction == DMA_DEV_TO_DEV) {
++ sdmac->shp_addr = sdmac->per_address2;
++ sdmac->per_addr = sdmac->per_address;
++ } else if (sdmac->direction == DMA_TRANS_NONE) {
++ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI ||
++ !data->data_addr1 || !data->data_addr2)
++ return -EINVAL;
++ sdmac->data_addr1 = *(u32 *)data->data_addr1;
++ sdmac->data_addr2 = *(u32 *)data->data_addr2;
++ sdmac->watermark_level = 0;
++ } else {
++ sdmac->shp_addr = sdmac->per_address;
++ }
+ } else {
+ sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+ }
+@@ -906,10 +1026,15 @@
+ int channel = sdmac->channel;
+ int ret = -EBUSY;
+
+- sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
++ sdmac->bd_iram = true;
++ sdmac->bd = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE, &sdmac->bd_phys);
+ if (!sdmac->bd) {
+- ret = -ENOMEM;
+- goto out;
++ sdmac->bd_iram = false;
++ sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
++ if (!sdmac->bd) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ }
+
+ memset(sdmac->bd, 0, PAGE_SIZE);
+@@ -967,7 +1092,8 @@
+ }
+
+ sdmac->peripheral_type = data->peripheral_type;
+- sdmac->event_id0 = data->dma_request;
++ sdmac->event_id0 = data->dma_request0;
++ sdmac->event_id1 = data->dma_request1;
+
+ clk_enable(sdmac->sdma->clk_ipg);
+ clk_enable(sdmac->sdma->clk_ahb);
+@@ -985,6 +1111,9 @@
+ /* txd.flags will be overwritten in prep funcs */
+ sdmac->desc.flags = DMA_CTRL_ACK;
+
++ /* Set SDMA channel mode to invalid to avoid misconfiguration */
++ sdmac->mode = SDMA_MODE_INVALID;
++
+ return 0;
+ }
+
+@@ -1005,7 +1134,10 @@
+
+ sdma_set_channel_priority(sdmac, 0);
+
+- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
++ if (sdmac->bd_iram)
++ gen_pool_free(sdma->iram_pool, (unsigned long)sdmac->bd, PAGE_SIZE);
++ else
++ dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+@@ -1026,7 +1158,7 @@
+ return NULL;
+ sdmac->status = DMA_IN_PROGRESS;
+
+- sdmac->flags = 0;
++ sdmac->mode = SDMA_MODE_NORMAL;
+
+ sdmac->buf_tail = 0;
+
+@@ -1119,9 +1251,9 @@
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+- int num_periods = buf_len / period_len;
+ int channel = sdmac->channel;
+ int ret, i = 0, buf = 0;
++ int num_periods;
+
+ dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+@@ -1131,13 +1263,35 @@
+ sdmac->status = DMA_IN_PROGRESS;
+
+ sdmac->buf_tail = 0;
++ sdmac->period_len = period_len;
+
+- sdmac->flags |= IMX_DMA_SG_LOOP;
+ sdmac->direction = direction;
++
++ switch (sdmac->direction) {
++ case DMA_DEV_TO_DEV:
++ sdmac->mode = SDMA_MODE_P2P;
++ break;
++ case DMA_TRANS_NONE:
++ sdmac->mode = SDMA_MODE_NO_BD;
++ break;
++ case DMA_MEM_TO_DEV:
++ case DMA_DEV_TO_MEM:
++ sdmac->mode = SDMA_MODE_LOOP;
++ break;
++ default:
++ dev_err(sdma->dev, "invalid SDMA direction %d\n", direction);
++ return NULL;
++ }
++
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
++ if (period_len)
++ num_periods = buf_len / period_len;
++ else
++ return &sdmac->desc;
++
+ if (num_periods > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, num_periods, NUM_BD);
+@@ -1202,18 +1356,31 @@
+ sdma_disable_channel(sdmac);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
++ if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
++ sdmac->per_address = dmaengine_cfg->src_addr;
++ sdmac->per_address2 = dmaengine_cfg->dst_addr;
++ sdmac->watermark_level = 0;
++ sdmac->watermark_level |=
++ dmaengine_cfg->src_maxburst;
++ sdmac->watermark_level |=
++ dmaengine_cfg->dst_maxburst << 16;
++ sdmac->word_size = dmaengine_cfg->dst_addr_width;
++ } else if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ sdmac->per_address = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+ dmaengine_cfg->src_addr_width;
+ sdmac->word_size = dmaengine_cfg->src_addr_width;
+- } else {
++ } else if (dmaengine_cfg->direction == DMA_MEM_TO_DEV) {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+ dmaengine_cfg->dst_addr_width;
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+ sdmac->direction = dmaengine_cfg->direction;
++ if (dmaengine_cfg->dma_request0)
++ sdmac->event_id0 = dmaengine_cfg->dma_request0;
++ if (dmaengine_cfg->dma_request1)
++ sdmac->event_id1 = dmaengine_cfg->dma_request1;
+ return sdma_config_channel(sdmac);
+ default:
+ return -ENOSYS;
+@@ -1227,9 +1394,15 @@
+ struct dma_tx_state *txstate)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
++ u32 residue;
++
++ if (sdmac->mode & SDMA_MODE_LOOP)
++ residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
++ else
++ residue = sdmac->chn_count - sdmac->chn_real_count;
+
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+- sdmac->chn_count - sdmac->chn_real_count);
++ residue);
+
+ return sdmac->status;
+ }
+@@ -1285,7 +1458,10 @@
+ goto err_firmware;
+ switch (header->version_major) {
+ case 1:
+- sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
++ if (header->version_minor > 0)
++ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
++ else
++ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ break;
+ case 2:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+@@ -1331,7 +1507,7 @@
+
+ static int __init sdma_init(struct sdma_engine *sdma)
+ {
+- int i, ret;
++ int i, ret, ccbsize;
+ dma_addr_t ccb_phys;
+
+ clk_enable(sdma->clk_ipg);
+@@ -1340,14 +1516,17 @@
+ /* Be sure SDMA has not started yet */
+ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
+
+- sdma->channel_control = dma_alloc_coherent(NULL,
+- MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+- sizeof(struct sdma_context_data),
+- &ccb_phys, GFP_KERNEL);
++ ccbsize = MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)
++ + sizeof(struct sdma_context_data);
+
++ sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
+ if (!sdma->channel_control) {
+- ret = -ENOMEM;
+- goto err_dma_alloc;
++ sdma->channel_control = dma_alloc_coherent(NULL, ccbsize,
++ &ccb_phys, GFP_KERNEL);
++ if (!sdma->channel_control) {
++ ret = -ENOMEM;
++ goto err_dma_alloc;
++ }
+ }
+
+ sdma->context = (void *)sdma->channel_control +
+@@ -1422,9 +1601,10 @@
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+- data.dma_request = dma_spec->args[0];
++ data.dma_request0 = dma_spec->args[0];
+ data.peripheral_type = dma_spec->args[1];
+ data.priority = dma_spec->args[2];
++ data.dma_request1 = 0;
+
+ return dma_request_channel(mask, sdma_filter_fn, &data);
+ }
+@@ -1542,6 +1722,11 @@
+ &sdma->dma_device.channels);
+ }
+
++ if (np)
++ sdma->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!sdma->iram_pool)
++ dev_warn(&pdev->dev, "no iram assigned, using external mem\n");
++
+ ret = sdma_init(sdma);
+ if (ret)
+ goto err_init;
+diff -Nur linux-3.14.40.orig/drivers/dma/Kconfig linux-3.14.40/drivers/dma/Kconfig
+--- linux-3.14.40.orig/drivers/dma/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/dma/Kconfig 2015-05-01 14:57:58.963427001 -0500
+@@ -137,6 +137,19 @@
+ To avoid bloating the irq_desc[] array we allocate a sufficient
+ number of IRQ slots and map them dynamically to specific sources.
+
++config MXC_PXP_V2
++ bool "MXC PxP V2 support"
++ depends on ARM
++ select DMA_ENGINE
++ help
++ Support the PxP (Pixel Pipeline) on i.MX6 DualLite and i.MX6 SoloLite.
++ If unsure, select N.
++
++config MXC_PXP_CLIENT_DEVICE
++ bool "MXC PxP Client Device"
++ default y
++ depends on MXC_PXP_V2
++
+ config TXX9_DMAC
+ tristate "Toshiba TXx9 SoC DMA support"
+ depends on MACH_TX49XX || MACH_TX39XX
+diff -Nur linux-3.14.40.orig/drivers/dma/Makefile linux-3.14.40/drivers/dma/Makefile
+--- linux-3.14.40.orig/drivers/dma/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/dma/Makefile 2015-05-01 14:57:58.975427001 -0500
+@@ -18,6 +18,7 @@
+ obj-$(CONFIG_DW_DMAC_CORE) += dw/
+ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
+ obj-$(CONFIG_MX3_IPU) += ipu/
++obj-$(CONFIG_MXC_PXP_V2) += pxp/
+ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+ obj-$(CONFIG_SH_DMAE_BASE) += sh/
+ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+diff -Nur linux-3.14.40.orig/drivers/dma/pxp/Makefile linux-3.14.40/drivers/dma/pxp/Makefile
+--- linux-3.14.40.orig/drivers/dma/pxp/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/dma/pxp/Makefile 2015-05-01 14:57:58.975427001 -0500
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_MXC_PXP_V2) += pxp_dma_v2.o
++obj-$(CONFIG_MXC_PXP_CLIENT_DEVICE) += pxp_device.o
+diff -Nur linux-3.14.40.orig/drivers/dma/pxp/pxp_device.c linux-3.14.40/drivers/dma/pxp/pxp_device.c
+--- linux-3.14.40.orig/drivers/dma/pxp/pxp_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/dma/pxp/pxp_device.c 2015-05-01 14:57:58.975427001 -0500
+@@ -0,0 +1,765 @@
++/*
++ * Copyright (C) 2010-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/interrupt.h>
++#include <linux/miscdevice.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/pxp_device.h>
++#include <linux/atomic.h>
++#include <linux/platform_data/dma-imx.h>
++
++#define BUFFER_HASH_ORDER 4
++
++static struct pxp_buffer_hash bufhash;
++static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
++
++static int pxp_ht_create(struct pxp_buffer_hash *hash, int order)
++{
++ unsigned long i;
++ unsigned long table_size;
++
++ table_size = 1U << order;
++
++ hash->order = order;
++ hash->hash_table = kmalloc(sizeof(*hash->hash_table) * table_size, GFP_KERNEL);
++
++ if (!hash->hash_table) {
++ pr_err("%s: Out of memory for hash table\n", __func__);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < table_size; i++)
++ INIT_HLIST_HEAD(&hash->hash_table[i]);
++
++ return 0;
++}
++
++static int pxp_ht_insert_item(struct pxp_buffer_hash *hash,
++ struct pxp_buf_obj *new)
++{
++ unsigned long hashkey;
++ struct hlist_head *h_list;
++
++ hashkey = hash_long(new->offset >> PAGE_SHIFT, hash->order);
++ h_list = &hash->hash_table[hashkey];
++
++ spin_lock(&hash->hash_lock);
++ hlist_add_head_rcu(&new->item, h_list);
++ spin_unlock(&hash->hash_lock);
++
++ return 0;
++}
++
++static int pxp_ht_remove_item(struct pxp_buffer_hash *hash,
++ struct pxp_buf_obj *obj)
++{
++ spin_lock(&hash->hash_lock);
++ hlist_del_init_rcu(&obj->item);
++ spin_unlock(&hash->hash_lock);
++ return 0;
++}
++
++static struct hlist_node *pxp_ht_find_key(struct pxp_buffer_hash *hash,
++ unsigned long key)
++{
++ struct pxp_buf_obj *entry;
++ struct hlist_head *h_list;
++ unsigned long hashkey;
++
++ hashkey = hash_long(key, hash->order);
++ h_list = &hash->hash_table[hashkey];
++
++ hlist_for_each_entry_rcu(entry, h_list, item) {
++ if (entry->offset >> PAGE_SHIFT == key)
++ return &entry->item;
++ }
++
++ return NULL;
++}
++
++static void pxp_ht_destroy(struct pxp_buffer_hash *hash)
++{
++ kfree(hash->hash_table);
++ hash->hash_table = NULL;
++}
++
++static int pxp_buffer_handle_create(struct pxp_file *file_priv,
++ struct pxp_buf_obj *obj,
++ uint32_t *handlep)
++{
++ int ret;
++
++ idr_preload(GFP_KERNEL);
++ spin_lock(&file_priv->buffer_lock);
++
++ ret = idr_alloc(&file_priv->buffer_idr, obj, 1, 0, GFP_NOWAIT);
++
++ spin_unlock(&file_priv->buffer_lock);
++ idr_preload_end();
++
++ if (ret < 0)
++ return ret;
++
++ *handlep = ret;
++
++ return 0;
++}
++
++static struct pxp_buf_obj *
++pxp_buffer_object_lookup(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_buf_obj *obj;
++
++ spin_lock(&file_priv->buffer_lock);
++
++ obj = idr_find(&file_priv->buffer_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->buffer_lock);
++ return NULL;
++ }
++
++ spin_unlock(&file_priv->buffer_lock);
++
++ return obj;
++}
++
++static int pxp_buffer_handle_delete(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_buf_obj *obj;
++
++ spin_lock(&file_priv->buffer_lock);
++
++ obj = idr_find(&file_priv->buffer_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->buffer_lock);
++ return -EINVAL;
++ }
++
++ idr_remove(&file_priv->buffer_idr, handle);
++ spin_unlock(&file_priv->buffer_lock);
++
++ return 0;
++}
++
++static int pxp_channel_handle_create(struct pxp_file *file_priv,
++ struct pxp_chan_obj *obj,
++ uint32_t *handlep)
++{
++ int ret;
++
++ idr_preload(GFP_KERNEL);
++ spin_lock(&file_priv->channel_lock);
++
++ ret = idr_alloc(&file_priv->channel_idr, obj, 0, 0, GFP_NOWAIT);
++
++ spin_unlock(&file_priv->channel_lock);
++ idr_preload_end();
++
++ if (ret < 0)
++ return ret;
++
++ *handlep = ret;
++
++ return 0;
++}
++
++static struct pxp_chan_obj *
++pxp_channel_object_lookup(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_chan_obj *obj;
++
++ spin_lock(&file_priv->channel_lock);
++
++ obj = idr_find(&file_priv->channel_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->channel_lock);
++ return NULL;
++ }
++
++ spin_unlock(&file_priv->channel_lock);
++
++ return obj;
++}
++
++static int pxp_channel_handle_delete(struct pxp_file *file_priv,
++ uint32_t handle)
++{
++ struct pxp_chan_obj *obj;
++
++ spin_lock(&file_priv->channel_lock);
++
++ obj = idr_find(&file_priv->channel_idr, handle);
++ if (!obj) {
++ spin_unlock(&file_priv->channel_lock);
++ return -EINVAL;
++ }
++
++ idr_remove(&file_priv->channel_idr, handle);
++ spin_unlock(&file_priv->channel_lock);
++
++ return 0;
++}
++
++static int pxp_alloc_dma_buffer(struct pxp_buf_obj *obj)
++{
++ obj->virtual = dma_alloc_coherent(NULL, PAGE_ALIGN(obj->size),
++ (dma_addr_t *) (&obj->offset),
++ GFP_DMA | GFP_KERNEL);
++ pr_debug("[ALLOC] mem alloc phys_addr = 0x%lx\n", obj->offset);
++
++ if (obj->virtual == NULL) {
++ printk(KERN_ERR "Physical memory allocation error!\n");
++ return -1;
++ }
++
++ return 0;
++}
++
++static void pxp_free_dma_buffer(struct pxp_buf_obj *obj)
++{
++ if (obj->virtual != NULL) {
++ dma_free_coherent(0, PAGE_ALIGN(obj->size),
++ obj->virtual, (dma_addr_t)obj->offset);
++ }
++}
++
++static int
++pxp_buffer_object_free(int id, void *ptr, void *data)
++{
++ struct pxp_file *file_priv = data;
++ struct pxp_buf_obj *obj = ptr;
++ int ret;
++
++ ret = pxp_buffer_handle_delete(file_priv, obj->handle);
++ if (ret < 0)
++ return ret;
++
++ pxp_ht_remove_item(&bufhash, obj);
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++
++ return 0;
++}
++
++static int
++pxp_channel_object_free(int id, void *ptr, void *data)
++{
++ struct pxp_file *file_priv = data;
++ struct pxp_chan_obj *obj = ptr;
++ int chan_id;
++
++ chan_id = obj->chan->chan_id;
++ wait_event(irq_info[chan_id].waitq,
++ atomic_read(&irq_info[chan_id].irq_pending) == 0);
++
++ pxp_channel_handle_delete(file_priv, obj->handle);
++ dma_release_channel(obj->chan);
++ kfree(obj);
++
++ return 0;
++}
++
++static void pxp_free_buffers(struct pxp_file *file_priv)
++{
++ idr_for_each(&file_priv->buffer_idr,
++ &pxp_buffer_object_free, file_priv);
++ idr_destroy(&file_priv->buffer_idr);
++}
++
++static void pxp_free_channels(struct pxp_file *file_priv)
++{
++ idr_for_each(&file_priv->channel_idr,
++ &pxp_channel_object_free, file_priv);
++ idr_destroy(&file_priv->channel_idr);
++}
++
++/* Callback function triggered after PxP receives an EOF interrupt */
++static void pxp_dma_done(void *arg)
++{
++ struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
++ struct dma_chan *chan = tx_desc->txd.chan;
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ int chan_id = pxp_chan->dma_chan.chan_id;
++
++ pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
++
++ atomic_dec(&irq_info[chan_id].irq_pending);
++ irq_info[chan_id].hist_status = tx_desc->hist_status;
++
++ wake_up(&(irq_info[chan_id].waitq));
++}
++
++static int pxp_ioc_config_chan(struct pxp_file *priv, unsigned long arg)
++{
++ struct scatterlist sg[3];
++ struct pxp_tx_desc *desc;
++ struct dma_async_tx_descriptor *txd;
++ struct pxp_config_data pxp_conf;
++ dma_cookie_t cookie;
++ int handle, chan_id;
++ int i, length, ret;
++ struct dma_chan *chan;
++ struct pxp_chan_obj *obj;
++
++ ret = copy_from_user(&pxp_conf,
++ (struct pxp_config_data *)arg,
++ sizeof(struct pxp_config_data));
++ if (ret)
++ return -EFAULT;
++
++ handle = pxp_conf.handle;
++ obj = pxp_channel_object_lookup(priv, handle);
++ if (!obj)
++ return -EINVAL;
++ chan = obj->chan;
++ chan_id = chan->chan_id;
++
++ sg_init_table(sg, 3);
++
++ txd = chan->device->device_prep_slave_sg(chan,
++ sg, 3,
++ DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT,
++ NULL);
++ if (!txd) {
++ pr_err("Error preparing a DMA transaction descriptor.\n");
++ return -EIO;
++ }
++
++ txd->callback_param = txd;
++ txd->callback = pxp_dma_done;
++
++ desc = to_tx_desc(txd);
++
++ length = desc->len;
++ for (i = 0; i < length; i++) {
++ if (i == 0) { /* S0 */
++ memcpy(&desc->proc_data,
++ &pxp_conf.proc_data,
++ sizeof(struct pxp_proc_data));
++ memcpy(&desc->layer_param.s0_param,
++ &pxp_conf.s0_param,
++ sizeof(struct pxp_layer_param));
++ } else if (i == 1) { /* Output */
++ memcpy(&desc->layer_param.out_param,
++ &pxp_conf.out_param,
++ sizeof(struct pxp_layer_param));
++ } else {
++ /* OverLay */
++ memcpy(&desc->layer_param.ol_param,
++ &pxp_conf.ol_param,
++ sizeof(struct pxp_layer_param));
++ }
++
++ desc = desc->next;
++ }
++
++ cookie = txd->tx_submit(txd);
++ if (cookie < 0) {
++ pr_err("Error tx_submit\n");
++ return -EIO;
++ }
++
++ atomic_inc(&irq_info[chan_id].irq_pending);
++
++ return 0;
++}
++
++static int pxp_device_open(struct inode *inode, struct file *filp)
++{
++ struct pxp_file *priv;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++
++ if (!priv)
++ return -ENOMEM;
++
++ filp->private_data = priv;
++ priv->filp = filp;
++
++ idr_init(&priv->buffer_idr);
++ spin_lock_init(&priv->buffer_lock);
++
++ idr_init(&priv->channel_idr);
++ spin_lock_init(&priv->channel_lock);
++
++ return 0;
++}
++
++static int pxp_device_release(struct inode *inode, struct file *filp)
++{
++ struct pxp_file *priv = filp->private_data;
++
++ if (priv) {
++ pxp_free_channels(priv);
++ pxp_free_buffers(priv);
++ kfree(priv);
++ filp->private_data = NULL;
++ }
++
++ return 0;
++}
++
++static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int request_size;
++ struct hlist_node *node;
++ struct pxp_buf_obj *obj;
++
++ request_size = vma->vm_end - vma->vm_start;
++
++ pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
++ (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
++ request_size);
++
++ node = pxp_ht_find_key(&bufhash, vma->vm_pgoff);
++ if (!node)
++ return -EINVAL;
++
++ obj = list_entry(node, struct pxp_buf_obj, item);
++ if (obj->offset + (obj->size >> PAGE_SHIFT) <
++ (vma->vm_pgoff + vma_pages(vma)))
++ return -ENOMEM;
++
++ switch (obj->mem_type) {
++ case MEMORY_TYPE_UNCACHED:
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ break;
++ case MEMORY_TYPE_WC:
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ break;
++ case MEMORY_TYPE_CACHED:
++ break;
++ default:
++ pr_err("%s: invalid memory type!\n", __func__);
++ return -EINVAL;
++ }
++
++ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ request_size, vma->vm_page_prot) ? -EAGAIN : 0;
++}
++
++static bool chan_filter(struct dma_chan *chan, void *arg)
++{
++ if (imx_dma_is_pxp(chan))
++ return true;
++ else
++ return false;
++}
++
++static long pxp_device_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret = 0;
++ struct pxp_file *file_priv = filp->private_data;
++
++ switch (cmd) {
++ case PXP_IOC_GET_CHAN:
++ {
++ int ret;
++ struct dma_chan *chan = NULL;
++ dma_cap_mask_t mask;
++ struct pxp_chan_obj *obj = NULL;
++
++ pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_PRIVATE, mask);
++
++ chan = dma_request_channel(mask, chan_filter, NULL);
++ if (!chan) {
++ pr_err("Unsccessfully received channel!\n");
++ return -EBUSY;
++ }
++
++ pr_debug("Successfully received channel."
++ "chan_id %d\n", chan->chan_id);
++
++ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
++ if (!obj) {
++ dma_release_channel(chan);
++ return -ENOMEM;
++ }
++ obj->chan = chan;
++
++ ret = pxp_channel_handle_create(file_priv, obj,
++ &obj->handle);
++ if (ret) {
++ dma_release_channel(chan);
++ kfree(obj);
++ return ret;
++ }
++
++ init_waitqueue_head(&(irq_info[chan->chan_id].waitq));
++ if (put_user(obj->handle, (u32 __user *) arg)) {
++ pxp_channel_handle_delete(file_priv, obj->handle);
++ dma_release_channel(chan);
++ kfree(obj);
++ return -EFAULT;
++ }
++
++ break;
++ }
++ case PXP_IOC_PUT_CHAN:
++ {
++ int handle;
++ struct pxp_chan_obj *obj;
++
++ if (get_user(handle, (u32 __user *) arg))
++ return -EFAULT;
++
++ pr_debug("%d release handle %d\n", __LINE__, handle);
++
++ obj = pxp_channel_object_lookup(file_priv, handle);
++ if (!obj)
++ return -EINVAL;
++
++ pxp_channel_handle_delete(file_priv, obj->handle);
++ dma_release_channel(obj->chan);
++ kfree(obj);
++
++ break;
++ }
++ case PXP_IOC_CONFIG_CHAN:
++ {
++ int ret;
++
++ ret = pxp_ioc_config_chan(file_priv, arg);
++ if (ret)
++ return ret;
++
++ break;
++ }
++ case PXP_IOC_START_CHAN:
++ {
++ int handle;
++ struct pxp_chan_obj *obj = NULL;
++
++ if (get_user(handle, (u32 __user *) arg))
++ return -EFAULT;
++
++ obj = pxp_channel_object_lookup(file_priv, handle);
++ if (!obj)
++ return -EINVAL;
++
++ dma_async_issue_pending(obj->chan);
++
++ break;
++ }
++ case PXP_IOC_GET_PHYMEM:
++ {
++ struct pxp_mem_desc buffer;
++ struct pxp_buf_obj *obj;
++
++ ret = copy_from_user(&buffer,
++ (struct pxp_mem_desc *)arg,
++ sizeof(struct pxp_mem_desc));
++ if (ret)
++ return -EFAULT;
++
++ pr_debug("[ALLOC] mem alloc size = 0x%x\n",
++ buffer.size);
++
++ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
++ if (!obj)
++ return -ENOMEM;
++ obj->size = buffer.size;
++ obj->mem_type = buffer.mtype;
++
++ ret = pxp_alloc_dma_buffer(obj);
++ if (ret == -1) {
++ printk(KERN_ERR
++ "Physical memory allocation error!\n");
++ kfree(obj);
++ return ret;
++ }
++
++ ret = pxp_buffer_handle_create(file_priv, obj, &obj->handle);
++ if (ret) {
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++ return ret;
++ }
++ buffer.handle = obj->handle;
++ buffer.phys_addr = obj->offset;
++
++ ret = copy_to_user((void __user *)arg, &buffer,
++ sizeof(struct pxp_mem_desc));
++ if (ret) {
++ pxp_buffer_handle_delete(file_priv, buffer.handle);
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++ return -EFAULT;
++ }
++
++ pxp_ht_insert_item(&bufhash, obj);
++
++ break;
++ }
++ case PXP_IOC_PUT_PHYMEM:
++ {
++ struct pxp_mem_desc pxp_mem;
++ struct pxp_buf_obj *obj;
++
++ ret = copy_from_user(&pxp_mem,
++ (struct pxp_mem_desc *)arg,
++ sizeof(struct pxp_mem_desc));
++ if (ret)
++ return -EACCES;
++
++ obj = pxp_buffer_object_lookup(file_priv, pxp_mem.handle);
++ if (!obj)
++ return -EINVAL;
++
++ ret = pxp_buffer_handle_delete(file_priv, obj->handle);
++ if (ret)
++ return ret;
++
++ pxp_ht_remove_item(&bufhash, obj);
++ pxp_free_dma_buffer(obj);
++ kfree(obj);
++
++ break;
++ }
++ case PXP_IOC_FLUSH_PHYMEM:
++ {
++ int ret;
++ struct pxp_mem_flush flush;
++ struct pxp_buf_obj *obj;
++
++ ret = copy_from_user(&flush,
++ (struct pxp_mem_flush *)arg,
++ sizeof(struct pxp_mem_flush));
++ if (ret)
++ return -EACCES;
++
++ obj = pxp_buffer_object_lookup(file_priv, flush.handle);
++ if (!obj)
++ return -EINVAL;
++
++ switch (flush.type) {
++ case CACHE_CLEAN:
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_TO_DEVICE);
++ break;
++ case CACHE_INVALIDATE:
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_FROM_DEVICE);
++ break;
++ case CACHE_FLUSH:
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_TO_DEVICE);
++ dma_sync_single_for_device(NULL, obj->offset,
++ obj->size, DMA_FROM_DEVICE);
++ break;
++ default:
++ pr_err("%s: invalid cache flush type\n", __func__);
++ return -EINVAL;
++ }
++
++ break;
++ }
++ case PXP_IOC_WAIT4CMPLT:
++ {
++ struct pxp_chan_handle chan_handle;
++ int ret, chan_id, handle;
++ struct pxp_chan_obj *obj = NULL;
++
++ ret = copy_from_user(&chan_handle,
++ (struct pxp_chan_handle *)arg,
++ sizeof(struct pxp_chan_handle));
++ if (ret)
++ return -EFAULT;
++
++ handle = chan_handle.handle;
++ obj = pxp_channel_object_lookup(file_priv, handle);
++ if (!obj)
++ return -EINVAL;
++ chan_id = obj->chan->chan_id;
++
++ ret = wait_event_interruptible
++ (irq_info[chan_id].waitq,
++ (atomic_read(&irq_info[chan_id].irq_pending) == 0));
++ if (ret < 0) {
++ printk(KERN_WARNING
++ "WAIT4CMPLT: signal received.\n");
++ return -ERESTARTSYS;
++ }
++
++ chan_handle.hist_status = irq_info[chan_id].hist_status;
++ ret = copy_to_user((struct pxp_chan_handle *)arg,
++ &chan_handle,
++ sizeof(struct pxp_chan_handle));
++ if (ret)
++ return -EFAULT;
++ break;
++ }
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++static const struct file_operations pxp_device_fops = {
++ .open = pxp_device_open,
++ .release = pxp_device_release,
++ .unlocked_ioctl = pxp_device_ioctl,
++ .mmap = pxp_device_mmap,
++};
++
++static struct miscdevice pxp_device_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "pxp_device",
++ .fops = &pxp_device_fops,
++};
++
++int register_pxp_device(void)
++{
++ int ret;
++
++ ret = misc_register(&pxp_device_miscdev);
++ if (ret)
++ return ret;
++
++ ret = pxp_ht_create(&bufhash, BUFFER_HASH_ORDER);
++ if (ret)
++ return ret;
++ spin_lock_init(&(bufhash.hash_lock));
++
++ pr_debug("PxP_Device registered Successfully\n");
++ return 0;
++}
++
++void unregister_pxp_device(void)
++{
++ pxp_ht_destroy(&bufhash);
++ misc_deregister(&pxp_device_miscdev);
++}
+diff -Nur linux-3.14.40.orig/drivers/dma/pxp/pxp_dma_v2.c linux-3.14.40/drivers/dma/pxp/pxp_dma_v2.c
+--- linux-3.14.40.orig/drivers/dma/pxp/pxp_dma_v2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/dma/pxp/pxp_dma_v2.c 2015-05-01 14:57:58.975427001 -0500
+@@ -0,0 +1,1854 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++/*
++ * Based on STMP378X PxP driver
++ * Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/dmaengine.h>
++#include <linux/pxp_dma.h>
++#include <linux/timer.h>
++#include <linux/clk.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++#include <linux/of.h>
++#include <linux/kthread.h>
++
++#include "regs-pxp_v2.h"
++
++#define PXP_DOWNSCALE_THRESHOLD 0x4000
++
++static LIST_HEAD(head);
++static int timeout_in_ms = 600;
++static unsigned int block_size;
++static struct kmem_cache *tx_desc_cache;
++
++struct pxp_dma {
++ struct dma_device dma;
++};
++
++struct pxps {
++ struct platform_device *pdev;
++ struct clk *clk;
++ void __iomem *base;
++ int irq; /* PXP IRQ to the CPU */
++
++ spinlock_t lock;
++ struct mutex clk_mutex;
++ int clk_stat;
++#define CLK_STAT_OFF 0
++#define CLK_STAT_ON 1
++ int pxp_ongoing;
++ int lut_state;
++
++ struct device *dev;
++ struct pxp_dma pxp_dma;
++ struct pxp_channel channel[NR_PXP_VIRT_CHANNEL];
++ struct work_struct work;
++
++ /* describes most recent processing configuration */
++ struct pxp_config_data pxp_conf_state;
++
++ /* to turn clock off when pxp is inactive */
++ struct timer_list clk_timer;
++
++ /* for dispatching PxP configs asynchronously */
++ struct task_struct *dispatch;
++ wait_queue_head_t thread_waitq;
++ struct completion complete;
++};
++
++#define to_pxp_dma(d) container_of(d, struct pxp_dma, dma)
++#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
++#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
++#define to_pxp(id) container_of(id, struct pxps, pxp_dma)
++
++#define PXP_DEF_BUFS 2
++#define PXP_MIN_PIX 8
++
++static uint32_t pxp_s0_formats[] = {
++ PXP_PIX_FMT_RGB32,
++ PXP_PIX_FMT_RGB565,
++ PXP_PIX_FMT_RGB555,
++ PXP_PIX_FMT_YUV420P,
++ PXP_PIX_FMT_YUV422P,
++};
++
++/*
++ * PXP common functions
++ */
++static void dump_pxp_reg(struct pxps *pxp)
++{
++ dev_dbg(pxp->dev, "PXP_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CTRL));
++ dev_dbg(pxp->dev, "PXP_STAT 0x%x",
++ __raw_readl(pxp->base + HW_PXP_STAT));
++ dev_dbg(pxp->dev, "PXP_OUT_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_CTRL));
++ dev_dbg(pxp->dev, "PXP_OUT_BUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_BUF));
++ dev_dbg(pxp->dev, "PXP_OUT_BUF2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_BUF2));
++ dev_dbg(pxp->dev, "PXP_OUT_PITCH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_PITCH));
++ dev_dbg(pxp->dev, "PXP_OUT_LRC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_LRC));
++ dev_dbg(pxp->dev, "PXP_OUT_PS_ULC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_PS_ULC));
++ dev_dbg(pxp->dev, "PXP_OUT_PS_LRC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_PS_LRC));
++ dev_dbg(pxp->dev, "PXP_OUT_AS_ULC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_AS_ULC));
++ dev_dbg(pxp->dev, "PXP_OUT_AS_LRC 0x%x",
++ __raw_readl(pxp->base + HW_PXP_OUT_AS_LRC));
++ dev_dbg(pxp->dev, "PXP_PS_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_CTRL));
++ dev_dbg(pxp->dev, "PXP_PS_BUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_BUF));
++ dev_dbg(pxp->dev, "PXP_PS_UBUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_UBUF));
++ dev_dbg(pxp->dev, "PXP_PS_VBUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_VBUF));
++ dev_dbg(pxp->dev, "PXP_PS_PITCH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_PITCH));
++ dev_dbg(pxp->dev, "PXP_PS_BACKGROUND 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_BACKGROUND));
++ dev_dbg(pxp->dev, "PXP_PS_SCALE 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_SCALE));
++ dev_dbg(pxp->dev, "PXP_PS_OFFSET 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_OFFSET));
++ dev_dbg(pxp->dev, "PXP_PS_CLRKEYLOW 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_CLRKEYLOW));
++ dev_dbg(pxp->dev, "PXP_PS_CLRKEYHIGH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_PS_CLRKEYHIGH));
++ dev_dbg(pxp->dev, "PXP_AS_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_CTRL));
++ dev_dbg(pxp->dev, "PXP_AS_BUF 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_BUF));
++ dev_dbg(pxp->dev, "PXP_AS_PITCH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_PITCH));
++ dev_dbg(pxp->dev, "PXP_AS_CLRKEYLOW 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_CLRKEYLOW));
++ dev_dbg(pxp->dev, "PXP_AS_CLRKEYHIGH 0x%x",
++ __raw_readl(pxp->base + HW_PXP_AS_CLRKEYHIGH));
++ dev_dbg(pxp->dev, "PXP_CSC1_COEF0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC1_COEF0));
++ dev_dbg(pxp->dev, "PXP_CSC1_COEF1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC1_COEF1));
++ dev_dbg(pxp->dev, "PXP_CSC1_COEF2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC1_COEF2));
++ dev_dbg(pxp->dev, "PXP_CSC2_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_CTRL));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF0));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF1));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF2));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF3 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF3));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF4 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF4));
++ dev_dbg(pxp->dev, "PXP_CSC2_COEF5 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CSC2_COEF5));
++ dev_dbg(pxp->dev, "PXP_LUT_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_CTRL));
++ dev_dbg(pxp->dev, "PXP_LUT_ADDR 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_ADDR));
++ dev_dbg(pxp->dev, "PXP_LUT_DATA 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_DATA));
++ dev_dbg(pxp->dev, "PXP_LUT_EXTMEM 0x%x",
++ __raw_readl(pxp->base + HW_PXP_LUT_EXTMEM));
++ dev_dbg(pxp->dev, "PXP_CFA 0x%x",
++ __raw_readl(pxp->base + HW_PXP_CFA));
++ dev_dbg(pxp->dev, "PXP_HIST_CTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST_CTRL));
++ dev_dbg(pxp->dev, "PXP_HIST2_PARAM 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST2_PARAM));
++ dev_dbg(pxp->dev, "PXP_HIST4_PARAM 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST4_PARAM));
++ dev_dbg(pxp->dev, "PXP_HIST8_PARAM0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST8_PARAM0));
++ dev_dbg(pxp->dev, "PXP_HIST8_PARAM1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST8_PARAM1));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM0 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM0));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM1 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM1));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM2 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM2));
++ dev_dbg(pxp->dev, "PXP_HIST16_PARAM3 0x%x",
++ __raw_readl(pxp->base + HW_PXP_HIST16_PARAM3));
++ dev_dbg(pxp->dev, "PXP_POWER 0x%x",
++ __raw_readl(pxp->base + HW_PXP_POWER));
++ dev_dbg(pxp->dev, "PXP_NEXT 0x%x",
++ __raw_readl(pxp->base + HW_PXP_NEXT));
++ dev_dbg(pxp->dev, "PXP_DEBUGCTRL 0x%x",
++ __raw_readl(pxp->base + HW_PXP_DEBUGCTRL));
++ dev_dbg(pxp->dev, "PXP_DEBUG 0x%x",
++ __raw_readl(pxp->base + HW_PXP_DEBUG));
++ dev_dbg(pxp->dev, "PXP_VERSION 0x%x",
++ __raw_readl(pxp->base + HW_PXP_VERSION));
++}
++
++static bool is_yuv(u32 pix_fmt)
++{
++ if ((pix_fmt == PXP_PIX_FMT_YUYV) |
++ (pix_fmt == PXP_PIX_FMT_UYVY) |
++ (pix_fmt == PXP_PIX_FMT_YVYU) |
++ (pix_fmt == PXP_PIX_FMT_VYUY) |
++ (pix_fmt == PXP_PIX_FMT_Y41P) |
++ (pix_fmt == PXP_PIX_FMT_YUV444) |
++ (pix_fmt == PXP_PIX_FMT_NV12) |
++ (pix_fmt == PXP_PIX_FMT_NV16) |
++ (pix_fmt == PXP_PIX_FMT_NV61) |
++ (pix_fmt == PXP_PIX_FMT_GREY) |
++ (pix_fmt == PXP_PIX_FMT_GY04) |
++ (pix_fmt == PXP_PIX_FMT_YVU410P) |
++ (pix_fmt == PXP_PIX_FMT_YUV410P) |
++ (pix_fmt == PXP_PIX_FMT_YVU420P) |
++ (pix_fmt == PXP_PIX_FMT_YUV420P) |
++ (pix_fmt == PXP_PIX_FMT_YUV420P2) |
++ (pix_fmt == PXP_PIX_FMT_YVU422P) |
++ (pix_fmt == PXP_PIX_FMT_YUV422P)) {
++ return true;
++ } else {
++ return false;
++ }
++}
++
++static void pxp_set_ctrl(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ u32 ctrl;
++ u32 fmt_ctrl;
++ int need_swap = 0; /* to support YUYV and YVYU formats */
++
++ /* Configure S0 input format */
++ switch (pxp_conf->s0_param.pixel_fmt) {
++ case PXP_PIX_FMT_RGB32:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__RGB888;
++ break;
++ case PXP_PIX_FMT_RGB565:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__RGB565;
++ break;
++ case PXP_PIX_FMT_RGB555:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__RGB555;
++ break;
++ case PXP_PIX_FMT_YUV420P:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV420;
++ break;
++ case PXP_PIX_FMT_YVU420P:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV420;
++ break;
++ case PXP_PIX_FMT_GREY:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__Y8;
++ break;
++ case PXP_PIX_FMT_GY04:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__Y4;
++ break;
++ case PXP_PIX_FMT_YUV422P:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV422;
++ break;
++ case PXP_PIX_FMT_UYVY:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__UYVY1P422;
++ break;
++ case PXP_PIX_FMT_YUYV:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__UYVY1P422;
++ need_swap = 1;
++ break;
++ case PXP_PIX_FMT_VYUY:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__VYUY1P422;
++ break;
++ case PXP_PIX_FMT_YVYU:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__VYUY1P422;
++ need_swap = 1;
++ break;
++ case PXP_PIX_FMT_NV12:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV2P420;
++ break;
++ case PXP_PIX_FMT_NV21:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YVU2P420;
++ break;
++ case PXP_PIX_FMT_NV16:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YUV2P422;
++ break;
++ case PXP_PIX_FMT_NV61:
++ fmt_ctrl = BV_PXP_PS_CTRL_FORMAT__YVU2P422;
++ break;
++ default:
++ fmt_ctrl = 0;
++ }
++
++ ctrl = BF_PXP_PS_CTRL_FORMAT(fmt_ctrl) | BF_PXP_PS_CTRL_SWAP(need_swap);
++ __raw_writel(ctrl, pxp->base + HW_PXP_PS_CTRL_SET);
++
++ /* Configure output format based on out_channel format */
++ switch (pxp_conf->out_param.pixel_fmt) {
++ case PXP_PIX_FMT_RGB32:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB888;
++ break;
++ case PXP_PIX_FMT_BGRA32:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__ARGB8888;
++ break;
++ case PXP_PIX_FMT_RGB24:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB888P;
++ break;
++ case PXP_PIX_FMT_RGB565:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB565;
++ break;
++ case PXP_PIX_FMT_RGB555:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__RGB555;
++ break;
++ case PXP_PIX_FMT_GREY:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__Y8;
++ break;
++ case PXP_PIX_FMT_GY04:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__Y4;
++ break;
++ case PXP_PIX_FMT_UYVY:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__UYVY1P422;
++ break;
++ case PXP_PIX_FMT_VYUY:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__VYUY1P422;
++ break;
++ case PXP_PIX_FMT_NV12:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YUV2P420;
++ break;
++ case PXP_PIX_FMT_NV21:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YVU2P420;
++ break;
++ case PXP_PIX_FMT_NV16:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YUV2P422;
++ break;
++ case PXP_PIX_FMT_NV61:
++ fmt_ctrl = BV_PXP_OUT_CTRL_FORMAT__YVU2P422;
++ break;
++ default:
++ fmt_ctrl = 0;
++ }
++
++ ctrl = BF_PXP_OUT_CTRL_FORMAT(fmt_ctrl);
++ __raw_writel(ctrl, pxp->base + HW_PXP_OUT_CTRL);
++
++ ctrl = 0;
++ if (proc_data->scaling)
++ ;
++ if (proc_data->vflip)
++ ctrl |= BM_PXP_CTRL_VFLIP;
++ if (proc_data->hflip)
++ ctrl |= BM_PXP_CTRL_HFLIP;
++ if (proc_data->rotate) {
++ ctrl |= BF_PXP_CTRL_ROTATE(proc_data->rotate / 90);
++ if (proc_data->rot_pos)
++ ctrl |= BM_PXP_CTRL_ROT_POS;
++ }
++
++ /* By default, the block size is set to 8x8,
++ * but it can be changed to 16x16 via the
++ * block_size module variable.
++ */
++ ctrl |= block_size << 23;
++
++ __raw_writel(ctrl, pxp->base + HW_PXP_CTRL);
++}
++
++static int pxp_start(struct pxps *pxp)
++{
++ __raw_writel(BM_PXP_CTRL_IRQ_ENABLE, pxp->base + HW_PXP_CTRL_SET);
++ __raw_writel(BM_PXP_CTRL_ENABLE, pxp->base + HW_PXP_CTRL_SET);
++ dump_pxp_reg(pxp);
++
++ return 0;
++}
++
++static void pxp_set_outbuf(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *out_params = &pxp_conf->out_param;
++
++ __raw_writel(out_params->paddr, pxp->base + HW_PXP_OUT_BUF);
++
++ __raw_writel(BF_PXP_OUT_LRC_X(out_params->width - 1) |
++ BF_PXP_OUT_LRC_Y(out_params->height - 1),
++ pxp->base + HW_PXP_OUT_LRC);
++
++ if (out_params->pixel_fmt == PXP_PIX_FMT_RGB24) {
++ __raw_writel(out_params->stride * 3,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_BGRA32 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_RGB32) {
++ __raw_writel(out_params->stride << 2,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_RGB565) {
++ __raw_writel(out_params->stride << 1,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_UYVY ||
++ (out_params->pixel_fmt == PXP_PIX_FMT_VYUY)) {
++ __raw_writel(out_params->stride << 1,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_GREY ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV12 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV21 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV16 ||
++ out_params->pixel_fmt == PXP_PIX_FMT_NV61) {
++ __raw_writel(out_params->stride,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else if (out_params->pixel_fmt == PXP_PIX_FMT_GY04) {
++ __raw_writel(out_params->stride >> 1,
++ pxp->base + HW_PXP_OUT_PITCH);
++ } else {
++ __raw_writel(0, pxp->base + HW_PXP_OUT_PITCH);
++ }
++
++ /* set global alpha if necessary */
++ if (out_params->global_alpha_enable) {
++ __raw_writel(out_params->global_alpha << 24,
++ pxp->base + HW_PXP_OUT_CTRL_SET);
++ __raw_writel(BM_PXP_OUT_CTRL_ALPHA_OUTPUT,
++ pxp->base + HW_PXP_OUT_CTRL_SET);
++ }
++}
++
++static void pxp_set_s0colorkey(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *s0_params = &pxp_conf->s0_param;
++
++ /* Low and high are set equal. V4L does not allow a chromakey range */
++ if (s0_params->color_key_enable == 0 || s0_params->color_key == -1) {
++ /* disable color key */
++ __raw_writel(0xFFFFFF, pxp->base + HW_PXP_PS_CLRKEYLOW);
++ __raw_writel(0, pxp->base + HW_PXP_PS_CLRKEYHIGH);
++ } else {
++ __raw_writel(s0_params->color_key,
++ pxp->base + HW_PXP_PS_CLRKEYLOW);
++ __raw_writel(s0_params->color_key,
++ pxp->base + HW_PXP_PS_CLRKEYHIGH);
++ }
++}
++
++static void pxp_set_olcolorkey(int layer_no, struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *ol_params = &pxp_conf->ol_param[layer_no];
++
++ /* Low and high are set equal. V4L does not allow a chromakey range */
++ if (ol_params->color_key_enable != 0 && ol_params->color_key != -1) {
++ __raw_writel(ol_params->color_key,
++ pxp->base + HW_PXP_AS_CLRKEYLOW);
++ __raw_writel(ol_params->color_key,
++ pxp->base + HW_PXP_AS_CLRKEYHIGH);
++ } else {
++ /* disable color key */
++ __raw_writel(0xFFFFFF, pxp->base + HW_PXP_AS_CLRKEYLOW);
++ __raw_writel(0, pxp->base + HW_PXP_AS_CLRKEYHIGH);
++ }
++}
++
++static void pxp_set_oln(int layer_no, struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *olparams_data = &pxp_conf->ol_param[layer_no];
++ dma_addr_t phys_addr = olparams_data->paddr;
++ u32 pitch = olparams_data->stride ? olparams_data->stride :
++ olparams_data->width;
++
++ __raw_writel(phys_addr, pxp->base + HW_PXP_AS_BUF);
++
++ /* Fixme */
++ if (olparams_data->width == 0 && olparams_data->height == 0) {
++ __raw_writel(0xffffffff, pxp->base + HW_PXP_OUT_AS_ULC);
++ __raw_writel(0x0, pxp->base + HW_PXP_OUT_AS_LRC);
++ } else {
++ __raw_writel(0x0, pxp->base + HW_PXP_OUT_AS_ULC);
++ if (pxp_conf->proc_data.rotate == 90 ||
++ pxp_conf->proc_data.rotate == 270) {
++ if (pxp_conf->proc_data.rot_pos == 1) {
++ __raw_writel(BF_PXP_OUT_AS_LRC_X(olparams_data->height - 1) |
++ BF_PXP_OUT_AS_LRC_Y(olparams_data->width - 1),
++ pxp->base + HW_PXP_OUT_AS_LRC);
++ } else {
++ __raw_writel(BF_PXP_OUT_AS_LRC_X(olparams_data->width - 1) |
++ BF_PXP_OUT_AS_LRC_Y(olparams_data->height - 1),
++ pxp->base + HW_PXP_OUT_AS_LRC);
++ }
++ } else {
++ __raw_writel(BF_PXP_OUT_AS_LRC_X(olparams_data->width - 1) |
++ BF_PXP_OUT_AS_LRC_Y(olparams_data->height - 1),
++ pxp->base + HW_PXP_OUT_AS_LRC);
++ }
++ }
++
++ if ((olparams_data->pixel_fmt == PXP_PIX_FMT_BGRA32) |
++ (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB32)) {
++ __raw_writel(pitch << 2,
++ pxp->base + HW_PXP_AS_PITCH);
++ } else if (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB565) {
++ __raw_writel(pitch << 1,
++ pxp->base + HW_PXP_AS_PITCH);
++ } else {
++ __raw_writel(0, pxp->base + HW_PXP_AS_PITCH);
++ }
++}
++
++static void pxp_set_olparam(int layer_no, struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *olparams_data = &pxp_conf->ol_param[layer_no];
++ u32 olparam;
++
++ olparam = BF_PXP_AS_CTRL_ALPHA(olparams_data->global_alpha);
++ if (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB32) {
++ olparam |=
++ BF_PXP_AS_CTRL_FORMAT(BV_PXP_AS_CTRL_FORMAT__RGB888);
++ } else if (olparams_data->pixel_fmt == PXP_PIX_FMT_BGRA32) {
++ olparam |=
++ BF_PXP_AS_CTRL_FORMAT(BV_PXP_AS_CTRL_FORMAT__ARGB8888);
++ if (!olparams_data->combine_enable) {
++ olparam |=
++ BF_PXP_AS_CTRL_ALPHA_CTRL
++ (BV_PXP_AS_CTRL_ALPHA_CTRL__ROPs);
++ olparam |= 0x3 << 16;
++ }
++ } else if (olparams_data->pixel_fmt == PXP_PIX_FMT_RGB565) {
++ olparam |=
++ BF_PXP_AS_CTRL_FORMAT(BV_PXP_AS_CTRL_FORMAT__RGB565);
++ }
++ if (olparams_data->global_alpha_enable) {
++ if (olparams_data->global_override) {
++ olparam |=
++ BF_PXP_AS_CTRL_ALPHA_CTRL
++ (BV_PXP_AS_CTRL_ALPHA_CTRL__Override);
++ } else {
++ olparam |=
++ BF_PXP_AS_CTRL_ALPHA_CTRL
++ (BV_PXP_AS_CTRL_ALPHA_CTRL__Multiply);
++ }
++ if (olparams_data->alpha_invert)
++ olparam |= BM_PXP_AS_CTRL_ALPHA_INVERT;
++ }
++ if (olparams_data->color_key_enable)
++ olparam |= BM_PXP_AS_CTRL_ENABLE_COLORKEY;
++
++ __raw_writel(olparam, pxp->base + HW_PXP_AS_CTRL);
++}
++
++static void pxp_set_s0param(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ u32 s0param;
++
++ /* contains the coordinate for the PS in the OUTPUT buffer. */
++ if ((pxp_conf->s0_param).width == 0 &&
++ (pxp_conf->s0_param).height == 0) {
++ __raw_writel(0xffffffff, pxp->base + HW_PXP_OUT_PS_ULC);
++ __raw_writel(0x0, pxp->base + HW_PXP_OUT_PS_LRC);
++ } else {
++ s0param = BF_PXP_OUT_PS_ULC_X(proc_data->drect.left);
++ s0param |= BF_PXP_OUT_PS_ULC_Y(proc_data->drect.top);
++ __raw_writel(s0param, pxp->base + HW_PXP_OUT_PS_ULC);
++ s0param = BF_PXP_OUT_PS_LRC_X(proc_data->drect.left +
++ proc_data->drect.width - 1);
++ s0param |= BF_PXP_OUT_PS_LRC_Y(proc_data->drect.top +
++ proc_data->drect.height - 1);
++ __raw_writel(s0param, pxp->base + HW_PXP_OUT_PS_LRC);
++ }
++}
++
++/* crop behavior is re-designed in h/w. */
++static void pxp_set_s0crop(struct pxps *pxp)
++{
++ /*
++ * place-holder, it's implemented in other functions in this driver.
++ * Refer to "Clipping source images" section in RM for detail.
++ */
++}
++
++static int pxp_set_scaling(struct pxps *pxp)
++{
++ int ret = 0;
++ u32 xscale, yscale, s0scale;
++ u32 decx, decy, xdec = 0, ydec = 0;
++ struct pxp_proc_data *proc_data = &pxp->pxp_conf_state.proc_data;
++
++ if (((proc_data->srect.width == proc_data->drect.width) &&
++ (proc_data->srect.height == proc_data->drect.height)) ||
++ ((proc_data->srect.width == 0) && (proc_data->srect.height == 0))) {
++ proc_data->scaling = 0;
++ __raw_writel(0x10001000, pxp->base + HW_PXP_PS_SCALE);
++ __raw_writel(0, pxp->base + HW_PXP_PS_CTRL);
++ goto out;
++ }
++
++ proc_data->scaling = 1;
++ decx = proc_data->srect.width / proc_data->drect.width;
++ decy = proc_data->srect.height / proc_data->drect.height;
++ if (decx > 0) {
++ if (decx >= 2 && decx < 4) {
++ decx = 2;
++ xdec = 1;
++ } else if (decx >= 4 && decx < 8) {
++ decx = 4;
++ xdec = 2;
++ } else if (decx >= 8) {
++ decx = 8;
++ xdec = 3;
++ }
++ xscale = proc_data->srect.width * 0x1000 /
++ (proc_data->drect.width * decx);
++ } else
++ xscale = proc_data->srect.width * 0x1000 /
++ proc_data->drect.width;
++ if (decy > 0) {
++ if (decy >= 2 && decy < 4) {
++ decy = 2;
++ ydec = 1;
++ } else if (decy >= 4 && decy < 8) {
++ decy = 4;
++ ydec = 2;
++ } else if (decy >= 8) {
++ decy = 8;
++ ydec = 3;
++ }
++ yscale = proc_data->srect.height * 0x1000 /
++ (proc_data->drect.height * decy);
++ } else
++ yscale = proc_data->srect.height * 0x1000 /
++ proc_data->drect.height;
++
++ __raw_writel((xdec << 10) | (ydec << 8), pxp->base + HW_PXP_PS_CTRL);
++
++ if (xscale > PXP_DOWNSCALE_THRESHOLD)
++ xscale = PXP_DOWNSCALE_THRESHOLD;
++ if (yscale > PXP_DOWNSCALE_THRESHOLD)
++ yscale = PXP_DOWNSCALE_THRESHOLD;
++ s0scale = BF_PXP_PS_SCALE_YSCALE(yscale) |
++ BF_PXP_PS_SCALE_XSCALE(xscale);
++ __raw_writel(s0scale, pxp->base + HW_PXP_PS_SCALE);
++
++out:
++ pxp_set_ctrl(pxp);
++
++ return ret;
++}
++
++static void pxp_set_bg(struct pxps *pxp)
++{
++ __raw_writel(pxp->pxp_conf_state.proc_data.bgcolor,
++ pxp->base + HW_PXP_PS_BACKGROUND);
++}
++
++static void pxp_set_lut(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ int lut_op = pxp_conf->proc_data.lut_transform;
++ u32 reg_val;
++ int i;
++ bool use_cmap = (lut_op & PXP_LUT_USE_CMAP) ? true : false;
++ u8 *cmap = pxp_conf->proc_data.lut_map;
++ u32 entry_src;
++ u32 pix_val;
++ u8 entry[4];
++
++ /*
++ * If LUT already configured as needed, return...
++ * Unless CMAP is needed and it has been updated.
++ */
++ if ((pxp->lut_state == lut_op) &&
++ !(use_cmap && pxp_conf->proc_data.lut_map_updated))
++ return;
++
++ if (lut_op == PXP_LUT_NONE) {
++ __raw_writel(BM_PXP_LUT_CTRL_BYPASS,
++ pxp->base + HW_PXP_LUT_CTRL);
++ } else if (((lut_op & PXP_LUT_INVERT) != 0)
++ && ((lut_op & PXP_LUT_BLACK_WHITE) != 0)) {
++ /* Fill out LUT table with inverted monochromized values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++) {
++ entry_src = use_cmap ?
++ cmap[pix_val + i] : pix_val + i;
++ entry[i] = (entry_src < 0x80) ? 0xFF : 0x00;
++ }
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ } else if ((lut_op & PXP_LUT_INVERT) != 0) {
++ /* Fill out LUT table with 8-bit inverted values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++) {
++ entry_src = use_cmap ?
++ cmap[pix_val + i] : pix_val + i;
++ entry[i] = ~entry_src & 0xFF;
++ }
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ } else if ((lut_op & PXP_LUT_BLACK_WHITE) != 0) {
++ /* Fill out LUT table with 8-bit monochromized values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++) {
++ entry_src = use_cmap ?
++ cmap[pix_val + i] : pix_val + i;
++ entry[i] = (entry_src < 0x80) ? 0x00 : 0xFF;
++ }
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ } else if (use_cmap) {
++ /* Fill out LUT table using colormap values */
++
++ /* clear bypass bit, set lookup mode & out mode */
++ __raw_writel(BF_PXP_LUT_CTRL_LOOKUP_MODE
++ (BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8) |
++ BF_PXP_LUT_CTRL_OUT_MODE
++ (BV_PXP_LUT_CTRL_OUT_MODE__Y8),
++ pxp->base + HW_PXP_LUT_CTRL);
++
++ /* Initialize LUT address to 0 and set NUM_BYTES to 0 */
++ __raw_writel(0, pxp->base + HW_PXP_LUT_ADDR);
++
++ /* LUT address pointer auto-increments after each data write */
++ for (pix_val = 0; pix_val < 256; pix_val += 4) {
++ for (i = 0; i < 4; i++)
++ entry[i] = cmap[pix_val + i];
++ reg_val = (entry[3] << 24) | (entry[2] << 16) |
++ (entry[1] << 8) | entry[0];
++ __raw_writel(reg_val, pxp->base + HW_PXP_LUT_DATA);
++ }
++ }
++
++ pxp->lut_state = lut_op;
++}
++
++static void pxp_set_csc(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *s0_params = &pxp_conf->s0_param;
++ struct pxp_layer_param *ol_params = &pxp_conf->ol_param[0];
++ struct pxp_layer_param *out_params = &pxp_conf->out_param;
++
++ bool input_is_YUV = is_yuv(s0_params->pixel_fmt);
++ bool output_is_YUV = is_yuv(out_params->pixel_fmt);
++
++ if (input_is_YUV && output_is_YUV) {
++ /*
++ * Input = YUV, Output = YUV
++ * No CSC unless we need to do combining
++ */
++ if (ol_params->combine_enable) {
++ /* Must convert to RGB for combining with RGB overlay */
++
++ /* CSC1 - YUV->RGB */
++ __raw_writel(0x04030000, pxp->base + HW_PXP_CSC1_COEF0);
++ __raw_writel(0x01230208, pxp->base + HW_PXP_CSC1_COEF1);
++ __raw_writel(0x076b079c, pxp->base + HW_PXP_CSC1_COEF2);
++
++ /* CSC2 - RGB->YUV */
++ __raw_writel(0x4, pxp->base + HW_PXP_CSC2_CTRL);
++ __raw_writel(0x0096004D, pxp->base + HW_PXP_CSC2_COEF0);
++ __raw_writel(0x05DA001D, pxp->base + HW_PXP_CSC2_COEF1);
++ __raw_writel(0x007005B6, pxp->base + HW_PXP_CSC2_COEF2);
++ __raw_writel(0x057C009E, pxp->base + HW_PXP_CSC2_COEF3);
++ __raw_writel(0x000005E6, pxp->base + HW_PXP_CSC2_COEF4);
++ __raw_writel(0x00000000, pxp->base + HW_PXP_CSC2_COEF5);
++ } else {
++ /* Input & Output both YUV, so bypass both CSCs */
++
++ /* CSC1 - Bypass */
++ __raw_writel(0x40000000, pxp->base + HW_PXP_CSC1_COEF0);
++
++ /* CSC2 - Bypass */
++ __raw_writel(0x1, pxp->base + HW_PXP_CSC2_CTRL);
++ }
++ } else if (input_is_YUV && !output_is_YUV) {
++ /*
++ * Input = YUV, Output = RGB
++ * Use CSC1 to convert to RGB
++ */
++
++ /* CSC1 - YUV->RGB */
++ __raw_writel(0x84ab01f0, pxp->base + HW_PXP_CSC1_COEF0);
++ __raw_writel(0x01980204, pxp->base + HW_PXP_CSC1_COEF1);
++ __raw_writel(0x0730079c, pxp->base + HW_PXP_CSC1_COEF2);
++
++ /* CSC2 - Bypass */
++ __raw_writel(0x1, pxp->base + HW_PXP_CSC2_CTRL);
++ } else if (!input_is_YUV && output_is_YUV) {
++ /*
++ * Input = RGB, Output = YUV
++ * Use CSC2 to convert to YUV
++ */
++
++ /* CSC1 - Bypass */
++ __raw_writel(0x40000000, pxp->base + HW_PXP_CSC1_COEF0);
++
++ /* CSC2 - RGB->YUV */
++ __raw_writel(0x4, pxp->base + HW_PXP_CSC2_CTRL);
++ __raw_writel(0x0096004D, pxp->base + HW_PXP_CSC2_COEF0);
++ __raw_writel(0x05DA001D, pxp->base + HW_PXP_CSC2_COEF1);
++ __raw_writel(0x007005B6, pxp->base + HW_PXP_CSC2_COEF2);
++ __raw_writel(0x057C009E, pxp->base + HW_PXP_CSC2_COEF3);
++ __raw_writel(0x000005E6, pxp->base + HW_PXP_CSC2_COEF4);
++ __raw_writel(0x00000000, pxp->base + HW_PXP_CSC2_COEF5);
++ } else {
++ /*
++ * Input = RGB, Output = RGB
++ * Input & Output both RGB, so bypass both CSCs
++ */
++
++ /* CSC1 - Bypass */
++ __raw_writel(0x40000000, pxp->base + HW_PXP_CSC1_COEF0);
++
++ /* CSC2 - Bypass */
++ __raw_writel(0x1, pxp->base + HW_PXP_CSC2_CTRL);
++ }
++
++ /* YCrCb colorspace */
++ /* Not sure when we use this...no YCrCb formats are defined for PxP */
++ /*
++ __raw_writel(0x84ab01f0, HW_PXP_CSCCOEFF0_ADDR);
++ __raw_writel(0x01230204, HW_PXP_CSCCOEFF1_ADDR);
++ __raw_writel(0x0730079c, HW_PXP_CSCCOEFF2_ADDR);
++ */
++
++}
++
++static void pxp_set_s0buf(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_layer_param *s0_params = &pxp_conf->s0_param;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ dma_addr_t Y, U, V;
++ dma_addr_t Y1, U1, V1;
++ u32 offset, bpp = 1;
++ u32 pitch = s0_params->stride ? s0_params->stride :
++ s0_params->width;
++
++ Y = s0_params->paddr;
++
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB565)
++ bpp = 2;
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB32)
++ bpp = 4;
++ offset = (proc_data->srect.top * s0_params->width +
++ proc_data->srect.left) * bpp;
++ /* clipping or cropping */
++ Y1 = Y + offset;
++ __raw_writel(Y1, pxp->base + HW_PXP_PS_BUF);
++ if ((s0_params->pixel_fmt == PXP_PIX_FMT_YUV420P) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_GREY) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_YUV422P)) {
++ /* Set to 1 if YUV format is 4:2:2 rather than 4:2:0 */
++ int s = 2;
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_YUV422P)
++ s = 1;
++
++ offset = proc_data->srect.top * s0_params->width / 4 +
++ proc_data->srect.left / 2;
++ U = Y + (s0_params->width * s0_params->height);
++ U1 = U + offset;
++ V = U + ((s0_params->width * s0_params->height) >> s);
++ V1 = V + offset;
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) {
++ __raw_writel(V1, pxp->base + HW_PXP_PS_UBUF);
++ __raw_writel(U1, pxp->base + HW_PXP_PS_VBUF);
++ } else {
++ __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
++ __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
++ }
++ } else if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV12) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV21) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV61)) {
++ int s = 2;
++ if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
++ (s0_params->pixel_fmt == PXP_PIX_FMT_NV61))
++ s = 1;
++
++ offset = (proc_data->srect.top * s0_params->width +
++ proc_data->srect.left) / s;
++ U = Y + (s0_params->width * s0_params->height);
++ U1 = U + offset;
++
++ __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
++ }
++
++ /* TODO: only support RGB565, Y8, Y4, YUV420 */
++ if (s0_params->pixel_fmt == PXP_PIX_FMT_GREY ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YUV420P ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV12 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV21 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV16 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_NV61 ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YUV422P) {
++ __raw_writel(pitch, pxp->base + HW_PXP_PS_PITCH);
++	} else if (s0_params->pixel_fmt == PXP_PIX_FMT_GY04)
++ __raw_writel(pitch >> 1,
++ pxp->base + HW_PXP_PS_PITCH);
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB32)
++ __raw_writel(pitch << 2,
++ pxp->base + HW_PXP_PS_PITCH);
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_UYVY ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YUYV ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_VYUY ||
++ s0_params->pixel_fmt == PXP_PIX_FMT_YVYU)
++ __raw_writel(pitch << 1,
++ pxp->base + HW_PXP_PS_PITCH);
++ else if (s0_params->pixel_fmt == PXP_PIX_FMT_RGB565)
++ __raw_writel(pitch << 1,
++ pxp->base + HW_PXP_PS_PITCH);
++ else
++ __raw_writel(0, pxp->base + HW_PXP_PS_PITCH);
++}
++
++/**
++ * pxp_config() - configure PxP for a processing task
++ * @pxp: PXP context.
++ * @pxp_chan: PXP channel.
++ * @return: 0 on success or negative error code on failure.
++ */
++static int pxp_config(struct pxps *pxp, struct pxp_channel *pxp_chan)
++{
++ struct pxp_config_data *pxp_conf_data = &pxp->pxp_conf_state;
++ int ol_nr;
++ int i;
++
++ /* Configure PxP regs */
++ pxp_set_ctrl(pxp);
++ pxp_set_s0param(pxp);
++ pxp_set_s0crop(pxp);
++ pxp_set_scaling(pxp);
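++	/* layer_nr counts S0 and the output buffer too; the rest are overlay layers */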
++ ol_nr = pxp_conf_data->layer_nr - 2;
++ while (ol_nr > 0) {
++ i = pxp_conf_data->layer_nr - 2 - ol_nr;
++ pxp_set_oln(i, pxp);
++ pxp_set_olparam(i, pxp);
++		/* Only the color key in the higher overlay takes effect. */
++ pxp_set_olcolorkey(i, pxp);
++ ol_nr--;
++ }
++ pxp_set_s0colorkey(pxp);
++ pxp_set_csc(pxp);
++ pxp_set_bg(pxp);
++ pxp_set_lut(pxp);
++
++ pxp_set_s0buf(pxp);
++ pxp_set_outbuf(pxp);
++
++ return 0;
++}
++
++static void pxp_clk_enable(struct pxps *pxp)
++{
++ mutex_lock(&pxp->clk_mutex);
++
++ if (pxp->clk_stat == CLK_STAT_ON) {
++ mutex_unlock(&pxp->clk_mutex);
++ return;
++ }
++
++ clk_prepare_enable(pxp->clk);
++ pxp->clk_stat = CLK_STAT_ON;
++
++ mutex_unlock(&pxp->clk_mutex);
++}
++
++static void pxp_clk_disable(struct pxps *pxp)
++{
++ unsigned long flags;
++
++ mutex_lock(&pxp->clk_mutex);
++
++ if (pxp->clk_stat == CLK_STAT_OFF) {
++ mutex_unlock(&pxp->clk_mutex);
++ return;
++ }
++
++ spin_lock_irqsave(&pxp->lock, flags);
++ if ((pxp->pxp_ongoing == 0) && list_empty(&head)) {
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ clk_disable_unprepare(pxp->clk);
++ pxp->clk_stat = CLK_STAT_OFF;
++ } else
++ spin_unlock_irqrestore(&pxp->lock, flags);
++
++ mutex_unlock(&pxp->clk_mutex);
++}
++
++static inline void clkoff_callback(struct work_struct *w)
++{
++ struct pxps *pxp = container_of(w, struct pxps, work);
++
++ pxp_clk_disable(pxp);
++}
++
++static void pxp_clkoff_timer(unsigned long arg)
++{
++ struct pxps *pxp = (struct pxps *)arg;
++
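++	/* The timer runs in atomic context, so defer clock gating to a workqueue */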
++ if ((pxp->pxp_ongoing == 0) && list_empty(&head))
++ schedule_work(&pxp->work);
++ else
++ mod_timer(&pxp->clk_timer,
++ jiffies + msecs_to_jiffies(timeout_in_ms));
++}
++
++static struct pxp_tx_desc *pxpdma_first_queued(struct pxp_channel *pxp_chan)
++{
++ return list_entry(pxp_chan->queue.next, struct pxp_tx_desc, list);
++}
++
++/* called with pxp->lock held */
++static void __pxpdma_dostart(struct pxp_channel *pxp_chan)
++{
++ struct pxp_dma *pxp_dma = to_pxp_dma(pxp_chan->dma_chan.device);
++ struct pxps *pxp = to_pxp(pxp_dma);
++ struct pxp_tx_desc *desc;
++ struct pxp_tx_desc *child;
++ int i = 0;
++
++ /* S0 */
++ desc = list_first_entry(&head, struct pxp_tx_desc, list);
++ memcpy(&pxp->pxp_conf_state.s0_param,
++ &desc->layer_param.s0_param, sizeof(struct pxp_layer_param));
++ memcpy(&pxp->pxp_conf_state.proc_data,
++ &desc->proc_data, sizeof(struct pxp_proc_data));
++
++ /* Save PxP configuration */
++ list_for_each_entry(child, &desc->tx_list, list) {
++ if (i == 0) { /* Output */
++ memcpy(&pxp->pxp_conf_state.out_param,
++ &child->layer_param.out_param,
++ sizeof(struct pxp_layer_param));
++ } else { /* Overlay */
++ memcpy(&pxp->pxp_conf_state.ol_param[i - 1],
++ &child->layer_param.ol_param,
++ sizeof(struct pxp_layer_param));
++ }
++
++ i++;
++ }
++ pr_debug("%s:%d S0 w/h %d/%d paddr %08x\n", __func__, __LINE__,
++ pxp->pxp_conf_state.s0_param.width,
++ pxp->pxp_conf_state.s0_param.height,
++ pxp->pxp_conf_state.s0_param.paddr);
++ pr_debug("%s:%d OUT w/h %d/%d paddr %08x\n", __func__, __LINE__,
++ pxp->pxp_conf_state.out_param.width,
++ pxp->pxp_conf_state.out_param.height,
++ pxp->pxp_conf_state.out_param.paddr);
++}
++
++static void pxpdma_dostart_work(struct pxps *pxp)
++{
++ struct pxp_channel *pxp_chan = NULL;
++ unsigned long flags;
++ struct pxp_tx_desc *desc = NULL;
++
++ spin_lock_irqsave(&pxp->lock, flags);
++
++ desc = list_entry(head.next, struct pxp_tx_desc, list);
++ pxp_chan = to_pxp_channel(desc->txd.chan);
++
++ __pxpdma_dostart(pxp_chan);
++
++ /* Configure PxP */
++ pxp_config(pxp, pxp_chan);
++
++ pxp_start(pxp);
++
++ spin_unlock_irqrestore(&pxp->lock, flags);
++}
++
++static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct pxps *pxp)
++{
++ unsigned long flags;
++ struct pxp_tx_desc *desc = NULL;
++
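++	/* Move every descriptor queued on this channel onto the global task list */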
++ do {
++ desc = pxpdma_first_queued(pxp_chan);
++ spin_lock_irqsave(&pxp->lock, flags);
++ list_move_tail(&desc->list, &head);
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ } while (!list_empty(&pxp_chan->queue));
++}
++
++static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct pxp_tx_desc *desc = to_tx_desc(tx);
++ struct pxp_channel *pxp_chan = to_pxp_channel(tx->chan);
++ dma_cookie_t cookie;
++
++ dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
++
++ /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
++ spin_lock(&pxp_chan->lock);
++
++ cookie = pxp_chan->dma_chan.cookie;
++
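++	/* dma_cookie_t is signed; wrap back to 1 on overflow */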
++ if (++cookie < 0)
++ cookie = 1;
++
++ /* from dmaengine.h: "last cookie value returned to client" */
++ pxp_chan->dma_chan.cookie = cookie;
++ tx->cookie = cookie;
++
++ /* Here we add the tx descriptor to our PxP task queue. */
++ list_add_tail(&desc->list, &pxp_chan->queue);
++
++ spin_unlock(&pxp_chan->lock);
++
++ dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
++
++ return cookie;
++}
++
++/**
++ * pxp_init_channel() - initialize a PXP channel.
++ * @pxp_dma: PXP DMA context.
++ * @pxp_chan: pointer to the channel object.
++ * @return 0 on success or negative error code on failure.
++ */
++static int pxp_init_channel(struct pxp_dma *pxp_dma,
++ struct pxp_channel *pxp_chan)
++{
++ int ret = 0;
++
++ /*
++	 * We are using a _virtual_ channel here.
++	 * Each channel contains all parameters of the corresponding layers
++ * for one transaction; each layer is represented as one descriptor
++ * (i.e., pxp_tx_desc) here.
++ */
++
++ INIT_LIST_HEAD(&pxp_chan->queue);
++
++ return ret;
++}
++
++static irqreturn_t pxp_irq(int irq, void *dev_id)
++{
++ struct pxps *pxp = dev_id;
++ struct pxp_channel *pxp_chan;
++ struct pxp_tx_desc *desc;
++ struct pxp_tx_desc *child, *_child;
++ dma_async_tx_callback callback;
++ void *callback_param;
++ unsigned long flags;
++ u32 hist_status;
++
++ dump_pxp_reg(pxp);
++
++ hist_status =
++ __raw_readl(pxp->base + HW_PXP_HIST_CTRL) & BM_PXP_HIST_CTRL_STATUS;
++
++ __raw_writel(BM_PXP_STAT_IRQ, pxp->base + HW_PXP_STAT_CLR);
++
++ spin_lock_irqsave(&pxp->lock, flags);
++
++ if (list_empty(&head)) {
++ pxp->pxp_ongoing = 0;
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ return IRQ_NONE;
++ }
++
++ /* Get descriptor and call callback */
++ desc = list_entry(head.next, struct pxp_tx_desc, list);
++ pxp_chan = to_pxp_channel(desc->txd.chan);
++
++ pxp_chan->completed = desc->txd.cookie;
++
++ callback = desc->txd.callback;
++ callback_param = desc->txd.callback_param;
++
++ /* Send histogram status back to caller */
++ desc->hist_status = hist_status;
++
++ if ((desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
++ callback(callback_param);
++
++ pxp_chan->status = PXP_CHANNEL_INITIALIZED;
++
++ list_for_each_entry_safe(child, _child, &desc->tx_list, list) {
++ list_del_init(&child->list);
++ kmem_cache_free(tx_desc_cache, (void *)child);
++ }
++ list_del_init(&desc->list);
++ kmem_cache_free(tx_desc_cache, (void *)desc);
++
++ complete(&pxp->complete);
++ pxp->pxp_ongoing = 0;
++ mod_timer(&pxp->clk_timer, jiffies + msecs_to_jiffies(timeout_in_ms));
++
++ spin_unlock_irqrestore(&pxp->lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++/* allocate/free dma tx descriptors dynamically */
++static struct pxp_tx_desc *pxpdma_desc_alloc(struct pxp_channel *pxp_chan)
++{
++ struct pxp_tx_desc *desc = NULL;
++ struct dma_async_tx_descriptor *txd = NULL;
++
++ desc = kmem_cache_alloc(tx_desc_cache, GFP_KERNEL | __GFP_ZERO);
++ if (desc == NULL)
++ return NULL;
++
++ INIT_LIST_HEAD(&desc->list);
++ INIT_LIST_HEAD(&desc->tx_list);
++ txd = &desc->txd;
++ dma_async_tx_descriptor_init(txd, &pxp_chan->dma_chan);
++ txd->tx_submit = pxp_tx_submit;
++
++ return desc;
++}
++
++/* Allocate and initialise a transfer descriptor. */
++static struct dma_async_tx_descriptor *pxp_prep_slave_sg(struct dma_chan *chan,
++		struct scatterlist *sgl, unsigned int sg_len,
++		enum dma_transfer_direction direction,
++		unsigned long tx_flags, void *context)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
++ struct pxps *pxp = to_pxp(pxp_dma);
++ struct pxp_tx_desc *desc = NULL;
++ struct pxp_tx_desc *first = NULL, *prev = NULL;
++ struct scatterlist *sg;
++ dma_addr_t phys_addr;
++ int i;
++
++ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
++ dev_err(chan->device->dev, "Invalid DMA direction %d!\n",
++ direction);
++ return NULL;
++ }
++
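++	/* At least two entries are required: S0 (input) and the output buffer */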
++ if (unlikely(sg_len < 2))
++ return NULL;
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ desc = pxpdma_desc_alloc(pxp_chan);
++ if (!desc) {
++			dev_err(chan->device->dev, "not enough memory to allocate tx descriptor\n");
++ return NULL;
++ }
++
++ phys_addr = sg_dma_address(sg);
++
++ if (!first) {
++ first = desc;
++
++ desc->layer_param.s0_param.paddr = phys_addr;
++ } else {
++ list_add_tail(&desc->list, &first->tx_list);
++ prev->next = desc;
++ desc->next = NULL;
++
++ if (i == 1)
++ desc->layer_param.out_param.paddr = phys_addr;
++ else
++ desc->layer_param.ol_param.paddr = phys_addr;
++ }
++
++ prev = desc;
++ }
++
++ pxp->pxp_conf_state.layer_nr = sg_len;
++ first->txd.flags = tx_flags;
++ first->len = sg_len;
++ pr_debug("%s:%d first %p, first->len %d, flags %08x\n",
++ __func__, __LINE__, first, first->len, first->txd.flags);
++
++ return &first->txd;
++}
++
++static void pxp_issue_pending(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
++ struct pxps *pxp = to_pxp(pxp_dma);
++
++ spin_lock(&pxp_chan->lock);
++
++ if (list_empty(&pxp_chan->queue)) {
++ spin_unlock(&pxp_chan->lock);
++ return;
++ }
++
++ pxpdma_dequeue(pxp_chan, pxp);
++ pxp_chan->status = PXP_CHANNEL_READY;
++
++ spin_unlock(&pxp_chan->lock);
++
++ pxp_clk_enable(pxp);
++ wake_up_interruptible(&pxp->thread_waitq);
++}
++
++static void __pxp_terminate_all(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ pxp_chan->status = PXP_CHANNEL_INITIALIZED;
++}
++
++static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
++ unsigned long arg)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ /* Only supports DMA_TERMINATE_ALL */
++ if (cmd != DMA_TERMINATE_ALL)
++ return -ENXIO;
++
++ spin_lock(&pxp_chan->lock);
++ __pxp_terminate_all(chan);
++ spin_unlock(&pxp_chan->lock);
++
++ return 0;
++}
++
++static int pxp_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
++ int ret;
++
++ /* dmaengine.c now guarantees to only offer free channels */
++ BUG_ON(chan->client_count > 1);
++ WARN_ON(pxp_chan->status != PXP_CHANNEL_FREE);
++
++ chan->cookie = 1;
++ pxp_chan->completed = -ENXIO;
++
++ pr_debug("%s dma_chan.chan_id %d\n", __func__, chan->chan_id);
++ ret = pxp_init_channel(pxp_dma, pxp_chan);
++ if (ret < 0)
++ goto err_chan;
++
++ pxp_chan->status = PXP_CHANNEL_INITIALIZED;
++
++ dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
++ chan->chan_id, pxp_chan->eof_irq);
++
++ return ret;
++
++err_chan:
++ return ret;
++}
++
++static void pxp_free_chan_resources(struct dma_chan *chan)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
++ spin_lock(&pxp_chan->lock);
++
++ __pxp_terminate_all(chan);
++
++ pxp_chan->status = PXP_CHANNEL_FREE;
++
++ spin_unlock(&pxp_chan->lock);
++}
++
++static enum dma_status pxp_tx_status(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ struct dma_tx_state *txstate)
++{
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++
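++	/* Only the most recently submitted cookie can be queried */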
++ if (cookie != chan->cookie)
++ return DMA_ERROR;
++
++ if (txstate) {
++ txstate->last = pxp_chan->completed;
++ txstate->used = chan->cookie;
++ txstate->residue = 0;
++ }
++ return DMA_COMPLETE;
++}
++
++static int pxp_hw_init(struct pxps *pxp)
++{
++ struct pxp_config_data *pxp_conf = &pxp->pxp_conf_state;
++ struct pxp_proc_data *proc_data = &pxp_conf->proc_data;
++ u32 reg_val;
++
++ /* Pull PxP out of reset */
++ __raw_writel(0, pxp->base + HW_PXP_CTRL);
++
++ /* Config defaults */
++
++ /* Initialize non-channel-specific PxP parameters */
++ proc_data->drect.left = proc_data->srect.left = 0;
++ proc_data->drect.top = proc_data->srect.top = 0;
++ proc_data->drect.width = proc_data->srect.width = 0;
++ proc_data->drect.height = proc_data->srect.height = 0;
++ proc_data->scaling = 0;
++ proc_data->hflip = 0;
++ proc_data->vflip = 0;
++ proc_data->rotate = 0;
++ proc_data->bgcolor = 0;
++
++ /* Initialize S0 channel parameters */
++ pxp_conf->s0_param.pixel_fmt = pxp_s0_formats[0];
++ pxp_conf->s0_param.width = 0;
++ pxp_conf->s0_param.height = 0;
++ pxp_conf->s0_param.color_key = -1;
++ pxp_conf->s0_param.color_key_enable = false;
++
++ /* Initialize OL channel parameters */
++ pxp_conf->ol_param[0].combine_enable = false;
++ pxp_conf->ol_param[0].width = 0;
++ pxp_conf->ol_param[0].height = 0;
++ pxp_conf->ol_param[0].pixel_fmt = PXP_PIX_FMT_RGB565;
++ pxp_conf->ol_param[0].color_key_enable = false;
++ pxp_conf->ol_param[0].color_key = -1;
++ pxp_conf->ol_param[0].global_alpha_enable = false;
++ pxp_conf->ol_param[0].global_alpha = 0;
++ pxp_conf->ol_param[0].local_alpha_enable = false;
++
++ /* Initialize Output channel parameters */
++ pxp_conf->out_param.width = 0;
++ pxp_conf->out_param.height = 0;
++ pxp_conf->out_param.pixel_fmt = PXP_PIX_FMT_RGB565;
++
++ proc_data->overlay_state = 0;
++
++ /* Write default h/w config */
++ pxp_set_ctrl(pxp);
++ pxp_set_s0param(pxp);
++ pxp_set_s0crop(pxp);
++ /*
++	 * Simply program the ULC to a higher value than the LRC
++	 * so that no AS pixels show up in the output buffer.
++ */
++ __raw_writel(0xFFFFFFFF, pxp->base + HW_PXP_OUT_AS_ULC);
++ pxp_set_olparam(0, pxp);
++ pxp_set_olcolorkey(0, pxp);
++
++ pxp_set_s0colorkey(pxp);
++ pxp_set_csc(pxp);
++ pxp_set_bg(pxp);
++ pxp_set_lut(pxp);
++
++ /* One-time histogram configuration */
++ reg_val =
++ BF_PXP_HIST_CTRL_PANEL_MODE(BV_PXP_HIST_CTRL_PANEL_MODE__GRAY16);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST_CTRL);
++
++ reg_val = BF_PXP_HIST2_PARAM_VALUE0(0x00) |
++ BF_PXP_HIST2_PARAM_VALUE1(0x00F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST2_PARAM);
++
++ reg_val = BF_PXP_HIST4_PARAM_VALUE0(0x00) |
++ BF_PXP_HIST4_PARAM_VALUE1(0x05) |
++ BF_PXP_HIST4_PARAM_VALUE2(0x0A) | BF_PXP_HIST4_PARAM_VALUE3(0x0F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST4_PARAM);
++
++ reg_val = BF_PXP_HIST8_PARAM0_VALUE0(0x00) |
++ BF_PXP_HIST8_PARAM0_VALUE1(0x02) |
++ BF_PXP_HIST8_PARAM0_VALUE2(0x04) | BF_PXP_HIST8_PARAM0_VALUE3(0x06);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST8_PARAM0);
++ reg_val = BF_PXP_HIST8_PARAM1_VALUE4(0x09) |
++ BF_PXP_HIST8_PARAM1_VALUE5(0x0B) |
++ BF_PXP_HIST8_PARAM1_VALUE6(0x0D) | BF_PXP_HIST8_PARAM1_VALUE7(0x0F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST8_PARAM1);
++
++ reg_val = BF_PXP_HIST16_PARAM0_VALUE0(0x00) |
++ BF_PXP_HIST16_PARAM0_VALUE1(0x01) |
++ BF_PXP_HIST16_PARAM0_VALUE2(0x02) |
++ BF_PXP_HIST16_PARAM0_VALUE3(0x03);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM0);
++ reg_val = BF_PXP_HIST16_PARAM1_VALUE4(0x04) |
++ BF_PXP_HIST16_PARAM1_VALUE5(0x05) |
++ BF_PXP_HIST16_PARAM1_VALUE6(0x06) |
++ BF_PXP_HIST16_PARAM1_VALUE7(0x07);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM1);
++ reg_val = BF_PXP_HIST16_PARAM2_VALUE8(0x08) |
++ BF_PXP_HIST16_PARAM2_VALUE9(0x09) |
++ BF_PXP_HIST16_PARAM2_VALUE10(0x0A) |
++ BF_PXP_HIST16_PARAM2_VALUE11(0x0B);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM2);
++ reg_val = BF_PXP_HIST16_PARAM3_VALUE12(0x0C) |
++ BF_PXP_HIST16_PARAM3_VALUE13(0x0D) |
++ BF_PXP_HIST16_PARAM3_VALUE14(0x0E) |
++ BF_PXP_HIST16_PARAM3_VALUE15(0x0F);
++ __raw_writel(reg_val, pxp->base + HW_PXP_HIST16_PARAM3);
++
++ return 0;
++}
++
++static int pxp_dma_init(struct pxps *pxp)
++{
++ struct pxp_dma *pxp_dma = &pxp->pxp_dma;
++ struct dma_device *dma = &pxp_dma->dma;
++ int i;
++
++ dma_cap_set(DMA_SLAVE, dma->cap_mask);
++ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
++
++ /* Compulsory common fields */
++ dma->dev = pxp->dev;
++ dma->device_alloc_chan_resources = pxp_alloc_chan_resources;
++ dma->device_free_chan_resources = pxp_free_chan_resources;
++ dma->device_tx_status = pxp_tx_status;
++ dma->device_issue_pending = pxp_issue_pending;
++
++ /* Compulsory for DMA_SLAVE fields */
++ dma->device_prep_slave_sg = pxp_prep_slave_sg;
++ dma->device_control = pxp_control;
++
++ /* Initialize PxP Channels */
++ INIT_LIST_HEAD(&dma->channels);
++ for (i = 0; i < NR_PXP_VIRT_CHANNEL; i++) {
++ struct pxp_channel *pxp_chan = pxp->channel + i;
++ struct dma_chan *dma_chan = &pxp_chan->dma_chan;
++
++ spin_lock_init(&pxp_chan->lock);
++
++ /* Only one EOF IRQ for PxP, shared by all channels */
++ pxp_chan->eof_irq = pxp->irq;
++ pxp_chan->status = PXP_CHANNEL_FREE;
++ pxp_chan->completed = -ENXIO;
++ snprintf(pxp_chan->eof_name, sizeof(pxp_chan->eof_name),
++ "PXP EOF %d", i);
++
++ dma_chan->device = &pxp_dma->dma;
++ dma_chan->cookie = 1;
++ dma_chan->chan_id = i;
++ list_add_tail(&dma_chan->device_node, &dma->channels);
++ }
++
++ return dma_async_device_register(&pxp_dma->dma);
++}
++
++static ssize_t clk_off_timeout_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", timeout_in_ms);
++}
++
++static ssize_t clk_off_timeout_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int val;
++ if (sscanf(buf, "%d", &val) > 0) {
++ timeout_in_ms = val;
++ return count;
++ }
++ return -EINVAL;
++}
++
++static DEVICE_ATTR(clk_off_timeout, 0644, clk_off_timeout_show,
++ clk_off_timeout_store);
++
++static ssize_t block_size_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return sprintf(buf, "%d\n", block_size);
++}
++
++static ssize_t block_size_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ char **last = NULL;
++
++ block_size = simple_strtoul(buf, last, 0);
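++	/* Only 0 (8x8 blocks) and 1 (16x16 blocks) are valid */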
++ if (block_size > 1)
++ block_size = 1;
++
++ return count;
++}
++static DEVICE_ATTR(block_size, S_IWUSR | S_IRUGO,
++ block_size_show, block_size_store);
++
++static const struct of_device_id imx_pxpdma_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-pxp-dma", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_pxpdma_dt_ids);
++
++static int has_pending_task(struct pxps *pxp, struct pxp_channel *task)
++{
++ int found;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pxp->lock, flags);
++ found = !list_empty(&head);
++ spin_unlock_irqrestore(&pxp->lock, flags);
++
++ return found;
++}
++
++static int pxp_dispatch_thread(void *argv)
++{
++ struct pxps *pxp = (struct pxps *)argv;
++ struct pxp_channel *pending = NULL;
++ unsigned long flags;
++
++ while (!kthread_should_stop()) {
++ int ret;
++ ret = wait_event_interruptible(pxp->thread_waitq,
++ has_pending_task(pxp, pending));
++ if (signal_pending(current))
++ continue;
++
++ if (kthread_should_stop())
++ break;
++
++ spin_lock_irqsave(&pxp->lock, flags);
++ pxp->pxp_ongoing = 1;
++ spin_unlock_irqrestore(&pxp->lock, flags);
++ init_completion(&pxp->complete);
++ pxpdma_dostart_work(pxp);
++ ret = wait_for_completion_timeout(&pxp->complete, 2 * HZ);
++ if (ret == 0) {
++			printk(KERN_EMERG "%s: task timed out\n", __func__);
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static int pxp_probe(struct platform_device *pdev)
++{
++ struct pxps *pxp;
++ struct resource *res;
++ int irq;
++ int err = 0;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ irq = platform_get_irq(pdev, 0);
++ if (!res || irq < 0) {
++ err = -ENODEV;
++ goto exit;
++ }
++
++ pxp = devm_kzalloc(&pdev->dev, sizeof(*pxp), GFP_KERNEL);
++ if (!pxp) {
++ dev_err(&pdev->dev, "failed to allocate control object\n");
++ err = -ENOMEM;
++ goto exit;
++ }
++
++ pxp->dev = &pdev->dev;
++
++ platform_set_drvdata(pdev, pxp);
++ pxp->irq = irq;
++
++ pxp->pxp_ongoing = 0;
++ pxp->lut_state = 0;
++
++ spin_lock_init(&pxp->lock);
++ mutex_init(&pxp->clk_mutex);
++
++ pxp->base = devm_request_and_ioremap(&pdev->dev, res);
++ if (pxp->base == NULL) {
++ dev_err(&pdev->dev, "Couldn't ioremap regs\n");
++ err = -ENODEV;
++ goto exit;
++ }
++
++ pxp->pdev = pdev;
++
++ pxp->clk = devm_clk_get(&pdev->dev, "pxp-axi");
++ clk_prepare_enable(pxp->clk);
++
++ err = pxp_hw_init(pxp);
++ clk_disable_unprepare(pxp->clk);
++ if (err) {
++ dev_err(&pdev->dev, "failed to initialize hardware\n");
++ goto exit;
++ }
++
++ err = devm_request_irq(&pdev->dev, pxp->irq, pxp_irq, 0,
++ "pxp-dmaengine", pxp);
++ if (err)
++ goto exit;
++ /* Initialize DMA engine */
++ err = pxp_dma_init(pxp);
++ if (err < 0)
++ goto exit;
++
++ if (device_create_file(&pdev->dev, &dev_attr_clk_off_timeout)) {
++ dev_err(&pdev->dev,
++			"Unable to create file for clk_off_timeout\n");
++ goto exit;
++ }
++
++ device_create_file(&pdev->dev, &dev_attr_block_size);
++ dump_pxp_reg(pxp);
++
++ INIT_WORK(&pxp->work, clkoff_callback);
++ init_timer(&pxp->clk_timer);
++ pxp->clk_timer.function = pxp_clkoff_timer;
++ pxp->clk_timer.data = (unsigned long)pxp;
++
++ /* allocate a kernel thread to dispatch pxp conf */
++ pxp->dispatch = kthread_run(pxp_dispatch_thread, pxp, "pxp_dispatch");
++ if (IS_ERR(pxp->dispatch)) {
++ err = PTR_ERR(pxp->dispatch);
++ goto exit;
++ }
++ init_waitqueue_head(&pxp->thread_waitq);
++ tx_desc_cache = kmem_cache_create("tx_desc", sizeof(struct pxp_tx_desc),
++ 0, SLAB_HWCACHE_ALIGN, NULL);
++ if (!tx_desc_cache) {
++ err = -ENOMEM;
++ goto exit;
++ }
++
++ register_pxp_device();
++
++exit:
++ if (err)
++ dev_err(&pdev->dev, "Exiting (unsuccessfully) pxp_probe()\n");
++ return err;
++}
++
++static int pxp_remove(struct platform_device *pdev)
++{
++ struct pxps *pxp = platform_get_drvdata(pdev);
++
++ unregister_pxp_device();
++ kmem_cache_destroy(tx_desc_cache);
++ kthread_stop(pxp->dispatch);
++ cancel_work_sync(&pxp->work);
++ del_timer_sync(&pxp->clk_timer);
++ clk_disable_unprepare(pxp->clk);
++ device_remove_file(&pdev->dev, &dev_attr_clk_off_timeout);
++ device_remove_file(&pdev->dev, &dev_attr_block_size);
++ dma_async_device_unregister(&(pxp->pxp_dma.dma));
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int pxp_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct pxps *pxp = platform_get_drvdata(pdev);
++
++ pxp_clk_enable(pxp);
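++	/* Wait for any operation still in flight to finish before soft reset */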
++ while (__raw_readl(pxp->base + HW_PXP_CTRL) & BM_PXP_CTRL_ENABLE)
++ ;
++
++ __raw_writel(BM_PXP_CTRL_SFTRST, pxp->base + HW_PXP_CTRL);
++ pxp_clk_disable(pxp);
++
++ return 0;
++}
++
++static int pxp_resume(struct platform_device *pdev)
++{
++ struct pxps *pxp = platform_get_drvdata(pdev);
++
++ pxp_clk_enable(pxp);
++ /* Pull PxP out of reset */
++ __raw_writel(0, pxp->base + HW_PXP_CTRL);
++ pxp_clk_disable(pxp);
++
++ return 0;
++}
++#else
++#define pxp_suspend NULL
++#define pxp_resume NULL
++#endif
++
++static struct platform_driver pxp_driver = {
++ .driver = {
++ .name = "imx-pxp",
++ .of_match_table = of_match_ptr(imx_pxpdma_dt_ids),
++ },
++ .probe = pxp_probe,
++ .remove = pxp_remove,
++ .suspend = pxp_suspend,
++ .resume = pxp_resume,
++};
++
++module_platform_driver(pxp_driver);
++
++MODULE_DESCRIPTION("i.MX PxP driver");
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/dma/pxp/regs-pxp_v2.h linux-3.14.40/drivers/dma/pxp/regs-pxp_v2.h
+--- linux-3.14.40.orig/drivers/dma/pxp/regs-pxp_v2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/dma/pxp/regs-pxp_v2.h 2015-05-01 14:57:58.975427001 -0500
+@@ -0,0 +1,1152 @@
++/*
++ * Freescale PXP Register Definitions
++ *
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * This file is generated from an XML file. Do not edit it.
++ *
++ * Xml Revision: 1.29
++ * Template revision: 1.3
++ */
++
++#ifndef __ARCH_ARM___PXP_H
++#define __ARCH_ARM___PXP_H
++
++#define HW_PXP_CTRL (0x00000000)
++#define HW_PXP_CTRL_SET (0x00000004)
++#define HW_PXP_CTRL_CLR (0x00000008)
++#define HW_PXP_CTRL_TOG (0x0000000c)
++
++#define BM_PXP_CTRL_SFTRST 0x80000000
++#define BM_PXP_CTRL_CLKGATE 0x40000000
++#define BM_PXP_CTRL_RSVD4 0x20000000
++#define BM_PXP_CTRL_EN_REPEAT 0x10000000
++#define BP_PXP_CTRL_RSVD3 26
++#define BM_PXP_CTRL_RSVD3 0x0C000000
++#define BF_PXP_CTRL_RSVD3(v) \
++ (((v) << 26) & BM_PXP_CTRL_RSVD3)
++#define BP_PXP_CTRL_INTERLACED_INPUT 24
++#define BM_PXP_CTRL_INTERLACED_INPUT 0x03000000
++#define BF_PXP_CTRL_INTERLACED_INPUT(v) \
++ (((v) << 24) & BM_PXP_CTRL_INTERLACED_INPUT)
++#define BV_PXP_CTRL_INTERLACED_INPUT__PROGRESSIVE 0x0
++#define BV_PXP_CTRL_INTERLACED_INPUT__FIELD0 0x2
++#define BV_PXP_CTRL_INTERLACED_INPUT__FIELD1 0x3
++#define BM_PXP_CTRL_BLOCK_SIZE 0x00800000
++#define BV_PXP_CTRL_BLOCK_SIZE__8X8 0x0
++#define BV_PXP_CTRL_BLOCK_SIZE__16X16 0x1
++#define BM_PXP_CTRL_ROT_POS 0x00400000
++#define BM_PXP_CTRL_IN_PLACE 0x00200000
++#define BP_PXP_CTRL_RSVD1 12
++#define BM_PXP_CTRL_RSVD1 0x001FF000
++#define BF_PXP_CTRL_RSVD1(v) \
++ (((v) << 12) & BM_PXP_CTRL_RSVD1)
++#define BM_PXP_CTRL_VFLIP 0x00000800
++#define BM_PXP_CTRL_HFLIP 0x00000400
++#define BP_PXP_CTRL_ROTATE 8
++#define BM_PXP_CTRL_ROTATE 0x00000300
++#define BF_PXP_CTRL_ROTATE(v) \
++ (((v) << 8) & BM_PXP_CTRL_ROTATE)
++#define BV_PXP_CTRL_ROTATE__ROT_0 0x0
++#define BV_PXP_CTRL_ROTATE__ROT_90 0x1
++#define BV_PXP_CTRL_ROTATE__ROT_180 0x2
++#define BV_PXP_CTRL_ROTATE__ROT_270 0x3
++#define BP_PXP_CTRL_RSVD0 5
++#define BM_PXP_CTRL_RSVD0 0x000000E0
++#define BF_PXP_CTRL_RSVD0(v) \
++ (((v) << 5) & BM_PXP_CTRL_RSVD0)
++#define BM_PXP_CTRL_ENABLE_LCD_HANDSHAKE 0x00000010
++#define BM_PXP_CTRL_LUT_DMA_IRQ_ENABLE 0x00000008
++#define BM_PXP_CTRL_NEXT_IRQ_ENABLE 0x00000004
++#define BM_PXP_CTRL_IRQ_ENABLE 0x00000002
++#define BM_PXP_CTRL_ENABLE 0x00000001
++
++#define HW_PXP_STAT (0x00000010)
++#define HW_PXP_STAT_SET (0x00000014)
++#define HW_PXP_STAT_CLR (0x00000018)
++#define HW_PXP_STAT_TOG (0x0000001c)
++
++#define BP_PXP_STAT_BLOCKX 24
++#define BM_PXP_STAT_BLOCKX 0xFF000000
++#define BF_PXP_STAT_BLOCKX(v) \
++ (((v) << 24) & BM_PXP_STAT_BLOCKX)
++#define BP_PXP_STAT_BLOCKY 16
++#define BM_PXP_STAT_BLOCKY 0x00FF0000
++#define BF_PXP_STAT_BLOCKY(v) \
++ (((v) << 16) & BM_PXP_STAT_BLOCKY)
++#define BP_PXP_STAT_RSVD2 9
++#define BM_PXP_STAT_RSVD2 0x0000FE00
++#define BF_PXP_STAT_RSVD2(v) \
++ (((v) << 9) & BM_PXP_STAT_RSVD2)
++#define BM_PXP_STAT_LUT_DMA_LOAD_DONE_IRQ 0x00000100
++#define BP_PXP_STAT_AXI_ERROR_ID 4
++#define BM_PXP_STAT_AXI_ERROR_ID 0x000000F0
++#define BF_PXP_STAT_AXI_ERROR_ID(v) \
++ (((v) << 4) & BM_PXP_STAT_AXI_ERROR_ID)
++#define BM_PXP_STAT_NEXT_IRQ 0x00000008
++#define BM_PXP_STAT_AXI_READ_ERROR 0x00000004
++#define BM_PXP_STAT_AXI_WRITE_ERROR 0x00000002
++#define BM_PXP_STAT_IRQ 0x00000001
++
++#define HW_PXP_OUT_CTRL (0x00000020)
++#define HW_PXP_OUT_CTRL_SET (0x00000024)
++#define HW_PXP_OUT_CTRL_CLR (0x00000028)
++#define HW_PXP_OUT_CTRL_TOG (0x0000002c)
++
++#define BP_PXP_OUT_CTRL_ALPHA 24
++#define BM_PXP_OUT_CTRL_ALPHA 0xFF000000
++#define BF_PXP_OUT_CTRL_ALPHA(v) \
++ (((v) << 24) & BM_PXP_OUT_CTRL_ALPHA)
++#define BM_PXP_OUT_CTRL_ALPHA_OUTPUT 0x00800000
++#define BP_PXP_OUT_CTRL_RSVD1 10
++#define BM_PXP_OUT_CTRL_RSVD1 0x007FFC00
++#define BF_PXP_OUT_CTRL_RSVD1(v) \
++ (((v) << 10) & BM_PXP_OUT_CTRL_RSVD1)
++#define BP_PXP_OUT_CTRL_INTERLACED_OUTPUT 8
++#define BM_PXP_OUT_CTRL_INTERLACED_OUTPUT 0x00000300
++#define BF_PXP_OUT_CTRL_INTERLACED_OUTPUT(v) \
++ (((v) << 8) & BM_PXP_OUT_CTRL_INTERLACED_OUTPUT)
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__PROGRESSIVE 0x0
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__FIELD0 0x1
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__FIELD1 0x2
++#define BV_PXP_OUT_CTRL_INTERLACED_OUTPUT__INTERLACED 0x3
++#define BP_PXP_OUT_CTRL_RSVD0 5
++#define BM_PXP_OUT_CTRL_RSVD0 0x000000E0
++#define BF_PXP_OUT_CTRL_RSVD0(v) \
++ (((v) << 5) & BM_PXP_OUT_CTRL_RSVD0)
++#define BP_PXP_OUT_CTRL_FORMAT 0
++#define BM_PXP_OUT_CTRL_FORMAT 0x0000001F
++#define BF_PXP_OUT_CTRL_FORMAT(v) \
++ (((v) << 0) & BM_PXP_OUT_CTRL_FORMAT)
++#define BV_PXP_OUT_CTRL_FORMAT__ARGB8888 0x0
++#define BV_PXP_OUT_CTRL_FORMAT__RGB888 0x4
++#define BV_PXP_OUT_CTRL_FORMAT__RGB888P 0x5
++#define BV_PXP_OUT_CTRL_FORMAT__ARGB1555 0x8
++#define BV_PXP_OUT_CTRL_FORMAT__ARGB4444 0x9
++#define BV_PXP_OUT_CTRL_FORMAT__RGB555 0xC
++#define BV_PXP_OUT_CTRL_FORMAT__RGB444 0xD
++#define BV_PXP_OUT_CTRL_FORMAT__RGB565 0xE
++#define BV_PXP_OUT_CTRL_FORMAT__YUV1P444 0x10
++#define BV_PXP_OUT_CTRL_FORMAT__UYVY1P422 0x12
++#define BV_PXP_OUT_CTRL_FORMAT__VYUY1P422 0x13
++#define BV_PXP_OUT_CTRL_FORMAT__Y8 0x14
++#define BV_PXP_OUT_CTRL_FORMAT__Y4 0x15
++#define BV_PXP_OUT_CTRL_FORMAT__YUV2P422 0x18
++#define BV_PXP_OUT_CTRL_FORMAT__YUV2P420 0x19
++#define BV_PXP_OUT_CTRL_FORMAT__YVU2P422 0x1A
++#define BV_PXP_OUT_CTRL_FORMAT__YVU2P420 0x1B
++
++#define HW_PXP_OUT_BUF (0x00000030)
++
++#define BP_PXP_OUT_BUF_ADDR 0
++#define BM_PXP_OUT_BUF_ADDR 0xFFFFFFFF
++#define BF_PXP_OUT_BUF_ADDR(v) (v)
++
++#define HW_PXP_OUT_BUF2 (0x00000040)
++
++#define BP_PXP_OUT_BUF2_ADDR 0
++#define BM_PXP_OUT_BUF2_ADDR 0xFFFFFFFF
++#define BF_PXP_OUT_BUF2_ADDR(v) (v)
++
++#define HW_PXP_OUT_PITCH (0x00000050)
++
++#define BP_PXP_OUT_PITCH_RSVD 16
++#define BM_PXP_OUT_PITCH_RSVD 0xFFFF0000
++#define BF_PXP_OUT_PITCH_RSVD(v) \
++ (((v) << 16) & BM_PXP_OUT_PITCH_RSVD)
++#define BP_PXP_OUT_PITCH_PITCH 0
++#define BM_PXP_OUT_PITCH_PITCH 0x0000FFFF
++#define BF_PXP_OUT_PITCH_PITCH(v) \
++ (((v) << 0) & BM_PXP_OUT_PITCH_PITCH)
++
++#define HW_PXP_OUT_LRC (0x00000060)
++
++#define BP_PXP_OUT_LRC_RSVD1 30
++#define BM_PXP_OUT_LRC_RSVD1 0xC0000000
++#define BF_PXP_OUT_LRC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_LRC_RSVD1)
++#define BP_PXP_OUT_LRC_X 16
++#define BM_PXP_OUT_LRC_X 0x3FFF0000
++#define BF_PXP_OUT_LRC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_LRC_X)
++#define BP_PXP_OUT_LRC_RSVD0 14
++#define BM_PXP_OUT_LRC_RSVD0 0x0000C000
++#define BF_PXP_OUT_LRC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_LRC_RSVD0)
++#define BP_PXP_OUT_LRC_Y 0
++#define BM_PXP_OUT_LRC_Y 0x00003FFF
++#define BF_PXP_OUT_LRC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_LRC_Y)
++
++#define HW_PXP_OUT_PS_ULC (0x00000070)
++
++#define BP_PXP_OUT_PS_ULC_RSVD1 30
++#define BM_PXP_OUT_PS_ULC_RSVD1 0xC0000000
++#define BF_PXP_OUT_PS_ULC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_PS_ULC_RSVD1)
++#define BP_PXP_OUT_PS_ULC_X 16
++#define BM_PXP_OUT_PS_ULC_X 0x3FFF0000
++#define BF_PXP_OUT_PS_ULC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_PS_ULC_X)
++#define BP_PXP_OUT_PS_ULC_RSVD0 14
++#define BM_PXP_OUT_PS_ULC_RSVD0 0x0000C000
++#define BF_PXP_OUT_PS_ULC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_PS_ULC_RSVD0)
++#define BP_PXP_OUT_PS_ULC_Y 0
++#define BM_PXP_OUT_PS_ULC_Y 0x00003FFF
++#define BF_PXP_OUT_PS_ULC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_PS_ULC_Y)
++
++#define HW_PXP_OUT_PS_LRC (0x00000080)
++
++#define BP_PXP_OUT_PS_LRC_RSVD1 30
++#define BM_PXP_OUT_PS_LRC_RSVD1 0xC0000000
++#define BF_PXP_OUT_PS_LRC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_PS_LRC_RSVD1)
++#define BP_PXP_OUT_PS_LRC_X 16
++#define BM_PXP_OUT_PS_LRC_X 0x3FFF0000
++#define BF_PXP_OUT_PS_LRC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_PS_LRC_X)
++#define BP_PXP_OUT_PS_LRC_RSVD0 14
++#define BM_PXP_OUT_PS_LRC_RSVD0 0x0000C000
++#define BF_PXP_OUT_PS_LRC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_PS_LRC_RSVD0)
++#define BP_PXP_OUT_PS_LRC_Y 0
++#define BM_PXP_OUT_PS_LRC_Y 0x00003FFF
++#define BF_PXP_OUT_PS_LRC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_PS_LRC_Y)
++
++#define HW_PXP_OUT_AS_ULC (0x00000090)
++
++#define BP_PXP_OUT_AS_ULC_RSVD1 30
++#define BM_PXP_OUT_AS_ULC_RSVD1 0xC0000000
++#define BF_PXP_OUT_AS_ULC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_AS_ULC_RSVD1)
++#define BP_PXP_OUT_AS_ULC_X 16
++#define BM_PXP_OUT_AS_ULC_X 0x3FFF0000
++#define BF_PXP_OUT_AS_ULC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_AS_ULC_X)
++#define BP_PXP_OUT_AS_ULC_RSVD0 14
++#define BM_PXP_OUT_AS_ULC_RSVD0 0x0000C000
++#define BF_PXP_OUT_AS_ULC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_AS_ULC_RSVD0)
++#define BP_PXP_OUT_AS_ULC_Y 0
++#define BM_PXP_OUT_AS_ULC_Y 0x00003FFF
++#define BF_PXP_OUT_AS_ULC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_AS_ULC_Y)
++
++#define HW_PXP_OUT_AS_LRC (0x000000a0)
++
++#define BP_PXP_OUT_AS_LRC_RSVD1 30
++#define BM_PXP_OUT_AS_LRC_RSVD1 0xC0000000
++#define BF_PXP_OUT_AS_LRC_RSVD1(v) \
++ (((v) << 30) & BM_PXP_OUT_AS_LRC_RSVD1)
++#define BP_PXP_OUT_AS_LRC_X 16
++#define BM_PXP_OUT_AS_LRC_X 0x3FFF0000
++#define BF_PXP_OUT_AS_LRC_X(v) \
++ (((v) << 16) & BM_PXP_OUT_AS_LRC_X)
++#define BP_PXP_OUT_AS_LRC_RSVD0 14
++#define BM_PXP_OUT_AS_LRC_RSVD0 0x0000C000
++#define BF_PXP_OUT_AS_LRC_RSVD0(v) \
++ (((v) << 14) & BM_PXP_OUT_AS_LRC_RSVD0)
++#define BP_PXP_OUT_AS_LRC_Y 0
++#define BM_PXP_OUT_AS_LRC_Y 0x00003FFF
++#define BF_PXP_OUT_AS_LRC_Y(v) \
++ (((v) << 0) & BM_PXP_OUT_AS_LRC_Y)
++
++#define HW_PXP_PS_CTRL (0x000000b0)
++#define HW_PXP_PS_CTRL_SET (0x000000b4)
++#define HW_PXP_PS_CTRL_CLR (0x000000b8)
++#define HW_PXP_PS_CTRL_TOG (0x000000bc)
++
++#define BP_PXP_PS_CTRL_RSVD1 12
++#define BM_PXP_PS_CTRL_RSVD1 0xFFFFF000
++#define BF_PXP_PS_CTRL_RSVD1(v) \
++ (((v) << 12) & BM_PXP_PS_CTRL_RSVD1)
++#define BP_PXP_PS_CTRL_DECX 10
++#define BM_PXP_PS_CTRL_DECX 0x00000C00
++#define BF_PXP_PS_CTRL_DECX(v) \
++ (((v) << 10) & BM_PXP_PS_CTRL_DECX)
++#define BV_PXP_PS_CTRL_DECX__DISABLE 0x0
++#define BV_PXP_PS_CTRL_DECX__DECX2 0x1
++#define BV_PXP_PS_CTRL_DECX__DECX4 0x2
++#define BV_PXP_PS_CTRL_DECX__DECX8 0x3
++#define BP_PXP_PS_CTRL_DECY 8
++#define BM_PXP_PS_CTRL_DECY 0x00000300
++#define BF_PXP_PS_CTRL_DECY(v) \
++ (((v) << 8) & BM_PXP_PS_CTRL_DECY)
++#define BV_PXP_PS_CTRL_DECY__DISABLE 0x0
++#define BV_PXP_PS_CTRL_DECY__DECY2 0x1
++#define BV_PXP_PS_CTRL_DECY__DECY4 0x2
++#define BV_PXP_PS_CTRL_DECY__DECY8 0x3
++#define BP_PXP_PS_CTRL_SWAP 5
++#define BM_PXP_PS_CTRL_SWAP 0x000000E0
++#define BF_PXP_PS_CTRL_SWAP(v) \
++ (((v) << 5) & BM_PXP_PS_CTRL_SWAP)
++#define BP_PXP_PS_CTRL_FORMAT 0
++#define BM_PXP_PS_CTRL_FORMAT 0x0000001F
++#define BF_PXP_PS_CTRL_FORMAT(v) \
++ (((v) << 0) & BM_PXP_PS_CTRL_FORMAT)
++#define BV_PXP_PS_CTRL_FORMAT__RGB888 0x4
++#define BV_PXP_PS_CTRL_FORMAT__RGB555 0xC
++#define BV_PXP_PS_CTRL_FORMAT__RGB444 0xD
++#define BV_PXP_PS_CTRL_FORMAT__RGB565 0xE
++#define BV_PXP_PS_CTRL_FORMAT__YUV1P444 0x10
++#define BV_PXP_PS_CTRL_FORMAT__UYVY1P422 0x12
++#define BV_PXP_PS_CTRL_FORMAT__VYUY1P422 0x13
++#define BV_PXP_PS_CTRL_FORMAT__Y8 0x14
++#define BV_PXP_PS_CTRL_FORMAT__Y4 0x15
++#define BV_PXP_PS_CTRL_FORMAT__YUV2P422 0x18
++#define BV_PXP_PS_CTRL_FORMAT__YUV2P420 0x19
++#define BV_PXP_PS_CTRL_FORMAT__YVU2P422 0x1A
++#define BV_PXP_PS_CTRL_FORMAT__YVU2P420 0x1B
++#define BV_PXP_PS_CTRL_FORMAT__YUV422 0x1E
++#define BV_PXP_PS_CTRL_FORMAT__YUV420 0x1F
++
++#define HW_PXP_PS_BUF (0x000000c0)
++
++#define BP_PXP_PS_BUF_ADDR 0
++#define BM_PXP_PS_BUF_ADDR 0xFFFFFFFF
++#define BF_PXP_PS_BUF_ADDR(v) (v)
++
++#define HW_PXP_PS_UBUF (0x000000d0)
++
++#define BP_PXP_PS_UBUF_ADDR 0
++#define BM_PXP_PS_UBUF_ADDR 0xFFFFFFFF
++#define BF_PXP_PS_UBUF_ADDR(v) (v)
++
++#define HW_PXP_PS_VBUF (0x000000e0)
++
++#define BP_PXP_PS_VBUF_ADDR 0
++#define BM_PXP_PS_VBUF_ADDR 0xFFFFFFFF
++#define BF_PXP_PS_VBUF_ADDR(v) (v)
++
++#define HW_PXP_PS_PITCH (0x000000f0)
++
++#define BP_PXP_PS_PITCH_RSVD 16
++#define BM_PXP_PS_PITCH_RSVD 0xFFFF0000
++#define BF_PXP_PS_PITCH_RSVD(v) \
++ (((v) << 16) & BM_PXP_PS_PITCH_RSVD)
++#define BP_PXP_PS_PITCH_PITCH 0
++#define BM_PXP_PS_PITCH_PITCH 0x0000FFFF
++#define BF_PXP_PS_PITCH_PITCH(v) \
++ (((v) << 0) & BM_PXP_PS_PITCH_PITCH)
++
++#define HW_PXP_PS_BACKGROUND (0x00000100)
++
++#define BP_PXP_PS_BACKGROUND_RSVD 24
++#define BM_PXP_PS_BACKGROUND_RSVD 0xFF000000
++#define BF_PXP_PS_BACKGROUND_RSVD(v) \
++ (((v) << 24) & BM_PXP_PS_BACKGROUND_RSVD)
++#define BP_PXP_PS_BACKGROUND_COLOR 0
++#define BM_PXP_PS_BACKGROUND_COLOR 0x00FFFFFF
++#define BF_PXP_PS_BACKGROUND_COLOR(v) \
++ (((v) << 0) & BM_PXP_PS_BACKGROUND_COLOR)
++
++#define HW_PXP_PS_SCALE (0x00000110)
++
++#define BM_PXP_PS_SCALE_RSVD2 0x80000000
++#define BP_PXP_PS_SCALE_YSCALE 16
++#define BM_PXP_PS_SCALE_YSCALE 0x7FFF0000
++#define BF_PXP_PS_SCALE_YSCALE(v) \
++ (((v) << 16) & BM_PXP_PS_SCALE_YSCALE)
++#define BM_PXP_PS_SCALE_RSVD1 0x00008000
++#define BP_PXP_PS_SCALE_XSCALE 0
++#define BM_PXP_PS_SCALE_XSCALE 0x00007FFF
++#define BF_PXP_PS_SCALE_XSCALE(v) \
++ (((v) << 0) & BM_PXP_PS_SCALE_XSCALE)
++
++#define HW_PXP_PS_OFFSET (0x00000120)
++
++#define BP_PXP_PS_OFFSET_RSVD2 28
++#define BM_PXP_PS_OFFSET_RSVD2 0xF0000000
++#define BF_PXP_PS_OFFSET_RSVD2(v) \
++ (((v) << 28) & BM_PXP_PS_OFFSET_RSVD2)
++#define BP_PXP_PS_OFFSET_YOFFSET 16
++#define BM_PXP_PS_OFFSET_YOFFSET 0x0FFF0000
++#define BF_PXP_PS_OFFSET_YOFFSET(v) \
++ (((v) << 16) & BM_PXP_PS_OFFSET_YOFFSET)
++#define BP_PXP_PS_OFFSET_RSVD1 12
++#define BM_PXP_PS_OFFSET_RSVD1 0x0000F000
++#define BF_PXP_PS_OFFSET_RSVD1(v) \
++ (((v) << 12) & BM_PXP_PS_OFFSET_RSVD1)
++#define BP_PXP_PS_OFFSET_XOFFSET 0
++#define BM_PXP_PS_OFFSET_XOFFSET 0x00000FFF
++#define BF_PXP_PS_OFFSET_XOFFSET(v) \
++ (((v) << 0) & BM_PXP_PS_OFFSET_XOFFSET)
++
++#define HW_PXP_PS_CLRKEYLOW (0x00000130)
++
++#define BP_PXP_PS_CLRKEYLOW_RSVD1 24
++#define BM_PXP_PS_CLRKEYLOW_RSVD1 0xFF000000
++#define BF_PXP_PS_CLRKEYLOW_RSVD1(v) \
++ (((v) << 24) & BM_PXP_PS_CLRKEYLOW_RSVD1)
++#define BP_PXP_PS_CLRKEYLOW_PIXEL 0
++#define BM_PXP_PS_CLRKEYLOW_PIXEL 0x00FFFFFF
++#define BF_PXP_PS_CLRKEYLOW_PIXEL(v) \
++ (((v) << 0) & BM_PXP_PS_CLRKEYLOW_PIXEL)
++
++#define HW_PXP_PS_CLRKEYHIGH (0x00000140)
++
++#define BP_PXP_PS_CLRKEYHIGH_RSVD1 24
++#define BM_PXP_PS_CLRKEYHIGH_RSVD1 0xFF000000
++#define BF_PXP_PS_CLRKEYHIGH_RSVD1(v) \
++ (((v) << 24) & BM_PXP_PS_CLRKEYHIGH_RSVD1)
++#define BP_PXP_PS_CLRKEYHIGH_PIXEL 0
++#define BM_PXP_PS_CLRKEYHIGH_PIXEL 0x00FFFFFF
++#define BF_PXP_PS_CLRKEYHIGH_PIXEL(v) \
++ (((v) << 0) & BM_PXP_PS_CLRKEYHIGH_PIXEL)
++
++#define HW_PXP_AS_CTRL (0x00000150)
++
++#define BP_PXP_AS_CTRL_RSVD1 21
++#define BM_PXP_AS_CTRL_RSVD1 0xFFE00000
++#define BF_PXP_AS_CTRL_RSVD1(v) \
++ (((v) << 21) & BM_PXP_AS_CTRL_RSVD1)
++#define BM_PXP_AS_CTRL_ALPHA_INVERT 0x00100000
++#define BP_PXP_AS_CTRL_ROP 16
++#define BM_PXP_AS_CTRL_ROP 0x000F0000
++#define BF_PXP_AS_CTRL_ROP(v) \
++ (((v) << 16) & BM_PXP_AS_CTRL_ROP)
++#define BV_PXP_AS_CTRL_ROP__MASKAS 0x0
++#define BV_PXP_AS_CTRL_ROP__MASKNOTAS 0x1
++#define BV_PXP_AS_CTRL_ROP__MASKASNOT 0x2
++#define BV_PXP_AS_CTRL_ROP__MERGEAS 0x3
++#define BV_PXP_AS_CTRL_ROP__MERGENOTAS 0x4
++#define BV_PXP_AS_CTRL_ROP__MERGEASNOT 0x5
++#define BV_PXP_AS_CTRL_ROP__NOTCOPYAS 0x6
++#define BV_PXP_AS_CTRL_ROP__NOT 0x7
++#define BV_PXP_AS_CTRL_ROP__NOTMASKAS 0x8
++#define BV_PXP_AS_CTRL_ROP__NOTMERGEAS 0x9
++#define BV_PXP_AS_CTRL_ROP__XORAS 0xA
++#define BV_PXP_AS_CTRL_ROP__NOTXORAS 0xB
++#define BP_PXP_AS_CTRL_ALPHA 8
++#define BM_PXP_AS_CTRL_ALPHA 0x0000FF00
++#define BF_PXP_AS_CTRL_ALPHA(v) \
++ (((v) << 8) & BM_PXP_AS_CTRL_ALPHA)
++#define BP_PXP_AS_CTRL_FORMAT 4
++#define BM_PXP_AS_CTRL_FORMAT 0x000000F0
++#define BF_PXP_AS_CTRL_FORMAT(v) \
++ (((v) << 4) & BM_PXP_AS_CTRL_FORMAT)
++#define BV_PXP_AS_CTRL_FORMAT__ARGB8888 0x0
++#define BV_PXP_AS_CTRL_FORMAT__RGB888 0x4
++#define BV_PXP_AS_CTRL_FORMAT__ARGB1555 0x8
++#define BV_PXP_AS_CTRL_FORMAT__ARGB4444 0x9
++#define BV_PXP_AS_CTRL_FORMAT__RGB555 0xC
++#define BV_PXP_AS_CTRL_FORMAT__RGB444 0xD
++#define BV_PXP_AS_CTRL_FORMAT__RGB565 0xE
++#define BM_PXP_AS_CTRL_ENABLE_COLORKEY 0x00000008
++#define BP_PXP_AS_CTRL_ALPHA_CTRL 1
++#define BM_PXP_AS_CTRL_ALPHA_CTRL 0x00000006
++#define BF_PXP_AS_CTRL_ALPHA_CTRL(v) \
++ (((v) << 1) & BM_PXP_AS_CTRL_ALPHA_CTRL)
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__Embedded 0x0
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__Override 0x1
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__Multiply 0x2
++#define BV_PXP_AS_CTRL_ALPHA_CTRL__ROPs 0x3
++#define BM_PXP_AS_CTRL_RSVD0 0x00000001
++
++#define HW_PXP_AS_BUF (0x00000160)
++
++#define BP_PXP_AS_BUF_ADDR 0
++#define BM_PXP_AS_BUF_ADDR 0xFFFFFFFF
++#define BF_PXP_AS_BUF_ADDR(v) (v)
++
++#define HW_PXP_AS_PITCH (0x00000170)
++
++#define BP_PXP_AS_PITCH_RSVD 16
++#define BM_PXP_AS_PITCH_RSVD 0xFFFF0000
++#define BF_PXP_AS_PITCH_RSVD(v) \
++ (((v) << 16) & BM_PXP_AS_PITCH_RSVD)
++#define BP_PXP_AS_PITCH_PITCH 0
++#define BM_PXP_AS_PITCH_PITCH 0x0000FFFF
++#define BF_PXP_AS_PITCH_PITCH(v) \
++ (((v) << 0) & BM_PXP_AS_PITCH_PITCH)
++
++#define HW_PXP_AS_CLRKEYLOW (0x00000180)
++
++#define BP_PXP_AS_CLRKEYLOW_RSVD1 24
++#define BM_PXP_AS_CLRKEYLOW_RSVD1 0xFF000000
++#define BF_PXP_AS_CLRKEYLOW_RSVD1(v) \
++ (((v) << 24) & BM_PXP_AS_CLRKEYLOW_RSVD1)
++#define BP_PXP_AS_CLRKEYLOW_PIXEL 0
++#define BM_PXP_AS_CLRKEYLOW_PIXEL 0x00FFFFFF
++#define BF_PXP_AS_CLRKEYLOW_PIXEL(v) \
++ (((v) << 0) & BM_PXP_AS_CLRKEYLOW_PIXEL)
++
++#define HW_PXP_AS_CLRKEYHIGH (0x00000190)
++
++#define BP_PXP_AS_CLRKEYHIGH_RSVD1 24
++#define BM_PXP_AS_CLRKEYHIGH_RSVD1 0xFF000000
++#define BF_PXP_AS_CLRKEYHIGH_RSVD1(v) \
++ (((v) << 24) & BM_PXP_AS_CLRKEYHIGH_RSVD1)
++#define BP_PXP_AS_CLRKEYHIGH_PIXEL 0
++#define BM_PXP_AS_CLRKEYHIGH_PIXEL 0x00FFFFFF
++#define BF_PXP_AS_CLRKEYHIGH_PIXEL(v) \
++ (((v) << 0) & BM_PXP_AS_CLRKEYHIGH_PIXEL)
++
++#define HW_PXP_CSC1_COEF0 (0x000001a0)
++
++#define BM_PXP_CSC1_COEF0_YCBCR_MODE 0x80000000
++#define BM_PXP_CSC1_COEF0_BYPASS 0x40000000
++#define BM_PXP_CSC1_COEF0_RSVD1 0x20000000
++#define BP_PXP_CSC1_COEF0_C0 18
++#define BM_PXP_CSC1_COEF0_C0 0x1FFC0000
++#define BF_PXP_CSC1_COEF0_C0(v) \
++ (((v) << 18) & BM_PXP_CSC1_COEF0_C0)
++#define BP_PXP_CSC1_COEF0_UV_OFFSET 9
++#define BM_PXP_CSC1_COEF0_UV_OFFSET 0x0003FE00
++#define BF_PXP_CSC1_COEF0_UV_OFFSET(v) \
++ (((v) << 9) & BM_PXP_CSC1_COEF0_UV_OFFSET)
++#define BP_PXP_CSC1_COEF0_Y_OFFSET 0
++#define BM_PXP_CSC1_COEF0_Y_OFFSET 0x000001FF
++#define BF_PXP_CSC1_COEF0_Y_OFFSET(v) \
++ (((v) << 0) & BM_PXP_CSC1_COEF0_Y_OFFSET)
++
++#define HW_PXP_CSC1_COEF1 (0x000001b0)
++
++#define BP_PXP_CSC1_COEF1_RSVD1 27
++#define BM_PXP_CSC1_COEF1_RSVD1 0xF8000000
++#define BF_PXP_CSC1_COEF1_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC1_COEF1_RSVD1)
++#define BP_PXP_CSC1_COEF1_C1 16
++#define BM_PXP_CSC1_COEF1_C1 0x07FF0000
++#define BF_PXP_CSC1_COEF1_C1(v) \
++ (((v) << 16) & BM_PXP_CSC1_COEF1_C1)
++#define BP_PXP_CSC1_COEF1_RSVD0 11
++#define BM_PXP_CSC1_COEF1_RSVD0 0x0000F800
++#define BF_PXP_CSC1_COEF1_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC1_COEF1_RSVD0)
++#define BP_PXP_CSC1_COEF1_C4 0
++#define BM_PXP_CSC1_COEF1_C4 0x000007FF
++#define BF_PXP_CSC1_COEF1_C4(v) \
++ (((v) << 0) & BM_PXP_CSC1_COEF1_C4)
++
++#define HW_PXP_CSC1_COEF2 (0x000001c0)
++
++#define BP_PXP_CSC1_COEF2_RSVD1 27
++#define BM_PXP_CSC1_COEF2_RSVD1 0xF8000000
++#define BF_PXP_CSC1_COEF2_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC1_COEF2_RSVD1)
++#define BP_PXP_CSC1_COEF2_C2 16
++#define BM_PXP_CSC1_COEF2_C2 0x07FF0000
++#define BF_PXP_CSC1_COEF2_C2(v) \
++ (((v) << 16) & BM_PXP_CSC1_COEF2_C2)
++#define BP_PXP_CSC1_COEF2_RSVD0 11
++#define BM_PXP_CSC1_COEF2_RSVD0 0x0000F800
++#define BF_PXP_CSC1_COEF2_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC1_COEF2_RSVD0)
++#define BP_PXP_CSC1_COEF2_C3 0
++#define BM_PXP_CSC1_COEF2_C3 0x000007FF
++#define BF_PXP_CSC1_COEF2_C3(v) \
++ (((v) << 0) & BM_PXP_CSC1_COEF2_C3)
++
++#define HW_PXP_CSC2_CTRL (0x000001d0)
++
++#define BP_PXP_CSC2_CTRL_RSVD 3
++#define BM_PXP_CSC2_CTRL_RSVD 0xFFFFFFF8
++#define BF_PXP_CSC2_CTRL_RSVD(v) \
++ (((v) << 3) & BM_PXP_CSC2_CTRL_RSVD)
++#define BP_PXP_CSC2_CTRL_CSC_MODE 1
++#define BM_PXP_CSC2_CTRL_CSC_MODE 0x00000006
++#define BF_PXP_CSC2_CTRL_CSC_MODE(v) \
++ (((v) << 1) & BM_PXP_CSC2_CTRL_CSC_MODE)
++#define BV_PXP_CSC2_CTRL_CSC_MODE__YUV2RGB 0x0
++#define BV_PXP_CSC2_CTRL_CSC_MODE__YCbCr2RGB 0x1
++#define BV_PXP_CSC2_CTRL_CSC_MODE__RGB2YUV 0x2
++#define BV_PXP_CSC2_CTRL_CSC_MODE__RGB2YCbCr 0x3
++#define BM_PXP_CSC2_CTRL_BYPASS 0x00000001
++
++#define HW_PXP_CSC2_COEF0 (0x000001e0)
++
++#define BP_PXP_CSC2_COEF0_RSVD1 27
++#define BM_PXP_CSC2_COEF0_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF0_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF0_RSVD1)
++#define BP_PXP_CSC2_COEF0_A2 16
++#define BM_PXP_CSC2_COEF0_A2 0x07FF0000
++#define BF_PXP_CSC2_COEF0_A2(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF0_A2)
++#define BP_PXP_CSC2_COEF0_RSVD0 11
++#define BM_PXP_CSC2_COEF0_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF0_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF0_RSVD0)
++#define BP_PXP_CSC2_COEF0_A1 0
++#define BM_PXP_CSC2_COEF0_A1 0x000007FF
++#define BF_PXP_CSC2_COEF0_A1(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF0_A1)
++
++#define HW_PXP_CSC2_COEF1 (0x000001f0)
++
++#define BP_PXP_CSC2_COEF1_RSVD1 27
++#define BM_PXP_CSC2_COEF1_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF1_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF1_RSVD1)
++#define BP_PXP_CSC2_COEF1_B1 16
++#define BM_PXP_CSC2_COEF1_B1 0x07FF0000
++#define BF_PXP_CSC2_COEF1_B1(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF1_B1)
++#define BP_PXP_CSC2_COEF1_RSVD0 11
++#define BM_PXP_CSC2_COEF1_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF1_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF1_RSVD0)
++#define BP_PXP_CSC2_COEF1_A3 0
++#define BM_PXP_CSC2_COEF1_A3 0x000007FF
++#define BF_PXP_CSC2_COEF1_A3(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF1_A3)
++
++#define HW_PXP_CSC2_COEF2 (0x00000200)
++
++#define BP_PXP_CSC2_COEF2_RSVD1 27
++#define BM_PXP_CSC2_COEF2_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF2_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF2_RSVD1)
++#define BP_PXP_CSC2_COEF2_B3 16
++#define BM_PXP_CSC2_COEF2_B3 0x07FF0000
++#define BF_PXP_CSC2_COEF2_B3(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF2_B3)
++#define BP_PXP_CSC2_COEF2_RSVD0 11
++#define BM_PXP_CSC2_COEF2_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF2_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF2_RSVD0)
++#define BP_PXP_CSC2_COEF2_B2 0
++#define BM_PXP_CSC2_COEF2_B2 0x000007FF
++#define BF_PXP_CSC2_COEF2_B2(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF2_B2)
++
++#define HW_PXP_CSC2_COEF3 (0x00000210)
++
++#define BP_PXP_CSC2_COEF3_RSVD1 27
++#define BM_PXP_CSC2_COEF3_RSVD1 0xF8000000
++#define BF_PXP_CSC2_COEF3_RSVD1(v) \
++ (((v) << 27) & BM_PXP_CSC2_COEF3_RSVD1)
++#define BP_PXP_CSC2_COEF3_C2 16
++#define BM_PXP_CSC2_COEF3_C2 0x07FF0000
++#define BF_PXP_CSC2_COEF3_C2(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF3_C2)
++#define BP_PXP_CSC2_COEF3_RSVD0 11
++#define BM_PXP_CSC2_COEF3_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF3_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF3_RSVD0)
++#define BP_PXP_CSC2_COEF3_C1 0
++#define BM_PXP_CSC2_COEF3_C1 0x000007FF
++#define BF_PXP_CSC2_COEF3_C1(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF3_C1)
++
++#define HW_PXP_CSC2_COEF4 (0x00000220)
++
++#define BP_PXP_CSC2_COEF4_RSVD1 25
++#define BM_PXP_CSC2_COEF4_RSVD1 0xFE000000
++#define BF_PXP_CSC2_COEF4_RSVD1(v) \
++ (((v) << 25) & BM_PXP_CSC2_COEF4_RSVD1)
++#define BP_PXP_CSC2_COEF4_D1 16
++#define BM_PXP_CSC2_COEF4_D1 0x01FF0000
++#define BF_PXP_CSC2_COEF4_D1(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF4_D1)
++#define BP_PXP_CSC2_COEF4_RSVD0 11
++#define BM_PXP_CSC2_COEF4_RSVD0 0x0000F800
++#define BF_PXP_CSC2_COEF4_RSVD0(v) \
++ (((v) << 11) & BM_PXP_CSC2_COEF4_RSVD0)
++#define BP_PXP_CSC2_COEF4_C3 0
++#define BM_PXP_CSC2_COEF4_C3 0x000007FF
++#define BF_PXP_CSC2_COEF4_C3(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF4_C3)
++
++#define HW_PXP_CSC2_COEF5 (0x00000230)
++
++#define BP_PXP_CSC2_COEF5_RSVD1 25
++#define BM_PXP_CSC2_COEF5_RSVD1 0xFE000000
++#define BF_PXP_CSC2_COEF5_RSVD1(v) \
++ (((v) << 25) & BM_PXP_CSC2_COEF5_RSVD1)
++#define BP_PXP_CSC2_COEF5_D3 16
++#define BM_PXP_CSC2_COEF5_D3 0x01FF0000
++#define BF_PXP_CSC2_COEF5_D3(v) \
++ (((v) << 16) & BM_PXP_CSC2_COEF5_D3)
++#define BP_PXP_CSC2_COEF5_RSVD0 9
++#define BM_PXP_CSC2_COEF5_RSVD0 0x0000FE00
++#define BF_PXP_CSC2_COEF5_RSVD0(v) \
++ (((v) << 9) & BM_PXP_CSC2_COEF5_RSVD0)
++#define BP_PXP_CSC2_COEF5_D2 0
++#define BM_PXP_CSC2_COEF5_D2 0x000001FF
++#define BF_PXP_CSC2_COEF5_D2(v) \
++ (((v) << 0) & BM_PXP_CSC2_COEF5_D2)
++
++#define HW_PXP_LUT_CTRL (0x00000240)
++
++#define BM_PXP_LUT_CTRL_BYPASS 0x80000000
++#define BP_PXP_LUT_CTRL_RSVD3 26
++#define BM_PXP_LUT_CTRL_RSVD3 0x7C000000
++#define BF_PXP_LUT_CTRL_RSVD3(v) \
++ (((v) << 26) & BM_PXP_LUT_CTRL_RSVD3)
++#define BP_PXP_LUT_CTRL_LOOKUP_MODE 24
++#define BM_PXP_LUT_CTRL_LOOKUP_MODE 0x03000000
++#define BF_PXP_LUT_CTRL_LOOKUP_MODE(v) \
++ (((v) << 24) & BM_PXP_LUT_CTRL_LOOKUP_MODE)
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__CACHE_RGB565 0x0
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_Y8 0x1
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_RGB444 0x2
++#define BV_PXP_LUT_CTRL_LOOKUP_MODE__DIRECT_RGB454 0x3
++#define BP_PXP_LUT_CTRL_RSVD2 18
++#define BM_PXP_LUT_CTRL_RSVD2 0x00FC0000
++#define BF_PXP_LUT_CTRL_RSVD2(v) \
++ (((v) << 18) & BM_PXP_LUT_CTRL_RSVD2)
++#define BP_PXP_LUT_CTRL_OUT_MODE 16
++#define BM_PXP_LUT_CTRL_OUT_MODE 0x00030000
++#define BF_PXP_LUT_CTRL_OUT_MODE(v) \
++ (((v) << 16) & BM_PXP_LUT_CTRL_OUT_MODE)
++#define BV_PXP_LUT_CTRL_OUT_MODE__RESERVED 0x0
++#define BV_PXP_LUT_CTRL_OUT_MODE__Y8 0x1
++#define BV_PXP_LUT_CTRL_OUT_MODE__RGBW4444CFA 0x2
++#define BV_PXP_LUT_CTRL_OUT_MODE__RGB888 0x3
++#define BP_PXP_LUT_CTRL_RSVD1 11
++#define BM_PXP_LUT_CTRL_RSVD1 0x0000F800
++#define BF_PXP_LUT_CTRL_RSVD1(v) \
++ (((v) << 11) & BM_PXP_LUT_CTRL_RSVD1)
++#define BM_PXP_LUT_CTRL_SEL_8KB 0x00000400
++#define BM_PXP_LUT_CTRL_LRU_UPD 0x00000200
++#define BM_PXP_LUT_CTRL_INVALID 0x00000100
++#define BP_PXP_LUT_CTRL_RSVD0 1
++#define BM_PXP_LUT_CTRL_RSVD0 0x000000FE
++#define BF_PXP_LUT_CTRL_RSVD0(v) \
++ (((v) << 1) & BM_PXP_LUT_CTRL_RSVD0)
++#define BM_PXP_LUT_CTRL_DMA_START 0x00000001
++
++#define HW_PXP_LUT_ADDR (0x00000250)
++
++#define BM_PXP_LUT_ADDR_RSVD2 0x80000000
++#define BP_PXP_LUT_ADDR_NUM_BYTES 16
++#define BM_PXP_LUT_ADDR_NUM_BYTES 0x7FFF0000
++#define BF_PXP_LUT_ADDR_NUM_BYTES(v) \
++ (((v) << 16) & BM_PXP_LUT_ADDR_NUM_BYTES)
++#define BP_PXP_LUT_ADDR_RSVD1 14
++#define BM_PXP_LUT_ADDR_RSVD1 0x0000C000
++#define BF_PXP_LUT_ADDR_RSVD1(v) \
++ (((v) << 14) & BM_PXP_LUT_ADDR_RSVD1)
++#define BP_PXP_LUT_ADDR_ADDR 0
++#define BM_PXP_LUT_ADDR_ADDR 0x00003FFF
++#define BF_PXP_LUT_ADDR_ADDR(v) \
++ (((v) << 0) & BM_PXP_LUT_ADDR_ADDR)
++
++#define HW_PXP_LUT_DATA (0x00000260)
++
++#define BP_PXP_LUT_DATA_DATA 0
++#define BM_PXP_LUT_DATA_DATA 0xFFFFFFFF
++#define BF_PXP_LUT_DATA_DATA(v) (v)
++
++#define HW_PXP_LUT_EXTMEM (0x00000270)
++
++#define BP_PXP_LUT_EXTMEM_ADDR 0
++#define BM_PXP_LUT_EXTMEM_ADDR 0xFFFFFFFF
++#define BF_PXP_LUT_EXTMEM_ADDR(v) (v)
++
++#define HW_PXP_CFA (0x00000280)
++
++#define BP_PXP_CFA_DATA 0
++#define BM_PXP_CFA_DATA 0xFFFFFFFF
++#define BF_PXP_CFA_DATA(v) (v)
++
++#define HW_PXP_HIST_CTRL (0x00000290)
++
++#define BP_PXP_HIST_CTRL_RSVD 6
++#define BM_PXP_HIST_CTRL_RSVD 0xFFFFFFC0
++#define BF_PXP_HIST_CTRL_RSVD(v) \
++ (((v) << 6) & BM_PXP_HIST_CTRL_RSVD)
++#define BP_PXP_HIST_CTRL_PANEL_MODE 4
++#define BM_PXP_HIST_CTRL_PANEL_MODE 0x00000030
++#define BF_PXP_HIST_CTRL_PANEL_MODE(v) \
++ (((v) << 4) & BM_PXP_HIST_CTRL_PANEL_MODE)
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY4 0x0
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY8 0x1
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY16 0x2
++#define BV_PXP_HIST_CTRL_PANEL_MODE__GRAY32 0x3
++#define BP_PXP_HIST_CTRL_STATUS 0
++#define BM_PXP_HIST_CTRL_STATUS 0x0000000F
++#define BF_PXP_HIST_CTRL_STATUS(v) \
++ (((v) << 0) & BM_PXP_HIST_CTRL_STATUS)
++
++#define HW_PXP_HIST2_PARAM (0x000002a0)
++
++#define BP_PXP_HIST2_PARAM_RSVD 16
++#define BM_PXP_HIST2_PARAM_RSVD 0xFFFF0000
++#define BF_PXP_HIST2_PARAM_RSVD(v) \
++ (((v) << 16) & BM_PXP_HIST2_PARAM_RSVD)
++#define BP_PXP_HIST2_PARAM_RSVD1 13
++#define BM_PXP_HIST2_PARAM_RSVD1 0x0000E000
++#define BF_PXP_HIST2_PARAM_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST2_PARAM_RSVD1)
++#define BP_PXP_HIST2_PARAM_VALUE1 8
++#define BM_PXP_HIST2_PARAM_VALUE1 0x00001F00
++#define BF_PXP_HIST2_PARAM_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST2_PARAM_VALUE1)
++#define BP_PXP_HIST2_PARAM_RSVD0 5
++#define BM_PXP_HIST2_PARAM_RSVD0 0x000000E0
++#define BF_PXP_HIST2_PARAM_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST2_PARAM_RSVD0)
++#define BP_PXP_HIST2_PARAM_VALUE0 0
++#define BM_PXP_HIST2_PARAM_VALUE0 0x0000001F
++#define BF_PXP_HIST2_PARAM_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST2_PARAM_VALUE0)
++
++#define HW_PXP_HIST4_PARAM (0x000002b0)
++
++#define BP_PXP_HIST4_PARAM_RSVD3 29
++#define BM_PXP_HIST4_PARAM_RSVD3 0xE0000000
++#define BF_PXP_HIST4_PARAM_RSVD3(v) \
++ (((v) << 29) & BM_PXP_HIST4_PARAM_RSVD3)
++#define BP_PXP_HIST4_PARAM_VALUE3 24
++#define BM_PXP_HIST4_PARAM_VALUE3 0x1F000000
++#define BF_PXP_HIST4_PARAM_VALUE3(v) \
++ (((v) << 24) & BM_PXP_HIST4_PARAM_VALUE3)
++#define BP_PXP_HIST4_PARAM_RSVD2 21
++#define BM_PXP_HIST4_PARAM_RSVD2 0x00E00000
++#define BF_PXP_HIST4_PARAM_RSVD2(v) \
++ (((v) << 21) & BM_PXP_HIST4_PARAM_RSVD2)
++#define BP_PXP_HIST4_PARAM_VALUE2 16
++#define BM_PXP_HIST4_PARAM_VALUE2 0x001F0000
++#define BF_PXP_HIST4_PARAM_VALUE2(v) \
++ (((v) << 16) & BM_PXP_HIST4_PARAM_VALUE2)
++#define BP_PXP_HIST4_PARAM_RSVD1 13
++#define BM_PXP_HIST4_PARAM_RSVD1 0x0000E000
++#define BF_PXP_HIST4_PARAM_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST4_PARAM_RSVD1)
++#define BP_PXP_HIST4_PARAM_VALUE1 8
++#define BM_PXP_HIST4_PARAM_VALUE1 0x00001F00
++#define BF_PXP_HIST4_PARAM_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST4_PARAM_VALUE1)
++#define BP_PXP_HIST4_PARAM_RSVD0 5
++#define BM_PXP_HIST4_PARAM_RSVD0 0x000000E0
++#define BF_PXP_HIST4_PARAM_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST4_PARAM_RSVD0)
++#define BP_PXP_HIST4_PARAM_VALUE0 0
++#define BM_PXP_HIST4_PARAM_VALUE0 0x0000001F
++#define BF_PXP_HIST4_PARAM_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST4_PARAM_VALUE0)
++
++#define HW_PXP_HIST8_PARAM0 (0x000002c0)
++
++#define BP_PXP_HIST8_PARAM0_RSVD3 29
++#define BM_PXP_HIST8_PARAM0_RSVD3 0xE0000000
++#define BF_PXP_HIST8_PARAM0_RSVD3(v) \
++ (((v) << 29) & BM_PXP_HIST8_PARAM0_RSVD3)
++#define BP_PXP_HIST8_PARAM0_VALUE3 24
++#define BM_PXP_HIST8_PARAM0_VALUE3 0x1F000000
++#define BF_PXP_HIST8_PARAM0_VALUE3(v) \
++ (((v) << 24) & BM_PXP_HIST8_PARAM0_VALUE3)
++#define BP_PXP_HIST8_PARAM0_RSVD2 21
++#define BM_PXP_HIST8_PARAM0_RSVD2 0x00E00000
++#define BF_PXP_HIST8_PARAM0_RSVD2(v) \
++ (((v) << 21) & BM_PXP_HIST8_PARAM0_RSVD2)
++#define BP_PXP_HIST8_PARAM0_VALUE2 16
++#define BM_PXP_HIST8_PARAM0_VALUE2 0x001F0000
++#define BF_PXP_HIST8_PARAM0_VALUE2(v) \
++ (((v) << 16) & BM_PXP_HIST8_PARAM0_VALUE2)
++#define BP_PXP_HIST8_PARAM0_RSVD1 13
++#define BM_PXP_HIST8_PARAM0_RSVD1 0x0000E000
++#define BF_PXP_HIST8_PARAM0_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST8_PARAM0_RSVD1)
++#define BP_PXP_HIST8_PARAM0_VALUE1 8
++#define BM_PXP_HIST8_PARAM0_VALUE1 0x00001F00
++#define BF_PXP_HIST8_PARAM0_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST8_PARAM0_VALUE1)
++#define BP_PXP_HIST8_PARAM0_RSVD0 5
++#define BM_PXP_HIST8_PARAM0_RSVD0 0x000000E0
++#define BF_PXP_HIST8_PARAM0_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST8_PARAM0_RSVD0)
++#define BP_PXP_HIST8_PARAM0_VALUE0 0
++#define BM_PXP_HIST8_PARAM0_VALUE0 0x0000001F
++#define BF_PXP_HIST8_PARAM0_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST8_PARAM0_VALUE0)
++
++#define HW_PXP_HIST8_PARAM1 (0x000002d0)
++
++#define BP_PXP_HIST8_PARAM1_RSVD7 29
++#define BM_PXP_HIST8_PARAM1_RSVD7 0xE0000000
++#define BF_PXP_HIST8_PARAM1_RSVD7(v) \
++ (((v) << 29) & BM_PXP_HIST8_PARAM1_RSVD7)
++#define BP_PXP_HIST8_PARAM1_VALUE7 24
++#define BM_PXP_HIST8_PARAM1_VALUE7 0x1F000000
++#define BF_PXP_HIST8_PARAM1_VALUE7(v) \
++ (((v) << 24) & BM_PXP_HIST8_PARAM1_VALUE7)
++#define BP_PXP_HIST8_PARAM1_RSVD6 21
++#define BM_PXP_HIST8_PARAM1_RSVD6 0x00E00000
++#define BF_PXP_HIST8_PARAM1_RSVD6(v) \
++ (((v) << 21) & BM_PXP_HIST8_PARAM1_RSVD6)
++#define BP_PXP_HIST8_PARAM1_VALUE6 16
++#define BM_PXP_HIST8_PARAM1_VALUE6 0x001F0000
++#define BF_PXP_HIST8_PARAM1_VALUE6(v) \
++ (((v) << 16) & BM_PXP_HIST8_PARAM1_VALUE6)
++#define BP_PXP_HIST8_PARAM1_RSVD5 13
++#define BM_PXP_HIST8_PARAM1_RSVD5 0x0000E000
++#define BF_PXP_HIST8_PARAM1_RSVD5(v) \
++ (((v) << 13) & BM_PXP_HIST8_PARAM1_RSVD5)
++#define BP_PXP_HIST8_PARAM1_VALUE5 8
++#define BM_PXP_HIST8_PARAM1_VALUE5 0x00001F00
++#define BF_PXP_HIST8_PARAM1_VALUE5(v) \
++ (((v) << 8) & BM_PXP_HIST8_PARAM1_VALUE5)
++#define BP_PXP_HIST8_PARAM1_RSVD4 5
++#define BM_PXP_HIST8_PARAM1_RSVD4 0x000000E0
++#define BF_PXP_HIST8_PARAM1_RSVD4(v) \
++ (((v) << 5) & BM_PXP_HIST8_PARAM1_RSVD4)
++#define BP_PXP_HIST8_PARAM1_VALUE4 0
++#define BM_PXP_HIST8_PARAM1_VALUE4 0x0000001F
++#define BF_PXP_HIST8_PARAM1_VALUE4(v) \
++ (((v) << 0) & BM_PXP_HIST8_PARAM1_VALUE4)
++
++#define HW_PXP_HIST16_PARAM0 (0x000002e0)
++
++#define BP_PXP_HIST16_PARAM0_RSVD3 29
++#define BM_PXP_HIST16_PARAM0_RSVD3 0xE0000000
++#define BF_PXP_HIST16_PARAM0_RSVD3(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM0_RSVD3)
++#define BP_PXP_HIST16_PARAM0_VALUE3 24
++#define BM_PXP_HIST16_PARAM0_VALUE3 0x1F000000
++#define BF_PXP_HIST16_PARAM0_VALUE3(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM0_VALUE3)
++#define BP_PXP_HIST16_PARAM0_RSVD2 21
++#define BM_PXP_HIST16_PARAM0_RSVD2 0x00E00000
++#define BF_PXP_HIST16_PARAM0_RSVD2(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM0_RSVD2)
++#define BP_PXP_HIST16_PARAM0_VALUE2 16
++#define BM_PXP_HIST16_PARAM0_VALUE2 0x001F0000
++#define BF_PXP_HIST16_PARAM0_VALUE2(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM0_VALUE2)
++#define BP_PXP_HIST16_PARAM0_RSVD1 13
++#define BM_PXP_HIST16_PARAM0_RSVD1 0x0000E000
++#define BF_PXP_HIST16_PARAM0_RSVD1(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM0_RSVD1)
++#define BP_PXP_HIST16_PARAM0_VALUE1 8
++#define BM_PXP_HIST16_PARAM0_VALUE1 0x00001F00
++#define BF_PXP_HIST16_PARAM0_VALUE1(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM0_VALUE1)
++#define BP_PXP_HIST16_PARAM0_RSVD0 5
++#define BM_PXP_HIST16_PARAM0_RSVD0 0x000000E0
++#define BF_PXP_HIST16_PARAM0_RSVD0(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM0_RSVD0)
++#define BP_PXP_HIST16_PARAM0_VALUE0 0
++#define BM_PXP_HIST16_PARAM0_VALUE0 0x0000001F
++#define BF_PXP_HIST16_PARAM0_VALUE0(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM0_VALUE0)
++
++#define HW_PXP_HIST16_PARAM1 (0x000002f0)
++
++#define BP_PXP_HIST16_PARAM1_RSVD7 29
++#define BM_PXP_HIST16_PARAM1_RSVD7 0xE0000000
++#define BF_PXP_HIST16_PARAM1_RSVD7(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM1_RSVD7)
++#define BP_PXP_HIST16_PARAM1_VALUE7 24
++#define BM_PXP_HIST16_PARAM1_VALUE7 0x1F000000
++#define BF_PXP_HIST16_PARAM1_VALUE7(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM1_VALUE7)
++#define BP_PXP_HIST16_PARAM1_RSVD6 21
++#define BM_PXP_HIST16_PARAM1_RSVD6 0x00E00000
++#define BF_PXP_HIST16_PARAM1_RSVD6(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM1_RSVD6)
++#define BP_PXP_HIST16_PARAM1_VALUE6 16
++#define BM_PXP_HIST16_PARAM1_VALUE6 0x001F0000
++#define BF_PXP_HIST16_PARAM1_VALUE6(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM1_VALUE6)
++#define BP_PXP_HIST16_PARAM1_RSVD5 13
++#define BM_PXP_HIST16_PARAM1_RSVD5 0x0000E000
++#define BF_PXP_HIST16_PARAM1_RSVD5(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM1_RSVD5)
++#define BP_PXP_HIST16_PARAM1_VALUE5 8
++#define BM_PXP_HIST16_PARAM1_VALUE5 0x00001F00
++#define BF_PXP_HIST16_PARAM1_VALUE5(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM1_VALUE5)
++#define BP_PXP_HIST16_PARAM1_RSVD4 5
++#define BM_PXP_HIST16_PARAM1_RSVD4 0x000000E0
++#define BF_PXP_HIST16_PARAM1_RSVD4(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM1_RSVD4)
++#define BP_PXP_HIST16_PARAM1_VALUE4 0
++#define BM_PXP_HIST16_PARAM1_VALUE4 0x0000001F
++#define BF_PXP_HIST16_PARAM1_VALUE4(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM1_VALUE4)
++
++#define HW_PXP_HIST16_PARAM2 (0x00000300)
++
++#define BP_PXP_HIST16_PARAM2_RSVD11 29
++#define BM_PXP_HIST16_PARAM2_RSVD11 0xE0000000
++#define BF_PXP_HIST16_PARAM2_RSVD11(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM2_RSVD11)
++#define BP_PXP_HIST16_PARAM2_VALUE11 24
++#define BM_PXP_HIST16_PARAM2_VALUE11 0x1F000000
++#define BF_PXP_HIST16_PARAM2_VALUE11(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM2_VALUE11)
++#define BP_PXP_HIST16_PARAM2_RSVD10 21
++#define BM_PXP_HIST16_PARAM2_RSVD10 0x00E00000
++#define BF_PXP_HIST16_PARAM2_RSVD10(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM2_RSVD10)
++#define BP_PXP_HIST16_PARAM2_VALUE10 16
++#define BM_PXP_HIST16_PARAM2_VALUE10 0x001F0000
++#define BF_PXP_HIST16_PARAM2_VALUE10(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM2_VALUE10)
++#define BP_PXP_HIST16_PARAM2_RSVD9 13
++#define BM_PXP_HIST16_PARAM2_RSVD9 0x0000E000
++#define BF_PXP_HIST16_PARAM2_RSVD9(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM2_RSVD9)
++#define BP_PXP_HIST16_PARAM2_VALUE9 8
++#define BM_PXP_HIST16_PARAM2_VALUE9 0x00001F00
++#define BF_PXP_HIST16_PARAM2_VALUE9(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM2_VALUE9)
++#define BP_PXP_HIST16_PARAM2_RSVD8 5
++#define BM_PXP_HIST16_PARAM2_RSVD8 0x000000E0
++#define BF_PXP_HIST16_PARAM2_RSVD8(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM2_RSVD8)
++#define BP_PXP_HIST16_PARAM2_VALUE8 0
++#define BM_PXP_HIST16_PARAM2_VALUE8 0x0000001F
++#define BF_PXP_HIST16_PARAM2_VALUE8(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM2_VALUE8)
++
++#define HW_PXP_HIST16_PARAM3 (0x00000310)
++
++#define BP_PXP_HIST16_PARAM3_RSVD15 29
++#define BM_PXP_HIST16_PARAM3_RSVD15 0xE0000000
++#define BF_PXP_HIST16_PARAM3_RSVD15(v) \
++ (((v) << 29) & BM_PXP_HIST16_PARAM3_RSVD15)
++#define BP_PXP_HIST16_PARAM3_VALUE15 24
++#define BM_PXP_HIST16_PARAM3_VALUE15 0x1F000000
++#define BF_PXP_HIST16_PARAM3_VALUE15(v) \
++ (((v) << 24) & BM_PXP_HIST16_PARAM3_VALUE15)
++#define BP_PXP_HIST16_PARAM3_RSVD14 21
++#define BM_PXP_HIST16_PARAM3_RSVD14 0x00E00000
++#define BF_PXP_HIST16_PARAM3_RSVD14(v) \
++ (((v) << 21) & BM_PXP_HIST16_PARAM3_RSVD14)
++#define BP_PXP_HIST16_PARAM3_VALUE14 16
++#define BM_PXP_HIST16_PARAM3_VALUE14 0x001F0000
++#define BF_PXP_HIST16_PARAM3_VALUE14(v) \
++ (((v) << 16) & BM_PXP_HIST16_PARAM3_VALUE14)
++#define BP_PXP_HIST16_PARAM3_RSVD13 13
++#define BM_PXP_HIST16_PARAM3_RSVD13 0x0000E000
++#define BF_PXP_HIST16_PARAM3_RSVD13(v) \
++ (((v) << 13) & BM_PXP_HIST16_PARAM3_RSVD13)
++#define BP_PXP_HIST16_PARAM3_VALUE13 8
++#define BM_PXP_HIST16_PARAM3_VALUE13 0x00001F00
++#define BF_PXP_HIST16_PARAM3_VALUE13(v) \
++ (((v) << 8) & BM_PXP_HIST16_PARAM3_VALUE13)
++#define BP_PXP_HIST16_PARAM3_RSVD12 5
++#define BM_PXP_HIST16_PARAM3_RSVD12 0x000000E0
++#define BF_PXP_HIST16_PARAM3_RSVD12(v) \
++ (((v) << 5) & BM_PXP_HIST16_PARAM3_RSVD12)
++#define BP_PXP_HIST16_PARAM3_VALUE12 0
++#define BM_PXP_HIST16_PARAM3_VALUE12 0x0000001F
++#define BF_PXP_HIST16_PARAM3_VALUE12(v) \
++ (((v) << 0) & BM_PXP_HIST16_PARAM3_VALUE12)
++
++#define HW_PXP_POWER (0x00000320)
++
++#define BP_PXP_POWER_CTRL 12
++#define BM_PXP_POWER_CTRL 0xFFFFF000
++#define BF_PXP_POWER_CTRL(v) \
++ (((v) << 12) & BM_PXP_POWER_CTRL)
++#define BP_PXP_POWER_ROT_MEM_LP_STATE 9
++#define BM_PXP_POWER_ROT_MEM_LP_STATE 0x00000E00
++#define BF_PXP_POWER_ROT_MEM_LP_STATE(v) \
++ (((v) << 9) & BM_PXP_POWER_ROT_MEM_LP_STATE)
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__NONE 0x0
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__LS 0x1
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__DS 0x2
++#define BV_PXP_POWER_ROT_MEM_LP_STATE__SD 0x4
++#define BP_PXP_POWER_LUT_LP_STATE_WAY1_BANKN 6
++#define BM_PXP_POWER_LUT_LP_STATE_WAY1_BANKN 0x000001C0
++#define BF_PXP_POWER_LUT_LP_STATE_WAY1_BANKN(v) \
++ (((v) << 6) & BM_PXP_POWER_LUT_LP_STATE_WAY1_BANKN)
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__NONE 0x0
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__LS 0x1
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__DS 0x2
++#define BV_PXP_POWER_LUT_LP_STATE_WAY1_BANKN__SD 0x4
++#define BP_PXP_POWER_LUT_LP_STATE_WAY0_BANKN 3
++#define BM_PXP_POWER_LUT_LP_STATE_WAY0_BANKN 0x00000038
++#define BF_PXP_POWER_LUT_LP_STATE_WAY0_BANKN(v) \
++ (((v) << 3) & BM_PXP_POWER_LUT_LP_STATE_WAY0_BANKN)
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__NONE 0x0
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__LS 0x1
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__DS 0x2
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANKN__SD 0x4
++#define BP_PXP_POWER_LUT_LP_STATE_WAY0_BANK0 0
++#define BM_PXP_POWER_LUT_LP_STATE_WAY0_BANK0 0x00000007
++#define BF_PXP_POWER_LUT_LP_STATE_WAY0_BANK0(v) \
++ (((v) << 0) & BM_PXP_POWER_LUT_LP_STATE_WAY0_BANK0)
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__NONE 0x0
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__LS 0x1
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__DS 0x2
++#define BV_PXP_POWER_LUT_LP_STATE_WAY0_BANK0__SD 0x4
++
++#define HW_PXP_NEXT (0x00000400)
++
++#define BP_PXP_NEXT_POINTER 2
++#define BM_PXP_NEXT_POINTER 0xFFFFFFFC
++#define BF_PXP_NEXT_POINTER(v) \
++ (((v) << 2) & BM_PXP_NEXT_POINTER)
++#define BM_PXP_NEXT_RSVD 0x00000002
++#define BM_PXP_NEXT_ENABLED 0x00000001
++
++#define HW_PXP_DEBUGCTRL (0x00000410)
++
++#define BP_PXP_DEBUGCTRL_RSVD 12
++#define BM_PXP_DEBUGCTRL_RSVD 0xFFFFF000
++#define BF_PXP_DEBUGCTRL_RSVD(v) \
++ (((v) << 12) & BM_PXP_DEBUGCTRL_RSVD)
++#define BP_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT 8
++#define BM_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT 0x00000F00
++#define BF_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT(v) \
++ (((v) << 8) & BM_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT)
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__NONE 0x0
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__MISS_CNT 0x1
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__HIT_CNT 0x2
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__LAT_CNT 0x4
++#define BV_PXP_DEBUGCTRL_LUT_CLR_STAT_CNT__MAX_LAT 0x8
++#define BP_PXP_DEBUGCTRL_SELECT 0
++#define BM_PXP_DEBUGCTRL_SELECT 0x000000FF
++#define BF_PXP_DEBUGCTRL_SELECT(v) \
++ (((v) << 0) & BM_PXP_DEBUGCTRL_SELECT)
++#define BV_PXP_DEBUGCTRL_SELECT__NONE 0x0
++#define BV_PXP_DEBUGCTRL_SELECT__CTRL 0x1
++#define BV_PXP_DEBUGCTRL_SELECT__PSBUF 0x2
++#define BV_PXP_DEBUGCTRL_SELECT__PSBAX 0x3
++#define BV_PXP_DEBUGCTRL_SELECT__PSBAY 0x4
++#define BV_PXP_DEBUGCTRL_SELECT__ASBUF 0x5
++#define BV_PXP_DEBUGCTRL_SELECT__ROTATION 0x6
++#define BV_PXP_DEBUGCTRL_SELECT__OUTBUF0 0x7
++#define BV_PXP_DEBUGCTRL_SELECT__OUTBUF1 0x8
++#define BV_PXP_DEBUGCTRL_SELECT__OUTBUF2 0x9
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_STAT 0x10
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_MISS 0x11
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_HIT 0x12
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_LAT 0x13
++#define BV_PXP_DEBUGCTRL_SELECT__LUT_MAX_LAT 0x14
++
++#define HW_PXP_DEBUG (0x00000420)
++
++#define BP_PXP_DEBUG_DATA 0
++#define BM_PXP_DEBUG_DATA 0xFFFFFFFF
++#define BF_PXP_DEBUG_DATA(v) (v)
++
++#define HW_PXP_VERSION (0x00000430)
++
++#define BP_PXP_VERSION_MAJOR 24
++#define BM_PXP_VERSION_MAJOR 0xFF000000
++#define BF_PXP_VERSION_MAJOR(v) \
++ (((v) << 24) & BM_PXP_VERSION_MAJOR)
++#define BP_PXP_VERSION_MINOR 16
++#define BM_PXP_VERSION_MINOR 0x00FF0000
++#define BF_PXP_VERSION_MINOR(v) \
++ (((v) << 16) & BM_PXP_VERSION_MINOR)
++#define BP_PXP_VERSION_STEP 0
++#define BM_PXP_VERSION_STEP 0x0000FFFF
++#define BF_PXP_VERSION_STEP(v) \
++ (((v) << 0) & BM_PXP_VERSION_STEP)
++#endif /* __ARCH_ARM___PXP_H */
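The PXP register header above follows the usual i.MX convention: for each field, BP_* is the bit position, BM_* the mask, BF_*(v) shifts and masks a value into place, and BV_* names the legal field values. A minimal sketch of how a driver might program one of these fields, assuming a hypothetical ioremap()'d pxp_base MMIO pointer that is not part of this patch:

	/* Illustrative only: pxp_base is an assumed ioremap()'d PXP MMIO base. */
	u32 val;

	val = readl(pxp_base + HW_PXP_HIST_CTRL);
	val &= ~BM_PXP_HIST_CTRL_PANEL_MODE;	/* clear the PANEL_MODE field */
	val |= BF_PXP_HIST_CTRL_PANEL_MODE(BV_PXP_HIST_CTRL_PANEL_MODE__GRAY16);
	writel(val, pxp_base + HW_PXP_HIST_CTRL);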
+diff -Nur linux-3.14.40.orig/drivers/gpio/gpio-pca953x.c linux-3.14.40/drivers/gpio/gpio-pca953x.c
+--- linux-3.14.40.orig/drivers/gpio/gpio-pca953x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/gpio/gpio-pca953x.c 2015-05-01 14:57:58.999427001 -0500
+@@ -19,6 +19,7 @@
+ #include <linux/irqdomain.h>
+ #include <linux/i2c.h>
+ #include <linux/platform_data/pca953x.h>
++#include <linux/reset.h>
+ #include <linux/slab.h>
+ #ifdef CONFIG_OF_GPIO
+ #include <linux/of_platform.h>
+@@ -741,6 +742,10 @@
+
+ mutex_init(&chip->i2c_lock);
+
++ ret = device_reset(&client->dev);
++ if (ret == -ENODEV)
++ return -EPROBE_DEFER;
++
+ /* initialize cached registers from their original values.
+ * we can't share this chip with another i2c master.
+ */
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/drm_crtc_helper.c linux-3.14.40/drivers/gpu/drm/drm_crtc_helper.c
+--- linux-3.14.40.orig/drivers/gpu/drm/drm_crtc_helper.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/gpu/drm/drm_crtc_helper.c 2015-05-01 14:57:59.031427001 -0500
+@@ -564,7 +564,7 @@
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+- * from userspace or internally e.g. from the fbdev suppport code) in @set, and
++ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper functions for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/drm_prime.c linux-3.14.40/drivers/gpu/drm/drm_prime.c
+--- linux-3.14.40.orig/drivers/gpu/drm/drm_prime.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/gpu/drm/drm_prime.c 2015-05-01 14:57:59.039427001 -0500
+@@ -471,7 +471,7 @@
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c linux-3.14.40/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+--- linux-3.14.40.orig/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c 2015-05-01 14:57:59.051427001 -0500
+@@ -224,7 +224,7 @@
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/Kconfig linux-3.14.40/drivers/gpu/drm/Kconfig
+--- linux-3.14.40.orig/drivers/gpu/drm/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/gpu/drm/Kconfig 2015-05-01 14:57:59.059427001 -0500
+@@ -166,6 +166,13 @@
+ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+ chipset. If M is selected the module will be called savage.
+
++config DRM_VIVANTE
++ tristate "Vivante GCCore"
++ depends on DRM
++ help
++ Choose this option if your system has a Vivante GC core GPU.
++ If M is selected, the module will be called vivante.
++
+ source "drivers/gpu/drm/exynos/Kconfig"
+
+ source "drivers/gpu/drm/vmwgfx/Kconfig"
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/Makefile linux-3.14.40/drivers/gpu/drm/Makefile
+--- linux-3.14.40.orig/drivers/gpu/drm/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/gpu/drm/Makefile 2015-05-01 14:57:59.067427001 -0500
+@@ -1,3 +1,24 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
+ #
+ # Makefile for the drm device driver. This driver provides support for the
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+@@ -35,6 +56,7 @@
+ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+ obj-$(CONFIG_DRM_USB) += drm_usb.o
+ obj-$(CONFIG_DRM_TTM) += ttm/
++obj-$(CONFIG_DRM_VIVANTE) += vivante/
+ obj-$(CONFIG_DRM_TDFX) += tdfx/
+ obj-$(CONFIG_DRM_R128) += r128/
+ obj-$(CONFIG_DRM_RADEON)+= radeon/
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/vivante/Makefile linux-3.14.40/drivers/gpu/drm/vivante/Makefile
+--- linux-3.14.40.orig/drivers/gpu/drm/vivante/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/gpu/drm/vivante/Makefile 2015-05-01 14:57:59.067427001 -0500
+@@ -0,0 +1,29 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y := -Iinclude/drm
++vivante-y := vivante_drv.o
++
++obj-$(CONFIG_DRM_VIVANTE) += vivante.o
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/vivante/vivante_drv.c linux-3.14.40/drivers/gpu/drm/vivante/vivante_drv.c
+--- linux-3.14.40.orig/drivers/gpu/drm/vivante/vivante_drv.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/gpu/drm/vivante/vivante_drv.c 2015-05-01 14:57:59.067427001 -0500
+@@ -0,0 +1,108 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/* vivante_drv.c -- vivante driver -*- linux-c -*-
++ *
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rickard E. (Rik) Faith <faith@valinux.com>
++ * Daryll Strauss <daryll@valinux.com>
++ * Gareth Hughes <gareth@valinux.com>
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++
++#include "drmP.h"
++#include "vivante_drv.h"
++
++#include "drm_pciids.h"
++
++static char platformdevicename[] = "Vivante GCCore";
++static struct platform_device *pplatformdev;
++
++static const struct file_operations viv_driver_fops = {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .unlocked_ioctl = drm_ioctl,
++ .mmap = drm_mmap,
++ .poll = drm_poll,
++ .llseek = noop_llseek,
++};
++
++static struct drm_driver driver = {
++ .fops = &viv_driver_fops,
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = DRIVER_DATE,
++ .major = DRIVER_MAJOR,
++ .minor = DRIVER_MINOR,
++ .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int __init vivante_init(void)
++{
++ int retcode;
++
++ pplatformdev = platform_device_register_simple(platformdevicename,
++ -1, NULL, 0);
++ if (pplatformdev == NULL) {
++ printk(KERN_ERR "Platform device is null\n");
++ return -ENOMEM;
++ }
++
++ retcode = drm_platform_init(&driver, pplatformdev);
++
++ return retcode;
++}
++
++static void __exit vivante_exit(void)
++{
++ if (pplatformdev) {
++ platform_device_unregister(pplatformdev);
++ pplatformdev = NULL;
++ }
++}
++
++module_init(vivante_init);
++module_exit(vivante_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nur linux-3.14.40.orig/drivers/gpu/drm/vivante/vivante_drv.h linux-3.14.40/drivers/gpu/drm/vivante/vivante_drv.h
+--- linux-3.14.40.orig/drivers/gpu/drm/vivante/vivante_drv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/gpu/drm/vivante/vivante_drv.h 2015-05-01 14:57:59.067427001 -0500
+@@ -0,0 +1,66 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/* vivante_drv.h -- Vivante DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2012 by John Zhao
++ */
++/*
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __VIVANTE_DRV_H__
++#define __VIVANTE_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR "Vivante Inc."
++
++#define DRIVER_NAME "vivante"
++#define DRIVER_DESC "Vivante GCCore"
++#define DRIVER_DATE "20120216"
++
++#define DRIVER_MAJOR 1
++#define DRIVER_MINOR 0
++#define DRIVER_PATCHLEVEL 0
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/hwmon/Kconfig linux-3.14.40/drivers/hwmon/Kconfig
+--- linux-3.14.40.orig/drivers/hwmon/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/hwmon/Kconfig 2015-05-01 14:57:59.075427001 -0500
+@@ -1584,4 +1584,19 @@
+
+ endif # ACPI
+
++config SENSORS_MAG3110
++ tristate "Freescale MAG3110 e-compass sensor"
++ depends on I2C && SYSFS && INPUT_POLLDEV
++ help
++ If you say yes here you get support for the Freescale MAG3110
++ e-compass sensor.
++ This driver can also be built as a module. If so, the module
++ will be called mag3110.
++
++config MXC_MMA8451
++ tristate "MMA8451 device driver"
++ depends on I2C
++ depends on INPUT_POLLDEV
++ default y
++ help
++ If you say yes here you get support for the Freescale MMA8451/2/3
++ 3-axis accelerometer. This driver can also be built as a module.
++ If so, the module will be called mxc_mma8451.
++
+ endif # HWMON
+diff -Nur linux-3.14.40.orig/drivers/hwmon/mag3110.c linux-3.14.40/drivers/hwmon/mag3110.c
+--- linux-3.14.40.orig/drivers/hwmon/mag3110.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/hwmon/mag3110.c 2015-05-01 14:57:59.075427001 -0500
+@@ -0,0 +1,611 @@
++/*
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/irq.h>
++#include <linux/platform_device.h>
++#include <linux/input-polldev.h>
++#include <linux/hwmon.h>
++#include <linux/input.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++#include <linux/of.h>
++#include <linux/regulator/consumer.h>
++
++#define MAG3110_DRV_NAME "mag3110"
++#define MAG3110_ID 0xC4
++#define MAG3110_XYZ_DATA_LEN 6
++#define MAG3110_STATUS_ZYXDR 0x08
++
++#define MAG3110_AC_MASK (0x01)
++#define MAG3110_AC_OFFSET 0
++#define MAG3110_DR_MODE_MASK (0x7 << 5)
++#define MAG3110_DR_MODE_OFFSET 5
++#define MAG3110_IRQ_USED 0
++
++#define POLL_INTERVAL_MAX 500
++#define POLL_INTERVAL 100
++#define INT_TIMEOUT 1000
++#define DEFAULT_POSITION 2
++/* register enum for mag3110 registers */
++enum {
++ MAG3110_DR_STATUS = 0x00,
++ MAG3110_OUT_X_MSB,
++ MAG3110_OUT_X_LSB,
++ MAG3110_OUT_Y_MSB,
++ MAG3110_OUT_Y_LSB,
++ MAG3110_OUT_Z_MSB,
++ MAG3110_OUT_Z_LSB,
++ MAG3110_WHO_AM_I,
++
++ MAG3110_OFF_X_MSB,
++ MAG3110_OFF_X_LSB,
++ MAG3110_OFF_Y_MSB,
++ MAG3110_OFF_Y_LSB,
++ MAG3110_OFF_Z_MSB,
++ MAG3110_OFF_Z_LSB,
++
++ MAG3110_DIE_TEMP,
++
++ MAG3110_CTRL_REG1 = 0x10,
++ MAG3110_CTRL_REG2,
++};
++enum {
++ MAG_STANDBY,
++ MAG_ACTIVED
++};
++struct mag3110_data {
++ struct i2c_client *client;
++ struct input_polled_dev *poll_dev;
++ struct device *hwmon_dev;
++ wait_queue_head_t waitq;
++ bool data_ready;
++ u8 ctl_reg1;
++ int active;
++ int position;
++};
++static short MAGHAL[8][3][3] = {
++ { {0, 1, 0}, {-1, 0, 0}, {0, 0, 1} },
++ { {1, 0, 0}, {0, 1, 0}, {0, 0, 1} },
++ { {0, -1, 0}, {1, 0, 0}, {0, 0, 1} },
++ { {-1, 0, 0}, {0, -1, 0}, {0, 0, 1} },
++
++ { {0, 1, 0}, {1, 0, 0}, {0, 0, -1} },
++ { {1, 0, 0}, {0, -1, 0}, {0, 0, -1} },
++ { {0, -1, 0}, {-1, 0, 0}, {0, 0, -1} },
++ { {-1, 0, 0}, {0, 1, 0}, {0, 0, -1} },
++};
++
++static struct mag3110_data *mag3110_pdata;
++/*!
++ * This function does a single MAG3110 register read.
++ */
++static DEFINE_MUTEX(mag3110_lock);
++static int mag3110_adjust_position(short *x, short *y, short *z)
++{
++ short rawdata[3], data[3];
++ int i, j;
++ int position = mag3110_pdata->position;
++ if (position < 0 || position > 7)
++ position = 0;
++ rawdata[0] = *x;
++ rawdata[1] = *y;
++ rawdata[2] = *z;
++ for (i = 0; i < 3; i++) {
++ data[i] = 0;
++ for (j = 0; j < 3; j++)
++ data[i] += rawdata[j] * MAGHAL[position][i][j];
++ }
++ *x = data[0];
++ *y = data[1];
++ *z = data[2];
++ return 0;
++}
++
++static int mag3110_read_reg(struct i2c_client *client, u8 reg)
++{
++ return i2c_smbus_read_byte_data(client, reg);
++}
++
++/*!
++ * This function does a single MAG3110 register write.
++ */
++static int mag3110_write_reg(struct i2c_client *client, u8 reg, char value)
++{
++ int ret;
++
++ ret = i2c_smbus_write_byte_data(client, reg, value);
++ if (ret < 0)
++ dev_err(&client->dev, "i2c write failed\n");
++ return ret;
++}
++
++/*!
++ * This function does a multi-byte MAG3110 register read.
++ */
++static int mag3110_read_block_data(struct i2c_client *client, u8 reg,
++ int count, u8 *addr)
++{
++ if (i2c_smbus_read_i2c_block_data(client, reg, count, addr) < count) {
++ dev_err(&client->dev, "i2c block read failed\n");
++ return -1;
++ }
++
++ return count;
++}
++
++/*
++ * Initialization function
++ */
++static int mag3110_init_client(struct i2c_client *client)
++{
++ int val, ret;
++
++ /* enable automatic resets */
++ val = 0x80;
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG2, val);
++
++ /* set default data rate to 10 Hz */
++ val = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ val |= (0x0 << MAG3110_DR_MODE_OFFSET);
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, val);
++
++ return ret;
++}
++
++/***************************************************************
++*
++* read sensor data from mag3110
++*
++***************************************************************/
++static int mag3110_read_data(short *x, short *y, short *z)
++{
++ struct mag3110_data *data;
++ int retry = 3;
++ u8 tmp_data[MAG3110_XYZ_DATA_LEN];
++ int result;
++ if (!mag3110_pdata || mag3110_pdata->active == MAG_STANDBY)
++ return -EINVAL;
++
++ data = mag3110_pdata;
++#if MAG3110_IRQ_USED
++ if (!wait_event_interruptible_timeout
++ (data->waitq, data->data_ready != 0,
++ msecs_to_jiffies(INT_TIMEOUT))) {
++ dev_dbg(&data->client->dev, "interrupt not received\n");
++ return -ETIME;
++ }
++#else
++ do {
++ msleep(1);
++ result = i2c_smbus_read_byte_data(data->client,
++ MAG3110_DR_STATUS);
++ retry--;
++ } while (!(result & MAG3110_STATUS_ZYXDR) && retry > 0);
++ /* Clear data_ready flag after data is read out */
++ if (retry == 0)
++ return -EINVAL;
++#endif
++
++ data->data_ready = 0;
++
++ if (mag3110_read_block_data(data->client,
++ MAG3110_OUT_X_MSB, MAG3110_XYZ_DATA_LEN,
++ tmp_data) < 0)
++ return -1;
++
++ *x = ((tmp_data[0] << 8) & 0xff00) | tmp_data[1];
++ *y = ((tmp_data[2] << 8) & 0xff00) | tmp_data[3];
++ *z = ((tmp_data[4] << 8) & 0xff00) | tmp_data[5];
++
++ return 0;
++}
++
++static void report_abs(void)
++{
++ struct input_dev *idev;
++ short x, y, z;
++
++ mutex_lock(&mag3110_lock);
++ if (mag3110_read_data(&x, &y, &z) != 0)
++ goto out;
++ mag3110_adjust_position(&x, &y, &z);
++ idev = mag3110_pdata->poll_dev->input;
++ input_report_abs(idev, ABS_X, x);
++ input_report_abs(idev, ABS_Y, y);
++ input_report_abs(idev, ABS_Z, z);
++ input_sync(idev);
++out:
++ mutex_unlock(&mag3110_lock);
++}
++
++static void mag3110_dev_poll(struct input_polled_dev *dev)
++{
++ report_abs();
++}
++
++#if MAG3110_IRQ_USED
++static irqreturn_t mag3110_irq_handler(int irq, void *dev_id)
++{
++ mag3110_pdata->data_ready = 1;
++ wake_up_interruptible(&mag3110_pdata->waitq);
++
++ return IRQ_HANDLED;
++}
++#endif
++static ssize_t mag3110_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client;
++ int val;
++ mutex_lock(&mag3110_lock);
++ client = mag3110_pdata->client;
++ val = mag3110_read_reg(client, MAG3110_CTRL_REG1) & MAG3110_AC_MASK;
++
++ mutex_unlock(&mag3110_lock);
++ return sprintf(buf, "%d\n", val);
++}
++
++static ssize_t mag3110_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client;
++ int reg, ret;
++ long enable;
++ u8 tmp_data[MAG3110_XYZ_DATA_LEN];
++
++ ret = kstrtol(buf, 10, &enable);
++ if (ret) {
++ dev_err(dev, "string to long error\n");
++ return ret;
++ }
++
++ mutex_lock(&mag3110_lock);
++ client = mag3110_pdata->client;
++ reg = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ if (enable && mag3110_pdata->active == MAG_STANDBY) {
++ reg |= MAG3110_AC_MASK;
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, reg);
++ if (!ret)
++ mag3110_pdata->active = MAG_ACTIVED;
++ } else if (!enable && mag3110_pdata->active == MAG_ACTIVED) {
++ reg &= ~MAG3110_AC_MASK;
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, reg);
++ if (!ret)
++ mag3110_pdata->active = MAG_STANDBY;
++ }
++
++ if (mag3110_pdata->active == MAG_ACTIVED) {
++ msleep(100);
++ /* Read out MSB data to clear interrupt flag automatically */
++ mag3110_read_block_data(client, MAG3110_OUT_X_MSB,
++ MAG3110_XYZ_DATA_LEN, tmp_data);
++ }
++ mutex_unlock(&mag3110_lock);
++ return count;
++}
++
++static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
++ mag3110_enable_show, mag3110_enable_store);
++
++static ssize_t mag3110_dr_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client;
++ int val;
++
++ client = mag3110_pdata->client;
++ val = (mag3110_read_reg(client, MAG3110_CTRL_REG1)
++ & MAG3110_DR_MODE_MASK) >> MAG3110_DR_MODE_OFFSET;
++
++ return sprintf(buf, "%d\n", val);
++}
++
++static ssize_t mag3110_dr_mode_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client;
++ int reg, ret;
++ unsigned long val;
++
++ /* This must be done when mag3110 is disabled */
++ if ((kstrtoul(buf, 10, &val) < 0) || (val > 7))
++ return -EINVAL;
++
++ client = mag3110_pdata->client;
++ reg = mag3110_read_reg(client, MAG3110_CTRL_REG1) &
++ ~MAG3110_DR_MODE_MASK;
++ reg |= (val << MAG3110_DR_MODE_OFFSET);
++ /* MAG3110_CTRL_REG1 bit 5-7: data rate mode */
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1, reg);
++ if (ret < 0)
++ return ret;
++
++ return count;
++}
++
++static DEVICE_ATTR(dr_mode, S_IWUSR | S_IRUGO,
++ mag3110_dr_mode_show, mag3110_dr_mode_store);
++
++static ssize_t mag3110_position_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int val;
++ mutex_lock(&mag3110_lock);
++ val = mag3110_pdata->position;
++ mutex_unlock(&mag3110_lock);
++ return sprintf(buf, "%d\n", val);
++}
++
++static ssize_t mag3110_position_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ long position;
++ int ret;
++ ret = kstrtol(buf, 10, &position);
++ if (ret) {
++ dev_err(dev, "string to long error\n");
++ return ret;
++ }
++
++ mutex_lock(&mag3110_lock);
++ mag3110_pdata->position = (int)position;
++ mutex_unlock(&mag3110_lock);
++ return count;
++}
++
++static DEVICE_ATTR(position, S_IWUSR | S_IRUGO,
++ mag3110_position_show, mag3110_position_store);
++
++static struct attribute *mag3110_attributes[] = {
++ &dev_attr_enable.attr,
++ &dev_attr_dr_mode.attr,
++ &dev_attr_position.attr,
++ NULL
++};
++
++static const struct attribute_group mag3110_attr_group = {
++ .attrs = mag3110_attributes,
++};
++
++static int mag3110_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct i2c_adapter *adapter;
++ struct input_dev *idev;
++ struct mag3110_data *data;
++ int ret = 0;
++ struct regulator *vdd, *vdd_io;
++ u32 pos = 0;
++ struct device_node *of_node = client->dev.of_node;
++ vdd = NULL;
++ vdd_io = NULL;
++
++ vdd = devm_regulator_get(&client->dev, "vdd");
++ if (!IS_ERR(vdd)) {
++ ret = regulator_enable(vdd);
++ if (ret) {
++ dev_err(&client->dev, "vdd set voltage error\n");
++ return ret;
++ }
++ }
++
++ vdd_io = devm_regulator_get(&client->dev, "vddio");
++ if (!IS_ERR(vdd_io)) {
++ ret = regulator_enable(vdd_io);
++ if (ret) {
++ dev_err(&client->dev, "vddio set voltage error\n");
++ return ret;
++ }
++ }
++
++ adapter = to_i2c_adapter(client->dev.parent);
++ if (!i2c_check_functionality(adapter,
++ I2C_FUNC_SMBUS_BYTE |
++ I2C_FUNC_SMBUS_BYTE_DATA |
++ I2C_FUNC_SMBUS_I2C_BLOCK))
++ return -EIO;
++
++ dev_info(&client->dev, "check mag3110 chip ID\n");
++ ret = mag3110_read_reg(client, MAG3110_WHO_AM_I);
++
++ if (MAG3110_ID != ret) {
++ dev_err(&client->dev,
++ "read chip ID 0x%x is not equal to 0x%x!\n", ret,
++ MAG3110_ID);
++ return -EINVAL;
++ }
++ data = kzalloc(sizeof(struct mag3110_data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++ data->client = client;
++ i2c_set_clientdata(client, data);
++ /* Init queue */
++ init_waitqueue_head(&data->waitq);
++
++ data->hwmon_dev = hwmon_device_register(&client->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ dev_err(&client->dev, "hwmon register failed!\n");
++ ret = PTR_ERR(data->hwmon_dev);
++ goto error_rm_dev_sysfs;
++ }
++
++ /*input poll device register */
++ data->poll_dev = input_allocate_polled_device();
++ if (!data->poll_dev) {
++ dev_err(&client->dev, "alloc poll device failed!\n");
++ ret = -ENOMEM;
++ goto error_rm_hwmon_dev;
++ }
++ data->poll_dev->poll = mag3110_dev_poll;
++ data->poll_dev->poll_interval = POLL_INTERVAL;
++ data->poll_dev->poll_interval_max = POLL_INTERVAL_MAX;
++ idev = data->poll_dev->input;
++ idev->name = MAG3110_DRV_NAME;
++ idev->id.bustype = BUS_I2C;
++ idev->evbit[0] = BIT_MASK(EV_ABS);
++ input_set_abs_params(idev, ABS_X, -15000, 15000, 0, 0);
++ input_set_abs_params(idev, ABS_Y, -15000, 15000, 0, 0);
++ input_set_abs_params(idev, ABS_Z, -15000, 15000, 0, 0);
++ ret = input_register_polled_device(data->poll_dev);
++ if (ret) {
++ dev_err(&client->dev, "register poll device failed!\n");
++ goto error_free_poll_dev;
++ }
++
++ /*create device group in sysfs as user interface */
++ ret = sysfs_create_group(&idev->dev.kobj, &mag3110_attr_group);
++ if (ret) {
++ dev_err(&client->dev, "create device file failed!\n");
++ ret = -EINVAL;
++ goto error_rm_poll_dev;
++ }
++ /* set irq type to edge rising */
++#if MAG3110_IRQ_USED
++ ret = request_irq(client->irq, mag3110_irq_handler,
++ IRQF_TRIGGER_RISING, client->dev.driver->name, idev);
++ if (ret < 0) {
++ dev_err(&client->dev, "failed to register irq %d!\n",
++ client->irq);
++ goto error_rm_dev_sysfs;
++ }
++#endif
++ /* Initialize mag3110 chip */
++ mag3110_init_client(client);
++ mag3110_pdata = data;
++ mag3110_pdata->active = MAG_STANDBY;
++ ret = of_property_read_u32(of_node, "position", &pos);
++ if (ret)
++ pos = DEFAULT_POSITION;
++ mag3110_pdata->position = (int)pos;
++ dev_info(&client->dev, "mag3110 is probed\n");
++ return 0;
++error_rm_dev_sysfs:
++ sysfs_remove_group(&client->dev.kobj, &mag3110_attr_group);
++error_rm_poll_dev:
++ input_unregister_polled_device(data->poll_dev);
++error_free_poll_dev:
++ input_free_polled_device(data->poll_dev);
++error_rm_hwmon_dev:
++ hwmon_device_unregister(data->hwmon_dev);
++
++ kfree(data);
++ mag3110_pdata = NULL;
++
++ return ret;
++}
++
++static int mag3110_remove(struct i2c_client *client)
++{
++ struct mag3110_data *data;
++ int ret;
++
++ data = i2c_get_clientdata(client);
++
++ data->ctl_reg1 = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1,
++ data->ctl_reg1 & ~MAG3110_AC_MASK);
++
++ free_irq(client->irq, data);
++ input_unregister_polled_device(data->poll_dev);
++ input_free_polled_device(data->poll_dev);
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&client->dev.kobj, &mag3110_attr_group);
++ kfree(data);
++ mag3110_pdata = NULL;
++
++ return ret;
++}
++
++#ifdef CONFIG_PM
++static int mag3110_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ int ret = 0;
++ struct mag3110_data *data = i2c_get_clientdata(client);
++ if (data->active == MAG_ACTIVED) {
++ data->ctl_reg1 = mag3110_read_reg(client, MAG3110_CTRL_REG1);
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1,
++ data->ctl_reg1 & ~MAG3110_AC_MASK);
++ }
++ return ret;
++}
++
++static int mag3110_resume(struct i2c_client *client)
++{
++ int ret = 0;
++ u8 tmp_data[MAG3110_XYZ_DATA_LEN];
++ struct mag3110_data *data = i2c_get_clientdata(client);
++ if (data->active == MAG_ACTIVED) {
++ ret = mag3110_write_reg(client, MAG3110_CTRL_REG1,
++ data->ctl_reg1);
++
++ if (data->ctl_reg1 & MAG3110_AC_MASK) {
++ /* Read out MSB data to clear interrupt
++ flag automatically */
++ mag3110_read_block_data(client, MAG3110_OUT_X_MSB,
++ MAG3110_XYZ_DATA_LEN, tmp_data);
++ }
++ }
++ return ret;
++}
++
++#else
++#define mag3110_suspend NULL
++#define mag3110_resume NULL
++#endif /* CONFIG_PM */
++
++static const struct i2c_device_id mag3110_id[] = {
++ {MAG3110_DRV_NAME, 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, mag3110_id);
++static struct i2c_driver mag3110_driver = {
++ .driver = {.name = MAG3110_DRV_NAME,
++ .owner = THIS_MODULE,},
++ .suspend = mag3110_suspend,
++ .resume = mag3110_resume,
++ .probe = mag3110_probe,
++ .remove = mag3110_remove,
++ .id_table = mag3110_id,
++};
++
++static int __init mag3110_init(void)
++{
++ return i2c_add_driver(&mag3110_driver);
++}
++
++static void __exit mag3110_exit(void)
++{
++ i2c_del_driver(&mag3110_driver);
++}
++
++module_init(mag3110_init);
++module_exit(mag3110_exit);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Freescale mag3110 3-axis magnetometer driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/hwmon/Makefile linux-3.14.40/drivers/hwmon/Makefile
+--- linux-3.14.40.orig/drivers/hwmon/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/hwmon/Makefile 2015-05-01 14:57:59.087427001 -0500
+@@ -142,6 +142,8 @@
+ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
+ obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
+ obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
++obj-$(CONFIG_SENSORS_MAG3110) += mag3110.o
++obj-$(CONFIG_MXC_MMA8451) += mxc_mma8451.o
+
+ obj-$(CONFIG_PMBUS) += pmbus/
+
+diff -Nur linux-3.14.40.orig/drivers/hwmon/mxc_mma8451.c linux-3.14.40/drivers/hwmon/mxc_mma8451.c
+--- linux-3.14.40.orig/drivers/hwmon/mxc_mma8451.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/hwmon/mxc_mma8451.c 2015-05-01 14:57:59.087427001 -0500
+@@ -0,0 +1,598 @@
++/*
++ * mma8451.c - Linux kernel modules for 3-Axis Orientation/Motion
++ * Detection Sensor
++ *
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/pm.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++#include <linux/hwmon.h>
++#include <linux/input-polldev.h>
++#include <linux/of.h>
++#include <linux/regulator/consumer.h>
++
++#define MMA8451_I2C_ADDR 0x1C
++#define MMA8451_ID 0x1A
++#define MMA8452_ID 0x2A
++#define MMA8453_ID 0x3A
++
++#define POLL_INTERVAL_MIN 1
++#define POLL_INTERVAL_MAX 500
++#define POLL_INTERVAL 100 /* msecs */
++#define INPUT_FUZZ 32
++#define INPUT_FLAT 32
++#define MODE_CHANGE_DELAY_MS 100
++
++#define MMA8451_STATUS_ZYXDR 0x08
++#define MMA8451_BUF_SIZE 7
++#define DEFAULT_POSITION 0
++
++/* register enum for mma8451 registers */
++enum {
++ MMA8451_STATUS = 0x00,
++ MMA8451_OUT_X_MSB,
++ MMA8451_OUT_X_LSB,
++ MMA8451_OUT_Y_MSB,
++ MMA8451_OUT_Y_LSB,
++ MMA8451_OUT_Z_MSB,
++ MMA8451_OUT_Z_LSB,
++
++ MMA8451_F_SETUP = 0x09,
++ MMA8451_TRIG_CFG,
++ MMA8451_SYSMOD,
++ MMA8451_INT_SOURCE,
++ MMA8451_WHO_AM_I,
++ MMA8451_XYZ_DATA_CFG,
++ MMA8451_HP_FILTER_CUTOFF,
++
++ MMA8451_PL_STATUS,
++ MMA8451_PL_CFG,
++ MMA8451_PL_COUNT,
++ MMA8451_PL_BF_ZCOMP,
++ MMA8451_P_L_THS_REG,
++
++ MMA8451_FF_MT_CFG,
++ MMA8451_FF_MT_SRC,
++ MMA8451_FF_MT_THS,
++ MMA8451_FF_MT_COUNT,
++
++ MMA8451_TRANSIENT_CFG = 0x1D,
++ MMA8451_TRANSIENT_SRC,
++ MMA8451_TRANSIENT_THS,
++ MMA8451_TRANSIENT_COUNT,
++
++ MMA8451_PULSE_CFG,
++ MMA8451_PULSE_SRC,
++ MMA8451_PULSE_THSX,
++ MMA8451_PULSE_THSY,
++ MMA8451_PULSE_THSZ,
++ MMA8451_PULSE_TMLT,
++ MMA8451_PULSE_LTCY,
++ MMA8451_PULSE_WIND,
++
++ MMA8451_ASLP_COUNT,
++ MMA8451_CTRL_REG1,
++ MMA8451_CTRL_REG2,
++ MMA8451_CTRL_REG3,
++ MMA8451_CTRL_REG4,
++ MMA8451_CTRL_REG5,
++
++ MMA8451_OFF_X,
++ MMA8451_OFF_Y,
++ MMA8451_OFF_Z,
++
++ MMA8451_REG_END,
++};
++
++/* The sensitivity is represented in counts/g. In 2g mode the
++sensitivity is 1024 counts/g. In 4g mode the sensitivity is 512
++counts/g and in 8g mode the sensitivity is 256 counts/g.
++ */
++enum {
++ MODE_2G = 0,
++ MODE_4G,
++ MODE_8G,
++};
++
++enum {
++ MMA_STANDBY = 0,
++ MMA_ACTIVED,
++};
++
++/* mma8451 status */
++struct mma8451_status {
++ u8 mode;
++ u8 ctl_reg1;
++ int active;
++ int position;
++};
++
++static struct mma8451_status mma_status;
++static struct input_polled_dev *mma8451_idev;
++static struct device *hwmon_dev;
++static struct i2c_client *mma8451_i2c_client;
++
++static int senstive_mode = MODE_2G;
++static int ACCHAL[8][3][3] = {
++ { {0, -1, 0}, {1, 0, 0}, {0, 0, 1} },
++ { {-1, 0, 0}, {0, -1, 0}, {0, 0, 1} },
++ { {0, 1, 0}, {-1, 0, 0}, {0, 0, 1} },
++ { {1, 0, 0}, {0, 1, 0}, {0, 0, 1} },
++
++ { {0, -1, 0}, {-1, 0, 0}, {0, 0, -1} },
++ { {-1, 0, 0}, {0, 1, 0}, {0, 0, -1} },
++ { {0, 1, 0}, {1, 0, 0}, {0, 0, -1} },
++ { {1, 0, 0}, {0, -1, 0}, {0, 0, -1} },
++};
++
++static DEFINE_MUTEX(mma8451_lock);
++static int mma8451_adjust_position(short *x, short *y, short *z)
++{
++ short rawdata[3], data[3];
++ int i, j;
++ int position = mma_status.position;
++ if (position < 0 || position > 7)
++ position = 0;
++ rawdata[0] = *x;
++ rawdata[1] = *y;
++ rawdata[2] = *z;
++ for (i = 0; i < 3; i++) {
++ data[i] = 0;
++ for (j = 0; j < 3; j++)
++ data[i] += rawdata[j] * ACCHAL[position][i][j];
++ }
++ *x = data[0];
++ *y = data[1];
++ *z = data[2];
++ return 0;
++}
++
++static int mma8451_change_mode(struct i2c_client *client, int mode)
++{
++ int result;
++
++ mma_status.ctl_reg1 = 0;
++ result = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1, 0);
++ if (result < 0)
++ goto out;
++ mma_status.active = MMA_STANDBY;
++
++ result = i2c_smbus_write_byte_data(client, MMA8451_XYZ_DATA_CFG,
++ mode);
++ if (result < 0)
++ goto out;
++ mdelay(MODE_CHANGE_DELAY_MS);
++ mma_status.mode = mode;
++
++ return 0;
++out:
++ dev_err(&client->dev, "error when init mma8451:(%d)", result);
++ return result;
++}
++
++static int mma8451_read_data(short *x, short *y, short *z)
++{
++ u8 tmp_data[MMA8451_BUF_SIZE];
++ int ret;
++
++ ret = i2c_smbus_read_i2c_block_data(mma8451_i2c_client,
++ MMA8451_OUT_X_MSB, 7, tmp_data);
++ if (ret < MMA8451_BUF_SIZE) {
++ dev_err(&mma8451_i2c_client->dev, "i2c block read failed\n");
++ return -EIO;
++ }
++
++ *x = ((tmp_data[0] << 8) & 0xff00) | tmp_data[1];
++ *y = ((tmp_data[2] << 8) & 0xff00) | tmp_data[3];
++ *z = ((tmp_data[4] << 8) & 0xff00) | tmp_data[5];
++ return 0;
++}
++
++static void report_abs(void)
++{
++ short x, y, z;
++ int result;
++ int retry = 3;
++
++ mutex_lock(&mma8451_lock);
++ if (mma_status.active == MMA_STANDBY)
++ goto out;
++ /* wait for the data ready */
++ do {
++ result = i2c_smbus_read_byte_data(mma8451_i2c_client,
++ MMA8451_STATUS);
++ retry--;
++ msleep(1);
++ } while (!(result & MMA8451_STATUS_ZYXDR) && retry > 0);
++ if (retry == 0)
++ goto out;
++ if (mma8451_read_data(&x, &y, &z) != 0)
++ goto out;
++ mma8451_adjust_position(&x, &y, &z);
++ input_report_abs(mma8451_idev->input, ABS_X, x);
++ input_report_abs(mma8451_idev->input, ABS_Y, y);
++ input_report_abs(mma8451_idev->input, ABS_Z, z);
++ input_sync(mma8451_idev->input);
++out:
++ mutex_unlock(&mma8451_lock);
++}
++
++static void mma8451_dev_poll(struct input_polled_dev *dev)
++{
++ report_abs();
++}
++
++static ssize_t mma8451_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client;
++ u8 val;
++ int enable;
++
++ mutex_lock(&mma8451_lock);
++ client = mma8451_i2c_client;
++ val = i2c_smbus_read_byte_data(client, MMA8451_CTRL_REG1);
++ if ((val & 0x01) && mma_status.active == MMA_ACTIVED)
++ enable = 1;
++ else
++ enable = 0;
++ mutex_unlock(&mma8451_lock);
++ return sprintf(buf, "%d\n", enable);
++}
++
++static ssize_t mma8451_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client;
++ int ret;
++ unsigned long enable;
++ u8 val = 0;
++
++ ret = kstrtoul(buf, 10, &enable);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ return ret;
++ }
++
++ mutex_lock(&mma8451_lock);
++ client = mma8451_i2c_client;
++ enable = (enable > 0) ? 1 : 0;
++ if (enable && mma_status.active == MMA_STANDBY) {
++ val = i2c_smbus_read_byte_data(client, MMA8451_CTRL_REG1);
++ ret =
++ i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ val | 0x01);
++ if (!ret)
++ mma_status.active = MMA_ACTIVED;
++
++ } else if (enable == 0 && mma_status.active == MMA_ACTIVED) {
++ val = i2c_smbus_read_byte_data(client, MMA8451_CTRL_REG1);
++ ret =
++ i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ val & 0xFE);
++ if (!ret)
++ mma_status.active = MMA_STANDBY;
++
++ }
++ mutex_unlock(&mma8451_lock);
++ return count;
++}
++
++static ssize_t mma8451_position_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int position = 0;
++ mutex_lock(&mma8451_lock);
++ position = mma_status.position;
++ mutex_unlock(&mma8451_lock);
++ return sprintf(buf, "%d\n", position);
++}
++
++static ssize_t mma8451_position_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long position;
++ int ret;
++ ret = kstrtoul(buf, 10, &position);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ return ret;
++ }
++
++ mutex_lock(&mma8451_lock);
++ mma_status.position = (int)position;
++ mutex_unlock(&mma8451_lock);
++ return count;
++}
++
++static ssize_t mma8451_scalemode_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int mode = 0;
++ mutex_lock(&mma8451_lock);
++ mode = (int)mma_status.mode;
++ mutex_unlock(&mma8451_lock);
++
++ return sprintf(buf, "%d\n", mode);
++}
++
++static ssize_t mma8451_scalemode_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long mode;
++ int ret, active_save;
++ struct i2c_client *client = mma8451_i2c_client;
++
++ ret = kstrtoul(buf, 10, &mode);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ goto out;
++ }
++
++ if (mode > MODE_8G) {
++ dev_warn(dev, "not supported mode\n");
++ ret = count;
++ goto out;
++ }
++
++ mutex_lock(&mma8451_lock);
++ if (mode == mma_status.mode) {
++ ret = count;
++ goto out_unlock;
++ }
++
++ active_save = mma_status.active;
++ ret = mma8451_change_mode(client, mode);
++ if (ret)
++ goto out_unlock;
++
++ if (active_save == MMA_ACTIVED) {
++ ret = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1, 1);
++
++ if (ret)
++ goto out_unlock;
++ mma_status.active = active_save;
++ }
++
++out_unlock:
++ mutex_unlock(&mma8451_lock);
++out:
++ return ret;
++}
++
++static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
++ mma8451_enable_show, mma8451_enable_store);
++static DEVICE_ATTR(position, S_IWUSR | S_IRUGO,
++ mma8451_position_show, mma8451_position_store);
++static DEVICE_ATTR(scalemode, S_IWUSR | S_IRUGO,
++ mma8451_scalemode_show, mma8451_scalemode_store);
++
++static struct attribute *mma8451_attributes[] = {
++ &dev_attr_enable.attr,
++ &dev_attr_position.attr,
++ &dev_attr_scalemode.attr,
++ NULL
++};
++
++static const struct attribute_group mma8451_attr_group = {
++ .attrs = mma8451_attributes,
++};
++
++static int mma8451_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int result, client_id;
++ struct input_dev *idev;
++ struct i2c_adapter *adapter;
++ u32 pos;
++ struct device_node *of_node = client->dev.of_node;
++ struct regulator *vdd, *vdd_io;
++
++ mma8451_i2c_client = client;
++
++ vdd = devm_regulator_get(&client->dev, "vdd");
++ if (!IS_ERR(vdd)) {
++ result = regulator_enable(vdd);
++ if (result) {
++ dev_err(&client->dev, "vdd set voltage error\n");
++ return result;
++ }
++ }
++
++ vdd_io = devm_regulator_get(&client->dev, "vddio");
++ if (!IS_ERR(vdd_io)) {
++ result = regulator_enable(vdd_io);
++ if (result) {
++ dev_err(&client->dev, "vddio set voltage error\n");
++ return result;
++ }
++ }
++
++ adapter = to_i2c_adapter(client->dev.parent);
++ result = i2c_check_functionality(adapter,
++ I2C_FUNC_SMBUS_BYTE |
++ I2C_FUNC_SMBUS_BYTE_DATA);
++ if (!result)
++ goto err_out;
++
++ client_id = i2c_smbus_read_byte_data(client, MMA8451_WHO_AM_I);
++ if (client_id != MMA8451_ID && client_id != MMA8452_ID
++ && client_id != MMA8453_ID) {
++ dev_err(&client->dev,
++ "read chip ID 0x%x is not equal to 0x%x or 0x%x!\n",
++ client_id, MMA8451_ID, MMA8452_ID);
++ result = -EINVAL;
++ goto err_out;
++ }
++
++ /* Initialize the MMA8451 chip */
++ result = mma8451_change_mode(client, senstive_mode);
++ if (result) {
++ dev_err(&client->dev,
++ "error when init mma8451 chip:(%d)\n", result);
++ goto err_out;
++ }
++
++ hwmon_dev = hwmon_device_register(&client->dev);
++ if (IS_ERR(hwmon_dev)) {
++ result = PTR_ERR(hwmon_dev);
++ dev_err(&client->dev, "error when register hwmon device\n");
++ goto err_out;
++ }
++
++ mma8451_idev = input_allocate_polled_device();
++ if (!mma8451_idev) {
++ result = -ENOMEM;
++ dev_err(&client->dev, "alloc poll device failed!\n");
++ goto err_alloc_poll_device;
++ }
++ mma8451_idev->poll = mma8451_dev_poll;
++ mma8451_idev->poll_interval = POLL_INTERVAL;
++ mma8451_idev->poll_interval_min = POLL_INTERVAL_MIN;
++ mma8451_idev->poll_interval_max = POLL_INTERVAL_MAX;
++ idev = mma8451_idev->input;
++ idev->name = "mma845x";
++ idev->id.bustype = BUS_I2C;
++ idev->evbit[0] = BIT_MASK(EV_ABS);
++
++ input_set_abs_params(idev, ABS_X, -8192, 8191, INPUT_FUZZ, INPUT_FLAT);
++ input_set_abs_params(idev, ABS_Y, -8192, 8191, INPUT_FUZZ, INPUT_FLAT);
++ input_set_abs_params(idev, ABS_Z, -8192, 8191, INPUT_FUZZ, INPUT_FLAT);
++
++ result = input_register_polled_device(mma8451_idev);
++ if (result) {
++ dev_err(&client->dev, "register poll device failed!\n");
++ goto err_register_polled_device;
++ }
++ result = sysfs_create_group(&idev->dev.kobj, &mma8451_attr_group);
++ if (result) {
++ dev_err(&client->dev, "create device file failed!\n");
++ result = -EINVAL;
++ goto err_register_polled_device;
++ }
++
++ result = of_property_read_u32(of_node, "position", &pos);
++ if (result)
++ pos = DEFAULT_POSITION;
++ mma_status.position = (int)pos;
++
++ return 0;
++err_register_polled_device:
++ input_free_polled_device(mma8451_idev);
++err_alloc_poll_device:
++	hwmon_device_unregister(hwmon_dev);
++err_out:
++ return result;
++}
++
++static int mma8451_stop_chip(struct i2c_client *client)
++{
++ int ret = 0;
++ if (mma_status.active == MMA_ACTIVED) {
++ mma_status.ctl_reg1 = i2c_smbus_read_byte_data(client,
++ MMA8451_CTRL_REG1);
++ ret = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ mma_status.ctl_reg1 & 0xFE);
++ }
++ return ret;
++}
++
++static int mma8451_remove(struct i2c_client *client)
++{
++ int ret;
++ ret = mma8451_stop_chip(client);
++ hwmon_device_unregister(hwmon_dev);
++
++ return ret;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int mma8451_suspend(struct device *dev)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++
++ return mma8451_stop_chip(client);
++}
++
++static int mma8451_resume(struct device *dev)
++{
++ int ret = 0;
++ struct i2c_client *client = to_i2c_client(dev);
++ if (mma_status.active == MMA_ACTIVED)
++ ret = i2c_smbus_write_byte_data(client, MMA8451_CTRL_REG1,
++ mma_status.ctl_reg1);
++ return ret;
++
++}
++#endif
++
++static const struct i2c_device_id mma8451_id[] = {
++	{"mma8451", 0},
++	{ /* sentinel */ },
++};
++
++MODULE_DEVICE_TABLE(i2c, mma8451_id);
++
++static SIMPLE_DEV_PM_OPS(mma8451_pm_ops, mma8451_suspend, mma8451_resume);
++static struct i2c_driver mma8451_driver = {
++ .driver = {
++ .name = "mma8451",
++ .owner = THIS_MODULE,
++ .pm = &mma8451_pm_ops,
++ },
++ .probe = mma8451_probe,
++ .remove = mma8451_remove,
++ .id_table = mma8451_id,
++};
++
++static int __init mma8451_init(void)
++{
++ /* register driver */
++ int res;
++
++ res = i2c_add_driver(&mma8451_driver);
++ if (res < 0) {
++ printk(KERN_INFO "add mma8451 i2c driver failed\n");
++ return -ENODEV;
++ }
++ return res;
++}
++
++static void __exit mma8451_exit(void)
++{
++ i2c_del_driver(&mma8451_driver);
++}
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MMA8451 3-Axis Orientation/Motion Detection Sensor driver");
++MODULE_LICENSE("GPL");
++
++module_init(mma8451_init);
++module_exit(mma8451_exit);
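The driver above publishes enable, position and scalemode as sysfs attributes on the polled input device. A minimal userspace sketch of how the scalemode attribute could be exercised follows; the sysfs path and the numeric value written for the 8g range are assumptions made only for illustration, they are not part of the patch.

/*
 * Hypothetical userspace test for the mma845x scalemode attribute.
 * The input device number (and therefore the path) and the numeric
 * MODE_8G value are assumed, not taken from the patch.
 */
#include <stdio.h>

#define MMA845X_SCALEMODE "/sys/class/input/input2/scalemode" /* hypothetical */

int main(void)
{
	FILE *f;
	int mode = -1;

	/* Request the 8g range; "2" is assumed to equal MODE_8G. */
	f = fopen(MMA845X_SCALEMODE, "w");
	if (!f) {
		perror("open for write");
		return 1;
	}
	fprintf(f, "2\n");
	fclose(f);

	/* Read the mode back to confirm the store path took effect. */
	f = fopen(MMA845X_SCALEMODE, "r");
	if (!f) {
		perror("open for read");
		return 1;
	}
	if (fscanf(f, "%d", &mode) == 1)
		printf("current scale mode: %d\n", mode);
	fclose(f);
	return 0;
}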
+diff -Nur linux-3.14.40.orig/drivers/i2c/busses/i2c-imx.c linux-3.14.40/drivers/i2c/busses/i2c-imx.c
+--- linux-3.14.40.orig/drivers/i2c/busses/i2c-imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/i2c/busses/i2c-imx.c 2015-05-01 14:57:59.091427001 -0500
+@@ -184,6 +184,9 @@
+ int stopped;
+ unsigned int ifdr; /* IMX_I2C_IFDR */
+ const struct imx_i2c_hwdata *hwdata;
++
++ unsigned int cur_clk;
++ unsigned int bitrate;
+ };
+
+ static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
+@@ -305,6 +308,51 @@
+ return 0;
+ }
+
++static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
++{
++ struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
++ unsigned ndivs = i2c_imx->hwdata->ndivs;
++ unsigned int i2c_clk_rate;
++ unsigned int div;
++ int i;
++
++ /* Divider value calculation */
++ i2c_clk_rate = clk_get_rate(i2c_imx->clk);
++ if (i2c_imx->cur_clk == i2c_clk_rate)
++ return;
++ else
++ i2c_imx->cur_clk = i2c_clk_rate;
++
++ div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
++ if (div < i2c_clk_div[0].div)
++ i = 0;
++ else if (div > i2c_clk_div[ndivs - 1].div)
++ i = ndivs - 1;
++ else
++ for (i = 0; i2c_clk_div[i].div < div; i++)
++ ;
++
++ /* Store divider value */
++	i2c_imx->ifdr = i2c_clk_div[i].val;
++
++ /*
++	 * The dummy delay is calculated here.
++	 * It should be about one I2C clock period long.
++	 * This delay is used in the I2C bus disable function
++	 * to work around a chip hardware bug.
++ */
++ i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
++ + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
++
++ /* dev_dbg() can't be used, because adapter is not yet registered */
++#ifdef CONFIG_I2C_DEBUG_BUS
++ dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
++ __func__, i2c_clk_rate, div);
++ dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
++ __func__, i2c_clk_div[i].val, i2c_clk_div[i].div);
++#endif
++}
++
+ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
+ {
+ unsigned int temp = 0;
+@@ -312,6 +360,7 @@
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+
++ i2c_imx_set_clk(i2c_imx);
+ result = clk_prepare_enable(i2c_imx->clk);
+ if (result)
+ return result;
+@@ -367,45 +416,6 @@
+ clk_disable_unprepare(i2c_imx->clk);
+ }
+
+-static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+- unsigned int rate)
+-{
+- struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
+- unsigned int i2c_clk_rate;
+- unsigned int div;
+- int i;
+-
+- /* Divider value calculation */
+- i2c_clk_rate = clk_get_rate(i2c_imx->clk);
+- div = (i2c_clk_rate + rate - 1) / rate;
+- if (div < i2c_clk_div[0].div)
+- i = 0;
+- else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
+- i = i2c_imx->hwdata->ndivs - 1;
+- else
+- for (i = 0; i2c_clk_div[i].div < div; i++);
+-
+- /* Store divider value */
+- i2c_imx->ifdr = i2c_clk_div[i].val;
+-
+- /*
+- * There dummy delay is calculated.
+- * It should be about one I2C clock period long.
+- * This delay is used in I2C bus disable function
+- * to fix chip hardware bug.
+- */
+- i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
+- + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
+-
+- /* dev_dbg() can't be used, because adapter is not yet registered */
+-#ifdef CONFIG_I2C_DEBUG_BUS
+- dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
+- __func__, i2c_clk_rate, div);
+- dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
+- __func__, i2c_clk_div[i].val, i2c_clk_div[i].div);
+-#endif
+-}
+-
+ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
+ {
+ struct imx_i2c_struct *i2c_imx = dev_id;
+@@ -600,7 +610,6 @@
+ struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ int irq, ret;
+- u32 bitrate;
+
+ dev_dbg(&pdev->dev, "<%s>\n", __func__);
+
+@@ -664,12 +673,12 @@
+ i2c_set_adapdata(&i2c_imx->adapter, i2c_imx);
+
+ /* Set up clock divider */
+- bitrate = IMX_I2C_BIT_RATE;
++ i2c_imx->bitrate = IMX_I2C_BIT_RATE;
+ ret = of_property_read_u32(pdev->dev.of_node,
+- "clock-frequency", &bitrate);
++ "clock-frequency", &i2c_imx->bitrate);
+ if (ret < 0 && pdata && pdata->bitrate)
+- bitrate = pdata->bitrate;
+- i2c_imx_set_clk(i2c_imx, bitrate);
++ i2c_imx->bitrate = pdata->bitrate;
++ i2c_imx_set_clk(i2c_imx);
+
+ /* Set up chip registers to defaults */
+ imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
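The reworked i2c_imx_set_clk() picks the smallest divider from the hwdata table that is at least clk_rate / bitrate (rounded up), clamping to the table ends. A standalone sketch of that selection logic is shown below; the divider table values are invented for illustration and do not come from the real i.MX hwdata.

/*
 * Sketch of the divider selection done in i2c_imx_set_clk() above:
 * pick the smallest table entry whose divider is at least
 * clk_rate / bitrate. The table below is made up for illustration.
 */
#include <stdio.h>

struct clk_pair { unsigned int div; unsigned int val; };

static const struct clk_pair clk_div[] = {
	{ 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 },
	{ 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 }, { 640, 0x3f },
};

static unsigned int pick_ifdr(unsigned int clk_rate, unsigned int bitrate)
{
	unsigned int ndivs = sizeof(clk_div) / sizeof(clk_div[0]);
	unsigned int div = (clk_rate + bitrate - 1) / bitrate; /* round up */
	unsigned int i;

	if (div < clk_div[0].div)
		i = 0;
	else if (div > clk_div[ndivs - 1].div)
		i = ndivs - 1;
	else
		for (i = 0; clk_div[i].div < div; i++)
			;
	return clk_div[i].val;
}

int main(void)
{
	/* 66 MHz input clock, 100 kHz bus -> div 660, clamps to last entry. */
	printf("IFDR=0x%x\n", pick_ifdr(66000000, 100000));
	return 0;
}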
+diff -Nur linux-3.14.40.orig/drivers/input/keyboard/gpio_keys.c linux-3.14.40/drivers/input/keyboard/gpio_keys.c
+--- linux-3.14.40.orig/drivers/input/keyboard/gpio_keys.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/input/keyboard/gpio_keys.c 2015-05-01 14:57:59.099427001 -0500
+@@ -3,6 +3,7 @@
+ *
+ * Copyright 2005 Phil Blundell
+ * Copyright 2010, 2011 David Jander <david@protonic.nl>
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -473,6 +474,8 @@
+
+ isr = gpio_keys_gpio_isr;
+ irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
++ if (bdata->button->wakeup)
++ irqflags |= IRQF_NO_SUSPEND;
+
+ } else {
+ if (!button->irq) {
+diff -Nur linux-3.14.40.orig/drivers/input/keyboard/imx_keypad.c linux-3.14.40/drivers/input/keyboard/imx_keypad.c
+--- linux-3.14.40.orig/drivers/input/keyboard/imx_keypad.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/input/keyboard/imx_keypad.c 2015-05-01 14:57:59.099427001 -0500
+@@ -1,6 +1,7 @@
+ /*
+ * Driver for the IMX keypad port.
+ * Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -548,6 +549,8 @@
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(kbd->irq);
++ else
++ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+ }
+@@ -561,6 +564,8 @@
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(kbd->irq);
++ else
++ pinctrl_pm_select_default_state(dev);
+
+ mutex_lock(&input_dev->mutex);
+
+diff -Nur linux-3.14.40.orig/drivers/input/misc/mma8450.c linux-3.14.40/drivers/input/misc/mma8450.c
+--- linux-3.14.40.orig/drivers/input/misc/mma8450.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/input/misc/mma8450.c 2015-05-01 14:57:59.107427001 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * Driver for Freescale's 3-Axis Accelerometer MMA8450
+ *
+- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -25,6 +25,7 @@
+ #include <linux/i2c.h>
+ #include <linux/input-polldev.h>
+ #include <linux/of_device.h>
++#include <linux/mutex.h>
+
+ #define MMA8450_DRV_NAME "mma8450"
+
+@@ -51,11 +52,22 @@
+
+ #define MMA8450_CTRL_REG1 0x38
+ #define MMA8450_CTRL_REG2 0x39
++#define MMA8450_ID 0xC6
++#define MMA8450_WHO_AM_I 0x0F
++
++enum {
++ MODE_STANDBY = 0,
++ MODE_2G,
++ MODE_4G,
++ MODE_8G,
++};
+
+ /* mma8450 status */
+ struct mma8450 {
+ struct i2c_client *client;
+ struct input_polled_dev *idev;
++ struct mutex mma8450_lock;
++ u8 mode;
+ };
+
+ static int mma8450_read(struct mma8450 *m, unsigned off)
+@@ -112,16 +124,19 @@
+ int ret;
+ u8 buf[6];
+
+- ret = mma8450_read(m, MMA8450_STATUS);
+- if (ret < 0)
+- return;
++ mutex_lock(&m->mma8450_lock);
+
+- if (!(ret & MMA8450_STATUS_ZXYDR))
++ ret = mma8450_read(m, MMA8450_STATUS);
++ if (ret < 0 || !(ret & MMA8450_STATUS_ZXYDR)) {
++ mutex_unlock(&m->mma8450_lock);
+ return;
++ }
+
+ ret = mma8450_read_block(m, MMA8450_OUT_X_LSB, buf, sizeof(buf));
+- if (ret < 0)
++ if (ret < 0) {
++ mutex_unlock(&m->mma8450_lock);
+ return;
++ }
+
+ x = ((int)(s8)buf[1] << 4) | (buf[0] & 0xf);
+ y = ((int)(s8)buf[3] << 4) | (buf[2] & 0xf);
+@@ -131,10 +146,12 @@
+ input_report_abs(dev->input, ABS_Y, y);
+ input_report_abs(dev->input, ABS_Z, z);
+ input_sync(dev->input);
++
++ mutex_unlock(&m->mma8450_lock);
+ }
+
+ /* Initialize the MMA8450 chip */
+-static void mma8450_open(struct input_polled_dev *dev)
++static s32 mma8450_open(struct input_polled_dev *dev)
+ {
+ struct mma8450 *m = dev->private;
+ int err;
+@@ -142,18 +159,20 @@
+ /* enable all events from X/Y/Z, no FIFO */
+ err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
+ if (err)
+- return;
++ return err;
+
+ /*
+ * Sleep mode poll rate - 50Hz
+ * System output data rate - 400Hz
+- * Full scale selection - Active, +/- 2G
++ * Standby mode
+ */
+- err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
+- if (err < 0)
+- return;
+-
++ err = mma8450_write(m, MMA8450_CTRL_REG1, MODE_STANDBY);
++ if (err)
++ return err;
++ m->mode = MODE_STANDBY;
+ msleep(MODE_CHANGE_DELAY_MS);
++
++ return 0;
+ }
+
+ static void mma8450_close(struct input_polled_dev *dev)
+@@ -164,6 +183,76 @@
+ mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
+ }
+
++static ssize_t mma8450_scalemode_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ int mode = 0;
++ struct mma8450 *m;
++ struct i2c_client *client = to_i2c_client(dev);
++
++ m = i2c_get_clientdata(client);
++
++ mutex_lock(&m->mma8450_lock);
++ mode = (int)m->mode;
++ mutex_unlock(&m->mma8450_lock);
++
++ return sprintf(buf, "%d\n", mode);
++}
++
++static ssize_t mma8450_scalemode_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long mode;
++ int ret;
++ struct mma8450 *m = NULL;
++ struct i2c_client *client = to_i2c_client(dev);
++
++ ret = strict_strtoul(buf, 10, &mode);
++ if (ret) {
++ dev_err(dev, "string transform error\n");
++ return ret;
++ }
++
++ if (mode > MODE_8G) {
++		dev_warn(dev, "unsupported mode %d\n", (int)mode);
++ return count;
++ }
++
++ m = i2c_get_clientdata(client);
++
++ mutex_lock(&m->mma8450_lock);
++ if (mode == m->mode) {
++ mutex_unlock(&m->mma8450_lock);
++ return count;
++ }
++
++ ret = mma8450_write(m, MMA8450_CTRL_REG1, mode);
++ if (ret < 0) {
++ mutex_unlock(&m->mma8450_lock);
++ return ret;
++ }
++
++ msleep(MODE_CHANGE_DELAY_MS);
++ m->mode = (u8)mode;
++ mutex_unlock(&m->mma8450_lock);
++
++ return count;
++}
++
++static DEVICE_ATTR(scalemode, S_IWUSR | S_IRUGO,
++ mma8450_scalemode_show, mma8450_scalemode_store);
++
++static struct attribute *mma8450_attributes[] = {
++ &dev_attr_scalemode.attr,
++ NULL
++};
++
++static const struct attribute_group mma8450_attr_group = {
++ .attrs = mma8450_attributes,
++};
++
+ /*
+ * I2C init/probing/exit functions
+ */
+@@ -172,7 +261,25 @@
+ {
+ struct input_polled_dev *idev;
+ struct mma8450 *m;
+- int err;
++ int err, client_id;
++ struct i2c_adapter *adapter = NULL;
++
++ adapter = to_i2c_adapter(c->dev.parent);
++ err = i2c_check_functionality(adapter,
++ I2C_FUNC_SMBUS_BYTE |
++ I2C_FUNC_SMBUS_BYTE_DATA);
++ if (!err)
++ goto err_out;
++
++ client_id = i2c_smbus_read_byte_data(c, MMA8450_WHO_AM_I);
++
++ if (MMA8450_ID != client_id) {
++ dev_err(&c->dev,
++ "read chip ID 0x%x is not equal to 0x%x!\n", client_id,
++ MMA8450_ID);
++ err = -EINVAL;
++ goto err_out;
++ }
+
+ m = kzalloc(sizeof(struct mma8450), GFP_KERNEL);
+ idev = input_allocate_polled_device();
+@@ -183,6 +290,7 @@
+
+ m->client = c;
+ m->idev = idev;
++ i2c_set_clientdata(c, m);
+
+ idev->private = m;
+ idev->input->name = MMA8450_DRV_NAME;
+@@ -190,8 +298,6 @@
+ idev->poll = mma8450_poll;
+ idev->poll_interval = POLL_INTERVAL;
+ idev->poll_interval_max = POLL_INTERVAL_MAX;
+- idev->open = mma8450_open;
+- idev->close = mma8450_close;
+
+ __set_bit(EV_ABS, idev->input->evbit);
+ input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
+@@ -206,11 +312,32 @@
+
+ i2c_set_clientdata(c, m);
+
++ mutex_init(&m->mma8450_lock);
++
++ err = mma8450_open(idev);
++ if (err) {
++ dev_err(&c->dev, "failed to initialize mma8450\n");
++ goto err_unreg_dev;
++ }
++
++ err = sysfs_create_group(&c->dev.kobj, &mma8450_attr_group);
++ if (err) {
++ dev_err(&c->dev, "create device file failed!\n");
++ err = -EINVAL;
++ goto err_close;
++ }
++
+ return 0;
+
++err_close:
++ mma8450_close(idev);
++err_unreg_dev:
++ mutex_destroy(&m->mma8450_lock);
++ input_unregister_polled_device(idev);
+ err_free_mem:
+ input_free_polled_device(idev);
+ kfree(m);
++err_out:
+ return err;
+ }
+
+@@ -219,6 +346,9 @@
+ struct mma8450 *m = i2c_get_clientdata(c);
+ struct input_polled_dev *idev = m->idev;
+
++ sysfs_remove_group(&c->dev.kobj, &mma8450_attr_group);
++ mma8450_close(idev);
++ mutex_destroy(&m->mma8450_lock);
+ input_unregister_polled_device(idev);
+ input_free_polled_device(idev);
+ kfree(m);
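mma8450_poll() above rebuilds each 12-bit sample by sign-extending the MSB register and OR-ing in the low nibble of the LSB register. A small standalone sketch of that reconstruction follows; the register values used are invented test input.

/*
 * Sketch of the 12-bit sample reconstruction used in mma8450_poll():
 * sign-extend the MSB and combine it with the low nibble of the LSB.
 */
#include <stdio.h>
#include <stdint.h>

static int mma8450_sample(uint8_t lsb, uint8_t msb)
{
	return ((int)(int8_t)msb << 4) | (lsb & 0xf);
}

int main(void)
{
	/* 0xF8/0x03 -> sign-extended negative reading, 0x12/0x05 -> positive. */
	printf("%d\n", mma8450_sample(0x03, 0xF8));	/* -125 */
	printf("%d\n", mma8450_sample(0x05, 0x12));	/* 293 */
	return 0;
}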
+diff -Nur linux-3.14.40.orig/drivers/input/sparse-keymap.c linux-3.14.40/drivers/input/sparse-keymap.c
+--- linux-3.14.40.orig/drivers/input/sparse-keymap.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/input/sparse-keymap.c 2015-05-01 14:57:59.115427001 -0500
+@@ -236,7 +236,7 @@
+ * in an input device that was set up by sparse_keymap_setup().
+ * NOTE: It is safe to cal this function while input device is
+ * still registered (however the drivers should care not to try to
+- * use freed keymap and thus have to shut off interrups/polling
++ * use freed keymap and thus have to shut off interrupts/polling
+ * before freeing the keymap).
+ */
+ void sparse_keymap_free(struct input_dev *dev)
+diff -Nur linux-3.14.40.orig/drivers/Kconfig linux-3.14.40/drivers/Kconfig
+--- linux-3.14.40.orig/drivers/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/Kconfig 2015-05-01 14:57:59.147427001 -0500
+@@ -96,6 +96,8 @@
+
+ source "drivers/memstick/Kconfig"
+
++source "drivers/mxc/Kconfig"
++
+ source "drivers/leds/Kconfig"
+
+ source "drivers/accessibility/Kconfig"
+diff -Nur linux-3.14.40.orig/drivers/leds/leds-gpio.c linux-3.14.40/drivers/leds/leds-gpio.c
+--- linux-3.14.40.orig/drivers/leds/leds-gpio.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/leds/leds-gpio.c 2015-05-01 14:57:59.163427001 -0500
+@@ -3,7 +3,7 @@
+ *
+ * Copyright (C) 2007 8D Technologies inc.
+ * Raphael Assenat <raph@8d.com>
+- * Copyright (C) 2008 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008, 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -203,6 +203,8 @@
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
++ if (of_get_property(child, "retain-state-suspended", NULL))
++ led.retain_state_suspended = 1;
+
+ ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
+ &pdev->dev, NULL);
+diff -Nur linux-3.14.40.orig/drivers/leds/leds-pwm.c linux-3.14.40/drivers/leds/leds-pwm.c
+--- linux-3.14.40.orig/drivers/leds/leds-pwm.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/leds/leds-pwm.c 2015-05-01 14:57:59.167427001 -0500
+@@ -70,6 +70,10 @@
+
+ duty *= brightness;
+ do_div(duty, max);
++
++ if (led_dat->active_low)
++ duty = led_dat->period - duty;
++
+ led_dat->duty = duty;
+
+ if (led_dat->can_sleep)
+@@ -93,55 +97,75 @@
+ }
+ }
+
+-static int led_pwm_create_of(struct platform_device *pdev,
+- struct led_pwm_priv *priv)
++static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
++ struct led_pwm *led, struct device_node *child)
+ {
+- struct device_node *child;
++ struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
+ int ret;
+
+- for_each_child_of_node(pdev->dev.of_node, child) {
+- struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
++ led_data->active_low = led->active_low;
++ led_data->period = led->pwm_period_ns;
++ led_data->cdev.name = led->name;
++ led_data->cdev.default_trigger = led->default_trigger;
++ led_data->cdev.brightness_set = led_pwm_set;
++ led_data->cdev.brightness = LED_OFF;
++ led_data->cdev.max_brightness = led->max_brightness;
++ led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
++
++ if (child)
++ led_data->pwm = devm_of_pwm_get(dev, child, NULL);
++ else
++ led_data->pwm = devm_pwm_get(dev, led->name);
++ if (IS_ERR(led_data->pwm)) {
++ ret = PTR_ERR(led_data->pwm);
++ dev_err(dev, "unable to request PWM for %s: %d\n",
++ led->name, ret);
++ return ret;
++ }
+
+- led_dat->cdev.name = of_get_property(child, "label",
+- NULL) ? : child->name;
++ if (child)
++ led_data->period = pwm_get_period(led_data->pwm);
+
+- led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
+- if (IS_ERR(led_dat->pwm)) {
+- dev_err(&pdev->dev, "unable to request PWM for %s\n",
+- led_dat->cdev.name);
+- ret = PTR_ERR(led_dat->pwm);
+- goto err;
+- }
+- /* Get the period from PWM core when n*/
+- led_dat->period = pwm_get_period(led_dat->pwm);
++ led_data->can_sleep = pwm_can_sleep(led_data->pwm);
++ if (led_data->can_sleep)
++ INIT_WORK(&led_data->work, led_pwm_work);
+
+- led_dat->cdev.default_trigger = of_get_property(child,
++ ret = led_classdev_register(dev, &led_data->cdev);
++ if (ret == 0) {
++ priv->num_leds++;
++ } else {
++ dev_err(dev, "failed to register PWM led for %s: %d\n",
++ led->name, ret);
++ }
++
++ return ret;
++}
++
++static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv)
++{
++ struct device_node *child;
++ struct led_pwm led;
++ int ret = 0;
++
++ memset(&led, 0, sizeof(led));
++
++ for_each_child_of_node(dev->of_node, child) {
++ led.name = of_get_property(child, "label", NULL) ? :
++ child->name;
++
++ led.default_trigger = of_get_property(child,
+ "linux,default-trigger", NULL);
++ led.active_low = of_property_read_bool(child, "active-low");
+ of_property_read_u32(child, "max-brightness",
+- &led_dat->cdev.max_brightness);
++ &led.max_brightness);
+
+- led_dat->cdev.brightness_set = led_pwm_set;
+- led_dat->cdev.brightness = LED_OFF;
+- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+-
+- led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
+- if (led_dat->can_sleep)
+- INIT_WORK(&led_dat->work, led_pwm_work);
+-
+- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+- if (ret < 0) {
+- dev_err(&pdev->dev, "failed to register for %s\n",
+- led_dat->cdev.name);
++ ret = led_pwm_add(dev, priv, &led, child);
++ if (ret) {
+ of_node_put(child);
+- goto err;
++ break;
+ }
+- priv->num_leds++;
+ }
+
+- return 0;
+-err:
+- led_pwm_cleanup(priv);
+-
+ return ret;
+ }
+
+@@ -167,51 +191,23 @@
+
+ if (pdata) {
+ for (i = 0; i < count; i++) {
+- struct led_pwm *cur_led = &pdata->leds[i];
+- struct led_pwm_data *led_dat = &priv->leds[i];
+-
+- led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
+- if (IS_ERR(led_dat->pwm)) {
+- ret = PTR_ERR(led_dat->pwm);
+- dev_err(&pdev->dev,
+- "unable to request PWM for %s\n",
+- cur_led->name);
+- goto err;
+- }
+-
+- led_dat->cdev.name = cur_led->name;
+- led_dat->cdev.default_trigger = cur_led->default_trigger;
+- led_dat->active_low = cur_led->active_low;
+- led_dat->period = cur_led->pwm_period_ns;
+- led_dat->cdev.brightness_set = led_pwm_set;
+- led_dat->cdev.brightness = LED_OFF;
+- led_dat->cdev.max_brightness = cur_led->max_brightness;
+- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+-
+- led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
+- if (led_dat->can_sleep)
+- INIT_WORK(&led_dat->work, led_pwm_work);
+-
+- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+- if (ret < 0)
+- goto err;
++ ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i],
++ NULL);
++ if (ret)
++ break;
+ }
+- priv->num_leds = count;
+ } else {
+- ret = led_pwm_create_of(pdev, priv);
+- if (ret)
+- return ret;
++ ret = led_pwm_create_of(&pdev->dev, priv);
++ }
++
++ if (ret) {
++ led_pwm_cleanup(priv);
++ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+-
+-err:
+- priv->num_leds = i;
+- led_pwm_cleanup(priv);
+-
+- return ret;
+ }
+
+ static int led_pwm_remove(struct platform_device *pdev)
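led_pwm_set() now inverts the duty cycle for active-low LEDs (duty = period - duty). A userspace sketch of the same computation is shown below; the brightness, range and period numbers are arbitrary example inputs.

/*
 * Sketch of the duty-cycle computation in led_pwm_set() above,
 * including the new active-low inversion.
 */
#include <stdio.h>

static unsigned long long pwm_duty(unsigned int brightness, unsigned int max,
				   unsigned long long period, int active_low)
{
	unsigned long long duty = period;

	duty *= brightness;
	duty /= max;			/* userspace stand-in for do_div() */

	if (active_low)
		duty = period - duty;	/* invert for active-low wiring */

	return duty;
}

int main(void)
{
	/* 64/255 brightness of a 1,000,000 ns period. */
	printf("active-high duty: %llu ns\n", pwm_duty(64, 255, 1000000, 0));
	printf("active-low duty:  %llu ns\n", pwm_duty(64, 255, 1000000, 1));
	return 0;
}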
+diff -Nur linux-3.14.40.orig/drivers/mailbox/mailbox.c linux-3.14.40/drivers/mailbox/mailbox.c
+--- linux-3.14.40.orig/drivers/mailbox/mailbox.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mailbox/mailbox.c 2015-05-01 14:57:59.167427001 -0500
+@@ -0,0 +1,488 @@
++/*
++ * Mailbox: Common code for Mailbox controllers and users
++ *
++ * Copyright (C) 2014 Linaro Ltd.
++ * Author: Jassi Brar <jassisinghbrar@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/mailbox_client.h>
++#include <linux/mailbox_controller.h>
++
++#define TXDONE_BY_IRQ (1 << 0) /* controller has remote RTR irq */
++#define TXDONE_BY_POLL (1 << 1) /* controller can read status of last TX */
++#define TXDONE_BY_ACK	(1 << 2) /* S/W ACK received by Client ticks the TX */
++
++static LIST_HEAD(mbox_cons);
++static DEFINE_MUTEX(con_mutex);
++
++static int _add_to_rbuf(struct mbox_chan *chan, void *mssg)
++{
++ int idx;
++ unsigned long flags;
++
++ spin_lock_irqsave(&chan->lock, flags);
++
++ /* See if there is any space left */
++ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
++ spin_unlock_irqrestore(&chan->lock, flags);
++ return -ENOMEM;
++ }
++
++ idx = chan->msg_free;
++ chan->msg_data[idx] = mssg;
++ chan->msg_count++;
++
++ if (idx == MBOX_TX_QUEUE_LEN - 1)
++ chan->msg_free = 0;
++ else
++ chan->msg_free++;
++
++ spin_unlock_irqrestore(&chan->lock, flags);
++
++ return idx;
++}
++
++static void _msg_submit(struct mbox_chan *chan)
++{
++ unsigned count, idx;
++ unsigned long flags;
++ void *data;
++ int err;
++
++ spin_lock_irqsave(&chan->lock, flags);
++
++ if (!chan->msg_count || chan->active_req) {
++ spin_unlock_irqrestore(&chan->lock, flags);
++ return;
++ }
++
++ count = chan->msg_count;
++ idx = chan->msg_free;
++ if (idx >= count)
++ idx -= count;
++ else
++ idx += MBOX_TX_QUEUE_LEN - count;
++
++ data = chan->msg_data[idx];
++
++ /* Try to submit a message to the MBOX controller */
++ err = chan->mbox->ops->send_data(chan, data);
++ if (!err) {
++ chan->active_req = data;
++ chan->msg_count--;
++ }
++
++ spin_unlock_irqrestore(&chan->lock, flags);
++}
++
++static void tx_tick(struct mbox_chan *chan, int r)
++{
++ unsigned long flags;
++ void *mssg;
++
++ spin_lock_irqsave(&chan->lock, flags);
++ mssg = chan->active_req;
++ chan->active_req = NULL;
++ spin_unlock_irqrestore(&chan->lock, flags);
++
++ /* Submit next message */
++ _msg_submit(chan);
++
++ /* Notify the client */
++ if (chan->cl->tx_block)
++ complete(&chan->tx_complete);
++ else if (mssg && chan->cl->tx_done)
++ chan->cl->tx_done(chan->cl, mssg, r);
++}
++
++static void poll_txdone(unsigned long data)
++{
++ struct mbox_controller *mbox = (struct mbox_controller *)data;
++ bool txdone, resched = false;
++ int i;
++
++ for (i = 0; i < mbox->num_chans; i++) {
++ struct mbox_chan *chan = &mbox->chans[i];
++
++ if (chan->active_req && chan->cl) {
++ resched = true;
++ txdone = chan->mbox->ops->last_tx_done(chan);
++ if (txdone)
++ tx_tick(chan, 0);
++ }
++ }
++
++ if (resched)
++ mod_timer(&mbox->poll,
++ jiffies + msecs_to_jiffies(mbox->period));
++}
++
++/**
++ * mbox_chan_received_data - A way for controller driver to push data
++ * received from remote to the upper layer.
++ * @chan: Pointer to the mailbox channel on which RX happened.
++ * @mssg: Client specific message typecasted as void *
++ *
++ * After startup and before shutdown any data received on the chan
++ * is passed on to the API via atomic mbox_chan_received_data().
++ * The controller should ACK the RX only after this call returns.
++ */
++void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
++{
++ /* No buffering the received data */
++ if (chan->cl->rx_callback)
++ chan->cl->rx_callback(chan->cl, mssg);
++}
++EXPORT_SYMBOL_GPL(mbox_chan_received_data);
++
++/**
++ * mbox_chan_txdone - A way for controller driver to notify the
++ * framework that the last TX has completed.
++ * @chan: Pointer to the mailbox chan on which TX happened.
++ * @r: Status of last TX - OK or ERROR
++ *
++ * The controller that has IRQ for TX ACK calls this atomic API
++ * to tick the TX state machine. It works only if txdone_irq
++ * is set by the controller.
++ */
++void mbox_chan_txdone(struct mbox_chan *chan, int r)
++{
++ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
++ pr_err("Controller can't run the TX ticker\n");
++ return;
++ }
++
++ tx_tick(chan, r);
++}
++EXPORT_SYMBOL_GPL(mbox_chan_txdone);
++
++/**
++ * mbox_client_txdone - The way for a client to run the TX state machine.
++ * @chan: Mailbox channel assigned to this client.
++ * @r: Success status of last transmission.
++ *
++ * The client/protocol had received some 'ACK' packet and it notifies
++ * the API that the last packet was sent successfully. This only works
++ * if the controller can't sense TX-Done.
++ */
++void mbox_client_txdone(struct mbox_chan *chan, int r)
++{
++ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
++ pr_err("Client can't run the TX ticker\n");
++ return;
++ }
++
++ tx_tick(chan, r);
++}
++EXPORT_SYMBOL_GPL(mbox_client_txdone);
++
++/**
++ * mbox_client_peek_data - A way for client driver to pull data
++ * received from remote by the controller.
++ * @chan: Mailbox channel assigned to this client.
++ *
++ * A poke to controller driver for any received data.
++ * The data is actually passed onto client via the
++ * mbox_chan_received_data()
++ * The call can be made from atomic context, so the controller's
++ * implementation of peek_data() must not sleep.
++ *
++ * Return: True, if controller has, and is going to push after this,
++ * some data.
++ * False, if controller doesn't have any data to be read.
++ */
++bool mbox_client_peek_data(struct mbox_chan *chan)
++{
++ if (chan->mbox->ops->peek_data)
++ return chan->mbox->ops->peek_data(chan);
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(mbox_client_peek_data);
++
++/**
++ * mbox_send_message - For client to submit a message to be
++ * sent to the remote.
++ * @chan: Mailbox channel assigned to this client.
++ * @mssg: Client specific message typecasted.
++ *
++ * For client to submit data to the controller destined for a remote
++ * processor. If the client had set 'tx_block', the call will return
++ * either when the remote receives the data or when 'tx_tout' millisecs
++ * run out.
++ * In non-blocking mode, the requests are buffered by the API and a
++ * non-negative token is returned for each queued request. If the request
++ * is not queued, a negative token is returned. Upon failure or successful
++ * TX, the API calls 'tx_done' from atomic context, from which the client
++ * could submit yet another request.
++ * In blocking mode, 'tx_done' is not called, effectively making the
++ * queue length 1.
++ * The pointer to message should be preserved until it is sent
++ * over the chan, i.e, tx_done() is made.
++ * This function could be called from atomic context as it simply
++ * queues the data and returns a token against the request.
++ *
++ * Return: Non-negative integer for successful submission (non-blocking mode)
++ * or transmission over chan (blocking mode).
++ * Negative value denotes failure.
++ */
++int mbox_send_message(struct mbox_chan *chan, void *mssg)
++{
++ int t;
++
++ if (!chan || !chan->cl)
++ return -EINVAL;
++
++ t = _add_to_rbuf(chan, mssg);
++ if (t < 0) {
++ pr_err("Try increasing MBOX_TX_QUEUE_LEN\n");
++ return t;
++ }
++
++ _msg_submit(chan);
++
++ reinit_completion(&chan->tx_complete);
++
++ if (chan->txdone_method == TXDONE_BY_POLL)
++ poll_txdone((unsigned long)chan->mbox);
++
++ if (chan->cl->tx_block && chan->active_req) {
++ unsigned long wait;
++ int ret;
++
++ if (!chan->cl->tx_tout) /* wait for ever */
++ wait = msecs_to_jiffies(3600000);
++ else
++ wait = msecs_to_jiffies(chan->cl->tx_tout);
++
++ ret = wait_for_completion_timeout(&chan->tx_complete, wait);
++ if (ret == 0) {
++ t = -EIO;
++ tx_tick(chan, -EIO);
++ }
++ }
++
++ return t;
++}
++EXPORT_SYMBOL_GPL(mbox_send_message);
++
++/**
++ * mbox_request_channel - Request a mailbox channel.
++ * @cl: Identity of the client requesting the channel.
++ *
++ * The Client specifies its requirements and capabilities while asking for
++ * a mailbox channel. It can't be called from atomic context.
++ * The channel is exclusively allocated and can't be used by another
++ * client before the owner calls mbox_free_channel.
++ * After assignment, any packet received on this channel will be
++ * handed over to the client via the 'rx_callback'.
++ * The framework holds reference to the client, so the mbox_client
++ * structure shouldn't be modified until the mbox_free_channel returns.
++ *
++ * Return: Pointer to the channel assigned to the client if successful.
++ * ERR_PTR for request failure.
++ */
++struct mbox_chan *mbox_request_channel(struct mbox_client *cl)
++{
++ struct device *dev = cl->dev;
++ struct mbox_controller *mbox;
++ struct of_phandle_args spec;
++ struct mbox_chan *chan;
++ unsigned long flags;
++ int count, i, ret;
++
++ if (!dev || !dev->of_node) {
++ pr_err("%s: No owner device node\n", __func__);
++ return ERR_PTR(-ENODEV);
++ }
++
++ count = of_property_count_strings(dev->of_node, "mbox-names");
++ if (count < 0) {
++ pr_err("%s: mbox-names property of node '%s' missing\n",
++ __func__, dev->of_node->full_name);
++ return ERR_PTR(-ENODEV);
++ }
++
++ mutex_lock(&con_mutex);
++
++ ret = -ENODEV;
++ for (i = 0; i < count; i++) {
++ const char *s;
++
++ if (of_property_read_string_index(dev->of_node,
++ "mbox-names", i, &s))
++ continue;
++
++ if (strcmp(cl->chan_name, s))
++ continue;
++
++ if (of_parse_phandle_with_args(dev->of_node,
++ "mbox", "#mbox-cells", i, &spec))
++ continue;
++
++ chan = NULL;
++ list_for_each_entry(mbox, &mbox_cons, node)
++ if (mbox->dev->of_node == spec.np) {
++ chan = mbox->of_xlate(mbox, &spec);
++ break;
++ }
++
++ of_node_put(spec.np);
++
++ if (!chan)
++ continue;
++
++ ret = -EBUSY;
++ if (!chan->cl && try_module_get(mbox->dev->driver->owner))
++ break;
++ }
++
++ if (i == count) {
++ mutex_unlock(&con_mutex);
++ return ERR_PTR(ret);
++ }
++
++ spin_lock_irqsave(&chan->lock, flags);
++ chan->msg_free = 0;
++ chan->msg_count = 0;
++ chan->active_req = NULL;
++ chan->cl = cl;
++ init_completion(&chan->tx_complete);
++
++ if (chan->txdone_method == TXDONE_BY_POLL
++ && cl->knows_txdone)
++ chan->txdone_method |= TXDONE_BY_ACK;
++ spin_unlock_irqrestore(&chan->lock, flags);
++
++ ret = chan->mbox->ops->startup(chan);
++ if (ret) {
++ pr_err("Unable to startup the chan (%d)\n", ret);
++ mbox_free_channel(chan);
++ chan = ERR_PTR(ret);
++ }
++
++ mutex_unlock(&con_mutex);
++ return chan;
++}
++EXPORT_SYMBOL_GPL(mbox_request_channel);
++
++/**
++ * mbox_free_channel - The client relinquishes control of a mailbox
++ * channel by this call.
++ * @chan: The mailbox channel to be freed.
++ */
++void mbox_free_channel(struct mbox_chan *chan)
++{
++ unsigned long flags;
++
++ if (!chan || !chan->cl)
++ return;
++
++ chan->mbox->ops->shutdown(chan);
++
++ /* The queued TX requests are simply aborted, no callbacks are made */
++ spin_lock_irqsave(&chan->lock, flags);
++ chan->cl = NULL;
++ chan->active_req = NULL;
++ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
++ chan->txdone_method = TXDONE_BY_POLL;
++
++ module_put(chan->mbox->dev->driver->owner);
++ spin_unlock_irqrestore(&chan->lock, flags);
++}
++EXPORT_SYMBOL_GPL(mbox_free_channel);
++
++static struct mbox_chan *
++of_mbox_index_xlate(struct mbox_controller *mbox,
++ const struct of_phandle_args *sp)
++{
++ int ind = sp->args[0];
++
++ if (ind >= mbox->num_chans)
++ return NULL;
++
++ return &mbox->chans[ind];
++}
++
++/**
++ * mbox_controller_register - Register the mailbox controller
++ * @mbox: Pointer to the mailbox controller.
++ *
++ * The controller driver registers its communication chans
++ */
++int mbox_controller_register(struct mbox_controller *mbox)
++{
++ int i, txdone;
++
++ /* Sanity check */
++ if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
++ return -EINVAL;
++
++ if (mbox->txdone_irq)
++ txdone = TXDONE_BY_IRQ;
++ else if (mbox->txdone_poll)
++ txdone = TXDONE_BY_POLL;
++ else /* It has to be ACK then */
++ txdone = TXDONE_BY_ACK;
++
++ if (txdone == TXDONE_BY_POLL) {
++ mbox->poll.function = &poll_txdone;
++ mbox->poll.data = (unsigned long)mbox;
++ init_timer(&mbox->poll);
++ }
++
++ for (i = 0; i < mbox->num_chans; i++) {
++ struct mbox_chan *chan = &mbox->chans[i];
++ chan->cl = NULL;
++ chan->mbox = mbox;
++ chan->txdone_method = txdone;
++ spin_lock_init(&chan->lock);
++ }
++
++ if (!mbox->of_xlate)
++ mbox->of_xlate = of_mbox_index_xlate;
++
++ mutex_lock(&con_mutex);
++ list_add_tail(&mbox->node, &mbox_cons);
++ mutex_unlock(&con_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mbox_controller_register);
++
++/**
++ * mbox_controller_unregister - Unregister the mailbox controller
++ * @mbox: Pointer to the mailbox controller.
++ */
++void mbox_controller_unregister(struct mbox_controller *mbox)
++{
++ int i;
++
++ if (!mbox)
++ return;
++
++ mutex_lock(&con_mutex);
++
++ list_del(&mbox->node);
++
++ for (i = 0; i < mbox->num_chans; i++)
++ mbox_free_channel(&mbox->chans[i]);
++
++ if (mbox->txdone_poll)
++ del_timer_sync(&mbox->poll);
++
++ mutex_unlock(&con_mutex);
++}
++EXPORT_SYMBOL_GPL(mbox_controller_unregister);
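A minimal client-side sketch of the mailbox API added above follows. It assumes a platform device whose DT node carries "mbox" and "mbox-names" properties; the channel name "tx_chan", the compatible string and the message payload are invented for illustration, only the mbox_* calls come from the framework above.

/*
 * Hypothetical mailbox client. Only mbox_request_channel(),
 * mbox_send_message() and mbox_free_channel() come from the framework
 * above; the names, compatible string and payload are made up.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>

static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	dev_info(cl->dev, "remote replied: 0x%x\n", *(u32 *)mssg);
}

static int demo_probe(struct platform_device *pdev)
{
	static u32 request = 0x1234;	/* must stay valid until TX is done */
	struct mbox_client *cl;
	struct mbox_chan *chan;
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = &pdev->dev;
	cl->chan_name = "tx_chan";	/* must match an "mbox-names" entry */
	cl->rx_callback = demo_rx_callback;
	cl->tx_block = true;		/* sleep until the remote accepts it */
	cl->tx_tout = 500;		/* timeout in ms */

	chan = mbox_request_channel(cl);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &request);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "example,mbox-demo" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name		= "mbox-demo",
		.of_match_table	= demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");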
+diff -Nur linux-3.14.40.orig/drivers/mailbox/Makefile linux-3.14.40/drivers/mailbox/Makefile
+--- linux-3.14.40.orig/drivers/mailbox/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mailbox/Makefile 2015-05-01 14:57:59.179427001 -0500
+@@ -1,3 +1,7 @@
++# Generic MAILBOX API
++
++obj-$(CONFIG_MAILBOX) += mailbox.o
++
+ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
+
+ obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o
+diff -Nur linux-3.14.40.orig/drivers/mailbox/pl320-ipc.c linux-3.14.40/drivers/mailbox/pl320-ipc.c
+--- linux-3.14.40.orig/drivers/mailbox/pl320-ipc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mailbox/pl320-ipc.c 2015-05-01 14:57:59.179427001 -0500
+@@ -26,7 +26,7 @@
+ #include <linux/device.h>
+ #include <linux/amba/bus.h>
+
+-#include <linux/mailbox.h>
++#include <linux/pl320-ipc.h>
+
+ #define IPCMxSOURCE(m) ((m) * 0x40)
+ #define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+diff -Nur linux-3.14.40.orig/drivers/Makefile linux-3.14.40/drivers/Makefile
+--- linux-3.14.40.orig/drivers/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/Makefile 2015-05-01 14:57:59.203427001 -0500
+@@ -111,6 +111,7 @@
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+ obj-$(CONFIG_CPU_IDLE) += cpuidle/
+ obj-y += mmc/
++obj-$(CONFIG_ARCH_MXC) += mxc/
+ obj-$(CONFIG_MEMSTICK) += memstick/
+ obj-y += leds/
+ obj-$(CONFIG_INFINIBAND) += infiniband/
+diff -Nur linux-3.14.40.orig/drivers/media/platform/Kconfig linux-3.14.40/drivers/media/platform/Kconfig
+--- linux-3.14.40.orig/drivers/media/platform/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/media/platform/Kconfig 2015-05-01 14:57:59.211427001 -0500
+@@ -115,6 +115,21 @@
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
++config VIDEO_MXC_OUTPUT
++ tristate "MXC Video For Linux Video Output"
++ depends on VIDEO_DEV && ARCH_MXC && FB_MXC
++ select VIDEOBUF_DMA_CONTIG
++ ---help---
++ This is the video4linux2 output driver based on MXC module.
++
++config VIDEO_MXC_CAPTURE
++ tristate "MXC Video For Linux Video Capture"
++ depends on VIDEO_V4L2 && VIDEO_V4L2_INT_DEVICE
++ ---help---
++ This is the video4linux2 capture driver based on i.MX video-in module.
++
++source "drivers/media/platform/mxc/capture/Kconfig"
++source "drivers/media/platform/mxc/output/Kconfig"
+ source "drivers/media/platform/soc_camera/Kconfig"
+ source "drivers/media/platform/exynos4-is/Kconfig"
+ source "drivers/media/platform/s5p-tv/Kconfig"
+diff -Nur linux-3.14.40.orig/drivers/media/platform/Makefile linux-3.14.40/drivers/media/platform/Makefile
+--- linux-3.14.40.orig/drivers/media/platform/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/media/platform/Makefile 2015-05-01 14:57:59.227427001 -0500
+@@ -51,4 +51,7 @@
+
+ obj-$(CONFIG_ARCH_OMAP) += omap/
+
++obj-$(CONFIG_VIDEO_MXC_CAPTURE) += mxc/capture/
++obj-$(CONFIG_VIDEO_MXC_OUTPUT) += mxc/output/
++
+ ccflags-y += -I$(srctree)/drivers/media/i2c
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/adv7180.c linux-3.14.40/drivers/media/platform/mxc/capture/adv7180.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/adv7180.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/adv7180.c 2015-05-01 14:57:59.227427001 -0500
+@@ -0,0 +1,1344 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file adv7180.c
++ *
++ * @brief Analog Device ADV7180 video decoder functions
++ *
++ * @ingroup Camera
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define ADV7180_VOLTAGE_ANALOG 1800000
++#define ADV7180_VOLTAGE_DIGITAL_CORE 1800000
++#define ADV7180_VOLTAGE_DIGITAL_IO 3300000
++#define ADV7180_VOLTAGE_PLL 1800000
++
++static struct regulator *dvddio_regulator;
++static struct regulator *dvdd_regulator;
++static struct regulator *avdd_regulator;
++static struct regulator *pvdd_regulator;
++static int pwn_gpio;
++
++static int adv7180_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *id);
++static int adv7180_detach(struct i2c_client *client);
++
++static const struct i2c_device_id adv7180_id[] = {
++ {"adv7180", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, adv7180_id);
++
++static struct i2c_driver adv7180_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "adv7180",
++ },
++ .probe = adv7180_probe,
++ .remove = adv7180_detach,
++ .id_table = adv7180_id,
++};
++
++/*!
++ * Maintains the information on the current state of the sensor.
++ */
++struct sensor {
++ struct sensor_data sen;
++ v4l2_std_id std_id;
++} adv7180_data;
++
++
++/*! List of input video formats supported. The video formats is corresponding
++ * with v4l2 id in video_fmt_t
++ */
++typedef enum {
++ ADV7180_NTSC = 0, /*!< Locked on (M) NTSC video signal. */
++ ADV7180_PAL, /*!< (B, G, H, I, N)PAL video signal. */
++ ADV7180_NOT_LOCKED, /*!< Not locked on a signal. */
++} video_fmt_idx;
++
++/*! Number of video standards supported (including 'not locked' signal). */
++#define ADV7180_STD_MAX (ADV7180_PAL + 1)
++
++/*! Video format structure. */
++typedef struct {
++ int v4l2_id; /*!< Video for linux ID. */
++ char name[16]; /*!< Name (e.g., "NTSC", "PAL", etc.) */
++ u16 raw_width; /*!< Raw width. */
++ u16 raw_height; /*!< Raw height. */
++ u16 active_width; /*!< Active width. */
++ u16 active_height; /*!< Active height. */
++} video_fmt_t;
++
++/*! Description of video formats supported.
++ *
++ * PAL: raw=720x625, active=720x576.
++ * NTSC: raw=720x525, active=720x480.
++ */
++static video_fmt_t video_fmts[] = {
++ { /*! NTSC */
++ .v4l2_id = V4L2_STD_NTSC,
++ .name = "NTSC",
++ .raw_width = 720, /* SENS_FRM_WIDTH */
++ .raw_height = 525, /* SENS_FRM_HEIGHT */
++ .active_width = 720, /* ACT_FRM_WIDTH plus 1 */
++	 .active_height = 480,	/* ACT_FRM_HEIGHT plus 1 */
++ },
++ { /*! (B, G, H, I, N) PAL */
++ .v4l2_id = V4L2_STD_PAL,
++ .name = "PAL",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ },
++ { /*! Unlocked standard */
++ .v4l2_id = V4L2_STD_ALL,
++ .name = "Autodetect",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++	if (!err) {
++		err = -ENODEV;
++		goto err_out;
++	}
++};
++
++/*!* Standard index of ADV7180. */
++static video_fmt_idx video_idx = ADV7180_PAL;
++
++/*! @brief This mutex is used to provide mutual exclusion.
++ *
++ * Create a mutex that can be used to provide mutually exclusive
++ * read/write access to the globally accessible data structures
++ * and variables that were defined above.
++ */
++static DEFINE_MUTEX(mutex);
++
++#define IF_NAME "adv7180"
++#define ADV7180_INPUT_CTL 0x00 /* Input Control */
++#define ADV7180_STATUS_1 0x10 /* Status #1 */
++#define ADV7180_BRIGHTNESS 0x0a /* Brightness */
++#define ADV7180_IDENT 0x11 /* IDENT */
++#define ADV7180_VSYNC_FIELD_CTL_1 0x31 /* VSYNC Field Control #1 */
++#define ADV7180_MANUAL_WIN_CTL 0x3d /* Manual Window Control */
++#define ADV7180_SD_SATURATION_CB 0xe3 /* SD Saturation Cb */
++#define ADV7180_SD_SATURATION_CR 0xe4 /* SD Saturation Cr */
++#define ADV7180_PWR_MNG 0x0f /* Power Management */
++
++/* supported controls */
++/* This hasn't been fully implemented yet.
++ * This is how it should work, though. */
++static struct v4l2_queryctrl adv7180_qctrl[] = {
++ {
++ .id = V4L2_CID_BRIGHTNESS,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Brightness",
++ .minimum = 0, /* check this value */
++ .maximum = 255, /* check this value */
++ .step = 1, /* check this value */
++ .default_value = 127, /* check this value */
++ .flags = 0,
++ }, {
++ .id = V4L2_CID_SATURATION,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Saturation",
++ .minimum = 0, /* check this value */
++ .maximum = 255, /* check this value */
++ .step = 0x1, /* check this value */
++ .default_value = 127, /* check this value */
++ .flags = 0,
++ }
++};
++
++static inline void adv7180_power_down(int enable)
++{
++ gpio_set_value_cansleep(pwn_gpio, !enable);
++ msleep(2);
++}
++
++static int adv7180_regulator_enable(struct device *dev)
++{
++ int ret = 0;
++
++ dvddio_regulator = devm_regulator_get(dev, "DOVDD");
++
++ if (!IS_ERR(dvddio_regulator)) {
++ regulator_set_voltage(dvddio_regulator,
++ ADV7180_VOLTAGE_DIGITAL_IO,
++ ADV7180_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(dvddio_regulator);
++ if (ret) {
++ dev_err(dev, "set io voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set io voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get io voltage\n");
++ }
++
++ dvdd_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(dvdd_regulator)) {
++ regulator_set_voltage(dvdd_regulator,
++ ADV7180_VOLTAGE_DIGITAL_CORE,
++ ADV7180_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(dvdd_regulator);
++ if (ret) {
++ dev_err(dev, "set core voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set core voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get core voltage\n");
++ }
++
++ avdd_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(avdd_regulator)) {
++ regulator_set_voltage(avdd_regulator,
++ ADV7180_VOLTAGE_ANALOG,
++ ADV7180_VOLTAGE_ANALOG);
++ ret = regulator_enable(avdd_regulator);
++ if (ret) {
++ dev_err(dev, "set analog voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set analog voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get analog voltage\n");
++ }
++
++ pvdd_regulator = devm_regulator_get(dev, "PVDD");
++ if (!IS_ERR(pvdd_regulator)) {
++ regulator_set_voltage(pvdd_regulator,
++ ADV7180_VOLTAGE_PLL,
++ ADV7180_VOLTAGE_PLL);
++ ret = regulator_enable(pvdd_regulator);
++ if (ret) {
++ dev_err(dev, "set pll voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set pll voltage ok\n");
++ }
++ } else {
++ dev_warn(dev, "cannot get pll voltage\n");
++ }
++
++ return ret;
++}
++
++
++/***********************************************************************
++ * I2C transfert.
++ * I2C transfer.
++
++/*! Read one register from a ADV7180 i2c slave device.
++ *
++ * @param *reg register in the device we wish to access.
++ *
++ * @return		the register value if success, an error code otherwise.
++ */
++static inline int adv7180_read(u8 reg)
++{
++ int val;
++
++ val = i2c_smbus_read_byte_data(adv7180_data.sen.i2c_client, reg);
++ if (val < 0) {
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:read reg error: reg=%2x\n", __func__, reg);
++ return -1;
++ }
++ return val;
++}
++
++/*! Write one register of a ADV7180 i2c slave device.
++ *
++ * @param *reg register in the device we wish to access.
++ *
++ * @return 0 if success, an error code otherwise.
++ */
++static int adv7180_write_reg(u8 reg, u8 val)
++{
++ s32 ret;
++
++ ret = i2c_smbus_write_byte_data(adv7180_data.sen.i2c_client, reg, val);
++ if (ret < 0) {
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:write reg error:reg=%2x,val=%2x\n", __func__,
++ reg, val);
++ return -1;
++ }
++ return 0;
++}
++
++/***********************************************************************
++ * mxc_v4l2_capture interface.
++ ***********************************************************************/
++
++/*!
++ * Return attributes of current video standard.
++ * Since this device autodetects the current standard, this function also
++ * sets the values that need to be changed if the standard changes.
++ * There is no set std equivalent function.
++ *
++ * @return None.
++ */
++static void adv7180_get_std(v4l2_std_id *std)
++{
++ int tmp;
++ int idx;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180_get_std\n");
++
++ /* Read the AD_RESULT to get the detect output video standard */
++ tmp = adv7180_read(ADV7180_STATUS_1) & 0x70;
++
++ mutex_lock(&mutex);
++ if (tmp == 0x40) {
++ /* PAL */
++ *std = V4L2_STD_PAL;
++ idx = ADV7180_PAL;
++ } else if (tmp == 0) {
++ /*NTSC*/
++ *std = V4L2_STD_NTSC;
++ idx = ADV7180_NTSC;
++ } else {
++ *std = V4L2_STD_ALL;
++ idx = ADV7180_NOT_LOCKED;
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "Got invalid video standard!\n");
++ }
++ mutex_unlock(&mutex);
++
++ /* This assumes autodetect which this device uses. */
++ if (*std != adv7180_data.std_id) {
++ video_idx = idx;
++ adv7180_data.std_id = *std;
++ adv7180_data.sen.pix.width = video_fmts[video_idx].raw_width;
++ adv7180_data.sen.pix.height = video_fmts[video_idx].raw_height;
++ }
++}
++
++/***********************************************************************
++ * IOCTL Functions from v4l2_int_ioctl_desc.
++ ***********************************************************************/
++
++/*!
++ * ioctl_g_ifparm - V4L2 sensor interface handler for vidioc_int_g_ifparm_num
++ * s: pointer to standard V4L2 device structure
++ * p: pointer to standard V4L2 vidioc_int_g_ifparm_num ioctl structure
++ *
++ * Gets slave interface parameters.
++ * Calculates the required xclk value to support the requested
++ * clock parameters in p. This value is returned in the p
++ * parameter.
++ *
++ * vidioc_int_g_ifparm returns platform-specific information about the
++ * interface settings used by the sensor.
++ *
++ * Called on open.
++ */
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_g_ifparm\n");
++
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ /* Initialize structure to 0s then set any non-0 values. */
++ memset(p, 0, sizeof(*p));
++ p->if_type = V4L2_IF_TYPE_BT656; /* This is the only possibility. */
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.nobt_hs_inv = 1;
++ p->u.bt656.bt_sync_correct = 1;
++
++ /* ADV7180 has a dedicated clock so no clock settings needed. */
++
++ return 0;
++}
++
++/*!
++ * Sets the camera power.
++ *
++ * s pointer to the camera device
++ * on if 1, power is to be turned on. 0 means power is to be turned off
++ *
++ * ioctl_s_power - V4L2 sensor interface handler for vidioc_int_s_power_num
++ * @s: pointer to standard V4L2 device structure
++ * @on: power state to which device is to be set
++ *
++ * Sets device's power state to requested state, if possible.
++ * This is called on open, close, suspend and resume.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor *sensor = s->priv;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_s_power\n");
++
++ if (on && !sensor->sen.on) {
++ if (adv7180_write_reg(ADV7180_PWR_MNG, 0x04) != 0)
++ return -EIO;
++
++ /*
++ * FIXME:Additional 400ms to wait the chip to be stable?
++ * This is a workaround for preview scrolling issue.
++ */
++ msleep(400);
++ } else if (!on && sensor->sen.on) {
++ if (adv7180_write_reg(ADV7180_PWR_MNG, 0x24) != 0)
++ return -EIO;
++ }
++
++ sensor->sen.on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_g_parm\n");
++
++ switch (a->type) {
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->sen.streamcap.capability;
++ cparm->timeperframe = sensor->sen.streamcap.timeperframe;
++ cparm->capturemode = sensor->sen.streamcap.capturemode;
++ break;
++
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ break;
++
++ default:
++ pr_debug("ioctl_g_parm:type is unknown %d\n", a->type);
++ break;
++ }
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ *
++ * This driver cannot change these settings.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_s_parm\n");
++
++ switch (a->type) {
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ break;
++ }
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor *sensor = s->priv;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_g_fmt_cap\n");
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" Returning size of %dx%d\n",
++ sensor->sen.pix.width, sensor->sen.pix.height);
++ f->fmt.pix = sensor->sen.pix;
++ break;
++
++ case V4L2_BUF_TYPE_PRIVATE: {
++ v4l2_std_id std;
++ adv7180_get_std(&std);
++ f->fmt.pix.pixelformat = (u32)std;
++ }
++ break;
++
++ default:
++ f->fmt.pix = sensor->sen.pix;
++ break;
++ }
++
++ return 0;
++}
++
++/*!
++ * ioctl_queryctrl - V4L2 sensor interface handler for VIDIOC_QUERYCTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @qc: standard V4L2 VIDIOC_QUERYCTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control information
++ * from the video_control[] array. Otherwise, returns -EINVAL if the
++ * control is not supported.
++ */
++static int ioctl_queryctrl(struct v4l2_int_device *s,
++ struct v4l2_queryctrl *qc)
++{
++ int i;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_queryctrl\n");
++
++ for (i = 0; i < ARRAY_SIZE(adv7180_qctrl); i++)
++ if (qc->id && qc->id == adv7180_qctrl[i].id) {
++ memcpy(qc, &(adv7180_qctrl[i]),
++ sizeof(*qc));
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++ int sat = 0;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_g_ctrl\n");
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BRIGHTNESS\n");
++ adv7180_data.sen.brightness = adv7180_read(ADV7180_BRIGHTNESS);
++ vc->value = adv7180_data.sen.brightness;
++ break;
++ case V4L2_CID_CONTRAST:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_CONTRAST\n");
++ vc->value = adv7180_data.sen.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_SATURATION\n");
++ sat = adv7180_read(ADV7180_SD_SATURATION_CB);
++ adv7180_data.sen.saturation = sat;
++ vc->value = adv7180_data.sen.saturation;
++ break;
++ case V4L2_CID_HUE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HUE\n");
++ vc->value = adv7180_data.sen.hue;
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_DO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_RED_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_RED_BALANCE\n");
++ vc->value = adv7180_data.sen.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BLUE_BALANCE\n");
++ vc->value = adv7180_data.sen.blue;
++ break;
++ case V4L2_CID_GAMMA:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAMMA\n");
++ break;
++ case V4L2_CID_EXPOSURE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_EXPOSURE\n");
++ vc->value = adv7180_data.sen.ae_mode;
++ break;
++ case V4L2_CID_AUTOGAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTOGAIN\n");
++ break;
++ case V4L2_CID_GAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAIN\n");
++ break;
++ case V4L2_CID_HFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HFLIP\n");
++ break;
++ case V4L2_CID_VFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_VFLIP\n");
++ break;
++ default:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " Default case\n");
++ vc->value = 0;
++ ret = -EPERM;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++ u8 tmp;
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_s_ctrl\n");
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BRIGHTNESS\n");
++ tmp = vc->value;
++ adv7180_write_reg(ADV7180_BRIGHTNESS, tmp);
++ adv7180_data.sen.brightness = vc->value;
++ break;
++ case V4L2_CID_CONTRAST:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_CONTRAST\n");
++ break;
++ case V4L2_CID_SATURATION:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_SATURATION\n");
++ tmp = vc->value;
++ adv7180_write_reg(ADV7180_SD_SATURATION_CB, tmp);
++ adv7180_write_reg(ADV7180_SD_SATURATION_CR, tmp);
++ adv7180_data.sen.saturation = vc->value;
++ break;
++ case V4L2_CID_HUE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HUE\n");
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_DO_WHITE_BALANCE\n");
++ break;
++ case V4L2_CID_RED_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_RED_BALANCE\n");
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_BLUE_BALANCE\n");
++ break;
++ case V4L2_CID_GAMMA:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAMMA\n");
++ break;
++ case V4L2_CID_EXPOSURE:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_EXPOSURE\n");
++ break;
++ case V4L2_CID_AUTOGAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_AUTOGAIN\n");
++ break;
++ case V4L2_CID_GAIN:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_GAIN\n");
++ break;
++ case V4L2_CID_HFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_HFLIP\n");
++ break;
++ case V4L2_CID_VFLIP:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " V4L2_CID_VFLIP\n");
++ break;
++ default:
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ " Default case\n");
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
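++ /* Only one discrete frame size is enumerated: the active resolution
++ * of the currently detected video standard. */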
++ if (fsize->index >= 1)
++ return -EINVAL;
++
++ fsize->discrete.width = video_fmts[video_idx].active_width;
++ fsize->discrete.height = video_fmts[video_idx].active_height;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name,
++ "adv7180_decoder");
++ ((struct v4l2_dbg_chip_ident *)id)->ident = V4L2_IDENT_ADV7180;
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_init\n");
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_dev_init\n");
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module.
++ */
++static struct v4l2_int_ioctl_desc adv7180_ioctl_desc[] = {
++
++ {vidioc_int_dev_init_num, (v4l2_int_ioctl_func*)ioctl_dev_init},
++
++ /*!
++ * Deinitialise the device at slave detach.
++ * The complement of ioctl_dev_init.
++ */
++/* {vidioc_int_dev_exit_num, (v4l2_int_ioctl_func *)ioctl_dev_exit}, */
++
++ {vidioc_int_s_power_num, (v4l2_int_ioctl_func*)ioctl_s_power},
++ {vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func*)ioctl_g_ifparm},
++/* {vidioc_int_g_needs_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_g_needs_reset}, */
++/* {vidioc_int_reset_num, (v4l2_int_ioctl_func *)ioctl_reset}, */
++ {vidioc_int_init_num, (v4l2_int_ioctl_func*)ioctl_init},
++
++ /*!
++ * VIDIOC_ENUM_FMT ioctl for the CAPTURE buffer type.
++ */
++/* {vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap}, */
++
++ /*!
++ * VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type.
++ * This ioctl is used to negotiate the image capture size and
++ * pixel format without actually making it take effect.
++ */
++/* {vidioc_int_try_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_try_fmt_cap}, */
++
++ {vidioc_int_g_fmt_cap_num, (v4l2_int_ioctl_func*)ioctl_g_fmt_cap},
++
++ /*!
++ * If the requested format is supported, configures the HW to use that
++ * format, returns error code if format not supported or HW can't be
++ * correctly configured.
++ */
++/* {vidioc_int_s_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_s_fmt_cap}, */
++
++ {vidioc_int_g_parm_num, (v4l2_int_ioctl_func*)ioctl_g_parm},
++ {vidioc_int_s_parm_num, (v4l2_int_ioctl_func*)ioctl_s_parm},
++ {vidioc_int_queryctrl_num, (v4l2_int_ioctl_func*)ioctl_queryctrl},
++ {vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func*)ioctl_g_ctrl},
++ {vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func*)ioctl_s_ctrl},
++ {vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *) ioctl_enum_framesizes},
++ {vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *)ioctl_g_chip_ident},
++};
++
++static struct v4l2_int_slave adv7180_slave = {
++ .ioctls = adv7180_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(adv7180_ioctl_desc),
++};
++
++static struct v4l2_int_device adv7180_int_device = {
++ .module = THIS_MODULE,
++ .name = "adv7180",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &adv7180_slave,
++ },
++};
++
++
++/***********************************************************************
++ * I2C client and driver.
++ ***********************************************************************/
++
++/*! ADV7180 Reset function.
++ *
++ * @return None.
++ */
++static void adv7180_hard_reset(bool cvbs)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "In adv7180:adv7180_hard_reset\n");
++
++ if (cvbs) {
++ /* Set CVBS input on AIN1 */
++ adv7180_write_reg(ADV7180_INPUT_CTL, 0x00);
++ } else {
++ /*
++ * Set YPbPr input on AIN1,4,5 and normal
++ * operation (autodetection of all standards).
++ */
++ adv7180_write_reg(ADV7180_INPUT_CTL, 0x09);
++ }
++
++ /* Register settings recommended by the datasheet */
++ adv7180_write_reg(0x01, 0xc8);
++ adv7180_write_reg(0x02, 0x04);
++ adv7180_write_reg(0x03, 0x00);
++ adv7180_write_reg(0x04, 0x45);
++ adv7180_write_reg(0x05, 0x00);
++ adv7180_write_reg(0x06, 0x02);
++ adv7180_write_reg(0x07, 0x7F);
++ adv7180_write_reg(0x08, 0x80);
++ adv7180_write_reg(0x0A, 0x00);
++ adv7180_write_reg(0x0B, 0x00);
++ adv7180_write_reg(0x0C, 0x36);
++ adv7180_write_reg(0x0D, 0x7C);
++ adv7180_write_reg(0x0E, 0x00);
++ adv7180_write_reg(0x0F, 0x00);
++ adv7180_write_reg(0x13, 0x00);
++ adv7180_write_reg(0x14, 0x12);
++ adv7180_write_reg(0x15, 0x00);
++ adv7180_write_reg(0x16, 0x00);
++ adv7180_write_reg(0x17, 0x01);
++ adv7180_write_reg(0x18, 0x93);
++ adv7180_write_reg(0xF1, 0x19);
++ adv7180_write_reg(0x1A, 0x00);
++ adv7180_write_reg(0x1B, 0x00);
++ adv7180_write_reg(0x1C, 0x00);
++ adv7180_write_reg(0x1D, 0x40);
++ adv7180_write_reg(0x1E, 0x00);
++ adv7180_write_reg(0x1F, 0x00);
++ adv7180_write_reg(0x20, 0x00);
++ adv7180_write_reg(0x21, 0x00);
++ adv7180_write_reg(0x22, 0x00);
++ adv7180_write_reg(0x23, 0xC0);
++ adv7180_write_reg(0x24, 0x00);
++ adv7180_write_reg(0x25, 0x00);
++ adv7180_write_reg(0x26, 0x00);
++ adv7180_write_reg(0x27, 0x58);
++ adv7180_write_reg(0x28, 0x00);
++ adv7180_write_reg(0x29, 0x00);
++ adv7180_write_reg(0x2A, 0x00);
++ adv7180_write_reg(0x2B, 0xE1);
++ adv7180_write_reg(0x2C, 0xAE);
++ adv7180_write_reg(0x2D, 0xF4);
++ adv7180_write_reg(0x2E, 0x00);
++ adv7180_write_reg(0x2F, 0xF0);
++ adv7180_write_reg(0x30, 0x00);
++ adv7180_write_reg(0x31, 0x12);
++ adv7180_write_reg(0x32, 0x41);
++ adv7180_write_reg(0x33, 0x84);
++ adv7180_write_reg(0x34, 0x00);
++ adv7180_write_reg(0x35, 0x02);
++ adv7180_write_reg(0x36, 0x00);
++ adv7180_write_reg(0x37, 0x01);
++ adv7180_write_reg(0x38, 0x80);
++ adv7180_write_reg(0x39, 0xC0);
++ adv7180_write_reg(0x3A, 0x10);
++ adv7180_write_reg(0x3B, 0x05);
++ adv7180_write_reg(0x3C, 0x58);
++ adv7180_write_reg(0x3D, 0xB2);
++ adv7180_write_reg(0x3E, 0x64);
++ adv7180_write_reg(0x3F, 0xE4);
++ adv7180_write_reg(0x40, 0x90);
++ adv7180_write_reg(0x41, 0x01);
++ adv7180_write_reg(0x42, 0x7E);
++ adv7180_write_reg(0x43, 0xA4);
++ adv7180_write_reg(0x44, 0xFF);
++ adv7180_write_reg(0x45, 0xB6);
++ adv7180_write_reg(0x46, 0x12);
++ adv7180_write_reg(0x48, 0x00);
++ adv7180_write_reg(0x49, 0x00);
++ adv7180_write_reg(0x4A, 0x00);
++ adv7180_write_reg(0x4B, 0x00);
++ adv7180_write_reg(0x4C, 0x00);
++ adv7180_write_reg(0x4D, 0xEF);
++ adv7180_write_reg(0x4E, 0x08);
++ adv7180_write_reg(0x4F, 0x08);
++ adv7180_write_reg(0x50, 0x08);
++ adv7180_write_reg(0x51, 0x24);
++ adv7180_write_reg(0x52, 0x0B);
++ adv7180_write_reg(0x53, 0x4E);
++ adv7180_write_reg(0x54, 0x80);
++ adv7180_write_reg(0x55, 0x00);
++ adv7180_write_reg(0x56, 0x10);
++ adv7180_write_reg(0x57, 0x00);
++ adv7180_write_reg(0x58, 0x00);
++ adv7180_write_reg(0x59, 0x00);
++ adv7180_write_reg(0x5A, 0x00);
++ adv7180_write_reg(0x5B, 0x00);
++ adv7180_write_reg(0x5C, 0x00);
++ adv7180_write_reg(0x5D, 0x00);
++ adv7180_write_reg(0x5E, 0x00);
++ adv7180_write_reg(0x5F, 0x00);
++ adv7180_write_reg(0x60, 0x00);
++ adv7180_write_reg(0x61, 0x00);
++ adv7180_write_reg(0x62, 0x20);
++ adv7180_write_reg(0x63, 0x00);
++ adv7180_write_reg(0x64, 0x00);
++ adv7180_write_reg(0x65, 0x00);
++ adv7180_write_reg(0x66, 0x00);
++ adv7180_write_reg(0x67, 0x03);
++ adv7180_write_reg(0x68, 0x01);
++ adv7180_write_reg(0x69, 0x00);
++ adv7180_write_reg(0x6A, 0x00);
++ adv7180_write_reg(0x6B, 0xC0);
++ adv7180_write_reg(0x6C, 0x00);
++ adv7180_write_reg(0x6D, 0x00);
++ adv7180_write_reg(0x6E, 0x00);
++ adv7180_write_reg(0x6F, 0x00);
++ adv7180_write_reg(0x70, 0x00);
++ adv7180_write_reg(0x71, 0x00);
++ adv7180_write_reg(0x72, 0x00);
++ adv7180_write_reg(0x73, 0x10);
++ adv7180_write_reg(0x74, 0x04);
++ adv7180_write_reg(0x75, 0x01);
++ adv7180_write_reg(0x76, 0x00);
++ adv7180_write_reg(0x77, 0x3F);
++ adv7180_write_reg(0x78, 0xFF);
++ adv7180_write_reg(0x79, 0xFF);
++ adv7180_write_reg(0x7A, 0xFF);
++ adv7180_write_reg(0x7B, 0x1E);
++ adv7180_write_reg(0x7C, 0xC0);
++ adv7180_write_reg(0x7D, 0x00);
++ adv7180_write_reg(0x7E, 0x00);
++ adv7180_write_reg(0x7F, 0x00);
++ adv7180_write_reg(0x80, 0x00);
++ adv7180_write_reg(0x81, 0xC0);
++ adv7180_write_reg(0x82, 0x04);
++ adv7180_write_reg(0x83, 0x00);
++ adv7180_write_reg(0x84, 0x0C);
++ adv7180_write_reg(0x85, 0x02);
++ adv7180_write_reg(0x86, 0x03);
++ adv7180_write_reg(0x87, 0x63);
++ adv7180_write_reg(0x88, 0x5A);
++ adv7180_write_reg(0x89, 0x08);
++ adv7180_write_reg(0x8A, 0x10);
++ adv7180_write_reg(0x8B, 0x00);
++ adv7180_write_reg(0x8C, 0x40);
++ adv7180_write_reg(0x8D, 0x00);
++ adv7180_write_reg(0x8E, 0x40);
++ adv7180_write_reg(0x8F, 0x00);
++ adv7180_write_reg(0x90, 0x00);
++ adv7180_write_reg(0x91, 0x50);
++ adv7180_write_reg(0x92, 0x00);
++ adv7180_write_reg(0x93, 0x00);
++ adv7180_write_reg(0x94, 0x00);
++ adv7180_write_reg(0x95, 0x00);
++ adv7180_write_reg(0x96, 0x00);
++ adv7180_write_reg(0x97, 0xF0);
++ adv7180_write_reg(0x98, 0x00);
++ adv7180_write_reg(0x99, 0x00);
++ adv7180_write_reg(0x9A, 0x00);
++ adv7180_write_reg(0x9B, 0x00);
++ adv7180_write_reg(0x9C, 0x00);
++ adv7180_write_reg(0x9D, 0x00);
++ adv7180_write_reg(0x9E, 0x00);
++ adv7180_write_reg(0x9F, 0x00);
++ adv7180_write_reg(0xA0, 0x00);
++ adv7180_write_reg(0xA1, 0x00);
++ adv7180_write_reg(0xA2, 0x00);
++ adv7180_write_reg(0xA3, 0x00);
++ adv7180_write_reg(0xA4, 0x00);
++ adv7180_write_reg(0xA5, 0x00);
++ adv7180_write_reg(0xA6, 0x00);
++ adv7180_write_reg(0xA7, 0x00);
++ adv7180_write_reg(0xA8, 0x00);
++ adv7180_write_reg(0xA9, 0x00);
++ adv7180_write_reg(0xAA, 0x00);
++ adv7180_write_reg(0xAB, 0x00);
++ adv7180_write_reg(0xAC, 0x00);
++ adv7180_write_reg(0xAD, 0x00);
++ adv7180_write_reg(0xAE, 0x60);
++ adv7180_write_reg(0xAF, 0x00);
++ adv7180_write_reg(0xB0, 0x00);
++ adv7180_write_reg(0xB1, 0x60);
++ adv7180_write_reg(0xB2, 0x1C);
++ adv7180_write_reg(0xB3, 0x54);
++ adv7180_write_reg(0xB4, 0x00);
++ adv7180_write_reg(0xB5, 0x00);
++ adv7180_write_reg(0xB6, 0x00);
++ adv7180_write_reg(0xB7, 0x13);
++ adv7180_write_reg(0xB8, 0x03);
++ adv7180_write_reg(0xB9, 0x33);
++ adv7180_write_reg(0xBF, 0x02);
++ adv7180_write_reg(0xC0, 0x00);
++ adv7180_write_reg(0xC1, 0x00);
++ adv7180_write_reg(0xC2, 0x00);
++ adv7180_write_reg(0xC3, 0x00);
++ adv7180_write_reg(0xC4, 0x00);
++ adv7180_write_reg(0xC5, 0x81);
++ adv7180_write_reg(0xC6, 0x00);
++ adv7180_write_reg(0xC7, 0x00);
++ adv7180_write_reg(0xC8, 0x00);
++ adv7180_write_reg(0xC9, 0x04);
++ adv7180_write_reg(0xCC, 0x69);
++ adv7180_write_reg(0xCD, 0x00);
++ adv7180_write_reg(0xCE, 0x01);
++ adv7180_write_reg(0xCF, 0xB4);
++ adv7180_write_reg(0xD0, 0x00);
++ adv7180_write_reg(0xD1, 0x10);
++ adv7180_write_reg(0xD2, 0xFF);
++ adv7180_write_reg(0xD3, 0xFF);
++ adv7180_write_reg(0xD4, 0x7F);
++ adv7180_write_reg(0xD5, 0x7F);
++ adv7180_write_reg(0xD6, 0x3E);
++ adv7180_write_reg(0xD7, 0x08);
++ adv7180_write_reg(0xD8, 0x3C);
++ adv7180_write_reg(0xD9, 0x08);
++ adv7180_write_reg(0xDA, 0x3C);
++ adv7180_write_reg(0xDB, 0x9B);
++ adv7180_write_reg(0xDC, 0xAC);
++ adv7180_write_reg(0xDD, 0x4C);
++ adv7180_write_reg(0xDE, 0x00);
++ adv7180_write_reg(0xDF, 0x00);
++ adv7180_write_reg(0xE0, 0x14);
++ adv7180_write_reg(0xE1, 0x80);
++ adv7180_write_reg(0xE2, 0x80);
++ adv7180_write_reg(0xE3, 0x80);
++ adv7180_write_reg(0xE4, 0x80);
++ adv7180_write_reg(0xE5, 0x25);
++ adv7180_write_reg(0xE6, 0x44);
++ adv7180_write_reg(0xE7, 0x63);
++ adv7180_write_reg(0xE8, 0x65);
++ adv7180_write_reg(0xE9, 0x14);
++ adv7180_write_reg(0xEA, 0x63);
++ adv7180_write_reg(0xEB, 0x55);
++ adv7180_write_reg(0xEC, 0x55);
++ adv7180_write_reg(0xEE, 0x00);
++ adv7180_write_reg(0xEF, 0x4A);
++ adv7180_write_reg(0xF0, 0x44);
++ adv7180_write_reg(0xF1, 0x0C);
++ adv7180_write_reg(0xF2, 0x32);
++ adv7180_write_reg(0xF3, 0x00);
++ adv7180_write_reg(0xF4, 0x3F);
++ adv7180_write_reg(0xF5, 0xE0);
++ adv7180_write_reg(0xF6, 0x69);
++ adv7180_write_reg(0xF7, 0x10);
++ adv7180_write_reg(0xF8, 0x00);
++ adv7180_write_reg(0xF9, 0x03);
++ adv7180_write_reg(0xFA, 0xFA);
++ adv7180_write_reg(0xFB, 0x40);
++}
++
++/*!
++ * ADV7180 I2C probe function.
++ * Function set in the i2c_driver struct.
++ * Called by insmod.
++ *
++ * @param *client I2C client descriptor.
++ * @param *id I2C device id entry that matched.
++ *
++ * @return Error code indicating success or failure.
++ */
++static int adv7180_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int rev_id;
++ int ret = 0;
++ u32 cvbs = true;
++ struct pinctrl *pinctrl;
++ struct device *dev = &client->dev;
++
++ printk(KERN_ERR "DBG sensor data is at %p\n", &adv7180_data);
++
++ /* adv7180 pinctrl */
++ pinctrl = devm_pinctrl_get_select_default(dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(dev, "setup pinctrl failed\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_err(dev, "no sensor pwdn pin available\n");
++ return -ENODEV;
++ }
++ ret = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "adv7180_pwdn");
++ if (ret < 0) {
++ dev_err(dev, "no power pin available!\n");
++ return ret;
++ }
++
++ adv7180_regulator_enable(dev);
++
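++ /* Give the decoder time to come out of power-down before the
++ * first register access. */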
++ adv7180_power_down(0);
++
++ msleep(1);
++
++ /* Set initial values for the sensor struct. */
++ memset(&adv7180_data, 0, sizeof(adv7180_data));
++ adv7180_data.sen.i2c_client = client;
++ adv7180_data.sen.streamcap.timeperframe.denominator = 30;
++ adv7180_data.sen.streamcap.timeperframe.numerator = 1;
++ adv7180_data.std_id = V4L2_STD_ALL;
++ video_idx = ADV7180_NOT_LOCKED;
++ adv7180_data.sen.pix.width = video_fmts[video_idx].raw_width;
++ adv7180_data.sen.pix.height = video_fmts[video_idx].raw_height;
++ adv7180_data.sen.pix.pixelformat = V4L2_PIX_FMT_UYVY; /* YUV422 */
++ adv7180_data.sen.pix.priv = 1; /* 1 is used to indicate TV in */
++ adv7180_data.sen.on = true;
++
++ adv7180_data.sen.sensor_clk = devm_clk_get(dev, "csi_mclk");
++ if (IS_ERR(adv7180_data.sen.sensor_clk)) {
++ dev_err(dev, "get mclk failed\n");
++ return PTR_ERR(adv7180_data.sen.sensor_clk);
++ }
++
++ ret = of_property_read_u32(dev->of_node, "mclk",
++ &adv7180_data.sen.mclk);
++ if (ret) {
++ dev_err(dev, "mclk frequency is invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(
++ dev->of_node, "mclk_source",
++ (u32 *) &(adv7180_data.sen.mclk_source));
++ if (ret) {
++ dev_err(dev, "mclk_source invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(dev->of_node, "csi_id",
++ &(adv7180_data.sen.csi));
++ if (ret) {
++ dev_err(dev, "csi_id invalid\n");
++ return ret;
++ }
++
++ clk_prepare_enable(adv7180_data.sen.sensor_clk);
++
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:adv7180 probe i2c address is 0x%02X\n",
++ __func__, adv7180_data.sen.i2c_client->addr);
++
++ /*! Read the revision ID of the tvin chip */
++ rev_id = adv7180_read(ADV7180_IDENT);
++ dev_dbg(dev,
++ "%s:Analog Devices adv7%2X0 detected!\n", __func__,
++ rev_id);
++
++ ret = of_property_read_u32(dev->of_node, "cvbs", &(cvbs));
++ if (ret) {
++ dev_err(dev, "cvbs setting is not found\n");
++ cvbs = true;
++ }
++
++ /*! ADV7180 initialization. */
++ adv7180_hard_reset(cvbs);
++
++ pr_debug(" type is %d (expect %d)\n",
++ adv7180_int_device.type, v4l2_int_type_slave);
++ pr_debug(" num ioctls is %d\n",
++ adv7180_int_device.u.slave->num_ioctls);
++
++ /* This function attaches this structure to the /dev/video0 device.
++ * The pointer in priv points to the adv7180_data structure here.*/
++ adv7180_int_device.priv = &adv7180_data;
++ ret = v4l2_int_device_register(&adv7180_int_device);
++
++ clk_disable_unprepare(adv7180_data.sen.sensor_clk);
++
++ return ret;
++}
++
++/*!
++ * ADV7180 I2C detach function.
++ * Called on rmmod.
++ *
++ * @param *client struct i2c_client*.
++ *
++ * @return Error code indicating success or failure.
++ */
++static int adv7180_detach(struct i2c_client *client)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev,
++ "%s:Removing %s video decoder @ 0x%02X from adapter %s\n",
++ __func__, IF_NAME, client->addr << 1, client->adapter->name);
++
++ /* Power down via i2c */
++ adv7180_write_reg(ADV7180_PWR_MNG, 0x24);
++
++ if (dvddio_regulator)
++ regulator_disable(dvddio_regulator);
++
++ if (dvdd_regulator)
++ regulator_disable(dvdd_regulator);
++
++ if (avdd_regulator)
++ regulator_disable(avdd_regulator);
++
++ if (pvdd_regulator)
++ regulator_disable(pvdd_regulator);
++
++ v4l2_int_device_unregister(&adv7180_int_device);
++
++ return 0;
++}
++
++/*!
++ * ADV7180 init function.
++ * Called on insmod.
++ *
++ * @return Error code indicating success or failure.
++ */
++static __init int adv7180_init(void)
++{
++ int err = 0;
++
++ pr_debug("In adv7180_init\n");
++
++ /* Tells the i2c driver what functions to call for this driver. */
++ err = i2c_add_driver(&adv7180_i2c_driver);
++ if (err != 0)
++ pr_err("%s:driver registration failed, error=%d\n",
++ __func__, err);
++
++ return err;
++}
++
++/*!
++ * ADV7180 cleanup function.
++ * Called on rmmod.
++ *
++ * @return Error code indicating success or failure.
++ */
++static void __exit adv7180_clean(void)
++{
++ dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180_clean\n");
++ i2c_del_driver(&adv7180_i2c_driver);
++}
++
++module_init(adv7180_init);
++module_exit(adv7180_clean);
++
++MODULE_AUTHOR("Freescale Semiconductor");
++MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/csi_v4l2_capture.c linux-3.14.40/drivers/media/platform/mxc/capture/csi_v4l2_capture.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/csi_v4l2_capture.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/csi_v4l2_capture.c 2015-05-01 14:57:59.255427001 -0500
+@@ -0,0 +1,2047 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file drivers/media/video/mxc/capture/csi_v4l2_capture.c
++ * This file is derived from mxc_v4l2_capture.c
++ *
++ * @brief Video For Linux 2 capture driver
++ *
++ * @ingroup MXC_V4L2_CAPTURE
++ */
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/semaphore.h>
++#include <linux/pagemap.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/mxcfb.h>
++#include <linux/dma-mapping.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-int-device.h>
++#include <media/v4l2-chip-ident.h>
++#include "mxc_v4l2_capture.h"
++#include "fsl_csi.h"
++
++static int video_nr = -1;
++static cam_data *g_cam;
++static int req_buf_number;
++
++static int csi_v4l2_master_attach(struct v4l2_int_device *slave);
++static void csi_v4l2_master_detach(struct v4l2_int_device *slave);
++static u8 camera_power(cam_data *cam, bool cameraOn);
++struct v4l2_crop crop_current;
++struct v4l2_window win_current;
++
++/*! Information about this driver. */
++static struct v4l2_int_master csi_v4l2_master = {
++ .attach = csi_v4l2_master_attach,
++ .detach = csi_v4l2_master_detach,
++};
++
++static struct v4l2_int_device csi_v4l2_int_device = {
++ .module = THIS_MODULE,
++ .name = "csi_v4l2_cap",
++ .type = v4l2_int_type_master,
++ .u = {
++ .master = &csi_v4l2_master,
++ },
++};
++
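++/* Controls implemented by the PxP post-processor (flipping and 90-degree
++ * rotation) rather than by the camera sensor itself. */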
++static struct v4l2_queryctrl pxp_controls[] = {
++ {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal Flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = 0,
++ }, {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical Flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ .flags = 0,
++ }, {
++ .id = V4L2_CID_PRIVATE_BASE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "Rotation",
++ .minimum = 0,
++ .maximum = 270,
++ .step = 90,
++ .default_value = 0,
++ .flags = 0,
++ },
++};
++
++/* Callback function triggered after PxP receives an EOF interrupt */
++static void pxp_dma_done(void *arg)
++{
++ struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
++ struct dma_chan *chan = tx_desc->txd.chan;
++ struct pxp_channel *pxp_chan = to_pxp_channel(chan);
++ cam_data *cam = pxp_chan->client;
++
++ /* This call will signal wait_for_completion_timeout() */
++ complete(&cam->pxp_tx_cmpl);
++}
++
++static bool chan_filter(struct dma_chan *chan, void *arg)
++{
++ return imx_dma_is_pxp(chan);
++}
++
++/* Function to request PXP DMA channel */
++static int pxp_chan_init(cam_data *cam)
++{
++ dma_cap_mask_t mask;
++ struct dma_chan *chan;
++
++ /* Request a free channel */
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_PRIVATE, mask);
++ chan = dma_request_channel(mask, chan_filter, NULL);
++ if (!chan) {
++ pr_err("Failed to request a PxP DMA channel!\n");
++ return -EBUSY;
++ }
++
++ cam->pxp_chan = to_pxp_channel(chan);
++ cam->pxp_chan->client = cam;
++
++ init_completion(&cam->pxp_tx_cmpl);
++
++ return 0;
++}
++
++/*
++ * Function to call PxP DMA driver and send our new V4L2 buffer
++ * through the PxP.
++ * Note: This is a blocking call, so upon return the PxP tx should be complete.
++ */
++static int pxp_process_update(cam_data *cam)
++{
++ dma_cookie_t cookie;
++ struct scatterlist *sg = cam->sg;
++ struct dma_chan *dma_chan;
++ struct pxp_tx_desc *desc;
++ struct dma_async_tx_descriptor *txd;
++ struct pxp_config_data *pxp_conf = &cam->pxp_conf;
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++ int i, ret;
++ int length;
++
++ pr_debug("Starting PxP Send Buffer\n");
++
++ /* First, check to see that we have acquired a PxP Channel object */
++ if (cam->pxp_chan == NULL) {
++ /*
++ * PxP Channel has not yet been created and initialized,
++ * so let's go ahead and try
++ */
++ ret = pxp_chan_init(cam);
++ if (ret) {
++ /*
++ * PxP channel init failed, and we can't use the
++ * PxP until the PxP DMA driver has loaded, so we abort
++ */
++ pr_err("PxP chan init failed\n");
++ return -ENODEV;
++ }
++ }
++
++ /*
++ * Init completion, so that we can be properly informed of
++ * the completion of the PxP task when it is done.
++ */
++ init_completion(&cam->pxp_tx_cmpl);
++
++ dma_chan = &cam->pxp_chan->dma_chan;
++
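++ /* sg[0] carries the captured frame (PxP S0 input), sg[1] the
++ * converted output buffer. */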
++ txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg, 2,
++ DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT,
++ NULL);
++ if (!txd) {
++ pr_err("Error preparing a DMA transaction descriptor.\n");
++ return -EIO;
++ }
++
++ txd->callback_param = txd;
++ txd->callback = pxp_dma_done;
++
++ /*
++ * Configure PxP for processing of new v4l2 buf
++ */
++ pxp_conf->s0_param.pixel_fmt = PXP_PIX_FMT_UYVY;
++ pxp_conf->s0_param.color_key = -1;
++ pxp_conf->s0_param.color_key_enable = false;
++ pxp_conf->s0_param.width = cam->v2f.fmt.pix.width;
++ pxp_conf->s0_param.height = cam->v2f.fmt.pix.height;
++
++ pxp_conf->ol_param[0].combine_enable = false;
++
++ proc_data->srect.top = 0;
++ proc_data->srect.left = 0;
++ proc_data->srect.width = pxp_conf->s0_param.width;
++ proc_data->srect.height = pxp_conf->s0_param.height;
++
++ if (crop_current.c.top != 0)
++ proc_data->srect.top = crop_current.c.top;
++ if (crop_current.c.left != 0)
++ proc_data->srect.left = crop_current.c.left;
++ if (crop_current.c.width != 0)
++ proc_data->srect.width = crop_current.c.width;
++ if (crop_current.c.height != 0)
++ proc_data->srect.height = crop_current.c.height;
++
++ proc_data->drect.left = 0;
++ proc_data->drect.top = 0;
++ proc_data->drect.width = proc_data->srect.width;
++ proc_data->drect.height = proc_data->srect.height;
++
++ if (win_current.w.left != 0)
++ proc_data->drect.left = win_current.w.left;
++ if (win_current.w.top != 0)
++ proc_data->drect.top = win_current.w.top;
++ if (win_current.w.width != 0)
++ proc_data->drect.width = win_current.w.width;
++ if (win_current.w.height != 0)
++ proc_data->drect.height = win_current.w.height;
++
++ pr_debug("srect l: %d, t: %d, w: %d, h: %d; "
++ "drect l: %d, t: %d, w: %d, h: %d\n",
++ proc_data->srect.left, proc_data->srect.top,
++ proc_data->srect.width, proc_data->srect.height,
++ proc_data->drect.left, proc_data->drect.top,
++ proc_data->drect.width, proc_data->drect.height);
++
++ pxp_conf->out_param.pixel_fmt = PXP_PIX_FMT_RGB565;
++ pxp_conf->out_param.width = proc_data->drect.width;
++ pxp_conf->out_param.height = proc_data->drect.height;
++
++ if (cam->rotation % 180)
++ pxp_conf->out_param.stride = pxp_conf->out_param.height;
++ else
++ pxp_conf->out_param.stride = pxp_conf->out_param.width;
++
++ desc = to_tx_desc(txd);
++ length = desc->len;
++ for (i = 0; i < length; i++) {
++ if (i == 0) {/* S0 */
++ memcpy(&desc->proc_data, proc_data,
++ sizeof(struct pxp_proc_data));
++ pxp_conf->s0_param.paddr = sg_dma_address(&sg[0]);
++ memcpy(&desc->layer_param.s0_param, &pxp_conf->s0_param,
++ sizeof(struct pxp_layer_param));
++ } else if (i == 1) {
++ pxp_conf->out_param.paddr = sg_dma_address(&sg[1]);
++ memcpy(&desc->layer_param.out_param,
++ &pxp_conf->out_param,
++ sizeof(struct pxp_layer_param));
++ }
++
++ desc = desc->next;
++ }
++
++ /* Submitting our TX starts the PxP processing task */
++ cookie = txd->tx_submit(txd);
++ if (cookie < 0) {
++ pr_err("Error sending FB through PxP\n");
++ return -EIO;
++ }
++
++ cam->txd = txd;
++
++ /* trigger PxP */
++ dma_async_issue_pending(dma_chan);
++
++ return 0;
++}
++
++static int pxp_complete_update(cam_data *cam)
++{
++ int ret;
++ /*
++ * Wait for completion event, which will be set
++ * through our TX callback function.
++ */
++ ret = wait_for_completion_timeout(&cam->pxp_tx_cmpl, HZ / 10);
++ if (ret <= 0) {
++ pr_warning("PxP operation failed due to %s\n",
++ ret < 0 ? "user interrupt" : "timeout");
++ dma_release_channel(&cam->pxp_chan->dma_chan);
++ cam->pxp_chan = NULL;
++ return ret ? : -ETIMEDOUT;
++ }
++
++ dma_release_channel(&cam->pxp_chan->dma_chan);
++ cam->pxp_chan = NULL;
++
++ pr_debug("TX completed\n");
++
++ return 0;
++}
++
++/*!
++ * Camera V4l2 callback function.
++ *
++ * @param mask u32
++ * @param dev void device structure
++ *
++ * @return none
++ */
++static void camera_callback(u32 mask, void *dev)
++{
++ struct mxc_v4l_frame *done_frame;
++ struct mxc_v4l_frame *ready_frame;
++ cam_data *cam;
++
++ cam = (cam_data *) dev;
++ if (cam == NULL)
++ return;
++
++ spin_lock(&cam->queue_int_lock);
++ spin_lock(&cam->dqueue_int_lock);
++ if (!list_empty(&cam->working_q)) {
++ done_frame = list_entry(cam->working_q.next,
++ struct mxc_v4l_frame, queue);
++
++ if (done_frame->csi_buf_num != cam->ping_pong_csi)
++ goto next;
++
++ if (done_frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ done_frame->buffer.flags |= V4L2_BUF_FLAG_DONE;
++ done_frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++
++ /* Added to the done queue */
++ list_del(cam->working_q.next);
++ list_add_tail(&done_frame->queue, &cam->done_q);
++ cam->enc_counter++;
++ wake_up_interruptible(&cam->enc_queue);
++ } else {
++ pr_err("ERROR: v4l2 capture: %s: "
++ "buffer not queued\n", __func__);
++ }
++ }
++
++next:
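++ /* Re-arm the just-finished CSI DMA slot: hand it the next ready
++ * buffer, or the dummy frame if nothing is queued. */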
++ if (!list_empty(&cam->ready_q)) {
++ ready_frame = list_entry(cam->ready_q.next,
++ struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&ready_frame->queue, &cam->working_q);
++
++ __raw_writel(ready_frame->paddress,
++ cam->ping_pong_csi == 1 ? CSI_CSIDMASA_FB1 :
++ CSI_CSIDMASA_FB2);
++ ready_frame->csi_buf_num = cam->ping_pong_csi;
++ } else {
++ __raw_writel(cam->dummy_frame.paddress,
++ cam->ping_pong_csi == 1 ? CSI_CSIDMASA_FB1 :
++ CSI_CSIDMASA_FB2);
++ }
++ spin_unlock(&cam->dqueue_int_lock);
++ spin_unlock(&cam->queue_int_lock);
++
++ return;
++}
++
++/*!
++ * Make csi ready for capture image.
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 success
++ */
++static int csi_cap_image(cam_data *cam)
++{
++ unsigned int value;
++
++ value = __raw_readl(CSI_CSICR3);
++ __raw_writel(value | BIT_FRMCNT_RST, CSI_CSICR3);
++ value = __raw_readl(CSI_CSISR);
++ __raw_writel(value, CSI_CSISR);
++
++ return 0;
++}
++
++/***************************************************************************
++ * Functions for handling Frame buffers.
++ **************************************************************************/
++
++/*!
++ * Free frame buffers
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return status 0 success.
++ */
++static int csi_free_frame_buf(cam_data *cam)
++{
++ int i;
++
++ pr_debug("MVC: In %s\n", __func__);
++
++ for (i = 0; i < FRAME_NUM; i++) {
++ if (cam->frame[i].vaddress != 0) {
++ dma_free_coherent(0, cam->frame[i].buffer.length,
++ cam->frame[i].vaddress,
++ cam->frame[i].paddress);
++ cam->frame[i].vaddress = 0;
++ }
++ }
++
++ if (cam->dummy_frame.vaddress != 0) {
++ dma_free_coherent(0, cam->dummy_frame.buffer.length,
++ cam->dummy_frame.vaddress,
++ cam->dummy_frame.paddress);
++ cam->dummy_frame.vaddress = 0;
++ }
++
++ return 0;
++}
++
++/*!
++ * Allocate frame buffers
++ *
++ * @param cam Structure cam_data *
++ * @param count int number of buffers to be allocated
++ *
++ * @return status 0 on success, -ENOBUFS on failure.
++ */
++static int csi_allocate_frame_buf(cam_data *cam, int count)
++{
++ int i;
++
++ pr_debug("In MVC:%s- size=%d\n",
++ __func__, cam->v2f.fmt.pix.sizeimage);
++ for (i = 0; i < count; i++) {
++ cam->frame[i].vaddress = dma_alloc_coherent(0, PAGE_ALIGN
++ (cam->v2f.fmt.
++ pix.sizeimage),
++ &cam->frame[i].
++ paddress,
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->frame[i].vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: "
++ "%s failed.\n", __func__);
++ csi_free_frame_buf(cam);
++ return -ENOBUFS;
++ }
++ cam->frame[i].buffer.index = i;
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[i].buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->frame[i].buffer.length = cam->v2f.fmt.pix.sizeimage;
++ cam->frame[i].buffer.memory = V4L2_MEMORY_MMAP;
++ cam->frame[i].buffer.m.offset = cam->frame[i].paddress;
++ cam->frame[i].index = i;
++ cam->frame[i].csi_buf_num = 0;
++ }
++
++ return 0;
++}
++
++/*!
++ * Free frame buffers status
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return none
++ */
++static void csi_free_frames(cam_data *cam)
++{
++ int i;
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ for (i = 0; i < FRAME_NUM; i++)
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++
++ return;
++}
++
++/*!
++ * Return the buffer status
++ *
++ * @param cam Structure cam_data *
++ * @param buf Structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL failed.
++ */
++static int csi_v4l2_buffer_status(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: %s buffers "
++ "not allocated\n", __func__);
++ return -EINVAL;
++ }
++
++ memcpy(buf, &(cam->frame[buf->index].buffer), sizeof(*buf));
++
++ return 0;
++}
++
++static int csi_v4l2_release_bufs(cam_data *cam)
++{
++ pr_debug("In MVC:csi_v4l2_release_bufs\n");
++ return 0;
++}
++
++static int csi_v4l2_prepare_bufs(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC:csi_v4l2_prepare_bufs\n");
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM || buf->length <
++ cam->v2f.fmt.pix.sizeimage) {
++ pr_err("ERROR: v4l2 capture: csi_v4l2_prepare_bufs buffers "
++ "not allocated,index=%d, length=%d\n", buf->index,
++ buf->length);
++ return -EINVAL;
++ }
++
++ cam->frame[buf->index].buffer.index = buf->index;
++ cam->frame[buf->index].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[buf->index].buffer.length = buf->length;
++ cam->frame[buf->index].buffer.m.offset = cam->frame[buf->index].paddress
++ = buf->m.offset;
++ cam->frame[buf->index].buffer.type = buf->type;
++ cam->frame[buf->index].buffer.memory = V4L2_MEMORY_USERPTR;
++ cam->frame[buf->index].index = buf->index;
++
++ return 0;
++}
++
++/*!
++ * Indicates whether the palette is supported.
++ *
++ * @param palette V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_UYVY or V4L2_PIX_FMT_YUV420
++ *
++ * @return non-zero if the palette is supported, 0 otherwise
++ */
++static inline int valid_mode(u32 palette)
++{
++ return (palette == V4L2_PIX_FMT_RGB565) ||
++ (palette == V4L2_PIX_FMT_YUYV) ||
++ (palette == V4L2_PIX_FMT_UYVY) || (palette == V4L2_PIX_FMT_YUV420);
++}
++
++/*!
++ * Start stream I/O
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int csi_streamon(cam_data *cam)
++{
++ struct mxc_v4l_frame *frame;
++ unsigned long flags;
++ unsigned long val;
++ int timeout, timeout2;
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (NULL == cam) {
++ pr_err("ERROR: v4l2 capture: %s cam parameter is NULL\n",
++ __func__);
++ return -1;
++ }
++ cam->dummy_frame.vaddress = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->dummy_frame.paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->dummy_frame.vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: Allocate dummy frame "
++ "failed.\n");
++ return -ENOBUFS;
++ }
++ cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
++ cam->dummy_frame.buffer.length = cam->v2f.fmt.pix.sizeimage;
++ cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
++
++ spin_lock_irqsave(&cam->queue_int_lock, flags);
++ /* move the frame from readyq to workingq */
++ if (list_empty(&cam->ready_q)) {
++ pr_err("ERROR: v4l2 capture: %s: "
++ "ready_q queue empty\n", __func__);
++ spin_unlock_irqrestore(&cam->queue_int_lock, flags);
++ return -1;
++ }
++ frame = list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ __raw_writel(frame->paddress, CSI_CSIDMASA_FB1);
++ frame->csi_buf_num = 1;
++
++ if (list_empty(&cam->ready_q)) {
++ pr_err("ERROR: v4l2 capture: %s: "
++ "ready_q queue empty\n", __func__);
++ spin_unlock_irqrestore(&cam->queue_int_lock, flags);
++ return -1;
++ }
++ frame = list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ __raw_writel(frame->paddress, CSI_CSIDMASA_FB2);
++ frame->csi_buf_num = 2;
++ spin_unlock_irqrestore(&cam->queue_int_lock, flags);
++
++ cam->capture_pid = current->pid;
++ cam->capture_on = true;
++ csi_cap_image(cam);
++
++ local_irq_save(flags);
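++ /* Busy-wait for a start-of-frame, reflash the CSI DMA controller,
++ * then enable the DMA request and frame interrupts. */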
++ for (timeout = 1000000; timeout > 0; timeout--) {
++ if (__raw_readl(CSI_CSISR) & BIT_SOF_INT) {
++ val = __raw_readl(CSI_CSICR3);
++ __raw_writel(val | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++ for (timeout2 = 1000000; timeout2 > 0; timeout2--) {
++ if (__raw_readl(CSI_CSICR3) &
++ BIT_DMA_REFLASH_RFF)
++ cpu_relax();
++ else
++ break;
++ }
++ if (timeout2 <= 0) {
++ pr_err("timeout when wait for reflash done.\n");
++ local_irq_restore(flags);
++ return -ETIME;
++ }
++
++ csi_dmareq_rff_enable();
++ csi_enable_int(1);
++ break;
++ } else
++ cpu_relax();
++ }
++ if (timeout <= 0) {
++ pr_err("timeout when wait for SOF\n");
++ local_irq_restore(flags);
++ return -ETIME;
++ }
++ local_irq_restore(flags);
++
++ return 0;
++}
++
++/*!
++ * Stop stream I/O
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int csi_streamoff(cam_data *cam)
++{
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (cam->capture_on == false)
++ return 0;
++
++ csi_dmareq_rff_disable();
++ csi_disable_int();
++ cam->capture_on = false;
++
++ /* set CSI_CSIDMASA_FB1 and CSI_CSIDMASA_FB2 to default value */
++ __raw_writel(0, CSI_CSIDMASA_FB1);
++ __raw_writel(0, CSI_CSIDMASA_FB2);
++
++ csi_free_frames(cam);
++ csi_free_frame_buf(cam);
++
++ return 0;
++}
++
++/*!
++ * start the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int start_preview(cam_data *cam)
++{
++ unsigned long fb_addr = (unsigned long)cam->v4l2_fb.base;
++
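++ /* Point both ping-pong DMA buffers at the framebuffer so captured
++ * frames are written directly to the display. */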
++ __raw_writel(fb_addr, CSI_CSIDMASA_FB1);
++ __raw_writel(fb_addr, CSI_CSIDMASA_FB2);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++
++ csi_enable_int(0);
++
++ return 0;
++}
++
++/*!
++ * shut down the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int stop_preview(cam_data *cam)
++{
++ csi_disable_int();
++
++ /* set CSI_CSIDMASA_FB1 and CSI_CSIDMASA_FB2 to default value */
++ __raw_writel(0, CSI_CSIDMASA_FB1);
++ __raw_writel(0, CSI_CSIDMASA_FB2);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++
++ return 0;
++}
++
++/***************************************************************************
++ * VIDIOC Functions.
++ **************************************************************************/
++
++/*!
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int csi_v4l2_g_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ f->fmt.pix = cam->v2f.fmt.pix;
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ f->fmt.win = cam->win;
++ break;
++ default:
++ pr_debug(" type is invalid\n");
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__, cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++
++ return retval;
++}
++
++/*!
++ * V4L2 - csi_v4l2_s_fmt function
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int csi_v4l2_s_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++ int size = 0;
++ int bytesperline = 0;
++ int *width, *height;
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ if (!valid_mode(f->fmt.pix.pixelformat)) {
++ pr_err("ERROR: v4l2 capture: %s: format "
++ "not supported\n", __func__);
++ return -EINVAL;
++ }
++
++ /* Handle the case where the requested size is larger than the
++ * current camera setting. */
++ if ((f->fmt.pix.width > cam->crop_bounds.width)
++ || (f->fmt.pix.height > cam->crop_bounds.height)) {
++ /* Logic to call vidioc_s_param would be needed here if
++ * the camera could change its output size. */
++ pr_debug("csi_v4l2_s_fmt size changed\n");
++ }
++ if (cam->rotation % 180) {
++ height = &f->fmt.pix.width;
++ width = &f->fmt.pix.height;
++ } else {
++ width = &f->fmt.pix.width;
++ height = &f->fmt.pix.height;
++ }
++
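++ /* The requested size may not be smaller than 1/8 of the camera
++ * output; clamp and round up to a multiple of 8. */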
++ if ((cam->crop_bounds.width / *width > 8) ||
++ ((cam->crop_bounds.width / *width == 8) &&
++ (cam->crop_bounds.width % *width))) {
++ *width = cam->crop_bounds.width / 8;
++ if (*width % 8)
++ *width += 8 - *width % 8;
++ pr_err("ERROR: v4l2 capture: width exceeds limit "
++ "resize to %d.\n", *width);
++ }
++
++ if ((cam->crop_bounds.height / *height > 8) ||
++ ((cam->crop_bounds.height / *height == 8) &&
++ (cam->crop_bounds.height % *height))) {
++ *height = cam->crop_bounds.height / 8;
++ if (*height % 8)
++ *height += 8 - *height % 8;
++ pr_err("ERROR: v4l2 capture: height exceeds limit "
++ "resize to %d.\n", *height);
++ }
++
++ switch (f->fmt.pix.pixelformat) {
++ case V4L2_PIX_FMT_RGB565:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ csi_init_format(V4L2_PIX_FMT_UYVY);
++ csi_set_16bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_UYVY:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ csi_init_format(f->fmt.pix.pixelformat);
++ csi_set_16bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_YUYV:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ csi_init_format(f->fmt.pix.pixelformat);
++ csi_set_16bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_YUV420:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
++ csi_set_12bit_imagpara(f->fmt.pix.width,
++ f->fmt.pix.height);
++ bytesperline = f->fmt.pix.width;
++ break;
++ case V4L2_PIX_FMT_YUV422P:
++ case V4L2_PIX_FMT_RGB24:
++ case V4L2_PIX_FMT_BGR24:
++ case V4L2_PIX_FMT_BGR32:
++ case V4L2_PIX_FMT_RGB32:
++ case V4L2_PIX_FMT_NV12:
++ default:
++ pr_debug(" case not supported\n");
++ break;
++ }
++
++ if (f->fmt.pix.bytesperline < bytesperline)
++ f->fmt.pix.bytesperline = bytesperline;
++ else
++ bytesperline = f->fmt.pix.bytesperline;
++
++ if (f->fmt.pix.sizeimage < size)
++ f->fmt.pix.sizeimage = size;
++ else
++ size = f->fmt.pix.sizeimage;
++
++ cam->v2f.fmt.pix = f->fmt.pix;
++
++ if (cam->v2f.fmt.pix.priv != 0) {
++ if (copy_from_user(&cam->offset,
++ (void *)cam->v2f.fmt.pix.priv,
++ sizeof(cam->offset))) {
++ retval = -EFAULT;
++ break;
++ }
++ }
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ cam->win = f->fmt.win;
++ win_current = f->fmt.win;
++ size = win_current.w.width * win_current.w.height * 2;
++ if (cam->v2f.fmt.pix.sizeimage < size)
++ cam->v2f.fmt.pix.sizeimage = size;
++
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__, cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++
++ return retval;
++}
++
++/*!
++ * V4L2 - csi_v4l2_s_param function
++ * Allows setting of capturemode and frame rate.
++ *
++ * @param cam structure cam_data *
++ * @param parm structure v4l2_streamparm *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int csi_v4l2_s_param(cam_data *cam, struct v4l2_streamparm *parm)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ struct v4l2_streamparm currentparm;
++ int err = 0;
++
++ pr_debug("In %s\n", __func__);
++
++ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err("%s: invalid buffer type\n", __func__);
++ return -EINVAL;
++ }
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ currentparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++
++ /* First check that this device can support the changes requested. */
++ err = vidioc_int_g_parm(cam->sensor, &currentparm);
++ if (err) {
++ pr_err("%s: vidioc_int_g_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ pr_debug(" Current capabilities are %x\n",
++ currentparm.parm.capture.capability);
++ pr_debug(" Current capturemode is %d change to %d\n",
++ currentparm.parm.capture.capturemode,
++ parm->parm.capture.capturemode);
++ pr_debug(" Current framerate is %d change to %d\n",
++ currentparm.parm.capture.timeperframe.denominator,
++ parm->parm.capture.timeperframe.denominator);
++
++ err = vidioc_int_s_parm(cam->sensor, parm);
++ if (err) {
++ pr_err("%s: vidioc_int_s_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++ pr_debug(" g_fmt_cap returns widthxheight of input as %d x %d\n",
++ cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
++
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++ cam->crop_current.width = cam->crop_bounds.width;
++ cam->crop_current.height = cam->crop_bounds.height;
++
++exit:
++ return err;
++}
++
++static int pxp_set_cstate(cam_data *cam, struct v4l2_control *vc)
++{
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++
++ if (vc->id == V4L2_CID_HFLIP) {
++ proc_data->hflip = vc->value;
++ } else if (vc->id == V4L2_CID_VFLIP) {
++ proc_data->vflip = vc->value;
++ } else if (vc->id == V4L2_CID_PRIVATE_BASE) {
++ if (vc->value % 90)
++ return -ERANGE;
++ proc_data->rotate = vc->value;
++ cam->rotation = vc->value;
++ }
++
++ return 0;
++}
++
++static int pxp_get_cstate(cam_data *cam, struct v4l2_control *vc)
++{
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++
++ if (vc->id == V4L2_CID_HFLIP)
++ vc->value = proc_data->hflip;
++ else if (vc->id == V4L2_CID_VFLIP)
++ vc->value = proc_data->vflip;
++ else if (vc->id == V4L2_CID_PRIVATE_BASE)
++ vc->value = proc_data->rotate;
++
++ return 0;
++}
++
++
++/*!
++ * Dequeue one V4L capture buffer
++ *
++ * @param cam structure cam_data *
++ * @param buf structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL invalid frame number
++ * ETIME timeout, ERESTARTSYS interrupted by user
++ */
++static int csi_v4l_dqueue(cam_data *cam, struct v4l2_buffer *buf)
++{
++ int retval = 0;
++ struct mxc_v4l_frame *frame;
++ unsigned long lock_flags;
++
++ if (!wait_event_interruptible_timeout(cam->enc_queue,
++ cam->enc_counter != 0, 10 * HZ)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue timeout "
++ "enc_counter %x\n", cam->enc_counter);
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue() "
++ "interrupt received\n");
++ return -ERESTARTSYS;
++ }
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ spin_lock_irqsave(&cam->dqueue_int_lock, lock_flags);
++
++ if (list_empty(&cam->done_q)) {
++ spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
++ up(&cam->busy_lock);
++ return -EINVAL;
++ }
++
++ cam->enc_counter--;
++
++ frame = list_entry(cam->done_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->done_q.next);
++
++ if (frame->buffer.flags & V4L2_BUF_FLAG_DONE) {
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_DONE;
++ } else if (frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not filled.\n");
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ } else if ((frame->buffer.flags & 0x7) == V4L2_BUF_FLAG_MAPPED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not queued.\n");
++ retval = -EINVAL;
++ }
++
++ spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
++
++ buf->bytesused = cam->v2f.fmt.pix.sizeimage;
++ buf->index = frame->index;
++ buf->flags = frame->buffer.flags;
++ buf->m = cam->frame[frame->index].buffer.m;
++
++ /*
++ * Note:
++ * If want to do preview on LCD, use PxP CSC to convert from UYVY
++ * to RGB565; but for encoding, usually we don't use RGB format.
++ */
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
++ sg_dma_address(&cam->sg[0]) = buf->m.offset;
++ sg_dma_address(&cam->sg[1]) =
++ cam->frame[req_buf_number].paddress;
++ retval = pxp_process_update(cam);
++ if (retval) {
++ pr_err("Unable to submit PxP update task.\n");
++ return retval;
++ }
++ pxp_complete_update(cam);
++ if (cam->frame[buf->index].vaddress)
++ memcpy(cam->frame[buf->index].vaddress,
++ cam->frame[req_buf_number].vaddress,
++ cam->v2f.fmt.pix.sizeimage);
++ }
++ up(&cam->busy_lock);
++
++ return retval;
++}
++
++/*!
++ * V4L interface - open function
++ *
++ * @param file structure file *
++ *
++ * @return status 0 success, EBADF invalid device instance,
++ * ERESTARTSYS interrupted by user
++ */
++static int csi_v4l_open(struct file *file)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ struct sensor_data *sensor;
++ int err = 0;
++
++ pr_debug(" device name is %s\n", dev->name);
++
++ if (!cam) {
++ pr_err("%s: Internal error, cam_data not found!\n", __func__);
++ return -EBADF;
++ }
++
++ if (!cam->sensor) {
++ pr_err("%s: Internal error, camera is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ down(&cam->busy_lock);
++ err = 0;
++ if (signal_pending(current))
++ goto oops;
++
++ if (cam->open_count++ == 0) {
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ clk_prepare_enable(sensor->sensor_clk);
++ vidioc_int_s_power(cam->sensor, 1);
++ vidioc_int_init(cam->sensor);
++ vidioc_int_dev_init(cam->sensor);
++ }
++
++ file->private_data = dev;
++
++oops:
++ up(&cam->busy_lock);
++ return err;
++}
++
++/*!
++ * V4L interface - close function
++ *
++ * @param file struct file *
++ *
++ * @return 0 success
++ */
++static int csi_v4l_close(struct file *file)
++{
++ struct video_device *dev = video_devdata(file);
++ int err = 0;
++ cam_data *cam = video_get_drvdata(dev);
++ struct sensor_data *sensor;
++
++ pr_debug("In MVC:%s\n", __func__);
++
++ if (!cam) {
++ pr_err("%s: Internal error, cam_data not found!\n", __func__);
++ return -EBADF;
++ }
++
++ if (!cam->sensor) {
++ pr_err("%s: Internal error, camera is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ /* for the case somebody hit the ctrl C */
++ if (cam->overlay_pid == current->pid) {
++ err = stop_preview(cam);
++ cam->overlay_on = false;
++ }
++
++ if (--cam->open_count == 0) {
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++ file->private_data = NULL;
++ vidioc_int_s_power(cam->sensor, 0);
++ clk_disable_unprepare(sensor->sensor_clk);
++ }
++
++ return err;
++}
++
++/*
++ * V4L interface - read function
++ *
++ * @param file struct file *
++ * @param read buf char *
++ * @param count size_t
++ * @param ppos structure loff_t *
++ *
++ * @return bytes read
++ */
++static ssize_t csi_v4l_read(struct file *file, char *buf, size_t count,
++ loff_t *ppos)
++{
++ int err = 0;
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
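++ /* One-shot still capture: both ping-pong buffers point at a single
++ * DMA buffer; wait for one frame, then copy it to user space. */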
++ if (cam->still_buf_vaddr == NULL) {
++ cam->still_buf_vaddr = dma_alloc_coherent(0,
++ PAGE_ALIGN
++ (cam->v2f.fmt.
++ pix.sizeimage),
++ &cam->
++ still_buf[0],
++ GFP_DMA | GFP_KERNEL);
++ if (cam->still_buf_vaddr == NULL) {
++ pr_err("alloc dma memory failed\n");
++ return -ENOMEM;
++ }
++ cam->still_counter = 0;
++ __raw_writel(cam->still_buf[0], CSI_CSIDMASA_FB2);
++ __raw_writel(cam->still_buf[0], CSI_CSIDMASA_FB1);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_DMA_REFLASH_RFF,
++ CSI_CSICR3);
++ __raw_writel(__raw_readl(CSI_CSISR), CSI_CSISR);
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_FRMCNT_RST,
++ CSI_CSICR3);
++ csi_enable_int(1);
++ }
++
++ wait_event_interruptible(cam->still_queue, cam->still_counter);
++ csi_disable_int();
++ err = copy_to_user(buf, cam->still_buf_vaddr,
++ cam->v2f.fmt.pix.sizeimage);
++
++ if (cam->still_buf_vaddr != NULL) {
++ dma_free_coherent(0, PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ cam->still_buf_vaddr, cam->still_buf[0]);
++ cam->still_buf[0] = 0;
++ cam->still_buf_vaddr = NULL;
++ }
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ up(&cam->busy_lock);
++ if (err < 0)
++ return err;
++
++ return cam->v2f.fmt.pix.sizeimage - err;
++}
++
++/*!
++ * V4L interface - ioctl function
++ *
++ * @param file struct file*
++ *
++ * @param ioctlnr unsigned int
++ *
++ * @param arg void*
++ *
++ * @return 0 success, ENODEV for invalid device instance,
++ * -1 for other errors.
++ */
++static long csi_v4l_do_ioctl(struct file *file,
++ unsigned int ioctlnr, void *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ int retval = 0;
++ unsigned long lock_flags;
++
++ pr_debug("In MVC: %s, %x\n", __func__, ioctlnr);
++ wait_event_interruptible(cam->power_queue, cam->low_power == false);
++ /* make this _really_ smp-safe */
++ if (ioctlnr != VIDIOC_DQBUF)
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ switch (ioctlnr) {
++ /*!
++	 * V4L2 VIDIOC_G_FMT ioctl
++ */
++ case VIDIOC_G_FMT:{
++ struct v4l2_format *gf = arg;
++ pr_debug(" case VIDIOC_G_FMT\n");
++ retval = csi_v4l2_g_fmt(cam, gf);
++ break;
++ }
++
++ /*!
++	 * V4L2 VIDIOC_S_FMT ioctl
++ */
++ case VIDIOC_S_FMT:{
++ struct v4l2_format *sf = arg;
++ pr_debug(" case VIDIOC_S_FMT\n");
++ retval = csi_v4l2_s_fmt(cam, sf);
++ vidioc_int_s_fmt_cap(cam->sensor, sf);
++ break;
++ }
++
++ /*!
++	 * V4L2 VIDIOC_OVERLAY ioctl
++ */
++ case VIDIOC_OVERLAY:{
++ int *on = arg;
++ pr_debug(" case VIDIOC_OVERLAY\n");
++ if (*on) {
++ cam->overlay_on = true;
++ cam->overlay_pid = current->pid;
++ start_preview(cam);
++ }
++ if (!*on) {
++ stop_preview(cam);
++ cam->overlay_on = false;
++ }
++ break;
++ }
++
++ /*!
++	 * V4L2 VIDIOC_G_FBUF ioctl
++ */
++ case VIDIOC_G_FBUF:{
++ struct v4l2_framebuffer *fb = arg;
++ *fb = cam->v4l2_fb;
++ fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY;
++ break;
++ }
++
++ /*!
++	 * V4L2 VIDIOC_S_FBUF ioctl
++ */
++ case VIDIOC_S_FBUF:{
++ struct v4l2_framebuffer *fb = arg;
++ cam->v4l2_fb = *fb;
++ break;
++ }
++
++ case VIDIOC_G_PARM:{
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_G_PARM\n");
++ vidioc_int_g_parm(cam->sensor, parm);
++ break;
++ }
++
++ case VIDIOC_S_PARM:{
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_S_PARM\n");
++ retval = csi_v4l2_s_param(cam, parm);
++ break;
++ }
++
++ case VIDIOC_QUERYCAP:{
++ struct v4l2_capability *cap = arg;
++ pr_debug(" case VIDIOC_QUERYCAP\n");
++ strcpy(cap->driver, "csi_v4l2");
++ cap->version = KERNEL_VERSION(0, 1, 11);
++ cap->capabilities = V4L2_CAP_VIDEO_OVERLAY |
++ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
++ V4L2_CAP_VIDEO_OUTPUT_OVERLAY | V4L2_CAP_READWRITE;
++ cap->card[0] = '\0';
++ cap->bus_info[0] = '\0';
++ break;
++ }
++
++ case VIDIOC_CROPCAP:
++ {
++ struct v4l2_cropcap *cap = arg;
++
++ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++ cap->bounds = cam->crop_bounds;
++ cap->defrect = cam->crop_defrect;
++ break;
++ }
++ case VIDIOC_S_CROP:
++ {
++ struct v4l2_crop *crop = arg;
++ struct v4l2_rect *b = &cam->crop_bounds;
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ retval = -EINVAL;
++ break;
++ }
++
++ crop->c.top = (crop->c.top < b->top) ? b->top
++ : crop->c.top;
++ if (crop->c.top > b->top + b->height)
++ crop->c.top = b->top + b->height - 1;
++ if (crop->c.height > b->top + b->height - crop->c.top)
++ crop->c.height =
++ b->top + b->height - crop->c.top;
++
++ crop->c.left = (crop->c.left < b->left) ? b->left
++ : crop->c.left;
++ if (crop->c.left > b->left + b->width)
++ crop->c.left = b->left + b->width - 1;
++ if (crop->c.width > b->left - crop->c.left + b->width)
++ crop->c.width =
++ b->left - crop->c.left + b->width;
++
++ crop->c.width -= crop->c.width % 8;
++ crop->c.height -= crop->c.height % 8;
++
++ crop_current.c = crop->c;
++
++ break;
++ }
++ case VIDIOC_G_CROP:
++ {
++ struct v4l2_crop *crop = arg;
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ retval = -EINVAL;
++ break;
++ }
++ crop->c = crop_current.c;
++
++ break;
++
++ }
++ case VIDIOC_REQBUFS: {
++ struct v4l2_requestbuffers *req = arg;
++ pr_debug(" case VIDIOC_REQBUFS\n");
++
++ if (req->count > FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "not enough buffers\n");
++ req->count = FRAME_NUM;
++ }
++
++ if (req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "wrong buffer type\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ csi_streamoff(cam);
++ if (req->memory & V4L2_MEMORY_MMAP) {
++ csi_free_frame_buf(cam);
++ retval = csi_allocate_frame_buf(cam, req->count + 1);
++ req_buf_number = req->count;
++ }
++ break;
++ }
++
++ case VIDIOC_QUERYBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QUERYBUF\n");
++
++ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ retval = -EINVAL;
++ break;
++ }
++
++ if (buf->memory & V4L2_MEMORY_MMAP) {
++			memset(buf, 0, sizeof(*buf));
++ buf->index = index;
++ }
++
++ down(&cam->param_lock);
++ if (buf->memory & V4L2_MEMORY_USERPTR) {
++ csi_v4l2_release_bufs(cam);
++ retval = csi_v4l2_prepare_bufs(cam, buf);
++ }
++ if (buf->memory & V4L2_MEMORY_MMAP)
++ retval = csi_v4l2_buffer_status(cam, buf);
++ up(&cam->param_lock);
++ break;
++ }
++
++ case VIDIOC_QBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QBUF\n");
++
++ spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
++ cam->frame[index].buffer.m.offset = buf->m.offset;
++ if ((cam->frame[index].buffer.flags & 0x7) ==
++ V4L2_BUF_FLAG_MAPPED) {
++ cam->frame[index].buffer.flags |= V4L2_BUF_FLAG_QUEUED;
++ list_add_tail(&cam->frame[index].queue, &cam->ready_q);
++ } else if (cam->frame[index].buffer.flags &
++ V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "buffer already queued\n");
++ retval = -EINVAL;
++ } else if (cam->frame[index].buffer.
++ flags & V4L2_BUF_FLAG_DONE) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "overwrite done buffer.\n");
++ cam->frame[index].buffer.flags &=
++ ~V4L2_BUF_FLAG_DONE;
++ cam->frame[index].buffer.flags |=
++ V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ }
++ buf->flags = cam->frame[index].buffer.flags;
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++
++ break;
++ }
++
++ case VIDIOC_DQBUF: {
++ struct v4l2_buffer *buf = arg;
++ pr_debug(" case VIDIOC_DQBUF\n");
++
++ retval = csi_v4l_dqueue(cam, buf);
++
++ break;
++ }
++
++ case VIDIOC_STREAMON: {
++ pr_debug(" case VIDIOC_STREAMON\n");
++ retval = csi_streamon(cam);
++ break;
++ }
++
++ case VIDIOC_STREAMOFF: {
++ pr_debug(" case VIDIOC_STREAMOFF\n");
++ retval = csi_streamoff(cam);
++ break;
++ }
++ case VIDIOC_ENUM_FMT: {
++ struct v4l2_fmtdesc *fmt = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_fmt_cap(cam->sensor, fmt);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_ENUM_FRAMESIZES: {
++ struct v4l2_frmsizeenum *fsize = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_framesizes(cam->sensor, fsize);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_ENUM_FRAMEINTERVALS: {
++ struct v4l2_frmivalenum *fival = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_frameintervals(cam->sensor,
++ fival);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_DBG_G_CHIP_IDENT: {
++ struct v4l2_dbg_chip_ident *p = arg;
++ p->ident = V4L2_IDENT_NONE;
++ p->revision = 0;
++ if (cam->sensor)
++ retval = vidioc_int_g_chip_ident(cam->sensor, (int *)p);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ case VIDIOC_S_CTRL:
++ {
++ struct v4l2_control *vc = arg;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pxp_controls); i++)
++ if (vc->id == pxp_controls[i].id) {
++ if (vc->value < pxp_controls[i].minimum ||
++ vc->value > pxp_controls[i].maximum) {
++ retval = -ERANGE;
++ break;
++ }
++ retval = pxp_set_cstate(cam, vc);
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(pxp_controls))
++ retval = -EINVAL;
++ break;
++
++ }
++ case VIDIOC_G_CTRL:
++ {
++ struct v4l2_control *vc = arg;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pxp_controls); i++)
++ if (vc->id == pxp_controls[i].id) {
++ retval = pxp_get_cstate(cam, vc);
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(pxp_controls))
++ retval = -EINVAL;
++ break;
++ }
++ case VIDIOC_QUERYCTRL:
++ {
++ struct v4l2_queryctrl *qc = arg;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pxp_controls); i++)
++ if (qc->id && qc->id == pxp_controls[i].id) {
++ memcpy(qc, &(pxp_controls[i]), sizeof(*qc));
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(pxp_controls))
++ retval = -EINVAL;
++ break;
++ }
++ case VIDIOC_G_STD:
++ case VIDIOC_G_OUTPUT:
++ case VIDIOC_S_OUTPUT:
++ case VIDIOC_ENUMSTD:
++ case VIDIOC_S_STD:
++ case VIDIOC_TRY_FMT:
++ case VIDIOC_ENUMINPUT:
++ case VIDIOC_G_INPUT:
++ case VIDIOC_S_INPUT:
++ case VIDIOC_G_TUNER:
++ case VIDIOC_S_TUNER:
++ case VIDIOC_G_FREQUENCY:
++ case VIDIOC_S_FREQUENCY:
++ case VIDIOC_ENUMOUTPUT:
++ default:
++ pr_debug(" case not supported\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (ioctlnr != VIDIOC_DQBUF)
++ up(&cam->busy_lock);
++ return retval;
++}
++
++/*
++ * V4L interface - ioctl function
++ *
++ * @return ioctl handler return value
++ */
++static long csi_v4l_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ return video_usercopy(file, cmd, arg, csi_v4l_do_ioctl);
++}
++
++/*!
++ * V4L interface - mmap function
++ *
++ * @param file structure file *
++ *
++ * @param vma structure vm_area_struct *
++ *
++ * @return status 0 Success, -EINTR busy lock error, -ENOBUFS remap_pfn_range error
++ */
++static int csi_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct video_device *dev = video_devdata(file);
++ unsigned long size;
++ int res = 0;
++ cam_data *cam = video_get_drvdata(dev);
++
++ pr_debug("%s\n", __func__);
++ pr_debug("\npgoff=0x%lx, start=0x%lx, end=0x%lx\n",
++ vma->vm_pgoff, vma->vm_start, vma->vm_end);
++
++ /* make this _really_ smp-safe */
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ size = vma->vm_end - vma->vm_start;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ vma->vm_pgoff, size, vma->vm_page_prot)) {
++ pr_err("ERROR: v4l2 capture: %s : "
++ "remap_pfn_range failed\n", __func__);
++ res = -ENOBUFS;
++ goto csi_mmap_exit;
++ }
++
++ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
++
++csi_mmap_exit:
++ up(&cam->busy_lock);
++ return res;
++}
++
++/*!
++ * This structure defines the functions to be called in this driver.
++ */
++static struct v4l2_file_operations csi_v4l_fops = {
++ .owner = THIS_MODULE,
++ .open = csi_v4l_open,
++ .release = csi_v4l_close,
++ .read = csi_v4l_read,
++ .ioctl = csi_v4l_ioctl,
++ .mmap = csi_mmap,
++};
++
++static struct video_device csi_v4l_template = {
++ .name = "Mx25 Camera",
++ .fops = &csi_v4l_fops,
++ .release = video_device_release,
++};
++
++/*!
++ * initialize cam_data structure
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static void init_camera_struct(cam_data *cam)
++{
++ struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
++ pr_debug("In MVC: %s\n", __func__);
++
++ proc_data->hflip = 0;
++ proc_data->vflip = 0;
++ proc_data->rotate = 0;
++ proc_data->bgcolor = 0;
++
++ /* Default everything to 0 */
++ memset(cam, 0, sizeof(cam_data));
++
++ sema_init(&cam->param_lock, 1);
++ sema_init(&cam->busy_lock, 1);
++
++ cam->video_dev = video_device_alloc();
++ if (cam->video_dev == NULL)
++ return;
++
++ *(cam->video_dev) = csi_v4l_template;
++
++ video_set_drvdata(cam->video_dev, cam);
++ cam->video_dev->minor = -1;
++
++ init_waitqueue_head(&cam->enc_queue);
++ init_waitqueue_head(&cam->still_queue);
++
++ cam->streamparm.parm.capture.capturemode = 0;
++
++ cam->standard.index = 0;
++ cam->standard.id = V4L2_STD_UNKNOWN;
++ cam->standard.frameperiod.denominator = 30;
++ cam->standard.frameperiod.numerator = 1;
++ cam->standard.framelines = 480;
++ cam->standard_autodetect = true;
++ cam->streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->streamparm.parm.capture.timeperframe = cam->standard.frameperiod;
++ cam->streamparm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
++ cam->overlay_on = false;
++ cam->capture_on = false;
++ cam->v4l2_fb.flags = V4L2_FBUF_FLAG_OVERLAY;
++
++ cam->v2f.fmt.pix.sizeimage = 480 * 640 * 2;
++ cam->v2f.fmt.pix.bytesperline = 640 * 2;
++ cam->v2f.fmt.pix.width = 640;
++ cam->v2f.fmt.pix.height = 480;
++ cam->v2f.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
++ cam->win.w.width = 160;
++ cam->win.w.height = 160;
++ cam->win.w.left = 0;
++ cam->win.w.top = 0;
++ cam->still_counter = 0;
++ /* setup cropping */
++ cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = 640;
++ cam->crop_bounds.top = 0;
++ cam->crop_bounds.height = 480;
++ cam->crop_current = cam->crop_defrect = cam->crop_bounds;
++
++ cam->enc_callback = camera_callback;
++ csi_start_callback(cam);
++ init_waitqueue_head(&cam->power_queue);
++ spin_lock_init(&cam->queue_int_lock);
++ spin_lock_init(&cam->dqueue_int_lock);
++}
++
++/*!
++ * camera_power function
++ * Turns Sensor power On/Off
++ *
++ * @param cam cam data struct
++ * @param cameraOn true to turn camera on, false to turn off power.
++ *
++ * @return status
++ */
++static u8 camera_power(cam_data *cam, bool cameraOn)
++{
++ pr_debug("In MVC: %s on=%d\n", __func__, cameraOn);
++
++ if (cameraOn == true) {
++ vidioc_int_s_power(cam->sensor, 1);
++ } else {
++ vidioc_int_s_power(cam->sensor, 0);
++ }
++ return 0;
++}
++
++static const struct of_device_id imx_csi_v4l2_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-csi-v4l2", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_csi_v4l2_dt_ids);
++
++static int csi_v4l2_probe(struct platform_device *pdev)
++{
++ struct scatterlist *sg;
++ u8 err = 0;
++
++ /* Create g_cam and initialize it. */
++ g_cam = kmalloc(sizeof(cam_data), GFP_KERNEL);
++ if (g_cam == NULL) {
++ pr_err("ERROR: v4l2 capture: failed to register camera\n");
++ err = -ENOMEM;
++ goto out;
++ }
++ memset(&crop_current, 0, sizeof(crop_current));
++ memset(&win_current, 0, sizeof(win_current));
++ init_camera_struct(g_cam);
++ platform_set_drvdata(pdev, (void *)g_cam);
++
++ /* Set up the v4l2 device and register it */
++ csi_v4l2_int_device.priv = g_cam;
++ /* This function contains a bug that won't let this be rmmod'd. */
++ v4l2_int_device_register(&csi_v4l2_int_device);
++
++ /* register v4l video device */
++ if (video_register_device(g_cam->video_dev, VFL_TYPE_GRABBER, video_nr)
++ == -1) {
++ kfree(g_cam);
++ g_cam = NULL;
++ pr_err("ERROR: v4l2 capture: video_register_device failed\n");
++ err = -ENODEV;
++ goto out;
++ }
++ pr_debug(" Video device registered: %s #%d\n",
++ g_cam->video_dev->name, g_cam->video_dev->minor);
++
++ g_cam->pxp_chan = NULL;
++ /* Initialize Scatter-gather list containing 2 buffer addresses. */
++ sg = g_cam->sg;
++ sg_init_table(sg, 2);
++
++out:
++ return err;
++}
++
++static int csi_v4l2_remove(struct platform_device *pdev)
++{
++ if (g_cam->open_count) {
++ pr_err("ERROR: v4l2 capture:camera open "
++ "-- setting ops to NULL\n");
++ } else {
++ pr_info("V4L2 freeing image input device\n");
++ v4l2_int_device_unregister(&csi_v4l2_int_device);
++ csi_stop_callback(g_cam);
++ video_unregister_device(g_cam->video_dev);
++ platform_set_drvdata(pdev, NULL);
++
++ kfree(g_cam);
++ g_cam = NULL;
++ }
++
++ return 0;
++}
++
++/*!
++ * This function is called to put the sensor in a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure used to give information on which I2C
++ * to suspend
++ * @param state the power state the device is entering
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int csi_v4l2_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (cam == NULL)
++ return -1;
++
++ cam->low_power = true;
++
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ if (cam->capture_on == true || cam->overlay_on == true)
++ camera_power(cam, false);
++
++ return 0;
++}
++
++/*!
++ * This function is called to bring the sensor back from a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure
++ *
++ * @return The function returns 0 on success and -1 on failure
++ */
++static int csi_v4l2_resume(struct platform_device *pdev)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC: %s\n", __func__);
++
++ if (cam == NULL)
++ return -1;
++
++ cam->low_power = false;
++ wake_up_interruptible(&cam->power_queue);
++ if (cam->capture_on == true || cam->overlay_on == true)
++ camera_power(cam, true);
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ return 0;
++}
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver csi_v4l2_driver = {
++ .driver = {
++ .name = "csi_v4l2",
++ .of_match_table = of_match_ptr(imx_csi_v4l2_dt_ids),
++ },
++ .probe = csi_v4l2_probe,
++ .remove = csi_v4l2_remove,
++#ifdef CONFIG_PM
++ .suspend = csi_v4l2_suspend,
++ .resume = csi_v4l2_resume,
++#endif
++ .shutdown = NULL,
++};
++
++/*!
++ * Initializes the camera driver.
++ */
++static int csi_v4l2_master_attach(struct v4l2_int_device *slave)
++{
++ cam_data *cam = slave->u.slave->master->priv;
++ struct v4l2_format cam_fmt;
++
++ pr_debug("In MVC: %s\n", __func__);
++ pr_debug(" slave.name = %s\n", slave->name);
++ pr_debug(" master.name = %s\n", slave->u.slave->master->name);
++
++ cam->sensor = slave;
++ if (slave == NULL) {
++ pr_err("ERROR: v4l2 capture: slave parameter not valid.\n");
++ return -1;
++ }
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++
++ /* Used to detect TV in (type 1) vs. camera (type 0) */
++ cam->device_type = cam_fmt.fmt.pix.priv;
++
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /* This also is the max crop size for this device. */
++ cam->crop_defrect.top = cam->crop_defrect.left = 0;
++ cam->crop_defrect.width = cam_fmt.fmt.pix.width;
++ cam->crop_defrect.height = cam_fmt.fmt.pix.height;
++
++ /* At this point, this is also the current image size. */
++ cam->crop_current.top = cam->crop_current.left = 0;
++ cam->crop_current.width = cam_fmt.fmt.pix.width;
++ cam->crop_current.height = cam_fmt.fmt.pix.height;
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__, cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++
++ return 0;
++}
++
++/*!
++ * Disconnects the camera driver.
++ */
++static void csi_v4l2_master_detach(struct v4l2_int_device *slave)
++{
++ pr_debug("In MVC: %s\n", __func__);
++
++ vidioc_int_dev_exit(slave);
++}
++
++module_platform_driver(csi_v4l2_driver);
++
++module_param(video_nr, int, 0444);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("V4L2 capture driver for Mx25 based cameras");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("video");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/fsl_csi.c linux-3.14.40/drivers/media/platform/mxc/capture/fsl_csi.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/fsl_csi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/fsl_csi.c 2015-05-01 14:57:59.255427001 -0500
+@@ -0,0 +1,302 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file fsl_csi.c, this file is derived from mx27_csi.c
++ *
++ * @brief mx25 CMOS Sensor interface functions
++ *
++ * @ingroup CSI
++ */
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++#include <linux/clk.h>
++#include <linux/of.h>
++#include <linux/sched.h>
++
++#include "mxc_v4l2_capture.h"
++#include "fsl_csi.h"
++
++void __iomem *csi_regbase;
++EXPORT_SYMBOL(csi_regbase);
++static int irq_nr;
++static csi_irq_callback_t g_callback;
++static void *g_callback_data;
++
++static irqreturn_t csi_irq_handler(int irq, void *data)
++{
++ cam_data *cam = (cam_data *) data;
++ unsigned long status = __raw_readl(CSI_CSISR);
++
++ __raw_writel(status, CSI_CSISR);
++
++ if (status & BIT_HRESP_ERR_INT)
++		pr_warning("Hresponse error detected.\n");
++
++ if (status & BIT_DMA_TSF_DONE_FB1) {
++ if (cam->capture_on) {
++ spin_lock(&cam->queue_int_lock);
++ cam->ping_pong_csi = 1;
++ spin_unlock(&cam->queue_int_lock);
++ cam->enc_callback(0, cam);
++ } else {
++ cam->still_counter++;
++ wake_up_interruptible(&cam->still_queue);
++ }
++ }
++
++ if (status & BIT_DMA_TSF_DONE_FB2) {
++ if (cam->capture_on) {
++ spin_lock(&cam->queue_int_lock);
++ cam->ping_pong_csi = 2;
++ spin_unlock(&cam->queue_int_lock);
++ cam->enc_callback(0, cam);
++ } else {
++ cam->still_counter++;
++ wake_up_interruptible(&cam->still_queue);
++ }
++ }
++
++ if (g_callback)
++ g_callback(g_callback_data, status);
++
++ pr_debug("CSI status = 0x%08lX\n", status);
++
++ return IRQ_HANDLED;
++}
++
++static void csihw_reset_frame_count(void)
++{
++ __raw_writel(__raw_readl(CSI_CSICR3) | BIT_FRMCNT_RST, CSI_CSICR3);
++}
++
++static void csihw_reset(void)
++{
++ csihw_reset_frame_count();
++ __raw_writel(CSICR1_RESET_VAL, CSI_CSICR1);
++ __raw_writel(CSICR2_RESET_VAL, CSI_CSICR2);
++ __raw_writel(CSICR3_RESET_VAL, CSI_CSICR3);
++}
++
++/*!
++ * csi_init_interface
++ * Init csi interface
++ */
++void csi_init_interface(void)
++{
++ unsigned int val = 0;
++ unsigned int imag_para;
++
++ val |= BIT_SOF_POL;
++ val |= BIT_REDGE;
++ val |= BIT_GCLK_MODE;
++ val |= BIT_HSYNC_POL;
++ val |= BIT_PACK_DIR;
++ val |= BIT_FCC;
++ val |= BIT_SWAP16_EN;
++ val |= 1 << SHIFT_MCLKDIV;
++ val |= BIT_MCLKEN;
++ __raw_writel(val, CSI_CSICR1);
++
++ imag_para = (640 << 16) | 960;
++ __raw_writel(imag_para, CSI_CSIIMAG_PARA);
++
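++	/* 0x1010 = BIT_DMA_REQ_EN_RFF | RxFIFO full level 1,
++	 * per the CSICR3 bit definitions in fsl_csi.h */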
++ val = 0x1010;
++ val |= BIT_DMA_REFLASH_RFF;
++ __raw_writel(val, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_init_interface);
++
++void csi_init_format(int fmt)
++{
++ unsigned int val;
++
++ val = __raw_readl(CSI_CSICR1);
++ if (fmt == V4L2_PIX_FMT_YUYV) {
++ val &= ~BIT_PACK_DIR;
++ val &= ~BIT_SWAP16_EN;
++ } else if (fmt == V4L2_PIX_FMT_UYVY) {
++ val |= BIT_PACK_DIR;
++ val |= BIT_SWAP16_EN;
++ } else
++ pr_warning("unsupported format, old format remains.\n");
++
++ __raw_writel(val, CSI_CSICR1);
++}
++EXPORT_SYMBOL(csi_init_format);
++
++/*!
++ * csi_read_mclk_flag
++ *
++ * @return gcsi_mclk_source
++ */
++int csi_read_mclk_flag(void)
++{
++ return 0;
++}
++EXPORT_SYMBOL(csi_read_mclk_flag);
++
++void csi_start_callback(void *data)
++{
++ cam_data *cam = (cam_data *) data;
++
++ if (request_irq(irq_nr, csi_irq_handler, 0, "csi", cam) < 0)
++ pr_debug("CSI error: irq request fail\n");
++
++}
++EXPORT_SYMBOL(csi_start_callback);
++
++void csi_stop_callback(void *data)
++{
++ cam_data *cam = (cam_data *) data;
++
++ free_irq(irq_nr, cam);
++}
++EXPORT_SYMBOL(csi_stop_callback);
++
++void csi_enable_int(int arg)
++{
++ unsigned long cr1 = __raw_readl(CSI_CSICR1);
++
++ cr1 |= BIT_SOF_INTEN;
++ if (arg == 1) {
++		/* still capture needs DMA interrupt */
++ cr1 |= BIT_FB1_DMA_DONE_INTEN;
++ cr1 |= BIT_FB2_DMA_DONE_INTEN;
++ }
++ __raw_writel(cr1, CSI_CSICR1);
++}
++EXPORT_SYMBOL(csi_enable_int);
++
++void csi_disable_int(void)
++{
++ unsigned long cr1 = __raw_readl(CSI_CSICR1);
++
++ cr1 &= ~BIT_SOF_INTEN;
++ cr1 &= ~BIT_FB1_DMA_DONE_INTEN;
++ cr1 &= ~BIT_FB2_DMA_DONE_INTEN;
++ __raw_writel(cr1, CSI_CSICR1);
++}
++EXPORT_SYMBOL(csi_disable_int);
++
++void csi_set_16bit_imagpara(int width, int height)
++{
++ int imag_para = 0;
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ imag_para = (width << 16) | (height * 2);
++ __raw_writel(imag_para, CSI_CSIIMAG_PARA);
++
++	/* reflash the embedded DMA controller */
++ __raw_writel(cr3 | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_set_16bit_imagpara);
++
++void csi_set_12bit_imagpara(int width, int height)
++{
++ int imag_para = 0;
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ imag_para = (width << 16) | (height * 3 / 2);
++ __raw_writel(imag_para, CSI_CSIIMAG_PARA);
++
++	/* reflash the embedded DMA controller */
++ __raw_writel(cr3 | BIT_DMA_REFLASH_RFF, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_set_12bit_imagpara);
++
++void csi_dmareq_rff_enable(void)
++{
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ cr3 |= BIT_DMA_REQ_EN_RFF;
++ cr3 |= BIT_HRESP_ERR_EN;
++ __raw_writel(cr3, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_dmareq_rff_enable);
++
++void csi_dmareq_rff_disable(void)
++{
++ unsigned long cr3 = __raw_readl(CSI_CSICR3);
++
++ cr3 &= ~BIT_DMA_REQ_EN_RFF;
++ cr3 &= ~BIT_HRESP_ERR_EN;
++ __raw_writel(cr3, CSI_CSICR3);
++}
++EXPORT_SYMBOL(csi_dmareq_rff_disable);
++
++static const struct of_device_id fsl_csi_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-csi", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_csi_dt_ids);
++
++static int csi_probe(struct platform_device *pdev)
++{
++ int ret = 0;
++ struct resource *res;
++
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "No csi irq found.\n");
++ ret = -ENODEV;
++ goto err;
++ }
++ irq_nr = res->start;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "No csi base address found.\n");
++ ret = -ENODEV;
++ goto err;
++ }
++ csi_regbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
++ if (!csi_regbase) {
++ dev_err(&pdev->dev, "ioremap failed with csi base\n");
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ csihw_reset();
++ csi_init_interface();
++ csi_dmareq_rff_disable();
++
++err:
++ return ret;
++}
++
++static int csi_remove(struct platform_device *pdev)
++{
++ return 0;
++}
++
++static struct platform_driver csi_driver = {
++ .driver = {
++ .name = "fsl_csi",
++ .of_match_table = of_match_ptr(fsl_csi_dt_ids),
++ },
++ .probe = csi_probe,
++ .remove = csi_remove,
++};
++
++module_platform_driver(csi_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("fsl CSI driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/fsl_csi.h linux-3.14.40/drivers/media/platform/mxc/capture/fsl_csi.h
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/fsl_csi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/fsl_csi.h 2015-05-01 14:57:59.255427001 -0500
+@@ -0,0 +1,198 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file fsl_csi.h
++ *
++ * @brief mx25 CMOS Sensor interface functions
++ *
++ * @ingroup CSI
++ */
++
++#ifndef MX25_CSI_H
++#define MX25_CSI_H
++
++#include <linux/io.h>
++
++/* reset values */
++#define CSICR1_RESET_VAL 0x40000800
++#define CSICR2_RESET_VAL 0x0
++#define CSICR3_RESET_VAL 0x0
++
++/* csi control reg 1 */
++#define BIT_SWAP16_EN (0x1 << 31)
++#define BIT_EXT_VSYNC (0x1 << 30)
++#define BIT_EOF_INT_EN (0x1 << 29)
++#define BIT_PRP_IF_EN (0x1 << 28)
++#define BIT_CCIR_MODE (0x1 << 27)
++#define BIT_COF_INT_EN (0x1 << 26)
++#define BIT_SF_OR_INTEN (0x1 << 25)
++#define BIT_RF_OR_INTEN (0x1 << 24)
++#define BIT_SFF_DMA_DONE_INTEN (0x1 << 22)
++#define BIT_STATFF_INTEN (0x1 << 21)
++#define BIT_FB2_DMA_DONE_INTEN (0x1 << 20)
++#define BIT_FB1_DMA_DONE_INTEN (0x1 << 19)
++#define BIT_RXFF_INTEN (0x1 << 18)
++#define BIT_SOF_POL (0x1 << 17)
++#define BIT_SOF_INTEN (0x1 << 16)
++#define BIT_MCLKDIV (0xF << 12)
++#define BIT_HSYNC_POL (0x1 << 11)
++#define BIT_CCIR_EN (0x1 << 10)
++#define BIT_MCLKEN (0x1 << 9)
++#define BIT_FCC (0x1 << 8)
++#define BIT_PACK_DIR (0x1 << 7)
++#define BIT_CLR_STATFIFO (0x1 << 6)
++#define BIT_CLR_RXFIFO (0x1 << 5)
++#define BIT_GCLK_MODE (0x1 << 4)
++#define BIT_INV_DATA (0x1 << 3)
++#define BIT_INV_PCLK (0x1 << 2)
++#define BIT_REDGE (0x1 << 1)
++#define BIT_PIXEL_BIT (0x1 << 0)
++
++#define SHIFT_MCLKDIV 12
++
++/* control reg 3 */
++#define BIT_FRMCNT (0xFFFF << 16)
++#define BIT_FRMCNT_RST (0x1 << 15)
++#define BIT_DMA_REFLASH_RFF (0x1 << 14)
++#define BIT_DMA_REFLASH_SFF (0x1 << 13)
++#define BIT_DMA_REQ_EN_RFF (0x1 << 12)
++#define BIT_DMA_REQ_EN_SFF (0x1 << 11)
++#define BIT_STATFF_LEVEL (0x7 << 8)
++#define BIT_HRESP_ERR_EN (0x1 << 7)
++#define BIT_RXFF_LEVEL (0x7 << 4)
++#define BIT_TWO_8BIT_SENSOR (0x1 << 3)
++#define BIT_ZERO_PACK_EN (0x1 << 2)
++#define BIT_ECC_INT_EN (0x1 << 1)
++#define BIT_ECC_AUTO_EN (0x1 << 0)
++
++#define SHIFT_FRMCNT 16
++
++/* csi status reg */
++#define BIT_SFF_OR_INT (0x1 << 25)
++#define BIT_RFF_OR_INT (0x1 << 24)
++#define BIT_DMA_TSF_DONE_SFF (0x1 << 22)
++#define BIT_STATFF_INT (0x1 << 21)
++#define BIT_DMA_TSF_DONE_FB2 (0x1 << 20)
++#define BIT_DMA_TSF_DONE_FB1 (0x1 << 19)
++#define BIT_RXFF_INT (0x1 << 18)
++#define BIT_EOF_INT (0x1 << 17)
++#define BIT_SOF_INT (0x1 << 16)
++#define BIT_F2_INT (0x1 << 15)
++#define BIT_F1_INT (0x1 << 14)
++#define BIT_COF_INT (0x1 << 13)
++#define BIT_HRESP_ERR_INT (0x1 << 7)
++#define BIT_ECC_INT (0x1 << 1)
++#define BIT_DRDY (0x1 << 0)
++
++#define CSI_MCLK_VF 1
++#define CSI_MCLK_ENC 2
++#define CSI_MCLK_RAW 4
++#define CSI_MCLK_I2C 8
++#endif
++
++extern void __iomem *csi_regbase;
++#define CSI_CSICR1 (csi_regbase)
++#define CSI_CSICR2 (csi_regbase + 0x4)
++#define CSI_CSICR3 (csi_regbase + 0x8)
++#define CSI_STATFIFO (csi_regbase + 0xC)
++#define CSI_CSIRXFIFO (csi_regbase + 0x10)
++#define CSI_CSIRXCNT (csi_regbase + 0x14)
++#define CSI_CSISR (csi_regbase + 0x18)
++
++#define CSI_CSIDBG (csi_regbase + 0x1C)
++#define CSI_CSIDMASA_STATFIFO (csi_regbase + 0x20)
++#define CSI_CSIDMATS_STATFIFO (csi_regbase + 0x24)
++#define CSI_CSIDMASA_FB1 (csi_regbase + 0x28)
++#define CSI_CSIDMASA_FB2 (csi_regbase + 0x2C)
++#define CSI_CSIFBUF_PARA (csi_regbase + 0x30)
++#define CSI_CSIIMAG_PARA (csi_regbase + 0x34)
++
++static inline void csi_clear_status(unsigned long status)
++{
++ __raw_writel(status, CSI_CSISR);
++}
++
++struct csi_signal_cfg_t {
++ unsigned data_width:3;
++ unsigned clk_mode:2;
++ unsigned ext_vsync:1;
++ unsigned Vsync_pol:1;
++ unsigned Hsync_pol:1;
++ unsigned pixclk_pol:1;
++ unsigned data_pol:1;
++ unsigned sens_clksrc:1;
++};
++
++struct csi_config_t {
++ /* control reg 1 */
++ unsigned int swap16_en:1;
++ unsigned int ext_vsync:1;
++ unsigned int eof_int_en:1;
++ unsigned int prp_if_en:1;
++ unsigned int ccir_mode:1;
++ unsigned int cof_int_en:1;
++ unsigned int sf_or_inten:1;
++ unsigned int rf_or_inten:1;
++ unsigned int sff_dma_done_inten:1;
++ unsigned int statff_inten:1;
++ unsigned int fb2_dma_done_inten:1;
++ unsigned int fb1_dma_done_inten:1;
++ unsigned int rxff_inten:1;
++ unsigned int sof_pol:1;
++ unsigned int sof_inten:1;
++ unsigned int mclkdiv:4;
++ unsigned int hsync_pol:1;
++ unsigned int ccir_en:1;
++ unsigned int mclken:1;
++ unsigned int fcc:1;
++ unsigned int pack_dir:1;
++ unsigned int gclk_mode:1;
++ unsigned int inv_data:1;
++ unsigned int inv_pclk:1;
++ unsigned int redge:1;
++ unsigned int pixel_bit:1;
++
++ /* control reg 3 */
++ unsigned int frmcnt:16;
++ unsigned int frame_reset:1;
++ unsigned int dma_reflash_rff:1;
++ unsigned int dma_reflash_sff:1;
++ unsigned int dma_req_en_rff:1;
++ unsigned int dma_req_en_sff:1;
++ unsigned int statff_level:3;
++ unsigned int hresp_err_en:1;
++ unsigned int rxff_level:3;
++ unsigned int two_8bit_sensor:1;
++ unsigned int zero_pack_en:1;
++ unsigned int ecc_int_en:1;
++ unsigned int ecc_auto_en:1;
++ /* fifo counter */
++ unsigned int rxcnt;
++};
++
++typedef void (*csi_irq_callback_t) (void *data, unsigned long status);
++
++void csi_init_interface(void);
++void csi_init_format(int fmt);
++void csi_set_16bit_imagpara(int width, int height);
++void csi_set_12bit_imagpara(int width, int height);
++int csi_read_mclk_flag(void);
++void csi_start_callback(void *data);
++void csi_stop_callback(void *data);
++void csi_enable_int(int arg);
++void csi_disable_int(void);
++void csi_mclk_enable(void);
++void csi_mclk_disable(void);
++void csi_dmareq_rff_enable(void);
++void csi_dmareq_rff_disable(void);
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_bg_overlay_sdc.c 2015-05-01 14:57:59.255427001 -0500
+@@ -0,0 +1,546 @@
++
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_bg_overlay_sdc.c
++ *
++ * @brief IPU Use case for PRP-VF background
++ *
++ * @ingroup IPU
++ */
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/fb.h>
++#include <linux/ipu.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int csi_buffer_num;
++static u32 bpp, csi_mem_bufsize = 3;
++static u32 out_format;
++static struct ipu_soc *disp_ipu;
++static u32 offset;
++
++static void csi_buf_work_func(struct work_struct *work)
++{
++ int err = 0;
++ cam_data *cam =
++ container_of(work, struct _cam_data, csi_work_struct);
++
++ struct ipu_task task;
++ memset(&task, 0, sizeof(task));
++
++ if (csi_buffer_num)
++ task.input.paddr = cam->vf_bufs[0];
++ else
++ task.input.paddr = cam->vf_bufs[1];
++ task.input.width = cam->crop_current.width;
++ task.input.height = cam->crop_current.height;
++ task.input.format = IPU_PIX_FMT_UYVY;
++
++ task.output.paddr = offset;
++ task.output.width = cam->overlay_fb->var.xres;
++ task.output.height = cam->overlay_fb->var.yres;
++ task.output.format = out_format;
++ task.output.rotate = cam->rotation;
++ task.output.crop.pos.x = cam->win.w.left;
++ task.output.crop.pos.y = cam->win.w.top;
++ if (cam->win.w.width > 1024 || cam->win.w.height > 1024) {
++ task.output.crop.w = cam->overlay_fb->var.xres;
++ task.output.crop.h = cam->overlay_fb->var.yres;
++ } else {
++ task.output.crop.w = cam->win.w.width;
++ task.output.crop.h = cam->win.w.height;
++ }
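++	/* If ipu_check_task() flags an over-limit dimension,
++	 * shrink it in 8-pixel steps and check again. */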
++again:
++ err = ipu_check_task(&task);
++ if (err != IPU_CHECK_OK) {
++ if (err > IPU_CHECK_ERR_MIN) {
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTW_OVER) {
++ task.input.crop.w -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTH_OVER) {
++ task.input.crop.h -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER) {
++ task.output.width -= 8;
++ task.output.crop.w = task.output.width;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER) {
++ task.output.height -= 8;
++ task.output.crop.h = task.output.height;
++ goto again;
++ }
++			printk(KERN_ERR "check ipu task failed\n");
++ return;
++ }
++		printk(KERN_ERR "check ipu task failed\n");
++ return;
++ }
++ err = ipu_queue_task(&task);
++ if (err < 0)
++ printk(KERN_ERR "queue ipu task error\n");
++}
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++
++/*!
++ * csi ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t csi_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, csi_buffer_num);
++ schedule_work(&cam->csi_work_struct);
++ csi_buffer_num = (csi_buffer_num == 0) ? 1 : 0;
++ return IRQ_HANDLED;
++}
++
++static int csi_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t params;
++ u32 pixel_fmt;
++ int err = 0, sensor_protocol = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++
++ memset(&params, 0, sizeof(ipu_channel_params_t));
++ params.csi_mem.csi = cam->csi;
++
++ sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
++ switch (sensor_protocol) {
++ case IPU_CSI_CLK_MODE_GATED_CLK:
++ case IPU_CSI_CLK_MODE_NONGATED_CLK:
++ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++ params.csi_mem.interlaced = false;
++ break;
++ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++ params.csi_mem.interlaced = true;
++ break;
++ default:
++ printk(KERN_ERR "sensor protocol unsupported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ params.csi_mem.mipi_en = true;
++ params.csi_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ params.csi_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ }
++ csi_mem_bufsize =
++ cam->crop_current.width * cam->crop_current.height * 2;
++ cam->vf_bufs_size[0] = PAGE_ALIGN(csi_mem_bufsize);
++	cam->vf_bufs_vaddr[0] =
++		(void *)dma_alloc_coherent(0, cam->vf_bufs_size[0],
++					   (dma_addr_t *)&cam->vf_bufs[0],
++					   GFP_DMA | GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_2;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(csi_mem_bufsize);
++	cam->vf_bufs_vaddr[1] =
++		(void *)dma_alloc_coherent(0, cam->vf_bufs_size[1],
++					   (dma_addr_t *)&cam->vf_bufs[1],
++					   GFP_DMA | GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_1;
++ }
++ pr_debug("vf_bufs %x %x\n", cam->vf_bufs[0], cam->vf_bufs[1]);
++
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ goto out_1;
++ }
++
++ pixel_fmt = IPU_PIX_FMT_UYVY;
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ pixel_fmt, cam->crop_current.width,
++ cam->crop_current.height,
++ cam->crop_current.width, IPU_ROTATE_NONE,
++ cam->vf_bufs[0], cam->vf_bufs[1], 0,
++ cam->offset.u_offset, cam->offset.u_offset);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_MEM output buffer\n");
++ goto out_1;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
++ goto out_1;
++ }
++
++ csi_buffer_num = 0;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 1);
++ return err;
++out_1:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++out_2:
++ return err;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ csi_enc_callback, 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering CSI0_OUT_EOF irq\n");
++ return err;
++ }
++
++ INIT_WORK(&cam->csi_work_struct, csi_buf_work_func);
++
++ err = csi_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "csi_enc_setup %d\n", err);
++ goto out1;
++ }
++
++ return err;
++out1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++ return err;
++}
++
++/*!
++ * bg_overlay_start - start the overlay task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int bg_overlay_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already start.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ out_format = cam->v4l2_fb.fmt.pixelformat;
++ if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR24) {
++ bpp = 3, csi_mem_bufsize = 3;
++ pr_info("BGR24\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_RGB565) {
++ bpp = 2, csi_mem_bufsize = 2;
++ pr_info("RGB565\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR32) {
++ bpp = 4, csi_mem_bufsize = 4;
++ pr_info("BGR32\n");
++ } else {
++ printk(KERN_ERR
++		       "unsupported pixel format from the framebuffer.\n");
++ return -EINVAL;
++ }
++
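++	/* Byte offset of the overlay window origin in the frame buffer:
++	 * top line * stride + left pixel * bytes-per-pixel */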
++ offset = cam->v4l2_fb.fmt.bytesperline * cam->win.w.top +
++ csi_mem_bufsize * cam->win.w.left;
++
++ if (cam->v4l2_fb.base == 0)
++ printk(KERN_ERR "invalid frame buffer address.\n");
++ else
++ offset += (u32) cam->v4l2_fb.base;
++
++ csi_mem_bufsize = cam->win.w.width * cam->win.w.height
++ * csi_mem_bufsize;
++
++ err = csi_enc_enabling_tasks(cam);
++ if (err != 0) {
++		printk(KERN_ERR "Error: csi enc enable failed\n");
++ return err;
++ }
++
++ cam->overlay_active = true;
++ return err;
++}
++
++/*!
++ * bg_overlay_stop - stop the overlay task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int bg_overlay_stop(void *private)
++{
++ int err = 0;
++ cam_data *cam = (cam_data *) private;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ csi_buffer_num = 0;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ flush_work(&cam->csi_work_struct);
++ cancel_work_sync(&cam->csi_work_struct);
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[0],
++ cam->rot_vf_bufs_vaddr[0],
++ cam->rot_vf_bufs[0]);
++ cam->rot_vf_bufs_vaddr[0] = NULL;
++ cam->rot_vf_bufs[0] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[1],
++ cam->rot_vf_bufs_vaddr[1],
++ cam->rot_vf_bufs[1]);
++ cam->rot_vf_bufs_vaddr[1] = NULL;
++ cam->rot_vf_bufs[1] = 0;
++ }
++
++ cam->overlay_active = false;
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int bg_overlay_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int bg_overlay_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++	/* Free the CSI EOF irq first: disabling the CSI waits for
++	 * the IDMAC EOF, which would raise the EOF irq again. */
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select bg as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int bg_overlay_sdc_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = bg_overlay_start;
++ cam->vf_stop_sdc = bg_overlay_stop;
++ cam->vf_enable_csi = bg_overlay_enable_csi;
++ cam->vf_disable_csi = bg_overlay_disable_csi;
++ cam->overlay_active = false;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(bg_overlay_sdc_select);
++
++/*!
++ * function to de-select bg as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int bg_overlay_sdc_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(bg_overlay_sdc_deselect);
++
++/*!
++ * Init background overlay task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int bg_overlay_sdc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit background overlay task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit bg_overlay_sdc_exit(void)
++{
++}
++
++module_init(bg_overlay_sdc_init);
++module_exit(bg_overlay_sdc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Background Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_csi_enc.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_csi_enc.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_csi_enc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_csi_enc.c 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,418 @@
++/*
++ * Copyright 2009-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_csi_enc.c
++ *
++ * @brief CSI Use case for video capture
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/ipu.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#ifdef CAMERA_DBG
++ #define CAMERA_TRACE(x) (printk)x
++#else
++ #define CAMERA_TRACE(x)
++#endif
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * csi ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t csi_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ if (cam->enc_callback == NULL)
++ return IRQ_HANDLED;
++
++ cam->enc_callback(irq, dev_id);
++ return IRQ_HANDLED;
++}
++
++/*!
++ * CSI ENC enable channel setup function
++ *
++ * @param cam struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t params;
++ u32 pixel_fmt;
++ int err = 0, sensor_protocol = 0;
++ dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ CAMERA_TRACE("In csi_enc_setup\n");
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++
++ memset(&params, 0, sizeof(ipu_channel_params_t));
++ params.csi_mem.csi = cam->csi;
++
++ sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
++ switch (sensor_protocol) {
++ case IPU_CSI_CLK_MODE_GATED_CLK:
++ case IPU_CSI_CLK_MODE_NONGATED_CLK:
++ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++ params.csi_mem.interlaced = false;
++ break;
++ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++ params.csi_mem.interlaced = true;
++ break;
++ default:
++ printk(KERN_ERR "sensor protocol unsupported\n");
++ return -EINVAL;
++ }
++
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
++ pixel_fmt = IPU_PIX_FMT_YUV420P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420)
++ pixel_fmt = IPU_PIX_FMT_YVU420P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
++ pixel_fmt = IPU_PIX_FMT_YUV422P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
++ pixel_fmt = IPU_PIX_FMT_UYVY;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
++ pixel_fmt = IPU_PIX_FMT_YUYV;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
++ pixel_fmt = IPU_PIX_FMT_NV12;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
++ pixel_fmt = IPU_PIX_FMT_BGR24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
++ pixel_fmt = IPU_PIX_FMT_RGB24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
++ pixel_fmt = IPU_PIX_FMT_RGB565;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
++ pixel_fmt = IPU_PIX_FMT_BGR32;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
++ pixel_fmt = IPU_PIX_FMT_RGB32;
++ else {
++ printk(KERN_ERR "format not supported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ params.csi_mem.mipi_en = true;
++ params.csi_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ params.csi_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ return err;
++ }
++
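++	/* Both IDMAC buffers initially point at the dummy frame; real
++	 * capture buffers are swapped in later via csi_enc_eba_update(). */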
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ pixel_fmt, cam->v2f.fmt.pix.width,
++ cam->v2f.fmt.pix.height,
++ cam->v2f.fmt.pix.bytesperline,
++ IPU_ROTATE_NONE,
++ dummy, dummy, 0,
++ cam->offset.u_offset,
++ cam->offset.v_offset);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_MEM output buffer\n");
++ return err;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * function to update physical buffer address for encoder IDMA channel
++ *
++ * @param eba physical buffer address for encoder IDMA channel
++ * @param buffer_num int buffer 0 or buffer 1
++ *
++ * @return status
++ */
++static int csi_enc_eba_update(struct ipu_soc *ipu, dma_addr_t eba,
++ int *buffer_num)
++{
++ int err = 0;
++
++ pr_debug("eba %x\n", eba);
++ err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num, eba);
++ if (err != 0) {
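++		/* The buffer slot was still marked ready; clear it and
++		 * retry the address update once. */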
++ ipu_clear_buffer_ready(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num);
++
++ err = ipu_update_channel_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num, eba);
++ if (err != 0) {
++ pr_err("ERROR: v4l2 capture: fail to update "
++ "buf%d\n", *buffer_num);
++ return err;
++ }
++ }
++
++ ipu_select_buffer(ipu, CSI_MEM, IPU_OUTPUT_BUFFER, *buffer_num);
++
++ *buffer_num = (*buffer_num == 0) ? 1 : 0;
++
++ return 0;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++ CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
++
++ cam->dummy_frame.vaddress = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->dummy_frame.paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->dummy_frame.vaddress == 0) {
++		pr_err("ERROR: v4l2 capture: failed to allocate "
++		       "dummy frame\n");
++ return -ENOBUFS;
++ }
++ cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
++ cam->dummy_frame.buffer.length =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ csi_enc_callback, 0, "Mxc Camera", cam);
++ if (err != 0) {
++		printk(KERN_ERR "Error registering CSI0_OUT_EOF irq\n");
++ return err;
++ }
++
++ err = csi_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "csi_enc_setup %d\n", err);
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * Disable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++static int csi_enc_disabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ if (cam->dummy_frame.vaddress != 0) {
++ dma_free_coherent(0, cam->dummy_frame.buffer.length,
++ cam->dummy_frame.vaddress,
++ cam->dummy_frame.paddress);
++ cam->dummy_frame.vaddress = 0;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++	/* Free the CSI EOF irq first: disabling the CSI
++	 * waits for the IDMAC EOF, which would request
++	 * the EOF irq again. */
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select CSI ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int csi_enc_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = csi_enc_eba_update;
++ cam->enc_enable = csi_enc_enabling_tasks;
++ cam->enc_disable = csi_enc_disabling_tasks;
++ cam->enc_enable_csi = csi_enc_enable_csi;
++ cam->enc_disable_csi = csi_enc_disable_csi;
++ } else {
++ err = -EIO;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(csi_enc_select);
++
++/*!
++ * function to de-select CSI ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int csi_enc_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = NULL;
++ cam->enc_enable = NULL;
++ cam->enc_disable = NULL;
++ cam->enc_enable_csi = NULL;
++ cam->enc_disable_csi = NULL;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(csi_enc_deselect);
++
++/*!
++ * Init the Encoder channels
++ *
++ * @return Error code indicating success or failure
++ */
++__init int csi_enc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit the Encoder channels
++ *
++ */
++void __exit csi_enc_exit(void)
++{
++}
++
++module_init(csi_enc_init);
++module_exit(csi_enc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("CSI ENC Driver");
++MODULE_LICENSE("GPL");
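
For orientation: nothing in ipu_csi_enc.c starts streaming by itself; the hooks installed by csi_enc_select() are driven by the mxc_v4l2_capture core. Below is a minimal caller sketch assuming the cam_data hooks defined above; the function name, frame addresses and error handling are illustrative, not taken from the patch.

    #include "mxc_v4l2_capture.h"
    #include "ipu_prp_sw.h"

    /* Hypothetical caller sketch -- drives the CSI->MEM path set up above. */
    static int start_csi_mem_streaming(cam_data *cam, dma_addr_t frame0,
                                       dma_addr_t frame1)
    {
            int bufnum = 0;
            int err;

            err = csi_enc_select(cam);          /* install the enc_* hooks */
            if (err)
                    return err;

            err = cam->enc_enable(cam);         /* csi_enc_enabling_tasks() */
            if (err)
                    return err;

            /* Prime both halves of the double buffer, then start the CSI. */
            cam->enc_update_eba(cam->ipu, frame0, &bufnum);
            cam->enc_update_eba(cam->ipu, frame1, &bufnum);

            return cam->enc_enable_csi(cam);    /* csi_enc_enable_csi() */
    }
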
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_fg_overlay_sdc.c 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,634 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++/* The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_fg_overlay_sdc.c
++ *
++ * @brief IPU Use case for PRP-VF
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/console.h>
++#include <linux/ipu.h>
++#include <linux/mxcfb.h>
++#include <linux/mipi_csi2.h>
++
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#ifdef CAMERA_DBG
++ #define CAMERA_TRACE(x) (printk)x
++#else
++ #define CAMERA_TRACE(x)
++#endif
++
++static int csi_buffer_num, buffer_num;
++static u32 csi_mem_bufsize;
++static struct ipu_soc *disp_ipu;
++static struct fb_info *fbi;
++static struct fb_var_screeninfo fbvar;
++static u32 vf_out_format;
++static void csi_buf_work_func(struct work_struct *work)
++{
++ int err = 0;
++ cam_data *cam =
++ container_of(work, struct _cam_data, csi_work_struct);
++
++ struct ipu_task task;
++ memset(&task, 0, sizeof(task));
++
++ if (csi_buffer_num)
++ task.input.paddr = cam->vf_bufs[0];
++ else
++ task.input.paddr = cam->vf_bufs[1];
++ task.input.width = cam->crop_current.width;
++ task.input.height = cam->crop_current.height;
++ task.input.format = IPU_PIX_FMT_NV12;
++
++ if (buffer_num == 0)
++ task.output.paddr = fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres);
++ else
++ task.output.paddr = fbi->fix.smem_start;
++ task.output.width = cam->win.w.width;
++ task.output.height = cam->win.w.height;
++ task.output.format = vf_out_format;
++ task.output.rotate = cam->rotation;
++again:
++ err = ipu_check_task(&task);
++ if (err != IPU_CHECK_OK) {
++ if (err > IPU_CHECK_ERR_MIN) {
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTW_OVER) {
++ task.input.crop.w -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_INPUTH_OVER) {
++ task.input.crop.h -= 8;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER) {
++ task.output.width -= 8;
++ task.output.crop.w = task.output.width;
++ goto again;
++ }
++ if (err == IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER) {
++ task.output.height -= 8;
++ task.output.crop.h = task.output.height;
++ goto again;
++ }
++			printk(KERN_ERR "ipu task check failed\n");
++			return;
++		}
++		printk(KERN_ERR "ipu task check failed\n");
++ return;
++ }
++ err = ipu_queue_task(&task);
++ if (err < 0)
++		printk(KERN_ERR "ipu_queue_task failed\n");
++ ipu_select_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++}
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++/*!
++ * csi ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t csi_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, csi_buffer_num);
++ if ((cam->crop_current.width != cam->win.w.width) ||
++ (cam->crop_current.height != cam->win.w.height) ||
++ (vf_out_format != IPU_PIX_FMT_NV12) ||
++ (cam->rotation >= IPU_ROTATE_VERT_FLIP))
++ schedule_work(&cam->csi_work_struct);
++ csi_buffer_num = (csi_buffer_num == 0) ? 1 : 0;
++ return IRQ_HANDLED;
++}
++
++static int csi_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t params;
++ int err = 0, sensor_protocol = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ CAMERA_TRACE("In csi_enc_setup\n");
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++
++ memset(&params, 0, sizeof(ipu_channel_params_t));
++ params.csi_mem.csi = cam->csi;
++
++ sensor_protocol = ipu_csi_get_sensor_protocol(cam->ipu, cam->csi);
++ switch (sensor_protocol) {
++ case IPU_CSI_CLK_MODE_GATED_CLK:
++ case IPU_CSI_CLK_MODE_NONGATED_CLK:
++ case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++ params.csi_mem.interlaced = false;
++ break;
++ case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++ case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++ params.csi_mem.interlaced = true;
++ break;
++ default:
++ printk(KERN_ERR "sensor protocol unsupported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ params.csi_mem.mipi_en = true;
++ params.csi_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ params.csi_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ } else {
++ params.csi_mem.mipi_en = false;
++ params.csi_mem.mipi_vc = 0;
++ params.csi_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ }
++ csi_mem_bufsize = cam->crop_current.width *
++ cam->crop_current.height * 3/2;
++ cam->vf_bufs_size[0] = PAGE_ALIGN(csi_mem_bufsize);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ (dma_addr_t *) &
++ cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_2;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(csi_mem_bufsize);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ (dma_addr_t *) &
++ cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_1;
++ }
++ pr_debug("vf_bufs %x %x\n", cam->vf_bufs[0], cam->vf_bufs[1]);
++
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ goto out_1;
++ }
++
++ if ((cam->crop_current.width == cam->win.w.width) &&
++ (cam->crop_current.height == cam->win.w.height) &&
++ (vf_out_format == IPU_PIX_FMT_NV12) &&
++ (cam->rotation < IPU_ROTATE_VERT_FLIP)) {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM,
++ IPU_OUTPUT_BUFFER,
++ IPU_PIX_FMT_NV12,
++ cam->crop_current.width,
++ cam->crop_current.height,
++ cam->crop_current.width, IPU_ROTATE_NONE,
++ fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres),
++ fbi->fix.smem_start, 0,
++ cam->offset.u_offset, cam->offset.u_offset);
++ } else {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM,
++ IPU_OUTPUT_BUFFER,
++ IPU_PIX_FMT_NV12,
++ cam->crop_current.width,
++ cam->crop_current.height,
++ cam->crop_current.width, IPU_ROTATE_NONE,
++ cam->vf_bufs[0], cam->vf_bufs[1], 0,
++ cam->offset.u_offset, cam->offset.u_offset);
++ }
++ if (err != 0) {
++ printk(KERN_ERR "CSI_MEM output buffer\n");
++ goto out_1;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_MEM\n");
++ goto out_1;
++ }
++
++ csi_buffer_num = 0;
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 1);
++ return err;
++out_1:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++out_2:
++ return err;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int csi_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++ CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ csi_enc_callback, 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering CSI0_OUT_EOF irq\n");
++ return err;
++ }
++
++ INIT_WORK(&cam->csi_work_struct, csi_buf_work_func);
++
++ err = csi_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "csi_enc_setup %d\n", err);
++ goto out1;
++ }
++
++ return err;
++out1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++ return err;
++}
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * foreground_start - start the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int foreground_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0, i = 0, screen_size;
++ char *base;
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already started.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ fbvar = fbi->var;
++
++ /* Store the overlay frame buffer's original std */
++ cam->fb_origin_std = fbvar.nonstd;
++
++ if (cam->devtype == IMX5_V4L2 || cam->devtype == IMX6_V4L2) {
++ /* Use DP to do CSC so that we can get better performance */
++ vf_out_format = IPU_PIX_FMT_NV12;
++ fbvar.nonstd = vf_out_format;
++ } else {
++ vf_out_format = IPU_PIX_FMT_RGB565;
++ fbvar.nonstd = 0;
++ }
++
++ fbvar.bits_per_pixel = 16;
++ fbvar.xres = fbvar.xres_virtual = cam->win.w.width;
++ fbvar.yres = cam->win.w.height;
++ fbvar.yres_virtual = cam->win.w.height * 2;
++ fbvar.yoffset = 0;
++ fbvar.vmode &= ~FB_VMODE_YWRAP;
++ fbvar.accel_flags = FB_ACCEL_DOUBLE_FLAG;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++ ipu_disp_set_window_pos(disp_ipu, MEM_FG_SYNC, cam->win.w.left,
++ cam->win.w.top);
++
++ /* Fill black color for framebuffer */
++ base = (char *) fbi->screen_base;
++ screen_size = fbi->var.xres * fbi->var.yres;
++ if (cam->devtype == IMX5_V4L2 || cam->devtype == IMX6_V4L2) {
++ memset(base, 0, screen_size);
++ base += screen_size;
++ for (i = 0; i < screen_size / 2; i++, base++)
++ *base = 0x80;
++ } else {
++ for (i = 0; i < screen_size * 2; i++, base++)
++ *base = 0x00;
++ }
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++
++ /* correct display ch buffer address */
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 0, fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres));
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 1, fbi->fix.smem_start);
++
++ err = csi_enc_enabling_tasks(cam);
++ if (err != 0) {
++		printk(KERN_ERR "csi enc enable failed\n");
++ return err;
++ }
++
++ cam->overlay_active = true;
++ return err;
++
++}
++
++/*!
++ * foreground_stop - stop the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int foreground_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0, i = 0;
++ struct fb_info *fbi = NULL;
++ struct fb_var_screeninfo fbvar;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ err = ipu_disable_channel(cam->ipu, CSI_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ csi_buffer_num = 0;
++ buffer_num = 0;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ console_unlock();
++
++	/* Restore the overlay frame buffer's original std */
++ fbvar = fbi->var;
++ fbvar.accel_flags = FB_ACCEL_TRIPLE_FLAG;
++ fbvar.nonstd = cam->fb_origin_std;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ flush_work(&cam->csi_work_struct);
++ cancel_work_sync(&cam->csi_work_struct);
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++
++ cam->overlay_active = false;
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int foreground_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int foreground_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++	/* Free the CSI EOF irq first: disabling the CSI
++	 * waits for the IDMAC EOF, which would request
++	 * the EOF irq again. */
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select foreground as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int foreground_sdc_select(void *private)
++{
++ cam_data *cam;
++ int err = 0;
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = foreground_start;
++ cam->vf_stop_sdc = foreground_stop;
++ cam->vf_enable_csi = foreground_enable_csi;
++ cam->vf_disable_csi = foreground_disable_csi;
++ cam->overlay_active = false;
++ } else
++ err = -EIO;
++
++ return err;
++}
++EXPORT_SYMBOL(foreground_sdc_select);
++
++/*!
++ * function to de-select foreground as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return int
++ */
++int foreground_sdc_deselect(void *private)
++{
++ cam_data *cam;
++
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(foreground_sdc_deselect);
++
++/*!
++ * Init viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int foreground_sdc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit foreground_sdc_exit(void)
++{
++}
++
++module_init(foreground_sdc_init);
++module_exit(foreground_sdc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Driver");
++MODULE_LICENSE("GPL");
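
A detail of the overlay above that is easy to miss: MEM_FG_SYNC is double buffered directly inside the overlay framebuffer, using the two halves created when foreground_start() sets yres_virtual to twice yres. The address arithmetic, written out as a helper; the helper name is hypothetical, the layout is the one programmed above.

    #include <linux/fb.h>

    /* Sketch: address of MEM_FG_SYNC input buffer 0 or 1 inside the fb. */
    static unsigned long fg_overlay_buf_addr(struct fb_info *fbi, int buffer_num)
    {
            /* Buffer 0 is the second half of the fb, buffer 1 its start,
             * matching the ipu_update_channel_buffer() calls above. */
            if (buffer_num == 0)
                    return fbi->fix.smem_start +
                           fbi->fix.line_length * fbi->var.yres;
            return fbi->fix.smem_start;
    }
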
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_enc.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_enc.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_enc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_enc.c 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,595 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_enc.c
++ *
++ * @brief IPU Use case for PRP-ENC
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/platform_device.h>
++#include <linux/ipu.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#ifdef CAMERA_DBG
++ #define CAMERA_TRACE(x) (printk)x
++#else
++ #define CAMERA_TRACE(x)
++#endif
++
++static ipu_rotate_mode_t grotation = IPU_ROTATE_NONE;
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * IPU ENC callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prp_enc_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
++ if (cam->enc_callback == NULL)
++ return IRQ_HANDLED;
++
++ cam->enc_callback(irq, dev_id);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * PrpENC enable channel setup function
++ *
++ * @param cam struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_setup(cam_data *cam)
++{
++ ipu_channel_params_t enc;
++ int err = 0;
++ dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ CAMERA_TRACE("In prp_enc_setup\n");
++ if (!cam) {
++ printk(KERN_ERR "cam private is NULL\n");
++ return -ENXIO;
++ }
++ memset(&enc, 0, sizeof(ipu_channel_params_t));
++
++ ipu_csi_get_window_size(cam->ipu, &enc.csi_prp_enc_mem.in_width,
++ &enc.csi_prp_enc_mem.in_height, cam->csi);
++
++ enc.csi_prp_enc_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
++ enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.width;
++ enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.height;
++ enc.csi_prp_enc_mem.csi = cam->csi;
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.height;
++ enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.width;
++ }
++
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV420P;
++ pr_info("YUV420\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YVU420P;
++ pr_info("YVU420\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV422P;
++ pr_info("YUV422P\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUYV;
++ pr_info("YUYV\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_UYVY;
++ pr_info("UYVY\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_NV12;
++ pr_info("NV12\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR24;
++ pr_info("BGR24\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB24;
++ pr_info("RGB24\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB565;
++ pr_info("RGB565\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR32;
++ pr_info("BGR32\n");
++ } else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32) {
++ enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB32;
++ pr_info("RGB32\n");
++ } else {
++ printk(KERN_ERR "format not supported\n");
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ enc.csi_prp_enc_mem.mipi_en = true;
++ enc.csi_prp_enc_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ enc.csi_prp_enc_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ enc.csi_prp_enc_mem.mipi_en = false;
++ enc.csi_prp_enc_mem.mipi_vc = 0;
++ enc.csi_prp_enc_mem.mipi_id = 0;
++ }
++ } else {
++ enc.csi_prp_enc_mem.mipi_en = false;
++ enc.csi_prp_enc_mem.mipi_vc = 0;
++ enc.csi_prp_enc_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_PRP_ENC_MEM, &enc);
++ if (err != 0) {
++ printk(KERN_ERR "ipu_init_channel %d\n", err);
++ return err;
++ }
++
++ grotation = cam->rotation;
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ if (cam->rot_enc_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[0],
++ cam->rot_enc_bufs_vaddr[0],
++ cam->rot_enc_bufs[0]);
++ }
++ if (cam->rot_enc_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[1],
++ cam->rot_enc_bufs_vaddr[1],
++ cam->rot_enc_bufs[1]);
++ }
++ cam->rot_enc_buf_size[0] =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->rot_enc_bufs_vaddr[0] =
++ (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[0],
++ &cam->rot_enc_bufs[0],
++ GFP_DMA | GFP_KERNEL);
++ if (!cam->rot_enc_bufs_vaddr[0]) {
++ printk(KERN_ERR "alloc enc_bufs0\n");
++ return -ENOMEM;
++ }
++ cam->rot_enc_buf_size[1] =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->rot_enc_bufs_vaddr[1] =
++ (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[1],
++ &cam->rot_enc_bufs[1],
++ GFP_DMA | GFP_KERNEL);
++ if (!cam->rot_enc_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[0],
++ cam->rot_enc_bufs_vaddr[0],
++ cam->rot_enc_bufs[0]);
++ cam->rot_enc_bufs_vaddr[0] = NULL;
++ cam->rot_enc_bufs[0] = 0;
++ printk(KERN_ERR "alloc enc_bufs1\n");
++ return -ENOMEM;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_width,
++ enc.csi_prp_enc_mem.out_height,
++ enc.csi_prp_enc_mem.out_width,
++ IPU_ROTATE_NONE,
++ cam->rot_enc_bufs[0],
++ cam->rot_enc_bufs[1], 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_PRP_ENC_MEM err\n");
++ return err;
++ }
++
++ err = ipu_init_channel(cam->ipu, MEM_ROT_ENC_MEM, NULL);
++ if (err != 0) {
++ printk(KERN_ERR "MEM_ROT_ENC_MEM channel err\n");
++ return err;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
++ IPU_INPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_width,
++ enc.csi_prp_enc_mem.out_height,
++ enc.csi_prp_enc_mem.out_width,
++ cam->rotation,
++ cam->rot_enc_bufs[0],
++ cam->rot_enc_bufs[1], 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "MEM_ROT_ENC_MEM input buffer\n");
++ return err;
++ }
++
++ err =
++ ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_height,
++ enc.csi_prp_enc_mem.out_width,
++ cam->v2f.fmt.pix.bytesperline /
++ bytes_per_pixel(enc.csi_prp_enc_mem.
++ out_pixel_fmt),
++ IPU_ROTATE_NONE,
++ dummy, dummy, 0,
++ cam->offset.u_offset,
++ cam->offset.v_offset);
++ if (err != 0) {
++ printk(KERN_ERR "MEM_ROT_ENC_MEM output buffer\n");
++ return err;
++ }
++
++ err = ipu_link_channels(cam->ipu,
++ CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR
++ "link CSI_PRP_ENC_MEM-MEM_ROT_ENC_MEM\n");
++ return err;
++ }
++
++ err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
++ return err;
++ }
++ err = ipu_enable_channel(cam->ipu, MEM_ROT_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel MEM_ROT_ENC_MEM\n");
++ return err;
++ }
++
++ ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER, 1);
++ } else {
++ err =
++ ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ enc.csi_prp_enc_mem.out_pixel_fmt,
++ enc.csi_prp_enc_mem.out_width,
++ enc.csi_prp_enc_mem.out_height,
++ cam->v2f.fmt.pix.bytesperline /
++ bytes_per_pixel(enc.csi_prp_enc_mem.
++ out_pixel_fmt),
++ cam->rotation,
++ dummy, dummy, 0,
++ cam->offset.u_offset,
++ cam->offset.v_offset);
++ if (err != 0) {
++ printk(KERN_ERR "CSI_PRP_ENC_MEM output buffer\n");
++ return err;
++ }
++ err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
++ if (err < 0) {
++ printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
++ return err;
++ }
++ }
++
++ return err;
++}
++
++/*!
++ * function to update physical buffer address for encoder IDMA channel
++ *
++ * @param eba physical buffer address for encoder IDMA channel
++ * @param buffer_num int buffer 0 or buffer 1
++ *
++ * @return status
++ */
++static int prp_enc_eba_update(struct ipu_soc *ipu, dma_addr_t eba,
++ int *buffer_num)
++{
++ int err = 0;
++
++ pr_debug("eba %x\n", eba);
++ if (grotation >= IPU_ROTATE_90_RIGHT) {
++ err = ipu_update_channel_buffer(ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER, *buffer_num,
++ eba);
++ } else {
++ err = ipu_update_channel_buffer(ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER, *buffer_num,
++ eba);
++ }
++ if (err != 0) {
++ if (grotation >= IPU_ROTATE_90_RIGHT) {
++ ipu_clear_buffer_ready(ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ err = ipu_update_channel_buffer(ipu, MEM_ROT_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num,
++ eba);
++ } else {
++ ipu_clear_buffer_ready(ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ err = ipu_update_channel_buffer(ipu, CSI_PRP_ENC_MEM,
++ IPU_OUTPUT_BUFFER,
++ *buffer_num,
++ eba);
++ }
++
++ if (err != 0) {
++			pr_err("ERROR: v4l2 capture: failed to update "
++ "buf%d\n", *buffer_num);
++ return err;
++ }
++ }
++
++ if (grotation >= IPU_ROTATE_90_RIGHT) {
++ ipu_select_buffer(ipu, MEM_ROT_ENC_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ } else {
++ ipu_select_buffer(ipu, CSI_PRP_ENC_MEM, IPU_OUTPUT_BUFFER,
++ *buffer_num);
++ }
++
++ *buffer_num = (*buffer_num == 0) ? 1 : 0;
++ return 0;
++}
++
++/*!
++ * Enable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_enabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++ CAMERA_TRACE("IPU:In prp_enc_enabling_tasks\n");
++
++ cam->dummy_frame.vaddress = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->dummy_frame.paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->dummy_frame.vaddress == 0) {
++		pr_err("ERROR: v4l2 capture: failed to allocate "
++		       "dummy frame\n");
++ return -ENOBUFS;
++ }
++ cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
++ cam->dummy_frame.buffer.length =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_ENC_ROT_OUT_EOF,
++ prp_enc_callback, 0, "Mxc Camera", cam);
++ } else {
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_ENC_OUT_EOF,
++ prp_enc_callback, 0, "Mxc Camera", cam);
++ }
++ if (err != 0) {
++		printk(KERN_ERR "Error registering PRP ENC EOF irq\n");
++ return err;
++ }
++
++ err = prp_enc_setup(cam);
++ if (err != 0) {
++ printk(KERN_ERR "prp_enc_setup %d\n", err);
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * Disable encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++static int prp_enc_disabling_tasks(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_ENC_ROT_OUT_EOF, cam);
++ ipu_unlink_channels(cam->ipu, CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
++ }
++
++ err = ipu_disable_channel(cam->ipu, CSI_PRP_ENC_MEM, true);
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT)
++ err |= ipu_disable_channel(cam->ipu, MEM_ROT_ENC_MEM, true);
++
++ ipu_uninit_channel(cam->ipu, CSI_PRP_ENC_MEM);
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT)
++ ipu_uninit_channel(cam->ipu, MEM_ROT_ENC_MEM);
++
++ if (cam->dummy_frame.vaddress != 0) {
++ dma_free_coherent(0, cam->dummy_frame.buffer.length,
++ cam->dummy_frame.vaddress,
++ cam->dummy_frame.paddress);
++ cam->dummy_frame.vaddress = 0;
++ }
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_enc_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++	/* Free the CSI EOF irq first: disabling the CSI
++	 * waits for the IDMAC EOF, which would request
++	 * the EOF irq again. */
++ if (cam->rotation < IPU_ROTATE_90_RIGHT)
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_ENC_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select PRP-ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int prp_enc_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = prp_enc_eba_update;
++ cam->enc_enable = prp_enc_enabling_tasks;
++ cam->enc_disable = prp_enc_disabling_tasks;
++ cam->enc_enable_csi = prp_enc_enable_csi;
++ cam->enc_disable_csi = prp_enc_disable_csi;
++ } else {
++ err = -EIO;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(prp_enc_select);
++
++/*!
++ * function to de-select PRP-ENC as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return int
++ */
++int prp_enc_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ if (cam) {
++ cam->enc_update_eba = NULL;
++ cam->enc_enable = NULL;
++ cam->enc_disable = NULL;
++ cam->enc_enable_csi = NULL;
++ cam->enc_disable_csi = NULL;
++ if (cam->rot_enc_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[0],
++ cam->rot_enc_bufs_vaddr[0],
++ cam->rot_enc_bufs[0]);
++ cam->rot_enc_bufs_vaddr[0] = NULL;
++ cam->rot_enc_bufs[0] = 0;
++ }
++ if (cam->rot_enc_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_enc_buf_size[1],
++ cam->rot_enc_bufs_vaddr[1],
++ cam->rot_enc_bufs[1]);
++ cam->rot_enc_bufs_vaddr[1] = NULL;
++ cam->rot_enc_bufs[1] = 0;
++ }
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(prp_enc_deselect);
++
++/*!
++ * Init the Encoder channels
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_enc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit the Encoder channels
++ *
++ */
++void __exit prp_enc_exit(void)
++{
++}
++
++module_init(prp_enc_init);
++module_exit(prp_enc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP ENC Driver");
++MODULE_LICENSE("GPL");
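
Note how prp_enc_setup() and prp_enc_eba_update() split on rotation: below 90 degrees the user buffer is attached directly to CSI_PRP_ENC_MEM, while at 90 degrees and above the PRP output lands in the intermediate rot_enc_bufs and the user buffer becomes the MEM_ROT_ENC_MEM output. How a capture core might choose between this resizing path and the plain CSI->MEM path from ipu_csi_enc.c is sketched below; the selection rule and the function name are illustrative assumptions, not taken from this patch.

    #include "mxc_v4l2_capture.h"
    #include "ipu_prp_sw.h"

    /* Hypothetical path selection -- a sketch only. */
    static int pick_encoder_path(cam_data *cam)
    {
            /* The plain CSI->MEM path does no IC processing (no resize or
             * colour conversion), so a capture that needs either would go
             * through PRP ENC instead. */
            if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
                    return csi_enc_select(cam);

            return prp_enc_select(cam);
    }
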
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_sw.h linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_sw.h
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_sw.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_sw.h 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,43 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_sw.h
++ *
++ * @brief This file contains the IPU PRP use case driver header.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef _INCLUDE_IPU__PRP_SW_H_
++#define _INCLUDE_IPU__PRP_SW_H_
++
++int csi_enc_select(void *private);
++int csi_enc_deselect(void *private);
++int prp_enc_select(void *private);
++int prp_enc_deselect(void *private);
++#ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++int prp_vf_sdc_select(void *private);
++int prp_vf_sdc_deselect(void *private);
++int prp_vf_sdc_select_bg(void *private);
++int prp_vf_sdc_deselect_bg(void *private);
++#else
++int foreground_sdc_select(void *private);
++int foreground_sdc_deselect(void *private);
++int bg_overlay_sdc_select(void *private);
++int bg_overlay_sdc_deselect(void *private);
++#endif
++int prp_still_select(void *private);
++int prp_still_deselect(void *private);
++
++#endif
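
The CONFIG_MXC_IPU_PRP_VF_SDC split in this header decides which viewfinder/overlay implementation the capture core gets: the PRP-VF hardware path (ipu_prp_vf_sdc.c and ipu_prp_vf_sdc_bg.c) or the IC-task based overlay (ipu_fg_overlay_sdc.c and its background counterpart). A sketch of the corresponding call site follows, with a hypothetical wrapper name; only the select functions themselves come from this header.

    #include "mxc_v4l2_capture.h"
    #include "ipu_prp_sw.h"

    /* Sketch: pick the overlay path declared in ipu_prp_sw.h. */
    static int select_overlay_path(cam_data *cam, bool foreground)
    {
    #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
            return foreground ? prp_vf_sdc_select(cam)
                              : prp_vf_sdc_select_bg(cam);
    #else
            return foreground ? foreground_sdc_select(cam)
                              : bg_overlay_sdc_select(cam);
    #endif
    }
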
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc_bg.c 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,521 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_vf_sdc_bg.c
++ *
++ * @brief IPU Use case for PRP-VF background
++ *
++ * @ingroup IPU
++ */
++#include <linux/dma-mapping.h>
++#include <linux/fb.h>
++#include <linux/ipu.h>
++#include <linux/module.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int buffer_num;
++static int buffer_ready;
++static struct ipu_soc *disp_ipu;
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++/*
++ * Function definitions
++ */
++
++/*!
++ * SDC V-Sync callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prpvf_sdc_vsync_callback(int irq, void *dev_id)
++{
++ cam_data *cam = dev_id;
++ if (buffer_ready > 0) {
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ buffer_ready--;
++ }
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * VF EOF callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prpvf_vf_eof_callback(int irq, void *dev_id)
++{
++ cam_data *cam = dev_id;
++ pr_debug("buffer_ready %d buffer_num %d\n", buffer_ready, buffer_num);
++
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++ buffer_ready++;
++ return IRQ_HANDLED;
++}
++
++/*!
++ * prpvf_start - start the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ ipu_channel_params_t vf;
++ u32 format;
++ u32 offset;
++ u32 bpp, size = 3;
++ int err = 0;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++		pr_debug("already started.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ format = cam->v4l2_fb.fmt.pixelformat;
++ if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR24) {
++ bpp = 3, size = 3;
++ pr_info("BGR24\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_RGB565) {
++ bpp = 2, size = 2;
++ pr_info("RGB565\n");
++ } else if (cam->v4l2_fb.fmt.pixelformat == IPU_PIX_FMT_BGR32) {
++ bpp = 4, size = 4;
++ pr_info("BGR32\n");
++ } else {
++ printk(KERN_ERR
++		       "unsupported pixel format from the framebuffer.\n");
++ return -EINVAL;
++ }
++
++ offset = cam->v4l2_fb.fmt.bytesperline * cam->win.w.top +
++ size * cam->win.w.left;
++
++ if (cam->v4l2_fb.base == 0)
++ printk(KERN_ERR "invalid frame buffer address.\n");
++ else
++ offset += (u32) cam->v4l2_fb.base;
++
++ memset(&vf, 0, sizeof(ipu_channel_params_t));
++ ipu_csi_get_window_size(cam->ipu, &vf.csi_prp_vf_mem.in_width,
++ &vf.csi_prp_vf_mem.in_height, cam->csi);
++ vf.csi_prp_vf_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
++ vf.csi_prp_vf_mem.out_width = cam->win.w.width;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.height;
++ vf.csi_prp_vf_mem.csi = cam->csi;
++ if (cam->vf_rotation >= IPU_ROTATE_90_RIGHT) {
++ vf.csi_prp_vf_mem.out_width = cam->win.w.height;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.width;
++ }
++ vf.csi_prp_vf_mem.out_pixel_fmt = format;
++ size = cam->win.w.width * cam->win.w.height * size;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ vf.csi_prp_vf_mem.mipi_en = true;
++ vf.csi_prp_vf_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ vf.csi_prp_vf_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_PRP_VF_MEM, &vf);
++ if (err != 0)
++ goto out_4;
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ }
++ cam->vf_bufs_size[0] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ &cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_3;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ &cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_3;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ format, vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ IPU_ROTATE_NONE,
++ cam->vf_bufs[0],
++ cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error initializing CSI_PRP_VF_MEM\n");
++ goto out_3;
++ }
++ err = ipu_init_channel(cam->ipu, MEM_ROT_VF_MEM, NULL);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM channel\n");
++ goto out_3;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_INPUT_BUFFER,
++ format, vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ cam->vf_rotation,
++ cam->vf_bufs[0],
++ cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM input buffer\n");
++ goto out_2;
++ }
++
++ if (cam->vf_rotation >= IPU_ROTATE_90_RIGHT) {
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ format,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ cam->overlay_fb->var.xres * bpp,
++ IPU_ROTATE_NONE,
++ offset, 0, 0, 0, 0);
++
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM output buffer\n");
++ goto out_2;
++ }
++ } else {
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ format,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ cam->overlay_fb->var.xres * bpp,
++ IPU_ROTATE_NONE,
++ offset, 0, 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM output buffer\n");
++ goto out_2;
++ }
++ }
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF,
++ prpvf_vf_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR
++ "Error registering IPU_IRQ_PRP_VF_OUT_EOF irq.\n");
++ goto out_2;
++ }
++
++ ipu_clear_irq(disp_ipu, IPU_IRQ_BG_SF_END);
++ err = ipu_request_irq(disp_ipu, IPU_IRQ_BG_SF_END,
++ prpvf_sdc_vsync_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering IPU_IRQ_BG_SF_END irq.\n");
++ goto out_1;
++ }
++
++ ipu_enable_channel(cam->ipu, CSI_PRP_VF_MEM);
++ ipu_enable_channel(cam->ipu, MEM_ROT_VF_MEM);
++
++ buffer_num = 0;
++ buffer_ready = 0;
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM, IPU_OUTPUT_BUFFER, 0);
++
++ cam->overlay_active = true;
++ return err;
++
++out_1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, NULL);
++out_2:
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++out_3:
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++out_4:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[0],
++ cam->rot_vf_bufs_vaddr[0],
++ cam->rot_vf_bufs[0]);
++ cam->rot_vf_bufs_vaddr[0] = NULL;
++ cam->rot_vf_bufs[0] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[1],
++ cam->rot_vf_bufs_vaddr[1],
++ cam->rot_vf_bufs[1]);
++ cam->rot_vf_bufs_vaddr[1] = NULL;
++ cam->rot_vf_bufs[1] = 0;
++ }
++ return err;
++}
++
++/*!
++ * prpvf_stop - stop the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ ipu_free_irq(disp_ipu, IPU_IRQ_BG_SF_END, cam);
++
++ ipu_disable_channel(cam->ipu, CSI_PRP_VF_MEM, true);
++ ipu_disable_channel(cam->ipu, MEM_ROT_VF_MEM, true);
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0], cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1], cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[0],
++ cam->rot_vf_bufs_vaddr[0],
++ cam->rot_vf_bufs[0]);
++ cam->rot_vf_bufs_vaddr[0] = NULL;
++ cam->rot_vf_bufs[0] = 0;
++ }
++ if (cam->rot_vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->rot_vf_buf_size[1],
++ cam->rot_vf_bufs_vaddr[1],
++ cam->rot_vf_bufs[1]);
++ cam->rot_vf_bufs_vaddr[1] = NULL;
++ cam->rot_vf_bufs[1] = 0;
++ }
++
++ buffer_num = 0;
++ buffer_ready = 0;
++ cam->overlay_active = false;
++ return 0;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++	/* Free the CSI EOF irq first: disabling the CSI
++	 * waits for the IDMAC EOF, which would request
++	 * the EOF irq again. */
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int prp_vf_sdc_select_bg(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = prpvf_start;
++ cam->vf_stop_sdc = prpvf_stop;
++ cam->vf_enable_csi = prp_vf_enable_csi;
++ cam->vf_disable_csi = prp_vf_disable_csi;
++ cam->overlay_active = false;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(prp_vf_sdc_select_bg);
++
++/*!
++ * function to de-select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int prp_vf_sdc_deselect_bg(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(prp_vf_sdc_deselect_bg);
++
++/*!
++ * Init viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_vf_sdc_init_bg(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit prp_vf_sdc_exit_bg(void)
++{
++}
++
++module_init(prp_vf_sdc_init_bg);
++module_exit(prp_vf_sdc_exit_bg);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Background Driver");
++MODULE_LICENSE("GPL");
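
The window placement in prpvf_start() above reduces to a single byte offset into the background framebuffer: bytesperline times the top edge, plus bytes-per-pixel times the left edge, plus the framebuffer base. Written out as a helper; the helper name is hypothetical, the fields are the v4l2_framebuffer and v4l2_window members used above.

    #include <linux/videodev2.h>

    /* Sketch: byte offset of the viewfinder window in the background fb. */
    static u32 overlay_window_offset(struct v4l2_framebuffer *fb,
                                     struct v4l2_window *win, u32 bytes_pp)
    {
            return (u32)(unsigned long)fb->base +
                   fb->fmt.bytesperline * win->w.top +
                   bytes_pp * win->w.left;
    }
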
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_prp_vf_sdc.c 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,582 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++/* The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_prp_vf_sdc.c
++ *
++ * @brief IPU Use case for PRP-VF
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/console.h>
++#include <linux/ipu.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <mach/hardware.h>
++#include <linux/mipi_csi2.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int buffer_num;
++static struct ipu_soc *disp_ipu;
++
++static void get_disp_ipu(cam_data *cam)
++{
++ if (cam->output > 2)
++ disp_ipu = ipu_get_soc(1); /* using DISP4 */
++ else
++ disp_ipu = ipu_get_soc(0);
++}
++
++static irqreturn_t prpvf_rot_eof_callback(int irq, void *dev_id)
++{
++ cam_data *cam = dev_id;
++ pr_debug("buffer_num %d\n", buffer_num);
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ ipu_select_buffer(disp_ipu, MEM_FG_SYNC,
++ IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++ } else {
++ ipu_select_buffer(disp_ipu, MEM_FG_SYNC,
++ IPU_INPUT_BUFFER, buffer_num);
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++ }
++ return IRQ_HANDLED;
++}
++/*
++ * Function definitions
++ */
++
++/*!
++ * prpvf_start - start the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_start(void *private)
++{
++ struct fb_var_screeninfo fbvar;
++ struct fb_info *fbi = NULL;
++ cam_data *cam = (cam_data *) private;
++ ipu_channel_params_t vf;
++ u32 vf_out_format = 0;
++ u32 size = 2, temp = 0;
++ int err = 0, i = 0;
++ short *tmp, color;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (!cam) {
++ printk(KERN_ERR "private is NULL\n");
++ return -EIO;
++ }
++
++ if (cam->overlay_active == true) {
++ pr_debug("already started.\n");
++ return 0;
++ }
++
++ get_disp_ipu(cam);
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ fbvar = fbi->var;
++
++ /* Store the overlay frame buffer's original std */
++ cam->fb_origin_std = fbvar.nonstd;
++
++ if (cam->devtype == IMX5_V4L2 || cam->devtype == IMX6_V4L2) {
++ /* Use DP to do CSC so that we can get better performance */
++ vf_out_format = IPU_PIX_FMT_UYVY;
++ fbvar.nonstd = vf_out_format;
++ color = 0x80;
++ } else {
++ vf_out_format = IPU_PIX_FMT_RGB565;
++ fbvar.nonstd = 0;
++ color = 0x0;
++ }
++
++ fbvar.bits_per_pixel = 16;
++ fbvar.xres = fbvar.xres_virtual = cam->win.w.width;
++ fbvar.yres = cam->win.w.height;
++ fbvar.yres_virtual = cam->win.w.height * 2;
++ fbvar.yoffset = 0;
++ fbvar.accel_flags = FB_ACCEL_DOUBLE_FLAG;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++ ipu_disp_set_window_pos(disp_ipu, MEM_FG_SYNC, cam->win.w.left,
++ cam->win.w.top);
++
++ /* Fill black color for framebuffer */
++ tmp = (short *) fbi->screen_base;
++ for (i = 0; i < (fbi->fix.line_length * fbi->var.yres)/2;
++ i++, tmp++)
++ *tmp = color;
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++
++ /* correct display ch buffer address */
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 0, fbi->fix.smem_start +
++ (fbi->fix.line_length * fbvar.yres));
++ ipu_update_channel_buffer(disp_ipu, MEM_FG_SYNC, IPU_INPUT_BUFFER,
++ 1, fbi->fix.smem_start);
++
++ memset(&vf, 0, sizeof(ipu_channel_params_t));
++ ipu_csi_get_window_size(cam->ipu, &vf.csi_prp_vf_mem.in_width,
++ &vf.csi_prp_vf_mem.in_height, cam->csi);
++ vf.csi_prp_vf_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
++ vf.csi_prp_vf_mem.out_width = cam->win.w.width;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.height;
++ vf.csi_prp_vf_mem.csi = cam->csi;
++ if (cam->vf_rotation >= IPU_ROTATE_90_RIGHT) {
++ vf.csi_prp_vf_mem.out_width = cam->win.w.height;
++ vf.csi_prp_vf_mem.out_height = cam->win.w.width;
++ }
++ vf.csi_prp_vf_mem.out_pixel_fmt = vf_out_format;
++ size = cam->win.w.width * cam->win.w.height * size;
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id) {
++ vf.csi_prp_vf_mem.mipi_en = true;
++ vf.csi_prp_vf_mem.mipi_vc =
++ mipi_csi2_get_virtual_channel(mipi_csi2_info);
++ vf.csi_prp_vf_mem.mipi_id =
++ mipi_csi2_get_datatype(mipi_csi2_info);
++
++ mipi_csi2_pixelclk_enable(mipi_csi2_info);
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ } else {
++ vf.csi_prp_vf_mem.mipi_en = false;
++ vf.csi_prp_vf_mem.mipi_vc = 0;
++ vf.csi_prp_vf_mem.mipi_id = 0;
++ }
++ }
++#endif
++
++ err = ipu_init_channel(cam->ipu, CSI_PRP_VF_MEM, &vf);
++ if (err != 0)
++ goto out_5;
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ }
++ cam->vf_bufs_size[0] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[0] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[0],
++ (dma_addr_t *) &
++ cam->vf_bufs[0],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[0] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_4;
++ }
++ cam->vf_bufs_size[1] = PAGE_ALIGN(size);
++ cam->vf_bufs_vaddr[1] = (void *)dma_alloc_coherent(0,
++ cam->vf_bufs_size[1],
++ (dma_addr_t *) &
++ cam->vf_bufs[1],
++ GFP_DMA |
++ GFP_KERNEL);
++ if (cam->vf_bufs_vaddr[1] == NULL) {
++		printk(KERN_ERR "Failed to allocate vf buffer\n");
++ err = -ENOMEM;
++ goto out_3;
++ }
++ pr_debug("vf_bufs %x %x\n", cam->vf_bufs[0], cam->vf_bufs[1]);
++
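++	/*
++	 * Two viewfinder paths: with rotation or flipping, preview frames go
++	 * CSI -> IC(PRP VF) -> MEM and then MEM -> IC(ROT) -> overlay
++	 * framebuffer; without rotation the PRP VF output is written
++	 * directly into the overlay framebuffer.
++	 */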
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ vf_out_format,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ IPU_ROTATE_NONE,
++ cam->vf_bufs[0], cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0)
++ goto out_3;
++
++ err = ipu_init_channel(cam->ipu, MEM_ROT_VF_MEM, NULL);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM channel\n");
++ goto out_3;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_INPUT_BUFFER,
++ vf_out_format,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ cam->vf_rotation,
++ cam->vf_bufs[0],
++ cam->vf_bufs[1],
++ 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM input buffer\n");
++ goto out_2;
++ }
++
++ if (cam->vf_rotation < IPU_ROTATE_90_RIGHT) {
++ temp = vf.csi_prp_vf_mem.out_width;
++ vf.csi_prp_vf_mem.out_width =
++ vf.csi_prp_vf_mem.out_height;
++ vf.csi_prp_vf_mem.out_height = temp;
++ }
++
++ err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ vf_out_format,
++ vf.csi_prp_vf_mem.out_height,
++ vf.csi_prp_vf_mem.out_width,
++ vf.csi_prp_vf_mem.out_height,
++ IPU_ROTATE_NONE,
++ fbi->fix.smem_start +
++ (fbi->fix.line_length *
++ fbi->var.yres),
++ fbi->fix.smem_start, 0, 0, 0);
++
++ if (err != 0) {
++ printk(KERN_ERR "Error MEM_ROT_VF_MEM output buffer\n");
++ goto out_2;
++ }
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_PRP_VF_ROT_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_VF_ROT_OUT_EOF,
++ prpvf_rot_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err < 0) {
++ printk(KERN_ERR "Error request irq:IPU_IRQ_PRP_VF_ROT_OUT_EOF\n");
++ goto out_2;
++ }
++
++ err = ipu_link_channels(cam->ipu,
++ CSI_PRP_VF_MEM, MEM_ROT_VF_MEM);
++ if (err < 0) {
++ printk(KERN_ERR
++ "Error link CSI_PRP_VF_MEM-MEM_ROT_VF_MEM\n");
++ goto out_1;
++ }
++
++ ipu_enable_channel(cam->ipu, CSI_PRP_VF_MEM);
++ ipu_enable_channel(cam->ipu, MEM_ROT_VF_MEM);
++
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, 1);
++ ipu_select_buffer(cam->ipu, MEM_ROT_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ } else {
++ err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER,
++ vf_out_format, cam->win.w.width,
++ cam->win.w.height,
++ cam->win.w.width,
++ cam->vf_rotation,
++ fbi->fix.smem_start +
++ (fbi->fix.line_length *
++ fbi->var.yres),
++ fbi->fix.smem_start, 0, 0, 0);
++ if (err != 0) {
++ printk(KERN_ERR "Error initializing CSI_PRP_VF_MEM\n");
++ goto out_4;
++ }
++ ipu_clear_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF,
++ prpvf_rot_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err < 0) {
++ printk(KERN_ERR "Error request irq:IPU_IRQ_PRP_VF_OUT_EOF\n");
++ goto out_4;
++ }
++
++ ipu_enable_channel(cam->ipu, CSI_PRP_VF_MEM);
++
++ ipu_select_buffer(cam->ipu, CSI_PRP_VF_MEM,
++ IPU_OUTPUT_BUFFER, 0);
++ }
++
++ cam->overlay_active = true;
++ return err;
++
++out_1:
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, NULL);
++out_2:
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP)
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++out_3:
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++out_4:
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++out_5:
++ return err;
++}
++
++/*!
++ * prpvf_stop - stop the vf task
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ */
++static int prpvf_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0, i = 0;
++ struct fb_info *fbi = NULL;
++ struct fb_var_screeninfo fbvar;
++#ifdef CONFIG_MXC_MIPI_CSI2
++ void *mipi_csi2_info;
++ int ipu_id;
++ int csi_id;
++#endif
++
++ if (cam->overlay_active == false)
++ return 0;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ char *idstr = registered_fb[i]->fix.id;
++ if (((strcmp(idstr, "DISP3 FG") == 0) && (cam->output < 3)) ||
++ ((strcmp(idstr, "DISP4 FG") == 0) && (cam->output >= 3))) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++
++ if (fbi == NULL) {
++ printk(KERN_ERR "DISP FG fb not found\n");
++ return -EPERM;
++ }
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ ipu_unlink_channels(cam->ipu, CSI_PRP_VF_MEM, MEM_ROT_VF_MEM);
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_ROT_OUT_EOF, cam);
++ }
++ buffer_num = 0;
++
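++	/* Tear down in the reverse order of prpvf_start: disable and uninit
++	 * the IPU channels, then blank the overlay and restore its original
++	 * framebuffer settings. */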
++ ipu_disable_channel(cam->ipu, CSI_PRP_VF_MEM, true);
++
++ if (cam->vf_rotation >= IPU_ROTATE_VERT_FLIP) {
++ ipu_disable_channel(cam->ipu, MEM_ROT_VF_MEM, true);
++ ipu_uninit_channel(cam->ipu, MEM_ROT_VF_MEM);
++ }
++ ipu_uninit_channel(cam->ipu, CSI_PRP_VF_MEM);
++
++ console_lock();
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ console_unlock();
++
++ /* Set the overlay frame buffer std to what it is used to be */
++ fbvar = fbi->var;
++ fbvar.accel_flags = FB_ACCEL_TRIPLE_FLAG;
++ fbvar.nonstd = cam->fb_origin_std;
++ fbvar.activate |= FB_ACTIVATE_FORCE;
++ fb_set_var(fbi, &fbvar);
++
++#ifdef CONFIG_MXC_MIPI_CSI2
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ if (mipi_csi2_info) {
++ if (mipi_csi2_get_status(mipi_csi2_info)) {
++ ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
++ csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);
++
++ if (cam->ipu == ipu_get_soc(ipu_id)
++ && cam->csi == csi_id)
++ mipi_csi2_pixelclk_disable(mipi_csi2_info);
++ }
++ }
++#endif
++
++ if (cam->vf_bufs_vaddr[0]) {
++ dma_free_coherent(0, cam->vf_bufs_size[0],
++ cam->vf_bufs_vaddr[0],
++ (dma_addr_t) cam->vf_bufs[0]);
++ cam->vf_bufs_vaddr[0] = NULL;
++ cam->vf_bufs[0] = 0;
++ }
++ if (cam->vf_bufs_vaddr[1]) {
++ dma_free_coherent(0, cam->vf_bufs_size[1],
++ cam->vf_bufs_vaddr[1],
++ (dma_addr_t) cam->vf_bufs[1]);
++ cam->vf_bufs_vaddr[1] = NULL;
++ cam->vf_bufs[1] = 0;
++ }
++
++ cam->overlay_active = false;
++ return err;
++}
++
++/*!
++ * Enable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_enable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ return ipu_enable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * Disable csi
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_vf_disable_csi(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++	/* Free the CSI EOF irq first: disabling the CSI waits for an IDMAC
++	 * EOF, which would request the EOF irq again. */
++ if (cam->vf_rotation < IPU_ROTATE_VERT_FLIP)
++ ipu_free_irq(cam->ipu, IPU_IRQ_PRP_VF_OUT_EOF, cam);
++
++ return ipu_disable_csi(cam->ipu, cam->csi);
++}
++
++/*!
++ * function to select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return status
++ */
++int prp_vf_sdc_select(void *private)
++{
++ cam_data *cam;
++ int err = 0;
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = prpvf_start;
++ cam->vf_stop_sdc = prpvf_stop;
++ cam->vf_enable_csi = prp_vf_enable_csi;
++ cam->vf_disable_csi = prp_vf_disable_csi;
++ cam->overlay_active = false;
++ } else
++ err = -EIO;
++
++ return err;
++}
++EXPORT_SYMBOL(prp_vf_sdc_select);
++
++/*!
++ * function to de-select PRP-VF as the working path
++ *
++ * @param private cam_data * mxc v4l2 main structure
++ *
++ * @return int
++ */
++int prp_vf_sdc_deselect(void *private)
++{
++ cam_data *cam;
++
++ if (private) {
++ cam = (cam_data *) private;
++ cam->vf_start_sdc = NULL;
++ cam->vf_stop_sdc = NULL;
++ cam->vf_enable_csi = NULL;
++ cam->vf_disable_csi = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(prp_vf_sdc_deselect);
++
++/*!
++ * Init viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_vf_sdc_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit viewfinder task.
++ *
++ * @return Error code indicating success or failure
++ */
++void __exit prp_vf_sdc_exit(void)
++{
++}
++
++module_init(prp_vf_sdc_init);
++module_exit(prp_vf_sdc_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP VF SDC Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_still.c linux-3.14.40/drivers/media/platform/mxc/capture/ipu_still.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ipu_still.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ipu_still.c 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,268 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_still.c
++ *
++ * @brief IPU Use case for still image capture
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/module.h>
++#include <linux/semaphore.h>
++#include <linux/sched.h>
++#include <linux/ipu.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++static int callback_eof_flag;
++#ifndef CONFIG_MXC_IPU_V1
++static int buffer_num;
++#endif
++
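++/*
++ * On IPU v1 a separate CSI EOF interrupt starts the channel and selects the
++ * capture buffer; on later IPUs the CSI0 output EOF handler below flips the
++ * ping-pong buffer directly.
++ */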
++#ifdef CONFIG_MXC_IPU_V1
++static int callback_flag;
++/*
++ * Function definitions
++ */
++/*!
++ * CSI EOF callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prp_csi_eof_callback(int irq, void *dev_id)
++{
++	cam_data *cam = dev_id;
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ callback_flag%2 ? 1 : 0);
++ if (callback_flag == 0)
++ ipu_enable_channel(cam->ipu, CSI_MEM);
++
++ callback_flag++;
++ return IRQ_HANDLED;
++}
++#endif
++
++/*!
++ * CSI callback function.
++ *
++ * @param irq int irq line
++ * @param dev_id void * device id
++ *
++ * @return status IRQ_HANDLED for handled
++ */
++static irqreturn_t prp_still_callback(int irq, void *dev_id)
++{
++ cam_data *cam = (cam_data *) dev_id;
++
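++	/* The first four EOF interrupts are ignored apart from flipping the
++	 * ping-pong buffer where applicable; the fifth completes the still
++	 * capture and wakes the waiting thread. */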
++ callback_eof_flag++;
++ if (callback_eof_flag < 5) {
++#ifndef CONFIG_MXC_IPU_V1
++ buffer_num = (buffer_num == 0) ? 1 : 0;
++ ipu_select_buffer(cam->ipu, CSI_MEM,
++ IPU_OUTPUT_BUFFER, buffer_num);
++#endif
++ } else {
++ cam->still_counter++;
++ wake_up_interruptible(&cam->still_queue);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * start csi->mem task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_still_start(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ u32 pixel_fmt;
++ int err;
++ ipu_channel_params_t params;
++
++ if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420)
++ pixel_fmt = IPU_PIX_FMT_YUV420P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12)
++ pixel_fmt = IPU_PIX_FMT_NV12;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
++ pixel_fmt = IPU_PIX_FMT_YUV422P;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY)
++ pixel_fmt = IPU_PIX_FMT_UYVY;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV)
++ pixel_fmt = IPU_PIX_FMT_YUYV;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24)
++ pixel_fmt = IPU_PIX_FMT_BGR24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
++ pixel_fmt = IPU_PIX_FMT_RGB24;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565)
++ pixel_fmt = IPU_PIX_FMT_RGB565;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32)
++ pixel_fmt = IPU_PIX_FMT_BGR32;
++ else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32)
++ pixel_fmt = IPU_PIX_FMT_RGB32;
++ else {
++ printk(KERN_ERR "format not supported\n");
++ return -EINVAL;
++ }
++
++ memset(&params, 0, sizeof(params));
++ err = ipu_init_channel(cam->ipu, CSI_MEM, &params);
++ if (err != 0)
++ return err;
++
++ err = ipu_init_channel_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER,
++ pixel_fmt, cam->v2f.fmt.pix.width,
++ cam->v2f.fmt.pix.height,
++ cam->v2f.fmt.pix.width, IPU_ROTATE_NONE,
++ cam->still_buf[0], cam->still_buf[1], 0,
++ 0, 0);
++ if (err != 0)
++ return err;
++
++#ifdef CONFIG_MXC_IPU_V1
++ ipu_clear_irq(IPU_IRQ_SENSOR_OUT_EOF);
++ err = ipu_request_irq(IPU_IRQ_SENSOR_OUT_EOF, prp_still_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering irq.\n");
++ return err;
++ }
++ callback_flag = 0;
++ callback_eof_flag = 0;
++ ipu_clear_irq(IPU_IRQ_SENSOR_EOF);
++ err = ipu_request_irq(IPU_IRQ_SENSOR_EOF, prp_csi_eof_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error IPU_IRQ_SENSOR_EOF\n");
++ return err;
++ }
++#else
++ callback_eof_flag = 0;
++ buffer_num = 0;
++
++ ipu_clear_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF);
++ err = ipu_request_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF,
++ prp_still_callback,
++ 0, "Mxc Camera", cam);
++ if (err != 0) {
++ printk(KERN_ERR "Error registering irq.\n");
++ return err;
++ }
++
++ ipu_select_buffer(cam->ipu, CSI_MEM, IPU_OUTPUT_BUFFER, 0);
++ ipu_enable_channel(cam->ipu, CSI_MEM);
++ ipu_enable_csi(cam->ipu, cam->csi);
++#endif
++
++ return err;
++}
++
++/*!
++ * stop csi->mem encoder task
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++static int prp_still_stop(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++#ifdef CONFIG_MXC_IPU_V1
++ ipu_free_irq(IPU_IRQ_SENSOR_EOF, NULL);
++ ipu_free_irq(IPU_IRQ_SENSOR_OUT_EOF, cam);
++#else
++ ipu_free_irq(cam->ipu, IPU_IRQ_CSI0_OUT_EOF, cam);
++#endif
++
++ ipu_disable_csi(cam->ipu, cam->csi);
++ ipu_disable_channel(cam->ipu, CSI_MEM, true);
++ ipu_uninit_channel(cam->ipu, CSI_MEM);
++
++ return err;
++}
++
++/*!
++ * function to select CSI_MEM as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++int prp_still_select(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++
++ if (cam) {
++ cam->csi_start = prp_still_start;
++ cam->csi_stop = prp_still_stop;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(prp_still_select);
++
++/*!
++ * function to de-select CSI_MEM as the working path
++ *
++ * @param private struct cam_data * mxc capture instance
++ *
++ * @return status
++ */
++int prp_still_deselect(void *private)
++{
++ cam_data *cam = (cam_data *) private;
++ int err = 0;
++
++ err = prp_still_stop(cam);
++
++ if (cam) {
++ cam->csi_start = NULL;
++ cam->csi_stop = NULL;
++ }
++
++ return err;
++}
++EXPORT_SYMBOL(prp_still_deselect);
++
++/*!
++ * Init the Encoder channels
++ *
++ * @return Error code indicating success or failure
++ */
++__init int prp_still_init(void)
++{
++ return 0;
++}
++
++/*!
++ * Deinit the Encoder channels
++ *
++ */
++void __exit prp_still_exit(void)
++{
++}
++
++module_init(prp_still_init);
++module_exit(prp_still_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IPU PRP STILL IMAGE Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/Kconfig linux-3.14.40/drivers/media/platform/mxc/capture/Kconfig
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/Kconfig 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,86 @@
++if VIDEO_MXC_CAPTURE
++
++menu "MXC Camera/V4L2 PRP Features support"
++config VIDEO_MXC_IPU_CAMERA
++ bool
++ depends on VIDEO_MXC_CAPTURE && MXC_IPU
++ default y
++
++config VIDEO_MXC_CSI_CAMERA
++ tristate "CSI camera support"
++ depends on VIDEO_MXC_CAPTURE && VIDEO_V4L2
++ ---help---
++	  This is the video4linux2 capture driver based on the CSI module.
++
++config MXC_CAMERA_OV5640
++ tristate "OmniVision ov5640 camera support"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the ov5640 Camera with your MXC system, say Y here.
++
++config MXC_CAMERA_OV5642
++ tristate "OmniVision ov5642 camera support"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the ov5642 Camera with your MXC system, say Y here.
++
++config MXC_CAMERA_OV5640_MIPI
++ tristate "OmniVision ov5640 camera support using mipi"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the ov5640 Camera with mipi interface in your MXC system, say Y here.
++
++config MXC_TVIN_ADV7180
++ tristate "Analog Device adv7180 TV Decoder Input support"
++ depends on !VIDEO_MXC_EMMA_CAMERA && I2C
++ ---help---
++ If you plan to use the adv7180 video decoder with your MXC system, say Y here.
++
++choice
++	prompt "Select Overlay Routing"
++ default MXC_IPU_DEVICE_QUEUE_SDC
++ depends on VIDEO_MXC_IPU_CAMERA && FB_MXC_SYNC_PANEL
++
++config MXC_IPU_DEVICE_QUEUE_SDC
++ tristate "Queue ipu device for overlay library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ ---help---
++ Use case CSI->MEM->IPU DEVICE->SDC:
++	  Images from the sensor are first received in memory, then
++	  queued to the IPU device for processing if needed, and
++	  displayed on a synchronous display (SDC use case).
++
++config MXC_IPU_PRP_VF_SDC
++ bool "Pre-Processor VF SDC library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ ---help---
++ Use case PRP_VF_SDC:
++	  Preprocess the image from a smart sensor for the viewfinder and
++	  display it on a synchronous display (SDC use case).
++	  If SDC BG is selected, rotation is not supported.
++ CSI -> IC (PRP VF) -> MEM
++ MEM -> IC (ROT) -> MEM
++ MEM -> SDC (FG/BG)
++
++endchoice
++
++config MXC_IPU_PRP_ENC
++ tristate "Pre-processor Encoder library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ default y
++ ---help---
++ Use case PRP_ENC:
++ Preprocessing image from smart sensor for encoder.
++ CSI -> IC (PRP ENC) -> MEM
++
++config MXC_IPU_CSI_ENC
++ tristate "IPU CSI Encoder library"
++ depends on VIDEO_MXC_IPU_CAMERA
++ default y
++ ---help---
++ Use case IPU_CSI_ENC:
++ Get raw image with CSI from smart sensor for encoder.
++ CSI -> MEM
++endmenu
++
++endif
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/Makefile linux-3.14.40/drivers/media/platform/mxc/capture/Makefile
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/Makefile 2015-05-01 14:57:59.259427001 -0500
+@@ -0,0 +1,21 @@
++obj-$(CONFIG_VIDEO_MXC_CSI_CAMERA) += fsl_csi.o csi_v4l2_capture.o
++
++ifeq ($(CONFIG_VIDEO_MXC_IPU_CAMERA),y)
++ obj-$(CONFIG_VIDEO_MXC_CAPTURE) += mxc_v4l2_capture.o
++ obj-$(CONFIG_MXC_IPU_PRP_VF_SDC) += ipu_prp_vf_sdc.o ipu_prp_vf_sdc_bg.o
++ obj-$(CONFIG_MXC_IPU_DEVICE_QUEUE_SDC) += ipu_fg_overlay_sdc.o ipu_bg_overlay_sdc.o
++ obj-$(CONFIG_MXC_IPU_PRP_ENC) += ipu_prp_enc.o ipu_still.o
++ obj-$(CONFIG_MXC_IPU_CSI_ENC) += ipu_csi_enc.o ipu_still.o
++endif
++
++ov5640_camera-objs := ov5640.o
++obj-$(CONFIG_MXC_CAMERA_OV5640) += ov5640_camera.o
++
++ov5642_camera-objs := ov5642.o
++obj-$(CONFIG_MXC_CAMERA_OV5642) += ov5642_camera.o
++
++ov5640_camera_mipi-objs := ov5640_mipi.o
++obj-$(CONFIG_MXC_CAMERA_OV5640_MIPI) += ov5640_camera_mipi.o
++
++adv7180_tvin-objs := adv7180.o
++obj-$(CONFIG_MXC_TVIN_ADV7180) += adv7180_tvin.o
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c linux-3.14.40/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/mxc_v4l2_capture.c 2015-05-01 14:57:59.263427001 -0500
+@@ -0,0 +1,3102 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file drivers/media/video/mxc/capture/mxc_v4l2_capture.c
++ *
++ * @brief Mxc Video For Linux 2 driver
++ *
++ * @ingroup MXC_V4L2_CAPTURE
++ */
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/semaphore.h>
++#include <linux/pagemap.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-int-device.h>
++#include <linux/fsl_devices.h>
++#include "mxc_v4l2_capture.h"
++#include "ipu_prp_sw.h"
++
++#define init_MUTEX(sem) sema_init(sem, 1)
++
++static struct platform_device_id imx_v4l2_devtype[] = {
++ {
++ .name = "v4l2-capture-imx5",
++ .driver_data = IMX5_V4L2,
++ }, {
++ .name = "v4l2-capture-imx6",
++ .driver_data = IMX6_V4L2,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_v4l2_devtype);
++
++static const struct of_device_id mxc_v4l2_dt_ids[] = {
++ {
++ .compatible = "fsl,imx6q-v4l2-capture",
++ .data = &imx_v4l2_devtype[IMX6_V4L2],
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(of, mxc_v4l2_dt_ids);
++
++static int video_nr = -1;
++
++/*! This data is used for the output to the display. */
++#define MXC_V4L2_CAPTURE_NUM_OUTPUTS 6
++#define MXC_V4L2_CAPTURE_NUM_INPUTS 2
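++/* Outputs 0-2 select the DISP3 framebuffers and outputs 3-5 the DISP4
++ * framebuffers; the "FG" entries are the overlay (foreground) planes. */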
++static struct v4l2_output mxc_capture_outputs[MXC_V4L2_CAPTURE_NUM_OUTPUTS] = {
++ {
++ .index = 0,
++ .name = "DISP3 BG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 1,
++ .name = "DISP3 BG - DI1",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 2,
++ .name = "DISP3 FG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 3,
++ .name = "DISP4 BG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 4,
++ .name = "DISP4 BG - DI1",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++ {
++ .index = 5,
++ .name = "DISP4 FG",
++ .type = V4L2_OUTPUT_TYPE_ANALOG,
++ .audioset = 0,
++ .modulator = 0,
++ .std = V4L2_STD_UNKNOWN,
++ },
++};
++
++static struct v4l2_input mxc_capture_inputs[MXC_V4L2_CAPTURE_NUM_INPUTS] = {
++ {
++ .index = 0,
++ .name = "CSI IC MEM",
++ .type = V4L2_INPUT_TYPE_CAMERA,
++ .audioset = 0,
++ .tuner = 0,
++ .std = V4L2_STD_UNKNOWN,
++ .status = 0,
++ },
++ {
++ .index = 1,
++ .name = "CSI MEM",
++ .type = V4L2_INPUT_TYPE_CAMERA,
++ .audioset = 0,
++ .tuner = 0,
++ .std = V4L2_STD_UNKNOWN,
++ .status = V4L2_IN_ST_NO_POWER,
++ },
++};
++
++/*! List of supported TV input video formats. Each entry corresponds to a
++ * v4l2_id in video_fmt_t.
++ * Currently only PAL and NTSC are supported; this needs to be expanded in
++ * the future.
++ */
++typedef enum {
++ TV_NTSC = 0, /*!< Locked on (M) NTSC video signal. */
++ TV_PAL, /*!< (B, G, H, I, N)PAL video signal. */
++ TV_NOT_LOCKED, /*!< Not locked on a signal. */
++} video_fmt_idx;
++
++/*! Number of video standards supported (including 'not locked' signal). */
++#define TV_STD_MAX (TV_NOT_LOCKED + 1)
++
++/*! Video format structure. */
++typedef struct {
++ int v4l2_id; /*!< Video for linux ID. */
++ char name[16]; /*!< Name (e.g., "NTSC", "PAL", etc.) */
++ u16 raw_width; /*!< Raw width. */
++ u16 raw_height; /*!< Raw height. */
++ u16 active_width; /*!< Active width. */
++ u16 active_height; /*!< Active height. */
++ u16 active_top; /*!< Active top. */
++ u16 active_left; /*!< Active left. */
++} video_fmt_t;
++
++/*!
++ * Description of video formats supported.
++ *
++ * PAL: raw=720x625, active=720x576.
++ * NTSC: raw=720x525, active=720x480.
++ */
++static video_fmt_t video_fmts[] = {
++ { /*! NTSC */
++ .v4l2_id = V4L2_STD_NTSC,
++ .name = "NTSC",
++ .raw_width = 720, /* SENS_FRM_WIDTH */
++ .raw_height = 525, /* SENS_FRM_HEIGHT */
++ .active_width = 720, /* ACT_FRM_WIDTH */
++ .active_height = 480, /* ACT_FRM_HEIGHT */
++ .active_top = 13,
++ .active_left = 0,
++ },
++ { /*! (B, G, H, I, N) PAL */
++ .v4l2_id = V4L2_STD_PAL,
++ .name = "PAL",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ .active_top = 0,
++ .active_left = 0,
++ },
++ { /*! Unlocked standard */
++ .v4l2_id = V4L2_STD_ALL,
++ .name = "Autodetect",
++ .raw_width = 720,
++ .raw_height = 625,
++ .active_width = 720,
++ .active_height = 576,
++ .active_top = 0,
++ .active_left = 0,
++ },
++};
++
++/*! Standard index of the TV input. */
++static video_fmt_idx video_index = TV_NOT_LOCKED;
++
++static int mxc_v4l2_master_attach(struct v4l2_int_device *slave);
++static void mxc_v4l2_master_detach(struct v4l2_int_device *slave);
++static int start_preview(cam_data *cam);
++static int stop_preview(cam_data *cam);
++
++/*! Information about this driver. */
++static struct v4l2_int_master mxc_v4l2_master = {
++ .attach = mxc_v4l2_master_attach,
++ .detach = mxc_v4l2_master_detach,
++};
++
++/***************************************************************************
++ * Functions for handling Frame buffers.
++ **************************************************************************/
++
++/*!
++ * Free frame buffers
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return status 0 success.
++ */
++static int mxc_free_frame_buf(cam_data *cam)
++{
++ int i;
++
++ pr_debug("MVC: In mxc_free_frame_buf\n");
++
++ for (i = 0; i < FRAME_NUM; i++) {
++ if (cam->frame[i].vaddress != 0) {
++ dma_free_coherent(0, cam->frame[i].buffer.length,
++ cam->frame[i].vaddress,
++ cam->frame[i].paddress);
++ cam->frame[i].vaddress = 0;
++ }
++ }
++
++ return 0;
++}
++
++/*!
++ * Allocate frame buffers
++ *
++ * @param cam Structure cam_data*
++ * @param count int number of buffers to allocate
++ *
++ * @return status 0 Successfully allocated the buffers, -ENOBUFS failed.
++ */
++static int mxc_allocate_frame_buf(cam_data *cam, int count)
++{
++ int i;
++
++ pr_debug("In MVC:mxc_allocate_frame_buf - size=%d\n",
++ cam->v2f.fmt.pix.sizeimage);
++
++ for (i = 0; i < count; i++) {
++ cam->frame[i].vaddress =
++ dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->frame[i].paddress,
++ GFP_DMA | GFP_KERNEL);
++ if (cam->frame[i].vaddress == 0) {
++ pr_err("ERROR: v4l2 capture: "
++ "mxc_allocate_frame_buf failed.\n");
++ mxc_free_frame_buf(cam);
++ return -ENOBUFS;
++ }
++ cam->frame[i].buffer.index = i;
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[i].buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->frame[i].buffer.length =
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
++ cam->frame[i].buffer.memory = V4L2_MEMORY_MMAP;
++ cam->frame[i].buffer.m.offset = cam->frame[i].paddress;
++ cam->frame[i].index = i;
++ }
++
++ return 0;
++}
++
++/*!
++ * Free frame buffers status
++ *
++ * @param cam Structure cam_data *
++ *
++ * @return none
++ */
++static void mxc_free_frames(cam_data *cam)
++{
++ int i;
++
++ pr_debug("In MVC:mxc_free_frames\n");
++
++ for (i = 0; i < FRAME_NUM; i++)
++ cam->frame[i].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++}
++
++/*!
++ * Return the buffer status
++ *
++ * @param cam Structure cam_data *
++ * @param buf Structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL failed.
++ */
++static int mxc_v4l2_buffer_status(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC:mxc_v4l2_buffer_status\n");
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l2_buffer_status buffers "
++ "not allocated\n");
++ return -EINVAL;
++ }
++
++ memcpy(buf, &(cam->frame[buf->index].buffer), sizeof(*buf));
++ return 0;
++}
++
++static int mxc_v4l2_release_bufs(cam_data *cam)
++{
++ pr_debug("In MVC:mxc_v4l2_release_bufs\n");
++ return 0;
++}
++
++static int mxc_v4l2_prepare_bufs(cam_data *cam, struct v4l2_buffer *buf)
++{
++ pr_debug("In MVC:mxc_v4l2_prepare_bufs\n");
++
++ if (buf->index < 0 || buf->index >= FRAME_NUM || buf->length <
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l2_prepare_bufs buffers "
++ "not allocated,index=%d, length=%d\n", buf->index,
++ buf->length);
++ return -EINVAL;
++ }
++
++ cam->frame[buf->index].buffer.index = buf->index;
++ cam->frame[buf->index].buffer.flags = V4L2_BUF_FLAG_MAPPED;
++ cam->frame[buf->index].buffer.length = buf->length;
++ cam->frame[buf->index].buffer.m.offset = cam->frame[buf->index].paddress
++ = buf->m.offset;
++ cam->frame[buf->index].buffer.type = buf->type;
++ cam->frame[buf->index].buffer.memory = V4L2_MEMORY_USERPTR;
++ cam->frame[buf->index].index = buf->index;
++
++ return 0;
++}
++
++/***************************************************************************
++ * Functions for handling the video stream.
++ **************************************************************************/
++
++/*!
++ * Indicates whether the palette is supported.
++ *
++ * @param palette V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_BGR24 or V4L2_PIX_FMT_BGR32
++ *
++ * @return 0 if failed
++ */
++static inline int valid_mode(u32 palette)
++{
++ return ((palette == V4L2_PIX_FMT_RGB565) ||
++ (palette == V4L2_PIX_FMT_BGR24) ||
++ (palette == V4L2_PIX_FMT_RGB24) ||
++ (palette == V4L2_PIX_FMT_BGR32) ||
++ (palette == V4L2_PIX_FMT_RGB32) ||
++ (palette == V4L2_PIX_FMT_YUV422P) ||
++ (palette == V4L2_PIX_FMT_UYVY) ||
++ (palette == V4L2_PIX_FMT_YUYV) ||
++ (palette == V4L2_PIX_FMT_YUV420) ||
++ (palette == V4L2_PIX_FMT_YVU420) ||
++ (palette == V4L2_PIX_FMT_NV12));
++}
++
++/*!
++ * Start the encoder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int mxc_streamon(cam_data *cam)
++{
++ struct mxc_v4l_frame *frame;
++ unsigned long lock_flags;
++ int err = 0;
++
++ pr_debug("In MVC:mxc_streamon\n");
++
++ if (NULL == cam) {
++ pr_err("ERROR! cam parameter is NULL\n");
++ return -1;
++ }
++
++ if (cam->capture_on) {
++		pr_err("ERROR: v4l2 capture: Capture stream has been turned"
++		       " on\n");
++ return -1;
++ }
++
++ if (list_empty(&cam->ready_q)) {
++ pr_err("ERROR: v4l2 capture: mxc_streamon buffer has not been "
++ "queued yet\n");
++ return -EINVAL;
++ }
++ if (cam->enc_update_eba &&
++ cam->ready_q.prev == cam->ready_q.next) {
++		pr_err("ERROR: v4l2 capture: mxc_streamon needs at least "
++		       "two buffers queued for ping-pong\n");
++ return -EINVAL;
++ }
++
++ cam->capture_pid = current->pid;
++
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ if (cam->enc_enable) {
++ err = cam->enc_enable(cam);
++ if (err != 0)
++ return err;
++ }
++
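++	/* Prime the IPU double buffering: take the first two frames off the
++	 * ready queue, move them to the working queue and program their
++	 * physical addresses into buffers 0 and 1 before the CSI is enabled. */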
++ spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
++ cam->ping_pong_csi = 0;
++ cam->local_buf_num = 0;
++ if (cam->enc_update_eba) {
++ frame =
++ list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ frame->ipu_buf_num = cam->ping_pong_csi;
++ err = cam->enc_update_eba(cam->ipu, frame->buffer.m.offset,
++ &cam->ping_pong_csi);
++
++ frame =
++ list_entry(cam->ready_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->ready_q.next);
++ list_add_tail(&frame->queue, &cam->working_q);
++ frame->ipu_buf_num = cam->ping_pong_csi;
++ err |= cam->enc_update_eba(cam->ipu, frame->buffer.m.offset,
++ &cam->ping_pong_csi);
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++ } else {
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++ return -EINVAL;
++ }
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ if (cam->enc_enable_csi) {
++ err = cam->enc_enable_csi(cam);
++ if (err != 0)
++ return err;
++ }
++
++ cam->capture_on = true;
++
++ return err;
++}
++
++/*!
++ * Shut down the encoder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int mxc_streamoff(cam_data *cam)
++{
++ int err = 0;
++
++ pr_debug("In MVC:mxc_streamoff\n");
++
++ if (cam->capture_on == false)
++ return 0;
++
++ /* For both CSI--MEM and CSI--IC--MEM
++ * 1. wait for idmac eof
++ * 2. disable csi first
++ * 3. disable idmac
++ * 4. disable smfc (CSI--MEM channel)
++ */
++ if (mxc_capture_inputs[cam->current_input].name != NULL) {
++ if (cam->enc_disable_csi) {
++ err = cam->enc_disable_csi(cam);
++ if (err != 0)
++ return err;
++ }
++ if (cam->enc_disable) {
++ err = cam->enc_disable(cam);
++ if (err != 0)
++ return err;
++ }
++ }
++
++ mxc_free_frames(cam);
++ mxc_capture_inputs[cam->current_input].status |= V4L2_IN_ST_NO_POWER;
++ cam->capture_on = false;
++ return err;
++}
++
++/*!
++ * Valid and adjust the overlay window size, position
++ *
++ * @param cam structure cam_data *
++ * @param win struct v4l2_window *
++ *
++ * @return 0
++ */
++static int verify_preview(cam_data *cam, struct v4l2_window *win)
++{
++ int i = 0, width_bound = 0, height_bound = 0;
++ int *width, *height;
++ unsigned int ipu_ch = CHAN_NONE;
++ struct fb_info *bg_fbi = NULL, *fbi = NULL;
++	bool foreground_fb = false;
++ mm_segment_t old_fs;
++
++ pr_debug("In MVC: verify_preview\n");
++
++ do {
++ fbi = (struct fb_info *)registered_fb[i];
++ if (fbi == NULL) {
++ pr_err("ERROR: verify_preview frame buffer NULL.\n");
++ return -1;
++ }
++
++ /* Which DI supports 2 layers? */
++ if (((strncmp(fbi->fix.id, "DISP3 BG", 8) == 0) &&
++ (cam->output < 3)) ||
++ ((strncmp(fbi->fix.id, "DISP4 BG", 8) == 0) &&
++ (cam->output >= 3))) {
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_GET_FB_IPU_CHAN,
++ (unsigned long)&ipu_ch);
++ set_fs(old_fs);
++ }
++ if (ipu_ch == MEM_BG_SYNC) {
++ bg_fbi = fbi;
++ pr_debug("Found background frame buffer.\n");
++ }
++ }
++
++ /* Found the frame buffer to preview on. */
++ if (strcmp(fbi->fix.id,
++ mxc_capture_outputs[cam->output].name) == 0) {
++ if (((strcmp(fbi->fix.id, "DISP3 FG") == 0) &&
++ (cam->output < 3)) ||
++ ((strcmp(fbi->fix.id, "DISP4 FG") == 0) &&
++ (cam->output >= 3)))
++				foreground_fb = true;
++
++ cam->overlay_fb = fbi;
++ break;
++ }
++ } while (++i < FB_MAX);
++
++	if (foreground_fb) {
++ width_bound = bg_fbi->var.xres;
++ height_bound = bg_fbi->var.yres;
++
++ if (win->w.width + win->w.left > bg_fbi->var.xres ||
++ win->w.height + win->w.top > bg_fbi->var.yres) {
++			pr_err("ERROR: FG window exceeds the background bounds.\n");
++ return -1;
++ }
++ } else {
++ /* 4 bytes alignment for BG */
++ width_bound = cam->overlay_fb->var.xres;
++ height_bound = cam->overlay_fb->var.yres;
++
++ if (cam->overlay_fb->var.bits_per_pixel == 24)
++ win->w.left -= win->w.left % 4;
++ else if (cam->overlay_fb->var.bits_per_pixel == 16)
++ win->w.left -= win->w.left % 2;
++
++ if (win->w.width + win->w.left > cam->overlay_fb->var.xres)
++ win->w.width = cam->overlay_fb->var.xres - win->w.left;
++ if (win->w.height + win->w.top > cam->overlay_fb->var.yres)
++ win->w.height = cam->overlay_fb->var.yres - win->w.top;
++ }
++
++ /* stride line limitation */
++ win->w.height -= win->w.height % 8;
++ win->w.width -= win->w.width % 8;
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ height = &win->w.width;
++ width = &win->w.height;
++ } else {
++ width = &win->w.width;
++ height = &win->w.height;
++ }
++
++ if (*width == 0 || *height == 0) {
++ pr_err("ERROR: v4l2 capture: width or height"
++ " too small.\n");
++ return -EINVAL;
++ }
++
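++	/* The IPU resize path cannot downscale by more than 8:1, so clamp the
++	 * requested preview size to 1/8th of the crop bounds, rounded up to a
++	 * multiple of 8. */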
++ if ((cam->crop_bounds.width / *width > 8) ||
++ ((cam->crop_bounds.width / *width == 8) &&
++ (cam->crop_bounds.width % *width))) {
++ *width = cam->crop_bounds.width / 8;
++ if (*width % 8)
++ *width += 8 - *width % 8;
++ if (*width + win->w.left > width_bound) {
++ pr_err("ERROR: v4l2 capture: width exceeds "
++ "resize limit.\n");
++ return -1;
++ }
++ pr_err("ERROR: v4l2 capture: width exceeds limit. "
++ "Resize to %d.\n",
++ *width);
++ }
++
++ if ((cam->crop_bounds.height / *height > 8) ||
++ ((cam->crop_bounds.height / *height == 8) &&
++ (cam->crop_bounds.height % *height))) {
++ *height = cam->crop_bounds.height / 8;
++ if (*height % 8)
++ *height += 8 - *height % 8;
++ if (*height + win->w.top > height_bound) {
++ pr_err("ERROR: v4l2 capture: height exceeds "
++ "resize limit.\n");
++ return -1;
++ }
++ pr_err("ERROR: v4l2 capture: height exceeds limit "
++ "resize to %d.\n",
++ *height);
++ }
++
++ return 0;
++}
++
++/*!
++ * start the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int start_preview(cam_data *cam)
++{
++ int err = 0;
++
++ pr_debug("MVC: start_preview\n");
++
++ if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_OVERLAY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_select(cam);
++ #else
++ err = foreground_sdc_select(cam);
++ #endif
++ else if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_PRIMARY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_select_bg(cam);
++ #else
++ err = bg_overlay_sdc_select(cam);
++ #endif
++ if (err != 0)
++ return err;
++
++ if (cam->vf_start_sdc) {
++ err = cam->vf_start_sdc(cam);
++ if (err != 0)
++ return err;
++ }
++
++ if (cam->vf_enable_csi)
++ err = cam->vf_enable_csi(cam);
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return err;
++}
++
++/*!
++ * shut down the viewfinder job
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int stop_preview(cam_data *cam)
++{
++ int err = 0;
++
++ if (cam->vf_disable_csi) {
++ err = cam->vf_disable_csi(cam);
++ if (err != 0)
++ return err;
++ }
++
++ if (cam->vf_stop_sdc) {
++ err = cam->vf_stop_sdc(cam);
++ if (err != 0)
++ return err;
++ }
++
++ if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_OVERLAY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_deselect(cam);
++ #else
++ err = foreground_sdc_deselect(cam);
++ #endif
++ else if (cam->v4l2_fb.flags == V4L2_FBUF_FLAG_PRIMARY)
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ err = prp_vf_sdc_deselect_bg(cam);
++ #else
++ err = bg_overlay_sdc_deselect(cam);
++ #endif
++
++ return err;
++}
++
++/***************************************************************************
++ * VIDIOC Functions.
++ **************************************************************************/
++
++/*!
++ * V4L2 - mxc_v4l2_g_fmt function
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_g_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++
++ pr_debug("In MVC: mxc_v4l2_g_fmt type=%d\n", f->type);
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ f->fmt.pix = cam->v2f.fmt.pix;
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type is V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ f->fmt.win = cam->win;
++ break;
++ default:
++ pr_debug(" type is invalid\n");
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return retval;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_s_fmt function
++ *
++ * @param cam structure cam_data *
++ *
++ * @param f structure v4l2_format *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_fmt(cam_data *cam, struct v4l2_format *f)
++{
++ int retval = 0;
++ int size = 0;
++ int bytesperline = 0;
++ int *width, *height;
++
++ pr_debug("In MVC: mxc_v4l2_s_fmt\n");
++
++ switch (f->type) {
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
++ if (!valid_mode(f->fmt.pix.pixelformat)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l2_s_fmt: format "
++ "not supported\n");
++ return -EINVAL;
++ }
++
++ /*
++ * Force the capture window resolution to be crop bounds
++ * for CSI MEM input mode.
++ */
++ if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI MEM") == 0) {
++ f->fmt.pix.width = cam->crop_current.width;
++ f->fmt.pix.height = cam->crop_current.height;
++ }
++
++ if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
++ height = &f->fmt.pix.width;
++ width = &f->fmt.pix.height;
++ } else {
++ width = &f->fmt.pix.width;
++ height = &f->fmt.pix.height;
++ }
++
++ /* stride line limitation */
++ *width -= *width % 8;
++ *height -= *height % 8;
++
++ if (*width == 0 || *height == 0) {
++ pr_err("ERROR: v4l2 capture: width or height"
++ " too small.\n");
++ return -EINVAL;
++ }
++
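++		/* As in the preview path, downscaling is limited to 8:1, so
++		 * clamp the capture size to 1/8th of the current crop
++		 * rectangle, rounded up to a multiple of 8. */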
++ if ((cam->crop_current.width / *width > 8) ||
++ ((cam->crop_current.width / *width == 8) &&
++ (cam->crop_current.width % *width))) {
++ *width = cam->crop_current.width / 8;
++ if (*width % 8)
++ *width += 8 - *width % 8;
++ pr_err("ERROR: v4l2 capture: width exceeds limit "
++ "resize to %d.\n",
++ *width);
++ }
++
++ if ((cam->crop_current.height / *height > 8) ||
++ ((cam->crop_current.height / *height == 8) &&
++ (cam->crop_current.height % *height))) {
++ *height = cam->crop_current.height / 8;
++ if (*height % 8)
++ *height += 8 - *height % 8;
++ pr_err("ERROR: v4l2 capture: height exceeds limit "
++ "resize to %d.\n",
++ *height);
++ }
++
++ switch (f->fmt.pix.pixelformat) {
++ case V4L2_PIX_FMT_RGB565:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_BGR24:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3;
++ bytesperline = f->fmt.pix.width * 3;
++ break;
++ case V4L2_PIX_FMT_RGB24:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3;
++ bytesperline = f->fmt.pix.width * 3;
++ break;
++ case V4L2_PIX_FMT_BGR32:
++ size = f->fmt.pix.width * f->fmt.pix.height * 4;
++ bytesperline = f->fmt.pix.width * 4;
++ break;
++ case V4L2_PIX_FMT_RGB32:
++ size = f->fmt.pix.width * f->fmt.pix.height * 4;
++ bytesperline = f->fmt.pix.width * 4;
++ break;
++ case V4L2_PIX_FMT_YUV422P:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ bytesperline = f->fmt.pix.width;
++ break;
++ case V4L2_PIX_FMT_UYVY:
++ case V4L2_PIX_FMT_YUYV:
++ size = f->fmt.pix.width * f->fmt.pix.height * 2;
++ bytesperline = f->fmt.pix.width * 2;
++ break;
++ case V4L2_PIX_FMT_YUV420:
++ case V4L2_PIX_FMT_YVU420:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
++ bytesperline = f->fmt.pix.width;
++ break;
++ case V4L2_PIX_FMT_NV12:
++ size = f->fmt.pix.width * f->fmt.pix.height * 3 / 2;
++ bytesperline = f->fmt.pix.width;
++ break;
++ default:
++ break;
++ }
++
++ if (f->fmt.pix.bytesperline < bytesperline)
++ f->fmt.pix.bytesperline = bytesperline;
++ else
++ bytesperline = f->fmt.pix.bytesperline;
++
++ if (f->fmt.pix.sizeimage < size)
++ f->fmt.pix.sizeimage = size;
++ else
++ size = f->fmt.pix.sizeimage;
++
++ cam->v2f.fmt.pix = f->fmt.pix;
++
++ if (cam->v2f.fmt.pix.priv != 0) {
++ if (copy_from_user(&cam->offset,
++ (void *)cam->v2f.fmt.pix.priv,
++ sizeof(cam->offset))) {
++ retval = -EFAULT;
++ break;
++ }
++ }
++ break;
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ pr_debug(" type=V4L2_BUF_TYPE_VIDEO_OVERLAY\n");
++ retval = verify_preview(cam, &f->fmt.win);
++ cam->win = f->fmt.win;
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return retval;
++}
++
++/*!
++ * get control param
++ *
++ * @param cam structure cam_data *
++ *
++ * @param c structure v4l2_control *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_g_ctrl(cam_data *cam, struct v4l2_control *c)
++{
++ int status = 0;
++
++ pr_debug("In MVC:mxc_v4l2_g_ctrl\n");
++
++	/* The values that can be retrieved from the sensor probably do not
++	 * need to be stored locally, but they are for now. */
++ switch (c->id) {
++ case V4L2_CID_HFLIP:
++ /* This is handled in the ipu. */
++ if (cam->rotation == IPU_ROTATE_HORIZ_FLIP)
++ c->value = 1;
++ break;
++ case V4L2_CID_VFLIP:
++ /* This is handled in the ipu. */
++ if (cam->rotation == IPU_ROTATE_VERT_FLIP)
++ c->value = 1;
++ break;
++ case V4L2_CID_MXC_ROT:
++ /* This is handled in the ipu. */
++ c->value = cam->rotation;
++ break;
++ case V4L2_CID_BRIGHTNESS:
++ if (cam->sensor) {
++ c->value = cam->bright;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->bright = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_HUE:
++ if (cam->sensor) {
++ c->value = cam->hue;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->hue = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_CONTRAST:
++ if (cam->sensor) {
++ c->value = cam->contrast;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->contrast = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_SATURATION:
++ if (cam->sensor) {
++ c->value = cam->saturation;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->saturation = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_RED_BALANCE:
++ if (cam->sensor) {
++ c->value = cam->red;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->red = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ if (cam->sensor) {
++ c->value = cam->blue;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->blue = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BLACK_LEVEL:
++ if (cam->sensor) {
++ c->value = cam->ae_mode;
++ status = vidioc_int_g_ctrl(cam->sensor, c);
++ cam->ae_mode = c->value;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ status = -ENODEV;
++ }
++ break;
++ default:
++ pr_err("ERROR: v4l2 capture: unsupported ioctrl!\n");
++ }
++
++ return status;
++}
++
++/*!
++ * V4L2 - set_control function
++ * V4L2_CID_PRIVATE_BASE is the extension for IPU preprocessing.
++ * 0 for normal operation
++ * 1 for vertical flip
++ * 2 for horizontal flip
++ * 3 for horizontal and vertical flip
++ * 4 for 90 degree rotation
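++ *
++ * For example, user space can request a 90 degree viewfinder rotation with
++ * VIDIOC_S_CTRL using id V4L2_CID_MXC_VF_ROT and value
++ * V4L2_MXC_ROTATE_90_RIGHT.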
++ * @param cam structure cam_data *
++ *
++ * @param c structure v4l2_control *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_ctrl(cam_data *cam, struct v4l2_control *c)
++{
++ int i, ret = 0;
++ int tmp_rotation = IPU_ROTATE_NONE;
++ struct sensor_data *sensor_data;
++
++ pr_debug("In MVC:mxc_v4l2_s_ctrl\n");
++
++ switch (c->id) {
++ case V4L2_CID_HFLIP:
++ /* This is done by the IPU */
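++		/* Combining a horizontal flip with an existing vertical flip
++		 * is expressed as a 180 degree rotation; clearing one flip
++		 * while 180 is set falls back to the remaining flip. */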
++ if (c->value == 1) {
++ if ((cam->rotation != IPU_ROTATE_VERT_FLIP) &&
++ (cam->rotation != IPU_ROTATE_180))
++ cam->rotation = IPU_ROTATE_HORIZ_FLIP;
++ else
++ cam->rotation = IPU_ROTATE_180;
++ } else {
++ if (cam->rotation == IPU_ROTATE_HORIZ_FLIP)
++ cam->rotation = IPU_ROTATE_NONE;
++ if (cam->rotation == IPU_ROTATE_180)
++ cam->rotation = IPU_ROTATE_VERT_FLIP;
++ }
++ break;
++ case V4L2_CID_VFLIP:
++ /* This is done by the IPU */
++ if (c->value == 1) {
++ if ((cam->rotation != IPU_ROTATE_HORIZ_FLIP) &&
++ (cam->rotation != IPU_ROTATE_180))
++ cam->rotation = IPU_ROTATE_VERT_FLIP;
++ else
++ cam->rotation = IPU_ROTATE_180;
++ } else {
++ if (cam->rotation == IPU_ROTATE_VERT_FLIP)
++ cam->rotation = IPU_ROTATE_NONE;
++ if (cam->rotation == IPU_ROTATE_180)
++ cam->rotation = IPU_ROTATE_HORIZ_FLIP;
++ }
++ break;
++ case V4L2_CID_MXC_ROT:
++ case V4L2_CID_MXC_VF_ROT:
++ /* This is done by the IPU */
++ switch (c->value) {
++ case V4L2_MXC_ROTATE_NONE:
++ tmp_rotation = IPU_ROTATE_NONE;
++ break;
++ case V4L2_MXC_ROTATE_VERT_FLIP:
++ tmp_rotation = IPU_ROTATE_VERT_FLIP;
++ break;
++ case V4L2_MXC_ROTATE_HORIZ_FLIP:
++ tmp_rotation = IPU_ROTATE_HORIZ_FLIP;
++ break;
++ case V4L2_MXC_ROTATE_180:
++ tmp_rotation = IPU_ROTATE_180;
++ break;
++ case V4L2_MXC_ROTATE_90_RIGHT:
++ tmp_rotation = IPU_ROTATE_90_RIGHT;
++ break;
++ case V4L2_MXC_ROTATE_90_RIGHT_VFLIP:
++ tmp_rotation = IPU_ROTATE_90_RIGHT_VFLIP;
++ break;
++ case V4L2_MXC_ROTATE_90_RIGHT_HFLIP:
++ tmp_rotation = IPU_ROTATE_90_RIGHT_HFLIP;
++ break;
++ case V4L2_MXC_ROTATE_90_LEFT:
++ tmp_rotation = IPU_ROTATE_90_LEFT;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ #ifdef CONFIG_MXC_IPU_PRP_VF_SDC
++ if (c->id == V4L2_CID_MXC_VF_ROT)
++ cam->vf_rotation = tmp_rotation;
++ else
++ cam->rotation = tmp_rotation;
++ #else
++ cam->rotation = tmp_rotation;
++ #endif
++
++ break;
++ case V4L2_CID_HUE:
++ if (cam->sensor) {
++ cam->hue = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_CONTRAST:
++ if (cam->sensor) {
++ cam->contrast = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BRIGHTNESS:
++ if (cam->sensor) {
++ cam->bright = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_SATURATION:
++ if (cam->sensor) {
++ cam->saturation = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_RED_BALANCE:
++ if (cam->sensor) {
++ cam->red = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ if (cam->sensor) {
++ cam->blue = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_EXPOSURE:
++ if (cam->sensor) {
++ cam->ae_mode = c->value;
++ ret = vidioc_int_s_ctrl(cam->sensor, c);
++ } else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ ret = -ENODEV;
++ }
++ break;
++ case V4L2_CID_MXC_FLASH:
++#ifdef CONFIG_MXC_IPU_V1
++ ipu_csi_flash_strobe(true);
++#endif
++ break;
++ case V4L2_CID_MXC_SWITCH_CAM:
++ if (cam->sensor == cam->all_sensors[c->value])
++ break;
++
++		/* power down the other cameras before enabling the new one */
++ for (i = 0; i < cam->sensor_index; i++) {
++ if (i != c->value) {
++ vidioc_int_dev_exit(cam->all_sensors[i]);
++ vidioc_int_s_power(cam->all_sensors[i], 0);
++ if (cam->mclk_on[cam->mclk_source]) {
++ ipu_csi_enable_mclk_if(cam->ipu,
++ CSI_MCLK_I2C,
++ cam->mclk_source,
++ false, false);
++ cam->mclk_on[cam->mclk_source] =
++ false;
++ }
++ }
++ }
++ sensor_data = cam->all_sensors[c->value]->priv;
++ if (sensor_data->io_init)
++ sensor_data->io_init();
++ cam->sensor = cam->all_sensors[c->value];
++ cam->mclk_source = sensor_data->mclk_source;
++ ipu_csi_enable_mclk_if(cam->ipu, CSI_MCLK_I2C,
++ cam->mclk_source, true, true);
++ cam->mclk_on[cam->mclk_source] = true;
++ vidioc_int_s_power(cam->sensor, 1);
++ vidioc_int_dev_init(cam->sensor);
++ break;
++ default:
++ pr_debug(" default case\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_s_param function
++ * Allows setting of capturemode and frame rate.
++ *
++ * @param cam structure cam_data *
++ * @param parm structure v4l2_streamparm *
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_param(cam_data *cam, struct v4l2_streamparm *parm)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ struct v4l2_streamparm currentparm;
++ ipu_csi_signal_cfg_t csi_param;
++ u32 current_fps, parm_fps;
++ int err = 0;
++
++ pr_debug("In mxc_v4l2_s_param\n");
++
++ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++		pr_err("mxc_v4l2_s_param invalid type\n");
++ return -EINVAL;
++ }
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
++ currentparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++
++ /* First check that this device can support the changes requested. */
++ err = vidioc_int_g_parm(cam->sensor, &currentparm);
++ if (err) {
++ pr_err("%s: vidioc_int_g_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ current_fps = currentparm.parm.capture.timeperframe.denominator
++ / currentparm.parm.capture.timeperframe.numerator;
++ parm_fps = parm->parm.capture.timeperframe.denominator
++ / parm->parm.capture.timeperframe.numerator;
++
++ pr_debug(" Current capabilities are %x\n",
++ currentparm.parm.capture.capability);
++ pr_debug(" Current capturemode is %d change to %d\n",
++ currentparm.parm.capture.capturemode,
++ parm->parm.capture.capturemode);
++ pr_debug(" Current framerate is %d change to %d\n",
++ current_fps, parm_fps);
++
++ /* This will change any camera settings needed. */
++ err = vidioc_int_s_parm(cam->sensor, parm);
++ if (err) {
++ pr_err("%s: vidioc_int_s_parm returned an error %d\n",
++ __func__, err);
++ goto exit;
++ }
++
++ /* If resolution changed, need to re-program the CSI */
++ /* Get new values. */
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++
++ csi_param.data_width = 0;
++ csi_param.clk_mode = 0;
++ csi_param.ext_vsync = 0;
++ csi_param.Vsync_pol = 0;
++ csi_param.Hsync_pol = 0;
++ csi_param.pixclk_pol = 0;
++ csi_param.data_pol = 0;
++ csi_param.sens_clksrc = 0;
++ csi_param.pack_tight = 0;
++ csi_param.force_eof = 0;
++ csi_param.data_en_pol = 0;
++ csi_param.data_fmt = 0;
++ csi_param.csi = cam->csi;
++ csi_param.mclk = 0;
++
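++	/* A bt656 clock_curr of 0 selects CCIR656 interlaced clock mode;
++	 * any other value selects gated clock mode. */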
++ pr_debug(" clock_curr=mclk=%d\n", ifparm.u.bt656.clock_curr);
++ if (ifparm.u.bt656.clock_curr == 0)
++ csi_param.clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
++ else
++ csi_param.clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
++
++ csi_param.pixclk_pol = ifparm.u.bt656.latch_clk_inv;
++
++ if (ifparm.u.bt656.mode == V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT) {
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++ } else if (ifparm.u.bt656.mode
++ == V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT) {
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_10;
++ } else {
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++ }
++
++ csi_param.Vsync_pol = ifparm.u.bt656.nobt_vs_inv;
++ csi_param.Hsync_pol = ifparm.u.bt656.nobt_hs_inv;
++ csi_param.ext_vsync = ifparm.u.bt656.bt_sync_correct;
++
++ /* if the capturemode changed, the size bounds will have changed. */
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++ pr_debug(" g_fmt_cap returns widthxheight of input as %d x %d\n",
++ cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
++
++ csi_param.data_fmt = cam_fmt.fmt.pix.pixelformat;
++
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /*
++ * Set the default current cropped resolution to be the same with
++ * the cropping boundary(except for tvin module).
++ */
++ if (cam->device_type != 1) {
++ cam->crop_current.width = cam->crop_bounds.width;
++ cam->crop_current.height = cam->crop_bounds.height;
++ }
++
++ /* This essentially loses the data at the left and bottom of the image
++ * giving a digital zoom image, if crop_current is less than the full
++ * size of the image. */
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height, cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top,
++ cam->csi);
++ ipu_csi_init_interface(cam->ipu, cam->crop_bounds.width,
++ cam->crop_bounds.height,
++ cam_fmt.fmt.pix.pixelformat, csi_param);
++
++
++exit:
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ return err;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_s_std function
++ *
++ * Sets the TV standard to be used.
++ *
++ * @param cam structure cam_data *
++ * @param e v4l2_std_id standard to be set
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_s_std(cam_data *cam, v4l2_std_id e)
++{
++ pr_debug("In mxc_v4l2_s_std %Lx\n", e);
++
++ if (e == V4L2_STD_PAL) {
++ pr_debug(" Setting standard to PAL %Lx\n", V4L2_STD_PAL);
++ cam->standard.id = V4L2_STD_PAL;
++ video_index = TV_PAL;
++ } else if (e == V4L2_STD_NTSC) {
++ pr_debug(" Setting standard to NTSC %Lx\n",
++ V4L2_STD_NTSC);
++ /* Get rid of the white dot line in NTSC signal input */
++ cam->standard.id = V4L2_STD_NTSC;
++ video_index = TV_NTSC;
++ } else {
++ cam->standard.id = V4L2_STD_ALL;
++ video_index = TV_NOT_LOCKED;
++		pr_err("ERROR: unrecognized std! %Lx (PAL=%Lx, NTSC=%Lx)\n",
++ e, V4L2_STD_PAL, V4L2_STD_NTSC);
++ }
++
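++	/*
++	 * Copy the raw and active geometry of the selected standard from the
++	 * video_fmts table into the crop bounds and current crop rectangle.
++	 */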
++ cam->standard.index = video_index;
++ strcpy(cam->standard.name, video_fmts[video_index].name);
++ cam->crop_bounds.width = video_fmts[video_index].raw_width;
++ cam->crop_bounds.height = video_fmts[video_index].raw_height;
++ cam->crop_current.width = video_fmts[video_index].active_width;
++ cam->crop_current.height = video_fmts[video_index].active_height;
++ cam->crop_current.top = video_fmts[video_index].active_top;
++ cam->crop_current.left = video_fmts[video_index].active_left;
++
++ return 0;
++}
++
++/*!
++ * V4L2 - mxc_v4l2_g_std function
++ *
++ * Gets the TV standard from the TV input device.
++ *
++ * @param cam structure cam_data *
++ *
++ * @param e v4l2_std_id * detected standard is returned here
++ *
++ * @return status 0 success, EINVAL failed
++ */
++static int mxc_v4l2_g_std(cam_data *cam, v4l2_std_id *e)
++{
++ struct v4l2_format tv_fmt;
++
++ pr_debug("In mxc_v4l2_g_std\n");
++
++ if (cam->device_type == 1) {
++ /* Use this function to get what the TV-In device detects the
++ * format to be. pixelformat is used to return the std value
++ * since the interface has no vidioc_g_std.*/
++ tv_fmt.type = V4L2_BUF_TYPE_PRIVATE;
++ vidioc_int_g_fmt_cap(cam->sensor, &tv_fmt);
++
++ /* If the TV-in automatically detects the standard, then if it
++ * changes, the settings need to change. */
++ if (cam->standard_autodetect) {
++ if (cam->standard.id != tv_fmt.fmt.pix.pixelformat) {
++ pr_debug("MVC: mxc_v4l2_g_std: "
++ "Changing standard\n");
++ mxc_v4l2_s_std(cam, tv_fmt.fmt.pix.pixelformat);
++ }
++ }
++
++ *e = tv_fmt.fmt.pix.pixelformat;
++ }
++
++ return 0;
++}
++
++/*!
++ * Dequeue one V4L capture buffer
++ *
++ * @param cam structure cam_data *
++ * @param buf structure v4l2_buffer *
++ *
++ * @return status 0 success, EINVAL invalid frame number,
++ * ETIME timeout, ERESTARTSYS interrupted by user
++ */
++static int mxc_v4l_dqueue(cam_data *cam, struct v4l2_buffer *buf)
++{
++ int retval = 0;
++ struct mxc_v4l_frame *frame;
++ unsigned long lock_flags;
++
++ pr_debug("In MVC:mxc_v4l_dqueue\n");
++
++ if (!wait_event_interruptible_timeout(cam->enc_queue,
++ cam->enc_counter != 0, 10 * HZ)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue timeout "
++ "enc_counter %x\n",
++ cam->enc_counter);
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_dqueue() "
++ "interrupt received\n");
++ return -ERESTARTSYS;
++ }
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ spin_lock_irqsave(&cam->dqueue_int_lock, lock_flags);
++ cam->enc_counter--;
++
++ frame = list_entry(cam->done_q.next, struct mxc_v4l_frame, queue);
++ list_del(cam->done_q.next);
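++	/*
++	 * A frame taken from done_q is expected to carry the DONE flag;
++	 * QUEUED or bare MAPPED state means it was never filled and is
++	 * reported as an error.
++	 */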
++ if (frame->buffer.flags & V4L2_BUF_FLAG_DONE) {
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_DONE;
++ } else if (frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not filled.\n");
++ frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ } else if ((frame->buffer.flags & 0x7) == V4L2_BUF_FLAG_MAPPED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_DQBUF: "
++ "Buffer not queued.\n");
++ retval = -EINVAL;
++ }
++
++ cam->frame[frame->index].buffer.field = cam->device_type ?
++ V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
++
++ buf->bytesused = cam->v2f.fmt.pix.sizeimage;
++ buf->index = frame->index;
++ buf->flags = frame->buffer.flags;
++ buf->m = cam->frame[frame->index].buffer.m;
++ buf->timestamp = cam->frame[frame->index].buffer.timestamp;
++ buf->field = cam->frame[frame->index].buffer.field;
++ spin_unlock_irqrestore(&cam->dqueue_int_lock, lock_flags);
++
++ up(&cam->busy_lock);
++ return retval;
++}
++
++/*!
++ * V4L interface - open function
++ *
++ * @param file structure file *
++ *
++ * @return status 0 success, EBADF invalid device instance,
++ * EAGAIN slave not attached
++ */
++static int mxc_v4l_open(struct file *file)
++{
++ struct v4l2_ifparm ifparm;
++ struct v4l2_format cam_fmt;
++ ipu_csi_signal_cfg_t csi_param;
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ int err = 0;
++ struct sensor_data *sensor;
++
++ pr_debug("\nIn MVC: mxc_v4l_open\n");
++ pr_debug(" device name is %s\n", dev->name);
++
++ if (!cam) {
++ pr_err("ERROR: v4l2 capture: Internal error, "
++ "cam_data not found!\n");
++ return -EBADF;
++ }
++
++ if (cam->sensor == NULL ||
++ cam->sensor->type != v4l2_int_type_slave) {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ return -EAGAIN;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ down(&cam->busy_lock);
++ err = 0;
++ if (signal_pending(current))
++ goto oops;
++
++ if (cam->open_count++ == 0) {
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++
++ if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++ err = csi_enc_select(cam);
++#endif
++ } else if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI IC MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
++ err = prp_enc_select(cam);
++#endif
++ }
++
++ cam->enc_counter = 0;
++ INIT_LIST_HEAD(&cam->ready_q);
++ INIT_LIST_HEAD(&cam->working_q);
++ INIT_LIST_HEAD(&cam->done_q);
++
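++		/*
++		 * Query the slave's bus parameters so the CSI interface set up
++		 * below matches the sensor (data width, sync polarities and
++		 * clock mode).
++		 */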
++ vidioc_int_g_ifparm(cam->sensor, &ifparm);
++
++ csi_param.sens_clksrc = 0;
++
++ csi_param.clk_mode = 0;
++ csi_param.data_pol = 0;
++ csi_param.ext_vsync = 0;
++
++ csi_param.pack_tight = 0;
++ csi_param.force_eof = 0;
++ csi_param.data_en_pol = 0;
++
++ csi_param.mclk = ifparm.u.bt656.clock_curr;
++
++ csi_param.pixclk_pol = ifparm.u.bt656.latch_clk_inv;
++
++ if (ifparm.u.bt656.mode
++ == V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT)
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++ else if (ifparm.u.bt656.mode
++ == V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT)
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_10;
++ else
++ csi_param.data_width = IPU_CSI_DATA_WIDTH_8;
++
++
++ csi_param.Vsync_pol = ifparm.u.bt656.nobt_vs_inv;
++ csi_param.Hsync_pol = ifparm.u.bt656.nobt_hs_inv;
++
++ csi_param.csi = cam->csi;
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++
++ /* Reset the sizes. Needed to prevent carryover of last
++ * operation.*/
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /* This also is the max crop size for this device. */
++ cam->crop_defrect.top = cam->crop_defrect.left = 0;
++ cam->crop_defrect.width = cam_fmt.fmt.pix.width;
++ cam->crop_defrect.height = cam_fmt.fmt.pix.height;
++
++ /* At this point, this is also the current image size. */
++ cam->crop_current.top = cam->crop_current.left = 0;
++ cam->crop_current.width = cam_fmt.fmt.pix.width;
++ cam->crop_current.height = cam_fmt.fmt.pix.height;
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ csi_param.data_fmt = cam_fmt.fmt.pix.pixelformat;
++ pr_debug("On Open: Input to ipu size is %d x %d\n",
++ cam_fmt.fmt.pix.width, cam_fmt.fmt.pix.height);
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height,
++ cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top,
++ cam->csi);
++ ipu_csi_init_interface(cam->ipu, cam->crop_bounds.width,
++ cam->crop_bounds.height,
++ cam_fmt.fmt.pix.pixelformat,
++ csi_param);
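++		/*
++		 * Power-up sequence for the slave: enable its clock, raise
++		 * power and run its init hooks.
++		 */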
++ clk_prepare_enable(sensor->sensor_clk);
++ vidioc_int_s_power(cam->sensor, 1);
++ vidioc_int_init(cam->sensor);
++ vidioc_int_dev_init(cam->sensor);
++ }
++
++ file->private_data = dev;
++
++oops:
++ up(&cam->busy_lock);
++ return err;
++}
++
++/*!
++ * V4L interface - close function
++ *
++ * @param file struct file *
++ *
++ * @return 0 success
++ */
++static int mxc_v4l_close(struct file *file)
++{
++ struct video_device *dev = video_devdata(file);
++ int err = 0;
++ cam_data *cam = video_get_drvdata(dev);
++ struct sensor_data *sensor;
++ pr_debug("In MVC:mxc_v4l_close\n");
++
++ if (!cam) {
++ pr_err("ERROR: v4l2 capture: Internal error, "
++ "cam_data not found!\n");
++ return -EBADF;
++ }
++
++ if (!cam->sensor) {
++ pr_err("%s: Internal error, camera is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ sensor = cam->sensor->priv;
++ if (!sensor) {
++ pr_err("%s: Internal error, sensor_data is not found!\n", __func__);
++ return -EBADF;
++ }
++
++ down(&cam->busy_lock);
++
++ /* for the case somebody hit the ctrl C */
++ if (cam->overlay_pid == current->pid && cam->overlay_on) {
++ err = stop_preview(cam);
++ cam->overlay_on = false;
++ }
++ if (cam->capture_pid == current->pid) {
++ err |= mxc_streamoff(cam);
++ wake_up_interruptible(&cam->enc_queue);
++ }
++
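++	/*
++	 * Last close: power the slave down, release the selected encoder
++	 * path and free the frame buffers.
++	 */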
++ if (--cam->open_count == 0) {
++ vidioc_int_s_power(cam->sensor, 0);
++ clk_disable_unprepare(sensor->sensor_clk);
++ wait_event_interruptible(cam->power_queue,
++ cam->low_power == false);
++ pr_debug("mxc_v4l_close: release resource\n");
++
++ if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++ err |= csi_enc_deselect(cam);
++#endif
++ } else if (strcmp(mxc_capture_inputs[cam->current_input].name,
++ "CSI IC MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
++ err |= prp_enc_deselect(cam);
++#endif
++ }
++
++ mxc_free_frame_buf(cam);
++ file->private_data = NULL;
++
++ /* capture off */
++ wake_up_interruptible(&cam->enc_queue);
++ mxc_free_frames(cam);
++ cam->enc_counter++;
++ }
++
++ up(&cam->busy_lock);
++
++ return err;
++}
++
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC) || \
++ defined(CONFIG_MXC_IPU_PRP_ENC_MODULE) || \
++ defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++/*
++ * V4L interface - read function
++ *
++ * @param file struct file *
++ * @param buf char * user buffer
++ * @param count size_t
++ * @param ppos structure loff_t *
++ *
++ * @return bytes read
++ */
++static ssize_t mxc_v4l_read(struct file *file, char *buf, size_t count,
++ loff_t *ppos)
++{
++ int err = 0;
++ u8 *v_address[2];
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ /* Stop the viewfinder */
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++
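++	/*
++	 * Still-image read: allocate two DMA-coherent buffers, run the PRP
++	 * still capture path for one frame and copy the second buffer to
++	 * user space.
++	 */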
++ v_address[0] = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->still_buf[0],
++ GFP_DMA | GFP_KERNEL);
++
++ v_address[1] = dma_alloc_coherent(0,
++ PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
++ &cam->still_buf[1],
++ GFP_DMA | GFP_KERNEL);
++
++ if (!v_address[0] || !v_address[1]) {
++ err = -ENOBUFS;
++ goto exit0;
++ }
++
++ err = prp_still_select(cam);
++ if (err != 0) {
++ err = -EIO;
++ goto exit0;
++ }
++
++ cam->still_counter = 0;
++ err = cam->csi_start(cam);
++ if (err != 0) {
++ err = -EIO;
++ goto exit1;
++ }
++
++ if (!wait_event_interruptible_timeout(cam->still_queue,
++ cam->still_counter != 0,
++ 10 * HZ)) {
++ pr_err("ERROR: v4l2 capture: mxc_v4l_read timeout counter %x\n",
++ cam->still_counter);
++ err = -ETIME;
++ goto exit1;
++ }
++ err = copy_to_user(buf, v_address[1], cam->v2f.fmt.pix.sizeimage);
++
++exit1:
++ prp_still_deselect(cam);
++
++exit0:
++ if (v_address[0] != 0)
++ dma_free_coherent(0, cam->v2f.fmt.pix.sizeimage, v_address[0],
++ cam->still_buf[0]);
++ if (v_address[1] != 0)
++ dma_free_coherent(0, cam->v2f.fmt.pix.sizeimage, v_address[1],
++ cam->still_buf[1]);
++
++ cam->still_buf[0] = cam->still_buf[1] = 0;
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++
++ up(&cam->busy_lock);
++ if (err < 0)
++ return err;
++
++ return cam->v2f.fmt.pix.sizeimage - err;
++}
++#endif
++
++/*!
++ * V4L interface - ioctl function
++ *
++ * @param file struct file*
++ *
++ * @param ioctlnr unsigned int
++ *
++ * @param arg void*
++ *
++ * @return 0 success, EINVAL for unsupported ioctls,
++ * ENODEV when no slave is attached, other negative error codes on failure.
++ */
++static long mxc_v4l_do_ioctl(struct file *file,
++ unsigned int ioctlnr, void *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ int retval = 0;
++ unsigned long lock_flags;
++
++ pr_debug("In MVC: mxc_v4l_do_ioctl %x\n", ioctlnr);
++ wait_event_interruptible(cam->power_queue, cam->low_power == false);
++ /* make this _really_ smp-safe */
++ if (ioctlnr != VIDIOC_DQBUF)
++ if (down_interruptible(&cam->busy_lock))
++ return -EBUSY;
++
++ switch (ioctlnr) {
++ /*!
++ * V4l2 VIDIOC_QUERYCAP ioctl
++ */
++ case VIDIOC_QUERYCAP: {
++ struct v4l2_capability *cap = arg;
++ pr_debug(" case VIDIOC_QUERYCAP\n");
++ strcpy(cap->driver, "mxc_v4l2");
++ cap->version = KERNEL_VERSION(0, 1, 11);
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
++ V4L2_CAP_VIDEO_OVERLAY |
++ V4L2_CAP_STREAMING |
++ V4L2_CAP_READWRITE;
++ cap->card[0] = '\0';
++ cap->bus_info[0] = '\0';
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_FMT ioctl
++ */
++ case VIDIOC_G_FMT: {
++ struct v4l2_format *gf = arg;
++ pr_debug(" case VIDIOC_G_FMT\n");
++ retval = mxc_v4l2_g_fmt(cam, gf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_FMT ioctl
++ */
++ case VIDIOC_S_FMT: {
++ struct v4l2_format *sf = arg;
++ pr_debug(" case VIDIOC_S_FMT\n");
++ retval = mxc_v4l2_s_fmt(cam, sf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_REQBUFS ioctl
++ */
++ case VIDIOC_REQBUFS: {
++ struct v4l2_requestbuffers *req = arg;
++ pr_debug(" case VIDIOC_REQBUFS\n");
++
++ if (req->count > FRAME_NUM) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "not enough buffers\n");
++ req->count = FRAME_NUM;
++ }
++
++ if ((req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_REQBUFS: "
++ "wrong buffer type\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ mxc_streamoff(cam);
++ if (req->memory & V4L2_MEMORY_MMAP) {
++ mxc_free_frame_buf(cam);
++ retval = mxc_allocate_frame_buf(cam, req->count);
++ }
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_QUERYBUF ioctl
++ */
++ case VIDIOC_QUERYBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QUERYBUF\n");
++
++ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ pr_err("ERROR: v4l2 capture: "
++ "VIDIOC_QUERYBUFS: "
++ "wrong buffer type\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (buf->memory & V4L2_MEMORY_MMAP) {
++			memset(buf, 0, sizeof(*buf));
++ buf->index = index;
++ }
++
++ down(&cam->param_lock);
++ if (buf->memory & V4L2_MEMORY_USERPTR) {
++ mxc_v4l2_release_bufs(cam);
++ retval = mxc_v4l2_prepare_bufs(cam, buf);
++ }
++
++ if (buf->memory & V4L2_MEMORY_MMAP)
++ retval = mxc_v4l2_buffer_status(cam, buf);
++ up(&cam->param_lock);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_QBUF ioctl
++ */
++ case VIDIOC_QBUF: {
++ struct v4l2_buffer *buf = arg;
++ int index = buf->index;
++ pr_debug(" case VIDIOC_QBUF\n");
++
++ spin_lock_irqsave(&cam->queue_int_lock, lock_flags);
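++		/*
++		 * A buffer may only be queued while it is MAPPED and neither
++		 * QUEUED nor DONE; the other states are rejected.
++		 */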
++ if ((cam->frame[index].buffer.flags & 0x7) ==
++ V4L2_BUF_FLAG_MAPPED) {
++ cam->frame[index].buffer.flags |=
++ V4L2_BUF_FLAG_QUEUED;
++ list_add_tail(&cam->frame[index].queue,
++ &cam->ready_q);
++ } else if (cam->frame[index].buffer.
++ flags & V4L2_BUF_FLAG_QUEUED) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "buffer already queued\n");
++ retval = -EINVAL;
++ } else if (cam->frame[index].buffer.
++ flags & V4L2_BUF_FLAG_DONE) {
++ pr_err("ERROR: v4l2 capture: VIDIOC_QBUF: "
++ "overwrite done buffer.\n");
++ cam->frame[index].buffer.flags &=
++ ~V4L2_BUF_FLAG_DONE;
++ cam->frame[index].buffer.flags |=
++ V4L2_BUF_FLAG_QUEUED;
++ retval = -EINVAL;
++ }
++
++ buf->flags = cam->frame[index].buffer.flags;
++ spin_unlock_irqrestore(&cam->queue_int_lock, lock_flags);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_DQBUF ioctl
++ */
++ case VIDIOC_DQBUF: {
++ struct v4l2_buffer *buf = arg;
++ pr_debug(" case VIDIOC_DQBUF\n");
++
++ if ((cam->enc_counter == 0) &&
++ (file->f_flags & O_NONBLOCK)) {
++ retval = -EAGAIN;
++ break;
++ }
++
++ retval = mxc_v4l_dqueue(cam, buf);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_STREAMON ioctl
++ */
++ case VIDIOC_STREAMON: {
++ pr_debug(" case VIDIOC_STREAMON\n");
++ retval = mxc_streamon(cam);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_STREAMOFF ioctl
++ */
++ case VIDIOC_STREAMOFF: {
++ pr_debug(" case VIDIOC_STREAMOFF\n");
++ retval = mxc_streamoff(cam);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_CTRL ioctl
++ */
++ case VIDIOC_G_CTRL: {
++ pr_debug(" case VIDIOC_G_CTRL\n");
++ retval = mxc_v4l2_g_ctrl(cam, arg);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_CTRL ioctl
++ */
++ case VIDIOC_S_CTRL: {
++ pr_debug(" case VIDIOC_S_CTRL\n");
++ retval = mxc_v4l2_s_ctrl(cam, arg);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_CROPCAP ioctl
++ */
++ case VIDIOC_CROPCAP: {
++ struct v4l2_cropcap *cap = arg;
++ pr_debug(" case VIDIOC_CROPCAP\n");
++ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++ cap->bounds = cam->crop_bounds;
++ cap->defrect = cam->crop_defrect;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_CROP ioctl
++ */
++ case VIDIOC_G_CROP: {
++ struct v4l2_crop *crop = arg;
++ pr_debug(" case VIDIOC_G_CROP\n");
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++ crop->c = cam->crop_current;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_CROP ioctl
++ */
++ case VIDIOC_S_CROP: {
++ struct v4l2_crop *crop = arg;
++ struct v4l2_rect *b = &cam->crop_bounds;
++ pr_debug(" case VIDIOC_S_CROP\n");
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
++ crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) {
++ retval = -EINVAL;
++ break;
++ }
++
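++		/*
++		 * Clamp the requested rectangle to the crop bounds, then round
++		 * the width down to a multiple of 8 and the left edge down to
++		 * a multiple of 4.
++		 */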
++ crop->c.top = (crop->c.top < b->top) ? b->top
++ : crop->c.top;
++ if (crop->c.top > b->top + b->height)
++ crop->c.top = b->top + b->height - 1;
++ if (crop->c.height > b->top + b->height - crop->c.top)
++ crop->c.height =
++ b->top + b->height - crop->c.top;
++
++ crop->c.left = (crop->c.left < b->left) ? b->left
++ : crop->c.left;
++ if (crop->c.left > b->left + b->width)
++ crop->c.left = b->left + b->width - 1;
++ if (crop->c.width > b->left - crop->c.left + b->width)
++ crop->c.width =
++ b->left - crop->c.left + b->width;
++
++ crop->c.width -= crop->c.width % 8;
++ crop->c.left -= crop->c.left % 4;
++ cam->crop_current = crop->c;
++
++ pr_debug(" Cropping Input to ipu size %d x %d\n",
++ cam->crop_current.width,
++ cam->crop_current.height);
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height,
++ cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top,
++ cam->csi);
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_OVERLAY ioctl
++ */
++ case VIDIOC_OVERLAY: {
++ int *on = arg;
++ pr_debug(" VIDIOC_OVERLAY on=%d\n", *on);
++ if (*on) {
++ cam->overlay_on = true;
++ cam->overlay_pid = current->pid;
++ retval = start_preview(cam);
++ }
++ if (!*on) {
++ retval = stop_preview(cam);
++ cam->overlay_on = false;
++ }
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_G_FBUF ioctl
++ */
++ case VIDIOC_G_FBUF: {
++ struct v4l2_framebuffer *fb = arg;
++ pr_debug(" case VIDIOC_G_FBUF\n");
++ *fb = cam->v4l2_fb;
++ fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY;
++ break;
++ }
++
++ /*!
++ * V4l2 VIDIOC_S_FBUF ioctl
++ */
++ case VIDIOC_S_FBUF: {
++ struct v4l2_framebuffer *fb = arg;
++ pr_debug(" case VIDIOC_S_FBUF\n");
++ cam->v4l2_fb = *fb;
++ break;
++ }
++
++ case VIDIOC_G_PARM: {
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_G_PARM\n");
++ if (cam->sensor)
++ retval = vidioc_int_g_parm(cam->sensor, parm);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ case VIDIOC_S_PARM: {
++ struct v4l2_streamparm *parm = arg;
++ pr_debug(" case VIDIOC_S_PARM\n");
++ if (cam->sensor)
++ retval = mxc_v4l2_s_param(cam, parm);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ /* linux v4l2 bug, kernel c0485619 user c0405619 */
++ case VIDIOC_ENUMSTD: {
++ struct v4l2_standard *e = arg;
++ pr_debug(" case VIDIOC_ENUMSTD\n");
++ *e = cam->standard;
++ break;
++ }
++
++ case VIDIOC_G_STD: {
++ v4l2_std_id *e = arg;
++ pr_debug(" case VIDIOC_G_STD\n");
++ if (cam->sensor)
++ retval = mxc_v4l2_g_std(cam, e);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++
++ case VIDIOC_S_STD: {
++ v4l2_std_id *e = arg;
++ pr_debug(" case VIDIOC_S_STD\n");
++ retval = mxc_v4l2_s_std(cam, *e);
++
++ break;
++ }
++
++ case VIDIOC_ENUMOUTPUT: {
++ struct v4l2_output *output = arg;
++ pr_debug(" case VIDIOC_ENUMOUTPUT\n");
++ if (output->index >= MXC_V4L2_CAPTURE_NUM_OUTPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++ *output = mxc_capture_outputs[output->index];
++
++ break;
++ }
++ case VIDIOC_G_OUTPUT: {
++ int *p_output_num = arg;
++ pr_debug(" case VIDIOC_G_OUTPUT\n");
++ *p_output_num = cam->output;
++ break;
++ }
++
++ case VIDIOC_S_OUTPUT: {
++ int *p_output_num = arg;
++ pr_debug(" case VIDIOC_S_OUTPUT\n");
++ if (*p_output_num >= MXC_V4L2_CAPTURE_NUM_OUTPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++ cam->output = *p_output_num;
++ break;
++ }
++
++ case VIDIOC_ENUMINPUT: {
++ struct v4l2_input *input = arg;
++ pr_debug(" case VIDIOC_ENUMINPUT\n");
++ if (input->index >= MXC_V4L2_CAPTURE_NUM_INPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++ *input = mxc_capture_inputs[input->index];
++ break;
++ }
++
++ case VIDIOC_G_INPUT: {
++ int *index = arg;
++ pr_debug(" case VIDIOC_G_INPUT\n");
++ *index = cam->current_input;
++ break;
++ }
++
++ case VIDIOC_S_INPUT: {
++ int *index = arg;
++ pr_debug(" case VIDIOC_S_INPUT\n");
++ if (*index >= MXC_V4L2_CAPTURE_NUM_INPUTS) {
++ retval = -EINVAL;
++ break;
++ }
++
++ if (*index == cam->current_input)
++ break;
++
++ if ((mxc_capture_inputs[cam->current_input].status &
++ V4L2_IN_ST_NO_POWER) == 0) {
++ retval = mxc_streamoff(cam);
++ if (retval)
++ break;
++ mxc_capture_inputs[cam->current_input].status |=
++ V4L2_IN_ST_NO_POWER;
++ }
++
++ if (strcmp(mxc_capture_inputs[*index].name, "CSI MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_CSI_ENC) || defined(CONFIG_MXC_IPU_CSI_ENC_MODULE)
++ retval = csi_enc_select(cam);
++ if (retval)
++ break;
++#endif
++ } else if (strcmp(mxc_capture_inputs[*index].name,
++ "CSI IC MEM") == 0) {
++#if defined(CONFIG_MXC_IPU_PRP_ENC) || defined(CONFIG_MXC_IPU_PRP_ENC_MODULE)
++ retval = prp_enc_select(cam);
++ if (retval)
++ break;
++#endif
++ }
++
++ mxc_capture_inputs[*index].status &= ~V4L2_IN_ST_NO_POWER;
++ cam->current_input = *index;
++ break;
++ }
++ case VIDIOC_ENUM_FMT: {
++ struct v4l2_fmtdesc *f = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_fmt_cap(cam->sensor, f);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_ENUM_FRAMESIZES: {
++ struct v4l2_frmsizeenum *fsize = arg;
++ if (cam->sensor)
++ retval = vidioc_int_enum_framesizes(cam->sensor, fsize);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_DBG_G_CHIP_IDENT: {
++ struct v4l2_dbg_chip_ident *p = arg;
++ p->ident = V4L2_IDENT_NONE;
++ p->revision = 0;
++ if (cam->sensor)
++ retval = vidioc_int_g_chip_ident(cam->sensor, (int *)p);
++ else {
++ pr_err("ERROR: v4l2 capture: slave not found!\n");
++ retval = -ENODEV;
++ }
++ break;
++ }
++ case VIDIOC_TRY_FMT:
++ case VIDIOC_QUERYCTRL:
++ case VIDIOC_G_TUNER:
++ case VIDIOC_S_TUNER:
++ case VIDIOC_G_FREQUENCY:
++ case VIDIOC_S_FREQUENCY:
++ default:
++ pr_debug(" case default or not supported\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (ioctlnr != VIDIOC_DQBUF)
++ up(&cam->busy_lock);
++ return retval;
++}
++
++/*
++ * V4L interface - ioctl function
++ *
++ * @return ioctl handler result as returned by video_usercopy()
++ */
++static long mxc_v4l_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ pr_debug("In MVC:mxc_v4l_ioctl\n");
++ return video_usercopy(file, cmd, arg, mxc_v4l_do_ioctl);
++}
++
++/*!
++ * V4L interface - mmap function
++ *
++ * @param file structure file *
++ *
++ * @param vma structure vm_area_struct *
++ *
++ * @return status 0 Success, EINTR busy lock error, ENOBUFS remap_page error
++ */
++static int mxc_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct video_device *dev = video_devdata(file);
++ unsigned long size;
++ int res = 0;
++ cam_data *cam = video_get_drvdata(dev);
++
++ pr_debug("In MVC:mxc_mmap\n");
++ pr_debug(" pgoff=0x%lx, start=0x%lx, end=0x%lx\n",
++ vma->vm_pgoff, vma->vm_start, vma->vm_end);
++
++ /* make this _really_ smp-safe */
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ size = vma->vm_end - vma->vm_start;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
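++	/*
++	 * The mmap offset (vm_pgoff) is taken as the physical page frame
++	 * number of the capture buffer and mapped write-combined into user
++	 * space.
++	 */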
++ if (remap_pfn_range(vma, vma->vm_start,
++ vma->vm_pgoff, size, vma->vm_page_prot)) {
++ pr_err("ERROR: v4l2 capture: mxc_mmap: "
++ "remap_pfn_range failed\n");
++ res = -ENOBUFS;
++ goto mxc_mmap_exit;
++ }
++
++ vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
++
++mxc_mmap_exit:
++ up(&cam->busy_lock);
++ return res;
++}
++
++/*!
++ * V4L interface - poll function
++ *
++ * @param file structure file *
++ *
++ * @param wait structure poll_table_struct *
++ *
++ * @return status POLLIN | POLLRDNORM
++ */
++static unsigned int mxc_poll(struct file *file, struct poll_table_struct *wait)
++{
++ struct video_device *dev = video_devdata(file);
++ cam_data *cam = video_get_drvdata(dev);
++ wait_queue_head_t *queue = NULL;
++ int res = POLLIN | POLLRDNORM;
++
++ pr_debug("In MVC:mxc_poll\n");
++
++ if (down_interruptible(&cam->busy_lock))
++ return -EINTR;
++
++ queue = &cam->enc_queue;
++ poll_wait(file, queue, wait);
++
++ up(&cam->busy_lock);
++
++ return res;
++}
++
++/*!
++ * This structure defines the functions to be called in this driver.
++ */
++static struct v4l2_file_operations mxc_v4l_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_v4l_open,
++ .release = mxc_v4l_close,
++ .read = mxc_v4l_read,
++ .ioctl = mxc_v4l_ioctl,
++ .mmap = mxc_mmap,
++ .poll = mxc_poll,
++};
++
++static struct video_device mxc_v4l_template = {
++ .name = "Mxc Camera",
++ .fops = &mxc_v4l_fops,
++ .release = video_device_release,
++};
++
++/*!
++ * This function can be used to release any platform data on closing.
++ */
++static void camera_platform_release(struct device *device)
++{
++}
++
++/*!
++ * Camera V4l2 callback function.
++ *
++ * @param mask u32 interrupt status mask
++ *
++ * @param dev void * pointing to the cam_data structure
++ */
++static void camera_callback(u32 mask, void *dev)
++{
++ struct mxc_v4l_frame *done_frame;
++ struct mxc_v4l_frame *ready_frame;
++ struct timeval cur_time;
++
++ cam_data *cam = (cam_data *) dev;
++ if (cam == NULL)
++ return;
++
++ pr_debug("In MVC:camera_callback\n");
++
++ spin_lock(&cam->queue_int_lock);
++ spin_lock(&cam->dqueue_int_lock);
++ if (!list_empty(&cam->working_q)) {
++ do_gettimeofday(&cur_time);
++
++ done_frame = list_entry(cam->working_q.next,
++ struct mxc_v4l_frame,
++ queue);
++
++ if (done_frame->ipu_buf_num != cam->local_buf_num)
++ goto next;
++
++ /*
++ * Set the current time to done frame buffer's
++ * timestamp. Users can use this information to judge
++ * the frame's usage.
++ */
++ done_frame->buffer.timestamp = cur_time;
++
++ if (done_frame->buffer.flags & V4L2_BUF_FLAG_QUEUED) {
++ done_frame->buffer.flags |= V4L2_BUF_FLAG_DONE;
++ done_frame->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED;
++
++ /* Added to the done queue */
++ list_del(cam->working_q.next);
++ list_add_tail(&done_frame->queue, &cam->done_q);
++
++ /* Wake up the queue */
++ cam->enc_counter++;
++ wake_up_interruptible(&cam->enc_queue);
++ } else
++ pr_err("ERROR: v4l2 capture: camera_callback: "
++ "buffer not queued\n");
++ }
++
++next:
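++	/*
++	 * Reload the CSI double buffer: move the next frame from ready_q to
++	 * working_q, or fall back to the dummy frame when nothing is queued
++	 * so capture keeps running without overwriting user buffers.
++	 */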
++ if (!list_empty(&cam->ready_q)) {
++ ready_frame = list_entry(cam->ready_q.next,
++ struct mxc_v4l_frame,
++ queue);
++ if (cam->enc_update_eba)
++ if (cam->enc_update_eba(cam->ipu,
++ ready_frame->buffer.m.offset,
++ &cam->ping_pong_csi) == 0) {
++ list_del(cam->ready_q.next);
++ list_add_tail(&ready_frame->queue,
++ &cam->working_q);
++ ready_frame->ipu_buf_num = cam->local_buf_num;
++ }
++ } else {
++ if (cam->enc_update_eba)
++ cam->enc_update_eba(
++ cam->ipu, cam->dummy_frame.buffer.m.offset,
++ &cam->ping_pong_csi);
++ }
++
++ cam->local_buf_num = (cam->local_buf_num == 0) ? 1 : 0;
++ spin_unlock(&cam->dqueue_int_lock);
++ spin_unlock(&cam->queue_int_lock);
++
++ return;
++}
++
++/*!
++ * initialize cam_data structure
++ *
++ * @param cam structure cam_data *
++ *
++ * @return status 0 Success
++ */
++static int init_camera_struct(cam_data *cam, struct platform_device *pdev)
++{
++ const struct of_device_id *of_id =
++ of_match_device(mxc_v4l2_dt_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
++ int ipu_id, csi_id, mclk_source;
++ int ret = 0;
++
++ pr_debug("In MVC: init_camera_struct\n");
++
++ ret = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (ret) {
++ dev_err(&pdev->dev, "ipu_id missing or invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "csi_id", &csi_id);
++ if (ret) {
++ dev_err(&pdev->dev, "csi_id missing or invalid\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "mclk_source", &mclk_source);
++ if (ret) {
++ dev_err(&pdev->dev, "sensor mclk missing or invalid\n");
++ return ret;
++ }
++
++ /* Default everything to 0 */
++ memset(cam, 0, sizeof(cam_data));
++
++ /* get devtype to distinguish if the cpu is imx5 or imx6
++ * IMX5_V4L2 specify the cpu is imx5
++ * IMX6_V4L2 specify the cpu is imx6q or imx6sdl
++ */
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ cam->devtype = pdev->id_entry->driver_data;
++
++ cam->ipu = ipu_get_soc(ipu_id);
++ if (cam->ipu == NULL) {
++ pr_err("ERROR: v4l2 capture: failed to get ipu\n");
++ return -EINVAL;
++ } else if (cam->ipu == ERR_PTR(-ENODEV)) {
++ pr_err("ERROR: v4l2 capture: get invalid ipu\n");
++ return -ENODEV;
++ }
++
++	sema_init(&cam->param_lock, 1);
++	sema_init(&cam->busy_lock, 1);
++
++ cam->video_dev = video_device_alloc();
++ if (cam->video_dev == NULL)
++ return -ENODEV;
++
++ *(cam->video_dev) = mxc_v4l_template;
++
++ video_set_drvdata(cam->video_dev, cam);
++ dev_set_drvdata(&pdev->dev, (void *)cam);
++ cam->video_dev->minor = -1;
++
++ init_waitqueue_head(&cam->enc_queue);
++ init_waitqueue_head(&cam->still_queue);
++
++ /* setup cropping */
++ cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = 640;
++ cam->crop_bounds.top = 0;
++ cam->crop_bounds.height = 480;
++ cam->crop_current = cam->crop_defrect = cam->crop_bounds;
++ ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
++ cam->crop_current.height, cam->csi);
++ ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
++ cam->crop_current.top, cam->csi);
++ cam->streamparm.parm.capture.capturemode = 0;
++
++ cam->standard.index = 0;
++ cam->standard.id = V4L2_STD_UNKNOWN;
++ cam->standard.frameperiod.denominator = 30;
++ cam->standard.frameperiod.numerator = 1;
++ cam->standard.framelines = 480;
++ cam->standard_autodetect = true;
++ cam->streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cam->streamparm.parm.capture.timeperframe = cam->standard.frameperiod;
++ cam->streamparm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
++ cam->overlay_on = false;
++ cam->capture_on = false;
++ cam->v4l2_fb.flags = V4L2_FBUF_FLAG_OVERLAY;
++
++ cam->v2f.fmt.pix.sizeimage = 352 * 288 * 3 / 2;
++ cam->v2f.fmt.pix.bytesperline = 288 * 3 / 2;
++ cam->v2f.fmt.pix.width = 288;
++ cam->v2f.fmt.pix.height = 352;
++ cam->v2f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
++ cam->win.w.width = 160;
++ cam->win.w.height = 160;
++ cam->win.w.left = 0;
++ cam->win.w.top = 0;
++
++ cam->ipu_id = ipu_id;
++ cam->csi = csi_id;
++ cam->mclk_source = mclk_source;
++ cam->mclk_on[cam->mclk_source] = false;
++
++ cam->enc_callback = camera_callback;
++ init_waitqueue_head(&cam->power_queue);
++ spin_lock_init(&cam->queue_int_lock);
++ spin_lock_init(&cam->dqueue_int_lock);
++
++ cam->self = kmalloc(sizeof(struct v4l2_int_device), GFP_KERNEL);
++ cam->self->module = THIS_MODULE;
++ sprintf(cam->self->name, "mxc_v4l2_cap%d", cam->csi);
++ cam->self->type = v4l2_int_type_master;
++ cam->self->u.master = &mxc_v4l2_master;
++
++ return 0;
++}
++
++static ssize_t show_streaming(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct video_device *video_dev = container_of(dev,
++ struct video_device, dev);
++ cam_data *cam = video_get_drvdata(video_dev);
++
++ if (cam->capture_on)
++ return sprintf(buf, "stream on\n");
++ else
++ return sprintf(buf, "stream off\n");
++}
++static DEVICE_ATTR(fsl_v4l2_capture_property, S_IRUGO, show_streaming, NULL);
++
++static ssize_t show_overlay(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct video_device *video_dev = container_of(dev,
++ struct video_device, dev);
++ cam_data *cam = video_get_drvdata(video_dev);
++
++ if (cam->overlay_on)
++ return sprintf(buf, "overlay on\n");
++ else
++ return sprintf(buf, "overlay off\n");
++}
++static DEVICE_ATTR(fsl_v4l2_overlay_property, S_IRUGO, show_overlay, NULL);
++
++static ssize_t show_csi(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct video_device *video_dev = container_of(dev,
++ struct video_device, dev);
++ cam_data *cam = video_get_drvdata(video_dev);
++
++ return sprintf(buf, "ipu%d_csi%d\n", cam->ipu_id, cam->csi);
++}
++static DEVICE_ATTR(fsl_csi_property, S_IRUGO, show_csi, NULL);
++
++/*!
++ * This function is called to probe the devices if registered.
++ *
++ * @param pdev the device structure used to give information on which device
++ * to probe
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int mxc_v4l2_probe(struct platform_device *pdev)
++{
++ /* Create cam and initialize it. */
++ cam_data *cam = kmalloc(sizeof(cam_data), GFP_KERNEL);
++ if (cam == NULL) {
++ pr_err("ERROR: v4l2 capture: failed to register camera\n");
++ return -1;
++ }
++
++ init_camera_struct(cam, pdev);
++ pdev->dev.release = camera_platform_release;
++
++ /* Set up the v4l2 device and register it*/
++ cam->self->priv = cam;
++ v4l2_int_device_register(cam->self);
++
++ /* register v4l video device */
++ if (video_register_device(cam->video_dev, VFL_TYPE_GRABBER, video_nr)
++ == -1) {
++ kfree(cam);
++ cam = NULL;
++ pr_err("ERROR: v4l2 capture: video_register_device failed\n");
++ return -1;
++ }
++ pr_debug(" Video device registered: %s #%d\n",
++ cam->video_dev->name, cam->video_dev->minor);
++
++ if (device_create_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_capture_property))
++ dev_err(&pdev->dev, "Error on creating sysfs file"
++ " for capture\n");
++
++ if (device_create_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_overlay_property))
++ dev_err(&pdev->dev, "Error on creating sysfs file"
++ " for overlay\n");
++
++ if (device_create_file(&cam->video_dev->dev,
++ &dev_attr_fsl_csi_property))
++ dev_err(&pdev->dev, "Error on creating sysfs file"
++ " for csi number\n");
++
++ return 0;
++}
++
++/*!
++ * This function is called to remove the devices when device unregistered.
++ *
++ * @param pdev the device structure used to give information on which device
++ * to remove
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int mxc_v4l2_remove(struct platform_device *pdev)
++{
++ cam_data *cam = (cam_data *)platform_get_drvdata(pdev);
++ if (cam->open_count) {
++ pr_err("ERROR: v4l2 capture:camera open "
++ "-- setting ops to NULL\n");
++ return -EBUSY;
++ } else {
++ device_remove_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_capture_property);
++ device_remove_file(&cam->video_dev->dev,
++ &dev_attr_fsl_v4l2_overlay_property);
++ device_remove_file(&cam->video_dev->dev,
++ &dev_attr_fsl_csi_property);
++
++ pr_info("V4L2 freeing image input device\n");
++ v4l2_int_device_unregister(cam->self);
++ video_unregister_device(cam->video_dev);
++
++ mxc_free_frame_buf(cam);
++ kfree(cam);
++ }
++
++ pr_info("V4L2 unregistering video\n");
++ return 0;
++}
++
++/*!
++ * This function is called to put the sensor in a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure used to give information on which I2C
++ * to suspend
++ * @param state the power state the device is entering
++ *
++ * @return The function returns 0 on success and -1 on failure.
++ */
++static int mxc_v4l2_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC:mxc_v4l2_suspend\n");
++
++ if (cam == NULL)
++ return -1;
++
++ down(&cam->busy_lock);
++
++ cam->low_power = true;
++
++ if (cam->overlay_on == true)
++ stop_preview(cam);
++ if ((cam->capture_on == true) && cam->enc_disable)
++ cam->enc_disable(cam);
++
++ if (cam->sensor && cam->open_count) {
++ if (cam->mclk_on[cam->mclk_source]) {
++ ipu_csi_enable_mclk_if(cam->ipu, CSI_MCLK_I2C,
++ cam->mclk_source,
++ false, false);
++ cam->mclk_on[cam->mclk_source] = false;
++ }
++ vidioc_int_s_power(cam->sensor, 0);
++ }
++
++ up(&cam->busy_lock);
++
++ return 0;
++}
++
++/*!
++ * This function is called to bring the sensor back from a low power state.
++ * Refer to the document driver-model/driver.txt in the kernel source tree
++ * for more information.
++ *
++ * @param pdev the device structure
++ *
++ * @return The function returns 0 on success and -1 on failure
++ */
++static int mxc_v4l2_resume(struct platform_device *pdev)
++{
++ cam_data *cam = platform_get_drvdata(pdev);
++
++ pr_debug("In MVC:mxc_v4l2_resume\n");
++
++ if (cam == NULL)
++ return -1;
++
++ down(&cam->busy_lock);
++
++ cam->low_power = false;
++ wake_up_interruptible(&cam->power_queue);
++
++ if (cam->sensor && cam->open_count) {
++ vidioc_int_s_power(cam->sensor, 1);
++
++ if (!cam->mclk_on[cam->mclk_source]) {
++ ipu_csi_enable_mclk_if(cam->ipu, CSI_MCLK_I2C,
++ cam->mclk_source,
++ true, true);
++ cam->mclk_on[cam->mclk_source] = true;
++ }
++ }
++
++ if (cam->overlay_on == true)
++ start_preview(cam);
++ if (cam->capture_on == true)
++ mxc_streamon(cam);
++
++ up(&cam->busy_lock);
++
++ return 0;
++}
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver mxc_v4l2_driver = {
++ .driver = {
++ .name = "mxc_v4l2_capture",
++ .owner = THIS_MODULE,
++ .of_match_table = mxc_v4l2_dt_ids,
++ },
++ .id_table = imx_v4l2_devtype,
++ .probe = mxc_v4l2_probe,
++ .remove = mxc_v4l2_remove,
++ .suspend = mxc_v4l2_suspend,
++ .resume = mxc_v4l2_resume,
++ .shutdown = NULL,
++};
++
++/*!
++ * Initializes the camera driver.
++ */
++static int mxc_v4l2_master_attach(struct v4l2_int_device *slave)
++{
++	cam_data *cam;
++	struct v4l2_format cam_fmt;
++	int i;
++	struct sensor_data *sdata;
++
++	pr_debug("In MVC: mxc_v4l2_master_attach\n");
++
++	/* Validate the slave before dereferencing it. */
++	if (slave == NULL) {
++		pr_err("ERROR: v4l2 capture: slave parameter not valid.\n");
++		return -1;
++	}
++
++	cam = slave->u.slave->master->priv;
++	sdata = slave->priv;
++	pr_debug(" slave.name = %s\n", slave->name);
++	pr_debug(" master.name = %s\n", slave->u.slave->master->name);
++
++ if (sdata->csi != cam->csi) {
++ pr_debug("%s: csi doesn't match\n", __func__);
++ return -1;
++ }
++
++ cam->sensor = slave;
++
++ if (cam->sensor_index < MXC_SENSOR_NUM) {
++ cam->all_sensors[cam->sensor_index] = slave;
++ cam->sensor_index++;
++ } else {
++ pr_err("ERROR: v4l2 capture: slave number exceeds the maximum.\n");
++ return -1;
++ }
++
++ for (i = 0; i < cam->sensor_index; i++) {
++ vidioc_int_dev_exit(cam->all_sensors[i]);
++ vidioc_int_s_power(cam->all_sensors[i], 0);
++ }
++
++ cam_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ vidioc_int_g_fmt_cap(cam->sensor, &cam_fmt);
++
++ /* Used to detect TV in (type 1) vs. camera (type 0)*/
++ cam->device_type = cam_fmt.fmt.pix.priv;
++
++ /* Set the input size to the ipu for this device */
++ cam->crop_bounds.top = cam->crop_bounds.left = 0;
++ cam->crop_bounds.width = cam_fmt.fmt.pix.width;
++ cam->crop_bounds.height = cam_fmt.fmt.pix.height;
++
++ /* This also is the max crop size for this device. */
++ cam->crop_defrect.top = cam->crop_defrect.left = 0;
++ cam->crop_defrect.width = cam_fmt.fmt.pix.width;
++ cam->crop_defrect.height = cam_fmt.fmt.pix.height;
++
++ /* At this point, this is also the current image size. */
++ cam->crop_current.top = cam->crop_current.left = 0;
++ cam->crop_current.width = cam_fmt.fmt.pix.width;
++ cam->crop_current.height = cam_fmt.fmt.pix.height;
++
++ pr_debug("End of %s: v2f pix widthxheight %d x %d\n",
++ __func__,
++ cam->v2f.fmt.pix.width, cam->v2f.fmt.pix.height);
++ pr_debug("End of %s: crop_bounds widthxheight %d x %d\n",
++ __func__,
++ cam->crop_bounds.width, cam->crop_bounds.height);
++ pr_debug("End of %s: crop_defrect widthxheight %d x %d\n",
++ __func__,
++ cam->crop_defrect.width, cam->crop_defrect.height);
++ pr_debug("End of %s: crop_current widthxheight %d x %d\n",
++ __func__,
++ cam->crop_current.width, cam->crop_current.height);
++
++ return 0;
++}
++
++/*!
++ * Disconnects the camera driver.
++ */
++static void mxc_v4l2_master_detach(struct v4l2_int_device *slave)
++{
++ unsigned int i;
++ cam_data *cam = slave->u.slave->master->priv;
++
++ pr_debug("In MVC:mxc_v4l2_master_detach\n");
++
++ if (cam->sensor_index > 1) {
++ for (i = 0; i < cam->sensor_index; i++) {
++ if (cam->all_sensors[i] != slave)
++ continue;
++ /* Move all the sensors behind this
++ * sensor one step forward
++ */
++ for (; i <= MXC_SENSOR_NUM - 2; i++)
++ cam->all_sensors[i] = cam->all_sensors[i+1];
++ break;
++ }
++ /* Point current sensor to the last one */
++ cam->sensor = cam->all_sensors[cam->sensor_index - 2];
++ } else
++ cam->sensor = NULL;
++
++ cam->sensor_index--;
++ vidioc_int_dev_exit(slave);
++}
++
++/*!
++ * Entry point for the V4L2
++ *
++ * @return Error code indicating success or failure
++ */
++static __init int camera_init(void)
++{
++	int err = 0;
++
++ pr_debug("In MVC:camera_init\n");
++
++ /* Register the device driver structure. */
++ err = platform_driver_register(&mxc_v4l2_driver);
++ if (err != 0) {
++ pr_err("ERROR: v4l2 capture:camera_init: "
++ "platform_driver_register failed.\n");
++ return err;
++ }
++
++ return err;
++}
++
++/*!
++ * Exit and cleanup for the V4L2
++ */
++static void __exit camera_exit(void)
++{
++ pr_debug("In MVC: camera_exit\n");
++
++ platform_driver_unregister(&mxc_v4l2_driver);
++}
++
++module_init(camera_init);
++module_exit(camera_exit);
++
++module_param(video_nr, int, 0444);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("V4L2 capture driver for Mxc based cameras");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("video");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h linux-3.14.40/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/mxc_v4l2_capture.h 2015-05-01 14:57:59.263427001 -0500
+@@ -0,0 +1,260 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup MXC_V4L2_CAPTURE MXC V4L2 Video Capture Driver
++ */
++/*!
++ * @file mxc_v4l2_capture.h
++ *
++ * @brief mxc V4L2 capture device API Header file
++ *
++ * It includes all the defines for frame operations as well as three structure
++ * definitions: the use-case ops structure, the common V4L2 driver structure
++ * and the frame structure.
++ *
++ * @ingroup MXC_V4L2_CAPTURE
++ */
++#ifndef __MXC_V4L2_CAPTURE_H__
++#define __MXC_V4L2_CAPTURE_H__
++
++#include <linux/uaccess.h>
++#include <linux/list.h>
++#include <linux/mxc_v4l2.h>
++#include <linux/completion.h>
++#include <linux/dmaengine.h>
++#include <linux/pxp_dma.h>
++#include <linux/ipu-v3.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <media/v4l2-dev.h>
++#include <media/v4l2-int-device.h>
++
++
++#define FRAME_NUM 10
++#define MXC_SENSOR_NUM 2
++
++enum imx_v4l2_devtype {
++ IMX5_V4L2,
++ IMX6_V4L2,
++};
++
++/*!
++ * v4l2 frame structure.
++ */
++struct mxc_v4l_frame {
++ u32 paddress;
++ void *vaddress;
++ int count;
++ int width;
++ int height;
++
++ struct v4l2_buffer buffer;
++ struct list_head queue;
++ int index;
++ union {
++ int ipu_buf_num;
++ int csi_buf_num;
++ };
++};
++
++/* Only for old version. Will go away soon. */
++typedef struct {
++ u8 clk_mode;
++ u8 ext_vsync;
++ u8 Vsync_pol;
++ u8 Hsync_pol;
++ u8 pixclk_pol;
++ u8 data_pol;
++ u8 data_width;
++ u8 pack_tight;
++ u8 force_eof;
++ u8 data_en_pol;
++ u16 width;
++ u16 height;
++ u32 pixel_fmt;
++ u32 mclk;
++ u16 active_width;
++ u16 active_height;
++} sensor_interface;
++
++/* Sensor control function */
++/* Only for old version. Will go away soon. */
++struct camera_sensor {
++ void (*set_color) (int bright, int saturation, int red, int green,
++ int blue);
++ void (*get_color) (int *bright, int *saturation, int *red, int *green,
++ int *blue);
++ void (*set_ae_mode) (int ae_mode);
++ void (*get_ae_mode) (int *ae_mode);
++ sensor_interface *(*config) (int *frame_rate, int high_quality);
++ sensor_interface *(*reset) (void);
++ void (*get_std) (v4l2_std_id *std);
++ void (*set_std) (v4l2_std_id std);
++ unsigned int csi;
++};
++
++/*!
++ * common v4l2 driver structure.
++ */
++typedef struct _cam_data {
++ struct video_device *video_dev;
++ int device_type;
++
++ /* semaphore guard against SMP multithreading */
++ struct semaphore busy_lock;
++
++ int open_count;
++
++ /* params lock for this camera */
++ struct semaphore param_lock;
++
++ /* Encoder */
++ struct list_head ready_q;
++ struct list_head done_q;
++ struct list_head working_q;
++ int ping_pong_csi;
++ spinlock_t queue_int_lock;
++ spinlock_t dqueue_int_lock;
++ struct mxc_v4l_frame frame[FRAME_NUM];
++ struct mxc_v4l_frame dummy_frame;
++ wait_queue_head_t enc_queue;
++ int enc_counter;
++ dma_addr_t rot_enc_bufs[2];
++ void *rot_enc_bufs_vaddr[2];
++ int rot_enc_buf_size[2];
++ enum v4l2_buf_type type;
++
++ /* still image capture */
++ wait_queue_head_t still_queue;
++ int still_counter;
++ dma_addr_t still_buf[2];
++ void *still_buf_vaddr;
++
++ /* overlay */
++ struct v4l2_window win;
++ struct v4l2_framebuffer v4l2_fb;
++ dma_addr_t vf_bufs[2];
++ void *vf_bufs_vaddr[2];
++ int vf_bufs_size[2];
++ dma_addr_t rot_vf_bufs[2];
++ void *rot_vf_bufs_vaddr[2];
++ int rot_vf_buf_size[2];
++ bool overlay_active;
++ int output;
++ struct fb_info *overlay_fb;
++ int fb_origin_std;
++ struct work_struct csi_work_struct;
++
++ /* v4l2 format */
++ struct v4l2_format v2f;
++ int rotation; /* for IPUv1 and IPUv3, this means encoder rotation */
++ int vf_rotation; /* viewfinder rotation only for IPUv1 and IPUv3 */
++ struct v4l2_mxc_offset offset;
++
++ /* V4l2 control bit */
++ int bright;
++ int hue;
++ int contrast;
++ int saturation;
++ int red;
++ int green;
++ int blue;
++ int ae_mode;
++
++ /* standard */
++ struct v4l2_streamparm streamparm;
++ struct v4l2_standard standard;
++ bool standard_autodetect;
++
++ /* crop */
++ struct v4l2_rect crop_bounds;
++ struct v4l2_rect crop_defrect;
++ struct v4l2_rect crop_current;
++
++ int (*enc_update_eba) (struct ipu_soc *ipu, dma_addr_t eba,
++ int *bufferNum);
++ int (*enc_enable) (void *private);
++ int (*enc_disable) (void *private);
++ int (*enc_enable_csi) (void *private);
++ int (*enc_disable_csi) (void *private);
++ void (*enc_callback) (u32 mask, void *dev);
++ int (*vf_start_adc) (void *private);
++ int (*vf_stop_adc) (void *private);
++ int (*vf_start_sdc) (void *private);
++ int (*vf_stop_sdc) (void *private);
++ int (*vf_enable_csi) (void *private);
++ int (*vf_disable_csi) (void *private);
++ int (*csi_start) (void *private);
++ int (*csi_stop) (void *private);
++
++ /* misc status flag */
++ bool overlay_on;
++ bool capture_on;
++ int overlay_pid;
++ int capture_pid;
++ bool low_power;
++ wait_queue_head_t power_queue;
++ unsigned int ipu_id;
++ unsigned int csi;
++ u8 mclk_source;
++ bool mclk_on[2]; /* two mclk sources at most now */
++ int current_input;
++
++ int local_buf_num;
++
++ /* camera sensor interface */
++ struct camera_sensor *cam_sensor; /* old version */
++ struct v4l2_int_device *all_sensors[MXC_SENSOR_NUM];
++ struct v4l2_int_device *sensor;
++ struct v4l2_int_device *self;
++ int sensor_index;
++ void *ipu;
++ enum imx_v4l2_devtype devtype;
++
++ /* v4l2 buf elements related to PxP DMA */
++ struct completion pxp_tx_cmpl;
++ struct pxp_channel *pxp_chan;
++ struct pxp_config_data pxp_conf;
++ struct dma_async_tx_descriptor *txd;
++ dma_cookie_t cookie;
++ struct scatterlist sg[2];
++} cam_data;
++
++struct sensor_data {
++ const struct ov5642_platform_data *platform_data;
++ struct v4l2_int_device *v4l2_int_device;
++ struct i2c_client *i2c_client;
++ struct v4l2_pix_format pix;
++ struct v4l2_captureparm streamcap;
++ bool on;
++
++ /* control settings */
++ int brightness;
++ int hue;
++ int contrast;
++ int saturation;
++ int red;
++ int green;
++ int blue;
++ int ae_mode;
++
++ u32 mclk;
++ u8 mclk_source;
++ struct clk *sensor_clk;
++ int csi;
++
++ void (*io_init)(void);
++};
++
++void set_mclk_rate(uint32_t *p_mclk_freq, uint32_t csi);
++#endif /* __MXC_V4L2_CAPTURE_H__ */
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ov5640.c linux-3.14.40/drivers/media/platform/mxc/capture/ov5640.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ov5640.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ov5640.c 2015-05-01 14:57:59.263427001 -0500
+@@ -0,0 +1,1951 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define OV5640_VOLTAGE_ANALOG 2800000
++#define OV5640_VOLTAGE_DIGITAL_CORE 1500000
++#define OV5640_VOLTAGE_DIGITAL_IO 1800000
++
++#define MIN_FPS 15
++#define MAX_FPS 30
++#define DEFAULT_FPS 30
++
++#define OV5640_XCLK_MIN 6000000
++#define OV5640_XCLK_MAX 24000000
++
++#define OV5640_CHIP_ID_HIGH_BYTE 0x300A
++#define OV5640_CHIP_ID_LOW_BYTE 0x300B
++
++enum ov5640_mode {
++ ov5640_mode_MIN = 0,
++ ov5640_mode_VGA_640_480 = 0,
++ ov5640_mode_QVGA_320_240 = 1,
++ ov5640_mode_NTSC_720_480 = 2,
++ ov5640_mode_PAL_720_576 = 3,
++ ov5640_mode_720P_1280_720 = 4,
++ ov5640_mode_1080P_1920_1080 = 5,
++ ov5640_mode_QSXGA_2592_1944 = 6,
++ ov5640_mode_QCIF_176_144 = 7,
++ ov5640_mode_XGA_1024_768 = 8,
++ ov5640_mode_MAX = 8
++};
++
++enum ov5640_frame_rate {
++ ov5640_15_fps,
++ ov5640_30_fps
++};
++
++static int ov5640_framerates[] = {
++ [ov5640_15_fps] = 15,
++ [ov5640_30_fps] = 30,
++};
++
++struct reg_value {
++ u16 u16RegAddr;
++ u8 u8Val;
++ u8 u8Mask;
++ u32 u32Delay_ms;
++};
++
++struct ov5640_mode_info {
++ enum ov5640_mode mode;
++ u32 width;
++ u32 height;
++ struct reg_value *init_data_ptr;
++ u32 init_data_size;
++};
++
++/*!
++ * Maintains the information on the current state of the sensor.
++ */
++static struct sensor_data ov5640_data;
++static int pwn_gpio, rst_gpio;
++static int prev_sysclk;
++static int AE_Target = 52, night_mode;
++static int prev_HTS;
++static int AE_high, AE_low;
++
++static struct reg_value ov5640_global_init_setting[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3017, 0xff, 0, 0}, {0x3018, 0xff, 0, 0},
++ {0x3034, 0x1a, 0, 0}, {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0},
++ {0x3630, 0x36, 0, 0}, {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0},
++ {0x3633, 0x12, 0, 0}, {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0},
++ {0x3703, 0x5a, 0, 0}, {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0},
++ {0x370b, 0x60, 0, 0}, {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0},
++ {0x3906, 0x10, 0, 0}, {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0},
++ {0x3600, 0x08, 0, 0}, {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0},
++ {0x3620, 0x52, 0, 0}, {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0},
++ {0x3a13, 0x43, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3635, 0x13, 0, 0}, {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0},
++ {0x3622, 0x01, 0, 0}, {0x3c01, 0x34, 0, 0}, {0x3c04, 0x28, 0, 0},
++ {0x3c05, 0x98, 0, 0}, {0x3c06, 0x00, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3c08, 0x00, 0, 0}, {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0},
++ {0x3c0b, 0x40, 0, 0}, {0x3810, 0x00, 0, 0}, {0x3811, 0x10, 0, 0},
++ {0x3812, 0x00, 0, 0}, {0x3708, 0x64, 0, 0}, {0x4001, 0x02, 0, 0},
++ {0x4005, 0x1a, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3004, 0xff, 0, 0},
++ {0x300e, 0x58, 0, 0}, {0x302e, 0x00, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x440e, 0x00, 0, 0}, {0x5000, 0xa7, 0, 0},
++ {0x3008, 0x02, 0, 0},
++};
++
++static struct reg_value ov5640_init_setting_30fps_VGA[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3017, 0xff, 0, 0}, {0x3018, 0xff, 0, 0},
++ {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0}, {0x3036, 0x46, 0, 0},
++ {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0}, {0x3630, 0x36, 0, 0},
++ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
++ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
++ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
++ {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0}, {0x3906, 0x10, 0, 0},
++ {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0}, {0x3600, 0x08, 0, 0},
++ {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0}, {0x3620, 0x52, 0, 0},
++ {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0}, {0x3a13, 0x43, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0xf8, 0, 0}, {0x3635, 0x13, 0, 0},
++ {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0}, {0x3622, 0x01, 0, 0},
++ {0x3c01, 0x34, 0, 0}, {0x3c04, 0x28, 0, 0}, {0x3c05, 0x98, 0, 0},
++ {0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x300e, 0x58, 0, 0}, {0x302e, 0x00, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5000, 0xa7, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0xf2, 0, 0},
++ {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0}, {0x5187, 0x09, 0, 0},
++ {0x5188, 0x09, 0, 0}, {0x5189, 0x88, 0, 0}, {0x518a, 0x54, 0, 0},
++ {0x518b, 0xee, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x50, 0, 0},
++ {0x518e, 0x34, 0, 0}, {0x518f, 0x6b, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x6c, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x09, 0, 0},
++ {0x519d, 0x2b, 0, 0}, {0x519e, 0x38, 0, 0}, {0x5381, 0x1e, 0, 0},
++ {0x5382, 0x5b, 0, 0}, {0x5383, 0x08, 0, 0}, {0x5384, 0x0a, 0, 0},
++ {0x5385, 0x7e, 0, 0}, {0x5386, 0x88, 0, 0}, {0x5387, 0x7c, 0, 0},
++ {0x5388, 0x6c, 0, 0}, {0x5389, 0x10, 0, 0}, {0x538a, 0x01, 0, 0},
++ {0x538b, 0x98, 0, 0}, {0x5300, 0x08, 0, 0}, {0x5301, 0x30, 0, 0},
++ {0x5302, 0x10, 0, 0}, {0x5303, 0x00, 0, 0}, {0x5304, 0x08, 0, 0},
++ {0x5305, 0x30, 0, 0}, {0x5306, 0x08, 0, 0}, {0x5307, 0x16, 0, 0},
++ {0x5309, 0x08, 0, 0}, {0x530a, 0x30, 0, 0}, {0x530b, 0x04, 0, 0},
++ {0x530c, 0x06, 0, 0}, {0x5480, 0x01, 0, 0}, {0x5481, 0x08, 0, 0},
++ {0x5482, 0x14, 0, 0}, {0x5483, 0x28, 0, 0}, {0x5484, 0x51, 0, 0},
++ {0x5485, 0x65, 0, 0}, {0x5486, 0x71, 0, 0}, {0x5487, 0x7d, 0, 0},
++ {0x5488, 0x87, 0, 0}, {0x5489, 0x91, 0, 0}, {0x548a, 0x9a, 0, 0},
++ {0x548b, 0xaa, 0, 0}, {0x548c, 0xb8, 0, 0}, {0x548d, 0xcd, 0, 0},
++ {0x548e, 0xdd, 0, 0}, {0x548f, 0xea, 0, 0}, {0x5490, 0x1d, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x10, 0, 0},
++ {0x5589, 0x10, 0, 0}, {0x558a, 0x00, 0, 0}, {0x558b, 0xf8, 0, 0},
++ {0x5800, 0x23, 0, 0}, {0x5801, 0x14, 0, 0}, {0x5802, 0x0f, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x12, 0, 0}, {0x5805, 0x26, 0, 0},
++ {0x5806, 0x0c, 0, 0}, {0x5807, 0x08, 0, 0}, {0x5808, 0x05, 0, 0},
++ {0x5809, 0x05, 0, 0}, {0x580a, 0x08, 0, 0}, {0x580b, 0x0d, 0, 0},
++ {0x580c, 0x08, 0, 0}, {0x580d, 0x03, 0, 0}, {0x580e, 0x00, 0, 0},
++ {0x580f, 0x00, 0, 0}, {0x5810, 0x03, 0, 0}, {0x5811, 0x09, 0, 0},
++ {0x5812, 0x07, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x00, 0, 0},
++ {0x5815, 0x01, 0, 0}, {0x5816, 0x03, 0, 0}, {0x5817, 0x08, 0, 0},
++ {0x5818, 0x0d, 0, 0}, {0x5819, 0x08, 0, 0}, {0x581a, 0x05, 0, 0},
++ {0x581b, 0x06, 0, 0}, {0x581c, 0x08, 0, 0}, {0x581d, 0x0e, 0, 0},
++ {0x581e, 0x29, 0, 0}, {0x581f, 0x17, 0, 0}, {0x5820, 0x11, 0, 0},
++ {0x5821, 0x11, 0, 0}, {0x5822, 0x15, 0, 0}, {0x5823, 0x28, 0, 0},
++ {0x5824, 0x46, 0, 0}, {0x5825, 0x26, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x26, 0, 0}, {0x5828, 0x64, 0, 0}, {0x5829, 0x26, 0, 0},
++ {0x582a, 0x24, 0, 0}, {0x582b, 0x22, 0, 0}, {0x582c, 0x24, 0, 0},
++ {0x582d, 0x24, 0, 0}, {0x582e, 0x06, 0, 0}, {0x582f, 0x22, 0, 0},
++ {0x5830, 0x40, 0, 0}, {0x5831, 0x42, 0, 0}, {0x5832, 0x24, 0, 0},
++ {0x5833, 0x26, 0, 0}, {0x5834, 0x24, 0, 0}, {0x5835, 0x22, 0, 0},
++ {0x5836, 0x22, 0, 0}, {0x5837, 0x26, 0, 0}, {0x5838, 0x44, 0, 0},
++ {0x5839, 0x24, 0, 0}, {0x583a, 0x26, 0, 0}, {0x583b, 0x28, 0, 0},
++ {0x583c, 0x42, 0, 0}, {0x583d, 0xce, 0, 0}, {0x5025, 0x00, 0, 0},
++ {0x3a0f, 0x30, 0, 0}, {0x3a10, 0x28, 0, 0}, {0x3a1b, 0x30, 0, 0},
++ {0x3a1e, 0x26, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x14, 0, 0},
++ {0x3008, 0x02, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0}, {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0}, {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0},
++ {0x3807, 0xd4, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0},
++ {0x3807, 0xd4, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x60, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x09, 0, 0}, {0x3805, 0x7e, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x02, 0, 0}, {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x60, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x09, 0, 0}, {0x3805, 0x7e, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0},
++ {0x380a, 0x02, 0, 0}, {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x69, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3709, 0x52, 0, 0},
++ {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0}, {0x3a03, 0xe0, 0, 0},
++ {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe0, 0, 0}, {0x4004, 0x02, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x4837, 0x16, 0, 0}, {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
++ {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
++ {0x3035, 0x41, 0, 0}, {0x3036, 0x69, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3709, 0x52, 0, 0},
++ {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0}, {0x3a03, 0xe0, 0, 0},
++ {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe0, 0, 0}, {0x4004, 0x02, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x4837, 0x16, 0, 0}, {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
++ {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0},
++ {0x380a, 0x00, 0, 0}, {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0},
++ {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x69, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
++ {0x3c07, 0x08, 0, 0}, {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0},
++ {0x3814, 0x31, 0, 0}, {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9b, 0, 0}, {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0},
++ {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0}, {0x380c, 0x07, 0, 0},
++ {0x380d, 0x68, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0},
++ {0x3813, 0x06, 0, 0}, {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x0b, 0, 0},
++ {0x3a03, 0x88, 0, 0}, {0x3a14, 0x0b, 0, 0}, {0x3a15, 0x88, 0, 0},
++ {0x4004, 0x02, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x22, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0xa3, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x46, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++
++static struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
++ {0x3c07, 0x07, 0, 0}, {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
++ {0x3814, 0x11, 0, 0}, {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0xee, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x05, 0, 0},
++ {0x3807, 0xc3, 0, 0}, {0x3808, 0x07, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0}, {0x380c, 0x0b, 0, 0},
++ {0x380d, 0x1c, 0, 0}, {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0},
++ {0x3813, 0x04, 0, 0}, {0x3618, 0x04, 0, 0}, {0x3612, 0x2b, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x07, 0, 0},
++ {0x3a03, 0xae, 0, 0}, {0x3a14, 0x07, 0, 0}, {0x3a15, 0xae, 0, 0},
++ {0x4004, 0x06, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x02, 0, 0}, {0x4407, 0x0c, 0, 0}, {0x460b, 0x37, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x2c, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0x83, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x69, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
++ {0x3c07, 0x07, 0, 0}, {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
++ {0x3814, 0x11, 0, 0}, {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0},
++ {0x3801, 0x00, 0, 0}, {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0},
++ {0x3804, 0x0a, 0, 0}, {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0},
++ {0x3807, 0x9f, 0, 0}, {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0},
++ {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0},
++ {0x380d, 0x1c, 0, 0}, {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0},
++ {0x3813, 0x04, 0, 0}, {0x3618, 0x04, 0, 0}, {0x3612, 0x2b, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x07, 0, 0},
++ {0x3a03, 0xae, 0, 0}, {0x3a14, 0x07, 0, 0}, {0x3a15, 0xae, 0, 0},
++ {0x4004, 0x06, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x4713, 0x02, 0, 0}, {0x4407, 0x0c, 0, 0}, {0x460b, 0x37, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x4837, 0x2c, 0, 0}, {0x3824, 0x01, 0, 0},
++ {0x5001, 0x83, 0, 0}, {0x3034, 0x1a, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x69, 0, 0}, {0x3037, 0x13, 0, 0},
++};
++
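++/*
++ * Mode table indexed by [frame rate][mode]: index 0 holds the 15 fps
++ * settings, index 1 the 30 fps settings (matching enum ov5640_frame_rate).
++ * Entries with a NULL init_data_ptr (1080P and QSXGA at 30 fps) mark
++ * unsupported combinations.
++ */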
++static struct ov5640_mode_info ov5640_mode_info_data[2][ov5640_mode_MAX + 1] = {
++ {
++ {ov5640_mode_VGA_640_480, 640, 480,
++ ov5640_setting_15fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_15fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, 320, 240,
++ ov5640_setting_15fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_15fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, 720, 480,
++ ov5640_setting_15fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_15fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, 720, 576,
++ ov5640_setting_15fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_15fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, 1280, 720,
++ ov5640_setting_15fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_15fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, 1920, 1080,
++ ov5640_setting_15fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5640_setting_15fps_1080P_1920_1080)},
++ {ov5640_mode_QSXGA_2592_1944, 2592, 1944,
++ ov5640_setting_15fps_QSXGA_2592_1944,
++ ARRAY_SIZE(ov5640_setting_15fps_QSXGA_2592_1944)},
++ {ov5640_mode_QCIF_176_144, 176, 144,
++ ov5640_setting_15fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_15fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, 1024, 768,
++ ov5640_setting_15fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_15fps_XGA_1024_768)},
++ },
++ {
++ {ov5640_mode_VGA_640_480, 640, 480,
++ ov5640_setting_30fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, 320, 240,
++ ov5640_setting_30fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_30fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, 720, 480,
++ ov5640_setting_30fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_30fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, 720, 576,
++ ov5640_setting_30fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_30fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, 1280, 720,
++ ov5640_setting_30fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_30fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, 0, 0, NULL, 0},
++ {ov5640_mode_QSXGA_2592_1944, 0, 0, NULL, 0},
++ {ov5640_mode_QCIF_176_144, 176, 144,
++ ov5640_setting_30fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_30fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, 1024, 768,
++ ov5640_setting_30fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_30fps_XGA_1024_768)},
++ },
++};
++
++static struct regulator *io_regulator;
++static struct regulator *core_regulator;
++static struct regulator *analog_regulator;
++
++static int ov5640_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *device_id);
++static int ov5640_remove(struct i2c_client *client);
++
++static s32 ov5640_read_reg(u16 reg, u8 *val);
++static s32 ov5640_write_reg(u16 reg, u8 val);
++
++static const struct i2c_device_id ov5640_id[] = {
++ {"ov5640", 0},
++ {"ov564x", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5640_id);
++
++static struct i2c_driver ov5640_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "ov5640",
++ },
++ .probe = ov5640_probe,
++ .remove = ov5640_remove,
++ .id_table = ov5640_id,
++};
++
++static inline void ov5640_power_down(int enable)
++{
++ gpio_set_value(pwn_gpio, enable);
++
++ msleep(2);
++}
++
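++/*
++ * Toggle the active-high power-down line and pulse the reset line low to
++ * take the sensor through a full power-up/reset cycle; the sensor is left
++ * in power-down when this returns.
++ */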
++static inline void ov5640_reset(void)
++{
++ /* camera reset */
++ gpio_set_value(rst_gpio, 1);
++
++ /* camera power down */
++ gpio_set_value(pwn_gpio, 1);
++ msleep(5);
++ gpio_set_value(pwn_gpio, 0);
++ msleep(5);
++ gpio_set_value(rst_gpio, 0);
++ msleep(1);
++ gpio_set_value(rst_gpio, 1);
++ msleep(5);
++ gpio_set_value(pwn_gpio, 1);
++}
++
++static int ov5640_regulator_enable(struct device *dev)
++{
++ int ret = 0;
++
++ io_regulator = devm_regulator_get(dev, "DOVDD");
++ if (!IS_ERR(io_regulator)) {
++ regulator_set_voltage(io_regulator,
++ OV5640_VOLTAGE_DIGITAL_IO,
++ OV5640_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(io_regulator);
++ if (ret) {
++ dev_err(dev, "set io voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set io voltage ok\n");
++ }
++ } else {
++ io_regulator = NULL;
++ dev_warn(dev, "cannot get io voltage\n");
++ }
++
++ core_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(core_regulator)) {
++ regulator_set_voltage(core_regulator,
++ OV5640_VOLTAGE_DIGITAL_CORE,
++ OV5640_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(core_regulator);
++ if (ret) {
++ dev_err(dev, "set core voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set core voltage ok\n");
++ }
++ } else {
++ core_regulator = NULL;
++ dev_warn(dev, "cannot get core voltage\n");
++ }
++
++ analog_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(analog_regulator)) {
++ regulator_set_voltage(analog_regulator,
++ OV5640_VOLTAGE_ANALOG,
++ OV5640_VOLTAGE_ANALOG);
++ ret = regulator_enable(analog_regulator);
++ if (ret) {
++ dev_err(dev, "set analog voltage failed\n");
++ return ret;
++ } else {
++ dev_dbg(dev, "set analog voltage ok\n");
++ }
++ } else {
++ analog_regulator = NULL;
++ dev_warn(dev, "cannot get analog voltage\n");
++ }
++
++ return ret;
++}
++
++static s32 ov5640_write_reg(u16 reg, u8 val)
++{
++ u8 au8Buf[3] = {0};
++
++ au8Buf[0] = reg >> 8;
++ au8Buf[1] = reg & 0xff;
++ au8Buf[2] = val;
++
++ if (i2c_master_send(ov5640_data.i2c_client, au8Buf, 3) < 0) {
++ pr_err("%s:write reg error:reg=%x,val=%x\n",
++ __func__, reg, val);
++ return -1;
++ }
++
++ return 0;
++}
++
++static s32 ov5640_read_reg(u16 reg, u8 *val)
++{
++ u8 au8RegBuf[2] = {0};
++ u8 u8RdVal = 0;
++
++ au8RegBuf[0] = reg >> 8;
++ au8RegBuf[1] = reg & 0xff;
++
++ if (2 != i2c_master_send(ov5640_data.i2c_client, au8RegBuf, 2)) {
++ pr_err("%s:write reg error:reg=%x\n",
++ __func__, reg);
++ return -1;
++ }
++
++ if (1 != i2c_master_recv(ov5640_data.i2c_client, &u8RdVal, 1)) {
++ pr_err("%s:read reg error:reg=%x,val=%x\n",
++ __func__, reg, u8RdVal);
++ return -1;
++ }
++
++ *val = u8RdVal;
++
++ return u8RdVal;
++}
++
++static void ov5640_soft_reset(void)
++{
++ /* sysclk from pad */
++ ov5640_write_reg(0x3103, 0x11);
++
++ /* software reset */
++ ov5640_write_reg(0x3008, 0x82);
++
++ /* delay at least 5ms */
++ msleep(10);
++}
++
++/* set sensor driver capability
++ * 0x302c[7:6] - strength
++ *	00 - 1x
++ *	01 - 2x
++ *	10 - 3x
++ *	11 - 4x
++ */
++static int ov5640_driver_capability(int strength)
++{
++ u8 temp = 0;
++
++ if (strength > 4 || strength < 1) {
++ pr_err("The valid driver capability of ov5640 is 1x~4x\n");
++ return -EINVAL;
++ }
++
++ ov5640_read_reg(0x302c, &temp);
++
++ temp &= ~0xc0; /* clear [7:6] */
++ temp |= ((strength - 1) << 6); /* set [7:6] */
++
++ ov5640_write_reg(0x302c, temp);
++
++ return 0;
++}
++
++/* calculate sysclk */
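++/* sysclk is derived from the input clock (XVCLK) through the PLL:
++ *   VCO    = XVCLK * multiplier (0x3036) / pre-divider (0x3037[3:0])
++ *   sysclk = VCO / sysdiv (0x3035[7:4]) / pll_rdiv (0x3037[4] + 1) * 2
++ *            / bit_div2x (0x3034[3:0] / 2) / sclk_rdiv (1/2/4/8 from 0x3108[1:0])
++ * with XVCLK expressed in units of 10 kHz (mclk / 10000). */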
++static int ov5640_get_sysclk(void)
++{
++ int xvclk = ov5640_data.mclk / 10000;
++ int sysclk;
++ int temp1, temp2;
++ int Multiplier, PreDiv, VCO, SysDiv, Pll_rdiv, Bit_div2x, sclk_rdiv;
++ int sclk_rdiv_map[] = {1, 2, 4, 8};
++ u8 regval = 0;
++
++ temp1 = ov5640_read_reg(0x3034, &regval);
++ temp2 = temp1 & 0x0f;
++ if (temp2 == 8 || temp2 == 10) {
++ Bit_div2x = temp2 / 2;
++ } else {
++ pr_err("ov5640: unsupported bit mode %d\n", temp2);
++ return -1;
++ }
++
++ temp1 = ov5640_read_reg(0x3035, &regval);
++ SysDiv = temp1 >> 4;
++ if (SysDiv == 0)
++ SysDiv = 16;
++
++ temp1 = ov5640_read_reg(0x3036, &regval);
++ Multiplier = temp1;
++ temp1 = ov5640_read_reg(0x3037, &regval);
++ PreDiv = temp1 & 0x0f;
++ Pll_rdiv = ((temp1 >> 4) & 0x01) + 1;
++
++ temp1 = ov5640_read_reg(0x3108, &regval);
++ temp2 = temp1 & 0x03;
++
++ sclk_rdiv = sclk_rdiv_map[temp2];
++ VCO = xvclk * Multiplier / PreDiv;
++ sysclk = VCO / SysDiv / Pll_rdiv * 2 / Bit_div2x / sclk_rdiv;
++
++ return sysclk;
++}
++
++/* read HTS from register settings */
++static int ov5640_get_HTS(void)
++{
++ int HTS;
++ u8 temp = 0;
++
++ HTS = ov5640_read_reg(0x380c, &temp);
++ HTS = (HTS<<8) + ov5640_read_reg(0x380d, &temp);
++ return HTS;
++}
++
++/* read VTS from register settings */
++static int ov5640_get_VTS(void)
++{
++ int VTS;
++ u8 temp = 0;
++
++ VTS = ov5640_read_reg(0x380e, &temp);
++ VTS = (VTS<<8) + ov5640_read_reg(0x380f, &temp);
++
++ return VTS;
++}
++
++/* write VTS to registers */
++static int ov5640_set_VTS(int VTS)
++{
++ int temp;
++
++ temp = VTS & 0xff;
++ ov5640_write_reg(0x380f, temp);
++
++ temp = VTS>>8;
++ ov5640_write_reg(0x380e, temp);
++ return 0;
++}
++
++/* read shutter, in number of line period */
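++/* the shutter value is assembled from three registers:
++ * 0x3500[3:0] << 12 | 0x3501[7:0] << 4 | 0x3502[7:4] */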
++static int ov5640_get_shutter(void)
++{
++ int shutter;
++ u8 regval;
++
++	shutter = (ov5640_read_reg(0x3500, &regval) & 0x0f);
++
++ shutter = (shutter<<8) + ov5640_read_reg(0x3501, &regval);
++ shutter = (shutter<<4) + (ov5640_read_reg(0x3502, &regval)>>4);
++
++ return shutter;
++}
++
++/* write shutter, in number of line period */
++static int ov5640_set_shutter(int shutter)
++{
++ int temp;
++
++ shutter = shutter & 0xffff;
++ temp = shutter & 0x0f;
++ temp = temp<<4;
++ ov5640_write_reg(0x3502, temp);
++
++ temp = shutter & 0xfff;
++ temp = temp>>4;
++ ov5640_write_reg(0x3501, temp);
++
++ temp = shutter>>12;
++ ov5640_write_reg(0x3500, temp);
++
++ return 0;
++}
++
++/* read gain, 16 = 1x */
++static int ov5640_get_gain16(void)
++{
++ int gain16;
++ u8 regval;
++
++ gain16 = ov5640_read_reg(0x350a, &regval) & 0x03;
++ gain16 = (gain16<<8) + ov5640_read_reg(0x350b, &regval);
++
++ return gain16;
++}
++
++/* write gain, 16 = 1x */
++static int ov5640_set_gain16(int gain16)
++{
++ int temp;
++
++ gain16 = gain16 & 0x3ff;
++ temp = gain16 & 0xff;
++
++ ov5640_write_reg(0x350b, temp);
++ temp = gain16>>8;
++
++ ov5640_write_reg(0x350a, temp);
++ return 0;
++}
++
++/* get banding filter value */
++static int ov5640_get_light_freq(void)
++{
++ int temp, temp1, light_frequency;
++ u8 regval;
++
++ temp = ov5640_read_reg(0x3c01, &regval);
++ if (temp & 0x80) {
++ /* manual */
++ temp1 = ov5640_read_reg(0x3c00, &regval);
++ if (temp1 & 0x04) {
++ /* 50Hz */
++ light_frequency = 50;
++ } else {
++ /* 60Hz */
++ light_frequency = 60;
++ }
++ } else {
++ /* auto */
++ temp1 = ov5640_read_reg(0x3c0c, &regval);
++ if (temp1 & 0x01) {
++ /* 50Hz */
++ light_frequency = 50;
++ } else {
++ /* 60Hz */
++ light_frequency = 60;
++ }
++ }
++
++ return light_frequency;
++}
++
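++/*
++ * Program the 50 Hz/60 Hz banding (flicker) filter: one band step is the
++ * number of sensor lines exposed during one mains flicker period
++ * (1/100 s or 1/120 s), computed from the current sysclk and HTS; the
++ * maximum band count is limited by the frame length (VTS).
++ */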
++static void ov5640_set_bandingfilter(void)
++{
++ int prev_VTS;
++ int band_step60, max_band60, band_step50, max_band50;
++
++ /* read preview PCLK */
++ prev_sysclk = ov5640_get_sysclk();
++
++ /* read preview HTS */
++ prev_HTS = ov5640_get_HTS();
++
++ /* read preview VTS */
++ prev_VTS = ov5640_get_VTS();
++
++ /* calculate banding filter */
++ /* 60Hz */
++ band_step60 = prev_sysclk * 100/prev_HTS * 100/120;
++ ov5640_write_reg(0x3a0a, (band_step60 >> 8));
++ ov5640_write_reg(0x3a0b, (band_step60 & 0xff));
++
++ max_band60 = (int)((prev_VTS-4)/band_step60);
++ ov5640_write_reg(0x3a0d, max_band60);
++
++ /* 50Hz */
++ band_step50 = prev_sysclk * 100/prev_HTS;
++ ov5640_write_reg(0x3a08, (band_step50 >> 8));
++ ov5640_write_reg(0x3a09, (band_step50 & 0xff));
++
++ max_band50 = (int)((prev_VTS-4)/band_step50);
++ ov5640_write_reg(0x3a0e, max_band50);
++}
++
++/* set the AE stable range around the target (roughly 0.92x .. 1.08x) */
++static int ov5640_set_AE_target(int target)
++{
++ int fast_high, fast_low;
++
++ AE_low = target * 23 / 25; /* 0.92 */
++ AE_high = target * 27 / 25; /* 1.08 */
++ fast_high = AE_high << 1;
++
++ if (fast_high > 255)
++ fast_high = 255;
++ fast_low = AE_low >> 1;
++
++ ov5640_write_reg(0x3a0f, AE_high);
++ ov5640_write_reg(0x3a10, AE_low);
++ ov5640_write_reg(0x3a1b, AE_high);
++ ov5640_write_reg(0x3a1e, AE_low);
++ ov5640_write_reg(0x3a11, fast_high);
++ ov5640_write_reg(0x3a1f, fast_low);
++
++ return 0;
++}
++
++/* enable = 0 to turn off night mode
++ enable = 1 to turn on night mode */
++static int ov5640_set_night_mode(int enable)
++{
++ u8 mode;
++
++ ov5640_read_reg(0x3a00, &mode);
++
++ if (enable) {
++ /* night mode on */
++ mode |= 0x04;
++ ov5640_write_reg(0x3a00, mode);
++ } else {
++ /* night mode off */
++ mode &= 0xfb;
++ ov5640_write_reg(0x3a00, mode);
++ }
++
++ return 0;
++}
++
++/* enable = 0 to turn off AEC/AGC
++ enable = 1 to turn on AEC/AGC */
++void ov5640_turn_on_AE_AG(int enable)
++{
++ u8 ae_ag_ctrl;
++
++ ov5640_read_reg(0x3503, &ae_ag_ctrl);
++ if (enable) {
++ /* turn on auto AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl & ~(0x03);
++ } else {
++ /* turn off AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl | 0x03;
++ }
++ ov5640_write_reg(0x3503, ae_ag_ctrl);
++}
++
++/* download ov5640 settings to sensor through i2c */
++static int ov5640_download_firmware(struct reg_value *pModeSetting, s32 ArySize)
++{
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int i, retval = 0;
++
++ for (i = 0; i < ArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5640_read_reg(RegAddr, &RegVal);
++ if (retval < 0)
++ goto err;
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5640_write_reg(RegAddr, Val);
++ if (retval < 0)
++ goto err;
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++static int ov5640_init_mode(void)
++{
++ struct reg_value *pModeSetting = NULL;
++ int ArySize = 0, retval = 0;
++
++ ov5640_soft_reset();
++
++ pModeSetting = ov5640_global_init_setting;
++ ArySize = ARRAY_SIZE(ov5640_global_init_setting);
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ pModeSetting = ov5640_init_setting_30fps_VGA;
++ ArySize = ARRAY_SIZE(ov5640_init_setting_30fps_VGA);
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++	/* change driver capability to 2x, as determined on the validation board.
++	 * If the image is not stable, please increase the driver strength.
++	 */
++ ov5640_driver_capability(2);
++ ov5640_set_bandingfilter();
++ ov5640_set_AE_target(AE_Target);
++ ov5640_set_night_mode(night_mode);
++
++	/* skip 9 vsync: start capture at 10th vsync */
++ msleep(300);
++
++ /* turn off night mode */
++ night_mode = 0;
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++err:
++ return retval;
++}
++
++/* change to or back to subsampling mode: set the mode directly;
++ * image sizes below 1280 * 960 use subsampling mode */
++static int ov5640_change_mode_direct(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++
++ if (mode > ov5640_mode_MAX || mode < ov5640_mode_MIN) {
++ pr_err("Wrong ov5640 mode detected!\n");
++ return -1;
++ }
++
++ pModeSetting = ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width = ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height = ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* set ov5640 to subsampling mode */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++
++ /* turn on AE AG for subsampling mode, in case the firmware didn't */
++ ov5640_turn_on_AE_AG(1);
++
++ /* calculate banding filter */
++ ov5640_set_bandingfilter();
++
++ /* set AE target */
++ ov5640_set_AE_target(AE_Target);
++
++ /* update night mode setting */
++ ov5640_set_night_mode(night_mode);
++
++	/* skip 9 vsync: start capture at 10th vsync */
++ if (mode == ov5640_mode_XGA_1024_768 && frame_rate == ov5640_30_fps) {
++ pr_warning("ov5640: actual frame rate of XGA is 22.5fps\n");
++ /* 1/22.5 * 9*/
++ msleep(400);
++ return retval;
++ }
++
++ if (frame_rate == ov5640_15_fps) {
++ /* 1/15 * 9*/
++ msleep(600);
++ } else if (frame_rate == ov5640_30_fps) {
++ /* 1/30 * 9*/
++ msleep(300);
++ }
++
++ return retval;
++}
++
++/* change to scaling mode: go through the exposure calculation;
++ * image sizes above 1280 * 960 use scaling mode */
++static int ov5640_change_mode_exposure_calc(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ int prev_shutter, prev_gain16, average;
++ int cap_shutter, cap_gain16;
++ int cap_sysclk, cap_HTS, cap_VTS;
++ int light_freq, cap_bandfilt, cap_maxband;
++ long cap_gain16_shutter;
++ u8 temp;
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++
++	/* check if the input mode and frame rate are valid */
++ pModeSetting =
++ ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width =
++ ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height =
++ ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* read preview shutter */
++ prev_shutter = ov5640_get_shutter();
++
++ /* read preview gain */
++ prev_gain16 = ov5640_get_gain16();
++
++ /* get average */
++ average = ov5640_read_reg(0x56a1, &temp);
++
++ /* turn off night mode for capture */
++ ov5640_set_night_mode(0);
++
++ /* turn off overlay */
++ ov5640_write_reg(0x3022, 0x06);
++
++ /* Write capture setting */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ /* turn off AE AG when capture image. */
++ ov5640_turn_on_AE_AG(0);
++
++ /* read capture VTS */
++ cap_VTS = ov5640_get_VTS();
++ cap_HTS = ov5640_get_HTS();
++ cap_sysclk = ov5640_get_sysclk();
++
++ /* calculate capture banding filter */
++ light_freq = ov5640_get_light_freq();
++ if (light_freq == 60) {
++ /* 60Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS * 100 / 120;
++ } else {
++ /* 50Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS;
++ }
++ cap_maxband = (int)((cap_VTS - 4)/cap_bandfilt);
++ /* calculate capture shutter/gain16 */
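++	/* keep the total exposure (gain16 * shutter) constant across the mode
++	 * switch by scaling it with the sysclk ratio and the line-length (HTS)
++	 * ratio; if the preview luminance average already sits inside the AE
++	 * stable range, additionally scale it by AE_Target / average */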
++ if (average > AE_low && average < AE_high) {
++ /* in stable range */
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk *
++ prev_HTS/cap_HTS * AE_Target / average;
++ } else {
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk *
++ prev_HTS/cap_HTS;
++ }
++
++ /* gain to shutter */
++ if (cap_gain16_shutter < (cap_bandfilt * 16)) {
++ /* shutter < 1/100 */
++ cap_shutter = cap_gain16_shutter/16;
++ if (cap_shutter < 1)
++ cap_shutter = 1;
++ cap_gain16 = cap_gain16_shutter/cap_shutter;
++ if (cap_gain16 < 16)
++ cap_gain16 = 16;
++ } else {
++ if (cap_gain16_shutter > (cap_bandfilt*cap_maxband*16)) {
++ /* exposure reach max */
++ cap_shutter = cap_bandfilt*cap_maxband;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ } else {
++			/* 1/100 < cap_shutter <= max, cap_shutter = n/100 */
++ cap_shutter =
++ ((int)(cap_gain16_shutter/16/cap_bandfilt))
++ * cap_bandfilt;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ }
++ }
++
++ /* write capture gain */
++ ov5640_set_gain16(cap_gain16);
++
++ /* write capture shutter */
++ if (cap_shutter > (cap_VTS - 4)) {
++ cap_VTS = cap_shutter + 4;
++ ov5640_set_VTS(cap_VTS);
++ }
++
++ ov5640_set_shutter(cap_shutter);
++
++	/* skip 2 vsync: start capture at 3rd vsync
++ * frame rate of QSXGA and 1080P is 7.5fps: 1/7.5 * 2
++ */
++ pr_warning("ov5640: the actual frame rate of %s is 7.5fps\n",
++ mode == ov5640_mode_1080P_1920_1080 ? "1080P" : "QSXGA");
++ msleep(267);
++err:
++ return retval;
++}
++
++static int ov5640_change_mode(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ int retval = 0;
++
++ if (mode > ov5640_mode_MAX || mode < ov5640_mode_MIN) {
++ pr_err("Wrong ov5640 mode detected!\n");
++ return -1;
++ }
++
++ if (mode == ov5640_mode_1080P_1920_1080 ||
++ mode == ov5640_mode_QSXGA_2592_1944) {
++		/* change to scaling mode: go through the exposure calculation;
++		 * image sizes above 1280 * 960 use scaling mode */
++ retval = ov5640_change_mode_exposure_calc(frame_rate, mode);
++ } else {
++		/* change back to subsampling mode: download the firmware directly;
++		 * image sizes below 1280 * 960 use subsampling mode */
++ retval = ov5640_change_mode_direct(frame_rate, mode);
++ }
++
++ return retval;
++}
++
++/* --------------- IOCTL functions from v4l2_int_ioctl_desc --------------- */
++
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ memset(p, 0, sizeof(*p));
++ p->u.bt656.clock_curr = ov5640_data.mclk;
++ pr_debug(" clock_curr=mclk=%d\n", ov5640_data.mclk);
++ p->if_type = V4L2_IF_TYPE_BT656;
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.clock_min = OV5640_XCLK_MIN;
++ p->u.bt656.clock_max = OV5640_XCLK_MAX;
++ p->u.bt656.bt_sync_correct = 1; /* Indicate external vsync */
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_power - V4L2 sensor interface handler for VIDIOC_S_POWER ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @on: indicates power mode (on or off)
++ *
++ * Turns the power on or off, depending on the value of on and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor_data *sensor = s->priv;
++
++ if (on && !sensor->on) {
++ if (io_regulator)
++ if (regulator_enable(io_regulator) != 0)
++ return -EIO;
++ if (core_regulator)
++ if (regulator_enable(core_regulator) != 0)
++ return -EIO;
++ if (analog_regulator)
++ if (regulator_enable(analog_regulator) != 0)
++ return -EIO;
++ /* Make sure power on */
++ ov5640_power_down(0);
++ } else if (!on && sensor->on) {
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++ if (core_regulator)
++ regulator_disable(core_regulator);
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ ov5640_power_down(1);
++	}
++
++ sensor->on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++ int ret = 0;
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->streamcap.capability;
++ cparm->timeperframe = sensor->streamcap.timeperframe;
++ cparm->capturemode = sensor->streamcap.capturemode;
++ ret = 0;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
++	u32 tgt_fps; /* target frames per second */
++ enum ov5640_frame_rate frame_rate;
++ int ret = 0;
++
++ /* Make sure power on */
++ ov5640_power_down(0);
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ /* Check that the new frame rate is allowed. */
++ if ((timeperframe->numerator == 0) ||
++ (timeperframe->denominator == 0)) {
++ timeperframe->denominator = DEFAULT_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps > MAX_FPS) {
++ timeperframe->denominator = MAX_FPS;
++ timeperframe->numerator = 1;
++ } else if (tgt_fps < MIN_FPS) {
++ timeperframe->denominator = MIN_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ /* Actual frame rate we use */
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else {
++ pr_err(" The camera frame rate is not supported!\n");
++ return -EINVAL;
++ }
++
++ ret = ov5640_change_mode(frame_rate,
++ a->parm.capture.capturemode);
++ if (ret < 0)
++ return ret;
++
++ sensor->streamcap.timeperframe = *timeperframe;
++ sensor->streamcap.capturemode = a->parm.capture.capturemode;
++
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ pr_debug(" type is not " \
++ "V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
++ a->type);
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor_data *sensor = s->priv;
++
++ f->fmt.pix = sensor->pix;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ vc->value = ov5640_data.brightness;
++ break;
++ case V4L2_CID_HUE:
++ vc->value = ov5640_data.hue;
++ break;
++ case V4L2_CID_CONTRAST:
++ vc->value = ov5640_data.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ vc->value = ov5640_data.saturation;
++ break;
++ case V4L2_CID_RED_BALANCE:
++ vc->value = ov5640_data.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ vc->value = ov5640_data.blue;
++ break;
++ case V4L2_CID_EXPOSURE:
++ vc->value = ov5640_data.ae_mode;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++
++ pr_debug("In ov5640:ioctl_s_ctrl %d\n",
++ vc->id);
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ break;
++ case V4L2_CID_CONTRAST:
++ break;
++ case V4L2_CID_SATURATION:
++ break;
++ case V4L2_CID_HUE:
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_RED_BALANCE:
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ break;
++ case V4L2_CID_GAMMA:
++ break;
++ case V4L2_CID_EXPOSURE:
++ break;
++ case V4L2_CID_AUTOGAIN:
++ break;
++ case V4L2_CID_GAIN:
++ break;
++ case V4L2_CID_HFLIP:
++ break;
++ case V4L2_CID_VFLIP:
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fsize->pixel_format = ov5640_data.pix.pixelformat;
++ fsize->discrete.width =
++ max(ov5640_mode_info_data[0][fsize->index].width,
++ ov5640_mode_info_data[1][fsize->index].width);
++ fsize->discrete.height =
++ max(ov5640_mode_info_data[0][fsize->index].height,
++ ov5640_mode_info_data[1][fsize->index].height);
++ return 0;
++}
++
++/*!
++ * ioctl_enum_frameintervals - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMEINTERVALS ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fival: standard V4L2 VIDIOC_ENUM_FRAMEINTERVALS ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_frameintervals(struct v4l2_int_device *s,
++ struct v4l2_frmivalenum *fival)
++{
++ int i, j, count;
++
++ if (fival->index < 0 || fival->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ if (fival->width == 0 || fival->height == 0 ||
++ fival->pixel_format == 0) {
++ pr_warning("Please assign pixelformat, width and height.\n");
++ return -EINVAL;
++ }
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++
++ count = 0;
++ for (i = 0; i < ARRAY_SIZE(ov5640_mode_info_data); i++) {
++ for (j = 0; j < (ov5640_mode_MAX + 1); j++) {
++ if (fival->pixel_format == ov5640_data.pix.pixelformat
++ && fival->width == ov5640_mode_info_data[i][j].width
++ && fival->height == ov5640_mode_info_data[i][j].height
++ && ov5640_mode_info_data[i][j].init_data_ptr != NULL) {
++ count++;
++ }
++ if (fival->index == (count - 1)) {
++ fival->discrete.denominator =
++ ov5640_framerates[i];
++ return 0;
++ }
++ }
++ }
++
++ return -EINVAL;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name, "ov5640_camera");
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++
++ return 0;
++}
++
++/*!
++ * ioctl_enum_fmt_cap - V4L2 sensor interface handler for VIDIOC_ENUM_FMT
++ * @s: pointer to standard V4L2 device structure
++ * @fmt: pointer to standard V4L2 fmt description structure
++ *
++ * Return 0.
++ */
++static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fmt->pixelformat = ov5640_data.pix.pixelformat;
++
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when the slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ struct sensor_data *sensor = s->priv;
++ u32 tgt_xclk; /* target xclk */
++	u32 tgt_fps; /* target frames per second */
++ enum ov5640_frame_rate frame_rate;
++ int ret;
++
++ ov5640_data.on = true;
++
++ /* mclk */
++ tgt_xclk = ov5640_data.mclk;
++ tgt_xclk = min(tgt_xclk, (u32)OV5640_XCLK_MAX);
++ tgt_xclk = max(tgt_xclk, (u32)OV5640_XCLK_MIN);
++ ov5640_data.mclk = tgt_xclk;
++
++ pr_debug(" Setting mclk to %d MHz\n", tgt_xclk / 1000000);
++ clk_set_rate(ov5640_data.sensor_clk, ov5640_data.mclk);
++
++ /* Default camera frame rate is set in probe */
++ tgt_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else
++ return -EINVAL; /* Only support 15fps or 30fps now. */
++
++ ret = ov5640_init_mode();
++ return ret;
++}
++
++/*!
++ * ioctl_dev_exit - V4L2 sensor interface handler for vidioc_int_dev_exit_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Deinitialise the device when the slave detaches from the master.
++ */
++static int ioctl_dev_exit(struct v4l2_int_device *s)
++{
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module and links them to the
++ * enumeration.
++ */
++static struct v4l2_int_ioctl_desc ov5640_ioctl_desc[] = {
++ { vidioc_int_dev_init_num,
++ (v4l2_int_ioctl_func *)ioctl_dev_init },
++ { vidioc_int_dev_exit_num,
++ ioctl_dev_exit},
++ { vidioc_int_s_power_num,
++ (v4l2_int_ioctl_func *)ioctl_s_power },
++ { vidioc_int_g_ifparm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ifparm },
++ { vidioc_int_init_num,
++ (v4l2_int_ioctl_func *)ioctl_init },
++ { vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
++ { vidioc_int_g_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
++ { vidioc_int_g_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_parm },
++ { vidioc_int_s_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_s_parm },
++ { vidioc_int_g_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ctrl },
++ { vidioc_int_s_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_s_ctrl },
++ { vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_framesizes },
++ { vidioc_int_enum_frameintervals_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_frameintervals },
++ { vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *)ioctl_g_chip_ident },
++};
++
++static struct v4l2_int_slave ov5640_slave = {
++ .ioctls = ov5640_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(ov5640_ioctl_desc),
++};
++
++static struct v4l2_int_device ov5640_int_device = {
++ .module = THIS_MODULE,
++ .name = "ov5640",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &ov5640_slave,
++ },
++};
++
++/*!
++ * ov5640 I2C probe function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct pinctrl *pinctrl;
++ struct device *dev = &client->dev;
++ int retval;
++ u8 chip_id_high, chip_id_low;
++
++ /* ov5640 pinctrl */
++ pinctrl = devm_pinctrl_get_select_default(dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(dev, "setup pinctrl failed\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_err(dev, "no sensor pwdn pin available\n");
++ return -ENODEV;
++ }
++ retval = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_pwdn");
++ if (retval < 0)
++ return retval;
++
++ /* request reset pin */
++ rst_gpio = of_get_named_gpio(dev->of_node, "rst-gpios", 0);
++ if (!gpio_is_valid(rst_gpio)) {
++ dev_err(dev, "no sensor reset pin available\n");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_reset");
++ if (retval < 0)
++ return retval;
++
++ /* Set initial values for the sensor struct. */
++ memset(&ov5640_data, 0, sizeof(ov5640_data));
++ ov5640_data.sensor_clk = devm_clk_get(dev, "csi_mclk");
++ if (IS_ERR(ov5640_data.sensor_clk)) {
++ dev_err(dev, "get mclk failed\n");
++ return PTR_ERR(ov5640_data.sensor_clk);
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk",
++ &ov5640_data.mclk);
++ if (retval) {
++ dev_err(dev, "mclk frequency is invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk_source",
++ (u32 *) &(ov5640_data.mclk_source));
++ if (retval) {
++ dev_err(dev, "mclk_source invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "csi_id",
++ &(ov5640_data.csi));
++ if (retval) {
++ dev_err(dev, "csi_id invalid\n");
++ return retval;
++ }
++
++ clk_prepare_enable(ov5640_data.sensor_clk);
++
++ ov5640_data.io_init = ov5640_reset;
++ ov5640_data.i2c_client = client;
++ ov5640_data.pix.pixelformat = V4L2_PIX_FMT_YUYV;
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++ ov5640_data.streamcap.capability = V4L2_MODE_HIGHQUALITY |
++ V4L2_CAP_TIMEPERFRAME;
++ ov5640_data.streamcap.capturemode = 0;
++ ov5640_data.streamcap.timeperframe.denominator = DEFAULT_FPS;
++ ov5640_data.streamcap.timeperframe.numerator = 1;
++
++ ov5640_regulator_enable(&client->dev);
++
++ ov5640_reset();
++
++ ov5640_power_down(0);
++
++ retval = ov5640_read_reg(OV5640_CHIP_ID_HIGH_BYTE, &chip_id_high);
++ if (retval < 0 || chip_id_high != 0x56) {
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ pr_warning("camera ov5640 is not found\n");
++ return -ENODEV;
++ }
++ retval = ov5640_read_reg(OV5640_CHIP_ID_LOW_BYTE, &chip_id_low);
++ if (retval < 0 || chip_id_low != 0x40) {
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ pr_warning("camera ov5640 is not found\n");
++ return -ENODEV;
++ }
++
++ ov5640_power_down(1);
++
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++
++ ov5640_int_device.priv = &ov5640_data;
++ retval = v4l2_int_device_register(&ov5640_int_device);
++
++ pr_info("camera ov5640 is found\n");
++ return retval;
++}
++
++/*!
++ * ov5640 I2C detach function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_remove(struct i2c_client *client)
++{
++ v4l2_int_device_unregister(&ov5640_int_device);
++
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++
++ if (core_regulator)
++ regulator_disable(core_regulator);
++
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ return 0;
++}
++
++module_i2c_driver(ov5640_i2c_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("OV5640 Camera Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.0");
++MODULE_ALIAS("CSI");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ov5640_mipi.c linux-3.14.40/drivers/media/platform/mxc/capture/ov5640_mipi.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ov5640_mipi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ov5640_mipi.c 2015-05-01 14:57:59.267427001 -0500
+@@ -0,0 +1,2104 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++#include <linux/of_device.h>
++#include <linux/i2c.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <linux/fsl_devices.h>
++#include <linux/mipi_csi2.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define OV5640_VOLTAGE_ANALOG 2800000
++#define OV5640_VOLTAGE_DIGITAL_CORE 1500000
++#define OV5640_VOLTAGE_DIGITAL_IO 1800000
++
++#define MIN_FPS 15
++#define MAX_FPS 30
++#define DEFAULT_FPS 30
++
++#define OV5640_XCLK_MIN 6000000
++#define OV5640_XCLK_MAX 24000000
++
++#define OV5640_CHIP_ID_HIGH_BYTE 0x300A
++#define OV5640_CHIP_ID_LOW_BYTE 0x300B
++
++enum ov5640_mode {
++ ov5640_mode_MIN = 0,
++ ov5640_mode_VGA_640_480 = 0,
++ ov5640_mode_QVGA_320_240 = 1,
++ ov5640_mode_NTSC_720_480 = 2,
++ ov5640_mode_PAL_720_576 = 3,
++ ov5640_mode_720P_1280_720 = 4,
++ ov5640_mode_1080P_1920_1080 = 5,
++ ov5640_mode_QSXGA_2592_1944 = 6,
++ ov5640_mode_QCIF_176_144 = 7,
++ ov5640_mode_XGA_1024_768 = 8,
++ ov5640_mode_MAX = 8,
++ ov5640_mode_INIT = 0xff, /*only for sensor init*/
++};
++
++enum ov5640_frame_rate {
++ ov5640_15_fps,
++ ov5640_30_fps
++};
++
++/* Image sizes below 1280 * 960 use SUBSAMPLING;
++ * image sizes at or above 1280 * 960 use SCALING.
++ */
++enum ov5640_downsize_mode {
++ SUBSAMPLING,
++ SCALING,
++};
++
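++/* Each entry in the register tables below describes one register write: a
++ * 16-bit register address, the value to program, an optional mask (non-zero
++ * means read-modify-write, only the masked bits are updated) and an optional
++ * delay in milliseconds applied after the write. The tables are walked by
++ * ov5640_download_firmware().
++ */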
++struct reg_value {
++ u16 u16RegAddr;
++ u8 u8Val;
++ u8 u8Mask;
++ u32 u32Delay_ms;
++};
++
++struct ov5640_mode_info {
++ enum ov5640_mode mode;
++ enum ov5640_downsize_mode dn_mode;
++ u32 width;
++ u32 height;
++ struct reg_value *init_data_ptr;
++ u32 init_data_size;
++};
++
++/*!
++ * Maintains the information on the current state of the sensor.
++ */
++static struct sensor_data ov5640_data;
++static int pwn_gpio, rst_gpio;
++
++static struct reg_value ov5640_init_setting_30fps_VGA[] = {
++
++ {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
++ {0x3034, 0x18, 0, 0}, {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0},
++ {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0}, {0x3630, 0x36, 0, 0},
++ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
++ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
++ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
++ {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0}, {0x3906, 0x10, 0, 0},
++ {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0}, {0x3600, 0x08, 0, 0},
++ {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0}, {0x3620, 0x52, 0, 0},
++ {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0}, {0x3a13, 0x43, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0xf8, 0, 0}, {0x3635, 0x13, 0, 0},
++ {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0}, {0x3622, 0x01, 0, 0},
++ {0x3c01, 0xa4, 0, 0}, {0x3c04, 0x28, 0, 0}, {0x3c05, 0x98, 0, 0},
++ {0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
++ {0x300e, 0x45, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x4837, 0x0a, 0, 0}, {0x4800, 0x04, 0, 0}, {0x3824, 0x02, 0, 0},
++ {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
++ {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
++ {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
++ {0x5187, 0x09, 0, 0}, {0x5188, 0x09, 0, 0}, {0x5189, 0x88, 0, 0},
++ {0x518a, 0x54, 0, 0}, {0x518b, 0xee, 0, 0}, {0x518c, 0xb2, 0, 0},
++ {0x518d, 0x50, 0, 0}, {0x518e, 0x34, 0, 0}, {0x518f, 0x6b, 0, 0},
++ {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0},
++ {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0},
++ {0x5199, 0x6c, 0, 0}, {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0},
++ {0x519c, 0x09, 0, 0}, {0x519d, 0x2b, 0, 0}, {0x519e, 0x38, 0, 0},
++ {0x5381, 0x1e, 0, 0}, {0x5382, 0x5b, 0, 0}, {0x5383, 0x08, 0, 0},
++ {0x5384, 0x0a, 0, 0}, {0x5385, 0x7e, 0, 0}, {0x5386, 0x88, 0, 0},
++ {0x5387, 0x7c, 0, 0}, {0x5388, 0x6c, 0, 0}, {0x5389, 0x10, 0, 0},
++ {0x538a, 0x01, 0, 0}, {0x538b, 0x98, 0, 0}, {0x5300, 0x08, 0, 0},
++ {0x5301, 0x30, 0, 0}, {0x5302, 0x10, 0, 0}, {0x5303, 0x00, 0, 0},
++ {0x5304, 0x08, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x08, 0, 0},
++ {0x5307, 0x16, 0, 0}, {0x5309, 0x08, 0, 0}, {0x530a, 0x30, 0, 0},
++ {0x530b, 0x04, 0, 0}, {0x530c, 0x06, 0, 0}, {0x5480, 0x01, 0, 0},
++ {0x5481, 0x08, 0, 0}, {0x5482, 0x14, 0, 0}, {0x5483, 0x28, 0, 0},
++ {0x5484, 0x51, 0, 0}, {0x5485, 0x65, 0, 0}, {0x5486, 0x71, 0, 0},
++ {0x5487, 0x7d, 0, 0}, {0x5488, 0x87, 0, 0}, {0x5489, 0x91, 0, 0},
++ {0x548a, 0x9a, 0, 0}, {0x548b, 0xaa, 0, 0}, {0x548c, 0xb8, 0, 0},
++ {0x548d, 0xcd, 0, 0}, {0x548e, 0xdd, 0, 0}, {0x548f, 0xea, 0, 0},
++ {0x5490, 0x1d, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x10, 0, 0}, {0x5589, 0x10, 0, 0}, {0x558a, 0x00, 0, 0},
++ {0x558b, 0xf8, 0, 0}, {0x5800, 0x23, 0, 0}, {0x5801, 0x14, 0, 0},
++ {0x5802, 0x0f, 0, 0}, {0x5803, 0x0f, 0, 0}, {0x5804, 0x12, 0, 0},
++ {0x5805, 0x26, 0, 0}, {0x5806, 0x0c, 0, 0}, {0x5807, 0x08, 0, 0},
++ {0x5808, 0x05, 0, 0}, {0x5809, 0x05, 0, 0}, {0x580a, 0x08, 0, 0},
++ {0x580b, 0x0d, 0, 0}, {0x580c, 0x08, 0, 0}, {0x580d, 0x03, 0, 0},
++ {0x580e, 0x00, 0, 0}, {0x580f, 0x00, 0, 0}, {0x5810, 0x03, 0, 0},
++ {0x5811, 0x09, 0, 0}, {0x5812, 0x07, 0, 0}, {0x5813, 0x03, 0, 0},
++ {0x5814, 0x00, 0, 0}, {0x5815, 0x01, 0, 0}, {0x5816, 0x03, 0, 0},
++ {0x5817, 0x08, 0, 0}, {0x5818, 0x0d, 0, 0}, {0x5819, 0x08, 0, 0},
++ {0x581a, 0x05, 0, 0}, {0x581b, 0x06, 0, 0}, {0x581c, 0x08, 0, 0},
++ {0x581d, 0x0e, 0, 0}, {0x581e, 0x29, 0, 0}, {0x581f, 0x17, 0, 0},
++ {0x5820, 0x11, 0, 0}, {0x5821, 0x11, 0, 0}, {0x5822, 0x15, 0, 0},
++ {0x5823, 0x28, 0, 0}, {0x5824, 0x46, 0, 0}, {0x5825, 0x26, 0, 0},
++ {0x5826, 0x08, 0, 0}, {0x5827, 0x26, 0, 0}, {0x5828, 0x64, 0, 0},
++ {0x5829, 0x26, 0, 0}, {0x582a, 0x24, 0, 0}, {0x582b, 0x22, 0, 0},
++ {0x582c, 0x24, 0, 0}, {0x582d, 0x24, 0, 0}, {0x582e, 0x06, 0, 0},
++ {0x582f, 0x22, 0, 0}, {0x5830, 0x40, 0, 0}, {0x5831, 0x42, 0, 0},
++ {0x5832, 0x24, 0, 0}, {0x5833, 0x26, 0, 0}, {0x5834, 0x24, 0, 0},
++ {0x5835, 0x22, 0, 0}, {0x5836, 0x22, 0, 0}, {0x5837, 0x26, 0, 0},
++ {0x5838, 0x44, 0, 0}, {0x5839, 0x24, 0, 0}, {0x583a, 0x26, 0, 0},
++ {0x583b, 0x28, 0, 0}, {0x583c, 0x42, 0, 0}, {0x583d, 0xce, 0, 0},
++ {0x5025, 0x00, 0, 0}, {0x3a0f, 0x30, 0, 0}, {0x3a10, 0x28, 0, 0},
++ {0x3a1b, 0x30, 0, 0}, {0x3a1e, 0x26, 0, 0}, {0x3a11, 0x60, 0, 0},
++ {0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
++};
++
++static struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
++
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x04, 0, 0}, {0x380f, 0x38, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
++
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x04, 0, 0}, {0x380f, 0x38, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
++ {0x380b, 0x00, 0, 0}, {0x3035, 0x12, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3808, 0x04, 0, 0},
++ {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
++ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++static struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
++ {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
++ {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
++ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
++ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
++ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0}, {0x4005, 0x1a, 0, 0},
++ {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
++ {0x3035, 0x41, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
++ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
++ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
++ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
++};
++
++static struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
++ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
++ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
++ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x11, 0, 0},
++ {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
++ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
++ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0}, {0x3808, 0x07, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
++ {0x380c, 0x09, 0, 0}, {0x380d, 0xc4, 0, 0}, {0x380e, 0x04, 0, 0},
++ {0x380f, 0x60, 0, 0}, {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
++ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
++ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
++ {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
++ {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
++ {0x3503, 0, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
++ {0x3008, 0x42, 0, 0},
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
++ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
++ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
++ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x21, 0, 0},
++ {0x3036, 0x54, 0, 1}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
++ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
++ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0}, {0x3808, 0x07, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
++ {0x380c, 0x09, 0, 0}, {0x380d, 0xc4, 0, 0}, {0x380e, 0x04, 0, 0},
++ {0x380f, 0x60, 0, 0}, {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
++ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
++ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
++ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
++ {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
++ {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
++};
++
++static struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
++ {0x4202, 0x0f, 0, 0}, /* stream off the sensor */
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, /*disable flip*/
++ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
++ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
++ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
++ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
++ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
++ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
++ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
++ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
++ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
++ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
++ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
++ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
++ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
++ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70},
++ {0x4202, 0x00, 0, 0}, /* stream on the sensor */
++};
++
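++/* The mode tables are indexed as [frame_rate][mode]: index 0 holds the 15 fps
++ * register sets and index 1 the 30 fps ones, matching enum ov5640_frame_rate.
++ * QSXGA 2592x1944 has no 30 fps variant, so that slot is a NULL placeholder.
++ */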
++static struct ov5640_mode_info ov5640_mode_info_data[2][ov5640_mode_MAX + 1] = {
++ {
++ {ov5640_mode_VGA_640_480, SUBSAMPLING, 640, 480,
++ ov5640_setting_15fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_15fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, SUBSAMPLING, 320, 240,
++ ov5640_setting_15fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_15fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, SUBSAMPLING, 720, 480,
++ ov5640_setting_15fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_15fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, SUBSAMPLING, 720, 576,
++ ov5640_setting_15fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_15fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, SUBSAMPLING, 1280, 720,
++ ov5640_setting_15fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_15fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, SCALING, 1920, 1080,
++ ov5640_setting_15fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5640_setting_15fps_1080P_1920_1080)},
++ {ov5640_mode_QSXGA_2592_1944, SCALING, 2592, 1944,
++ ov5640_setting_15fps_QSXGA_2592_1944,
++ ARRAY_SIZE(ov5640_setting_15fps_QSXGA_2592_1944)},
++ {ov5640_mode_QCIF_176_144, SUBSAMPLING, 176, 144,
++ ov5640_setting_15fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_15fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, SUBSAMPLING, 1024, 768,
++ ov5640_setting_15fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_15fps_XGA_1024_768)},
++ },
++ {
++ {ov5640_mode_VGA_640_480, SUBSAMPLING, 640, 480,
++ ov5640_setting_30fps_VGA_640_480,
++ ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480)},
++ {ov5640_mode_QVGA_320_240, SUBSAMPLING, 320, 240,
++ ov5640_setting_30fps_QVGA_320_240,
++ ARRAY_SIZE(ov5640_setting_30fps_QVGA_320_240)},
++ {ov5640_mode_NTSC_720_480, SUBSAMPLING, 720, 480,
++ ov5640_setting_30fps_NTSC_720_480,
++ ARRAY_SIZE(ov5640_setting_30fps_NTSC_720_480)},
++ {ov5640_mode_PAL_720_576, SUBSAMPLING, 720, 576,
++ ov5640_setting_30fps_PAL_720_576,
++ ARRAY_SIZE(ov5640_setting_30fps_PAL_720_576)},
++ {ov5640_mode_720P_1280_720, SUBSAMPLING, 1280, 720,
++ ov5640_setting_30fps_720P_1280_720,
++ ARRAY_SIZE(ov5640_setting_30fps_720P_1280_720)},
++ {ov5640_mode_1080P_1920_1080, SCALING, 1920, 1080,
++ ov5640_setting_30fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5640_setting_30fps_1080P_1920_1080)},
++ {ov5640_mode_QSXGA_2592_1944, -1, 0, 0, NULL, 0},
++ {ov5640_mode_QCIF_176_144, SUBSAMPLING, 176, 144,
++ ov5640_setting_30fps_QCIF_176_144,
++ ARRAY_SIZE(ov5640_setting_30fps_QCIF_176_144)},
++ {ov5640_mode_XGA_1024_768, SUBSAMPLING, 1024, 768,
++ ov5640_setting_30fps_XGA_1024_768,
++ ARRAY_SIZE(ov5640_setting_30fps_XGA_1024_768)},
++ },
++};
++
++static struct regulator *io_regulator;
++static struct regulator *core_regulator;
++static struct regulator *analog_regulator;
++static struct regulator *gpo_regulator;
++
++static int ov5640_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *device_id);
++static int ov5640_remove(struct i2c_client *client);
++
++static s32 ov5640_read_reg(u16 reg, u8 *val);
++static s32 ov5640_write_reg(u16 reg, u8 val);
++
++static const struct i2c_device_id ov5640_id[] = {
++ {"ov5640_mipi", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5640_id);
++
++static struct i2c_driver ov5640_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "ov5640_mipi",
++ },
++ .probe = ov5640_probe,
++ .remove = ov5640_remove,
++ .id_table = ov5640_id,
++};
++
++static void ov5640_standby(s32 enable)
++{
++ if (enable)
++ gpio_set_value(pwn_gpio, 1);
++ else
++ gpio_set_value(pwn_gpio, 0);
++
++ msleep(2);
++}
++
++static void ov5640_reset(void)
++{
++ /* camera reset */
++ gpio_set_value(rst_gpio, 1);
++
++ /* camera power down */
++ gpio_set_value(pwn_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 0);
++ msleep(5);
++
++ gpio_set_value(rst_gpio, 0);
++ msleep(1);
++
++ gpio_set_value(rst_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 1);
++}
++
++static int ov5640_power_on(struct device *dev)
++{
++ int ret = 0;
++
++ io_regulator = devm_regulator_get(dev, "DOVDD");
++ if (!IS_ERR(io_regulator)) {
++ regulator_set_voltage(io_regulator,
++ OV5640_VOLTAGE_DIGITAL_IO,
++ OV5640_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(io_regulator);
++ if (ret) {
++ pr_err("%s:io set voltage error\n", __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:io set voltage ok\n", __func__);
++ }
++ } else {
++ pr_err("%s: cannot get io voltage error\n", __func__);
++ io_regulator = NULL;
++ }
++
++ core_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(core_regulator)) {
++ regulator_set_voltage(core_regulator,
++ OV5640_VOLTAGE_DIGITAL_CORE,
++ OV5640_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(core_regulator);
++ if (ret) {
++ pr_err("%s:core set voltage error\n", __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:core set voltage ok\n", __func__);
++ }
++ } else {
++ core_regulator = NULL;
++ pr_err("%s: cannot get core voltage error\n", __func__);
++ }
++
++ analog_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(analog_regulator)) {
++ regulator_set_voltage(analog_regulator,
++ OV5640_VOLTAGE_ANALOG,
++ OV5640_VOLTAGE_ANALOG);
++ ret = regulator_enable(analog_regulator);
++ if (ret) {
++ pr_err("%s:analog set voltage error\n",
++ __func__);
++ return ret;
++ } else {
++ dev_dbg(dev,
++ "%s:analog set voltage ok\n", __func__);
++ }
++ } else {
++ analog_regulator = NULL;
++ pr_err("%s: cannot get analog voltage error\n", __func__);
++ }
++
++ return ret;
++}
++
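++/* Register access over I2C: the 16-bit register address is sent high byte
++ * first, followed by the data byte for a write (one 3-byte i2c_master_send),
++ * or followed by a separate 1-byte i2c_master_recv for a read.
++ */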
++static s32 ov5640_write_reg(u16 reg, u8 val)
++{
++ u8 au8Buf[3] = {0};
++
++ au8Buf[0] = reg >> 8;
++ au8Buf[1] = reg & 0xff;
++ au8Buf[2] = val;
++
++ if (i2c_master_send(ov5640_data.i2c_client, au8Buf, 3) < 0) {
++ pr_err("%s:write reg error:reg=%x,val=%x\n",
++ __func__, reg, val);
++ return -1;
++ }
++
++ return 0;
++}
++
++static s32 ov5640_read_reg(u16 reg, u8 *val)
++{
++ u8 au8RegBuf[2] = {0};
++ u8 u8RdVal = 0;
++
++ au8RegBuf[0] = reg >> 8;
++ au8RegBuf[1] = reg & 0xff;
++
++ if (2 != i2c_master_send(ov5640_data.i2c_client, au8RegBuf, 2)) {
++ pr_err("%s:write reg error:reg=%x\n",
++ __func__, reg);
++ return -1;
++ }
++
++ if (1 != i2c_master_recv(ov5640_data.i2c_client, &u8RdVal, 1)) {
++ pr_err("%s:read reg error:reg=%x,val=%x\n",
++ __func__, reg, u8RdVal);
++ return -1;
++ }
++
++ *val = u8RdVal;
++
++ return u8RdVal;
++}
++
++static int prev_sysclk, prev_HTS;
++static int AE_low, AE_high, AE_Target = 52;
++
++void OV5640_stream_on(void)
++{
++ ov5640_write_reg(0x4202, 0x00);
++}
++
++void OV5640_stream_off(void)
++{
++ ov5640_write_reg(0x4202, 0x0f);
++}
++
++
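++/* Derive the sensor system clock from the PLL registers:
++ *   VCO = xvclk * multiplier / pre-div
++ *   sysclk = VCO / sys-div / pll_rdiv * 2 / bit_div2x / sclk_rdiv
++ * As a rough check against the 30 fps VGA init table above (0x3034=0x18,
++ * 0x3035=0x14, 0x3036=0x38, 0x3037=0x13, 0x3108=0x01), assuming a 24 MHz
++ * xvclk: VCO = 2400 * 56 / 3 = 44800 and
++ * sysclk = 44800 / 1 / 2 * 2 / 4 / 2 = 5600, i.e. about 56 MHz, since the
++ * function works in units of 10 kHz.
++ */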
++int OV5640_get_sysclk(void)
++{
++ /* calculate sysclk */
++ int xvclk = ov5640_data.mclk / 10000;
++ int temp1, temp2;
++ int Multiplier, PreDiv, VCO, SysDiv, Pll_rdiv;
++ int Bit_div2x = 1, sclk_rdiv, sysclk;
++ u8 temp;
++
++ int sclk_rdiv_map[] = {1, 2, 4, 8};
++
++ temp1 = ov5640_read_reg(0x3034, &temp);
++ temp2 = temp1 & 0x0f;
++ if (temp2 == 8 || temp2 == 10)
++ Bit_div2x = temp2 / 2;
++
++ temp1 = ov5640_read_reg(0x3035, &temp);
++ SysDiv = temp1>>4;
++ if (SysDiv == 0)
++ SysDiv = 16;
++
++ temp1 = ov5640_read_reg(0x3036, &temp);
++ Multiplier = temp1;
++
++ temp1 = ov5640_read_reg(0x3037, &temp);
++ PreDiv = temp1 & 0x0f;
++ Pll_rdiv = ((temp1 >> 4) & 0x01) + 1;
++
++ temp1 = ov5640_read_reg(0x3108, &temp);
++ temp2 = temp1 & 0x03;
++ sclk_rdiv = sclk_rdiv_map[temp2];
++
++ VCO = xvclk * Multiplier / PreDiv;
++
++ sysclk = VCO / SysDiv / Pll_rdiv * 2 / Bit_div2x / sclk_rdiv;
++
++ return sysclk;
++}
++
++void OV5640_set_night_mode(void)
++{
++ /* disable night mode: clear bit 2 of register 0x3a00 */
++ u8 mode;
++
++ ov5640_read_reg(0x3a00, &mode);
++ mode &= 0xfb;
++ ov5640_write_reg(0x3a00, mode);
++}
++
++int OV5640_get_HTS(void)
++{
++ /* read HTS from register settings */
++ int HTS;
++ u8 temp;
++
++ HTS = ov5640_read_reg(0x380c, &temp);
++ HTS = (HTS<<8) + ov5640_read_reg(0x380d, &temp);
++
++ return HTS;
++}
++
++int OV5640_get_VTS(void)
++{
++ /* read VTS from register settings */
++ int VTS;
++ u8 temp;
++
++ /* total vertical size[15:8] high byte */
++ VTS = ov5640_read_reg(0x380e, &temp);
++
++ VTS = (VTS<<8) + ov5640_read_reg(0x380f, &temp);
++
++ return VTS;
++}
++
++int OV5640_set_VTS(int VTS)
++{
++ /* write VTS to registers */
++ int temp;
++
++ temp = VTS & 0xff;
++ ov5640_write_reg(0x380f, temp);
++
++ temp = VTS>>8;
++ ov5640_write_reg(0x380e, temp);
++
++ return 0;
++}
++
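++/* The exposure field spans three registers: 0x3500[3:0] holds the top bits,
++ * 0x3501 the middle byte and 0x3502[7:4] the low bits; the remaining low
++ * nibble of 0x3502 is dropped, so the shutter value handled here counts
++ * whole line periods, as noted in the comments below.
++ */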
++int OV5640_get_shutter(void)
++{
++ /* read shutter, in number of line period */
++ int shutter;
++ u8 temp;
++
++ shutter = (ov5640_read_reg(0x03500, &temp) & 0x0f);
++ shutter = (shutter<<8) + ov5640_read_reg(0x3501, &temp);
++ shutter = (shutter<<4) + (ov5640_read_reg(0x3502, &temp)>>4);
++
++ return shutter;
++}
++
++int OV5640_set_shutter(int shutter)
++{
++ /* write shutter, in number of line period */
++ int temp;
++
++ shutter = shutter & 0xffff;
++
++ temp = shutter & 0x0f;
++ temp = temp<<4;
++ ov5640_write_reg(0x3502, temp);
++
++ temp = shutter & 0xfff;
++ temp = temp>>4;
++ ov5640_write_reg(0x3501, temp);
++
++ temp = shutter>>12;
++ ov5640_write_reg(0x3500, temp);
++
++ return 0;
++}
++
++int OV5640_get_gain16(void)
++{
++ /* read gain, 16 = 1x */
++ int gain16;
++ u8 temp;
++
++ gain16 = ov5640_read_reg(0x350a, &temp) & 0x03;
++ gain16 = (gain16<<8) + ov5640_read_reg(0x350b, &temp);
++
++ return gain16;
++}
++
++int OV5640_set_gain16(int gain16)
++{
++ /* write gain, 16 = 1x */
++ u8 temp;
++ gain16 = gain16 & 0x3ff;
++
++ temp = gain16 & 0xff;
++ ov5640_write_reg(0x350b, temp);
++
++ temp = gain16>>8;
++ ov5640_write_reg(0x350a, temp);
++
++ return 0;
++}
++
++int OV5640_get_light_freq(void)
++{
++ /* get banding filter value */
++ int temp, temp1, light_freq = 0;
++ u8 tmp;
++
++ temp = ov5640_read_reg(0x3c01, &tmp);
++
++ if (temp & 0x80) {
++ /* manual */
++ temp1 = ov5640_read_reg(0x3c00, &tmp);
++ if (temp1 & 0x04) {
++ /* 50Hz */
++ light_freq = 50;
++ } else {
++ /* 60Hz */
++ light_freq = 60;
++ }
++ } else {
++ /* auto */
++ temp1 = ov5640_read_reg(0x3c0c, &tmp);
++ if (temp1 & 0x01) {
++ /* 50Hz */
++ light_freq = 50;
++ } else {
++ /* 60Hz */
++ }
++ }
++ return light_freq;
++}
++
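++/* The banding filter step is the number of line periods in one half cycle of
++ * the mains flicker: lines per second = sysclk / HTS, so step60 = sysclk /
++ * HTS / 120 and step50 = sysclk / HTS / 100 (the factors of 100 below account
++ * for sysclk being kept in 10 kHz units). As a rough check against the 30 fps
++ * VGA timing above (sysclk about 56 MHz, HTS 0x768 = 1896) this gives about
++ * 246 and 295, which matches the 0x3a0a/0x3a0b and 0x3a08/0x3a09 values in
++ * the init table. max_band is how many whole steps fit into a frame (VTS - 4).
++ */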
++void OV5640_set_bandingfilter(void)
++{
++ int prev_VTS;
++ int band_step60, max_band60, band_step50, max_band50;
++
++ /* read preview PCLK */
++ prev_sysclk = OV5640_get_sysclk();
++ /* read preview HTS */
++ prev_HTS = OV5640_get_HTS();
++
++ /* read preview VTS */
++ prev_VTS = OV5640_get_VTS();
++
++ /* calculate banding filter */
++ /* 60Hz */
++ band_step60 = prev_sysclk * 100/prev_HTS * 100/120;
++ ov5640_write_reg(0x3a0a, (band_step60 >> 8));
++ ov5640_write_reg(0x3a0b, (band_step60 & 0xff));
++
++ max_band60 = (int)((prev_VTS-4)/band_step60);
++ ov5640_write_reg(0x3a0d, max_band60);
++
++ /* 50Hz */
++ band_step50 = prev_sysclk * 100/prev_HTS;
++ ov5640_write_reg(0x3a08, (band_step50 >> 8));
++ ov5640_write_reg(0x3a09, (band_step50 & 0xff));
++
++ max_band50 = (int)((prev_VTS-4)/band_step50);
++ ov5640_write_reg(0x3a0e, max_band50);
++}
++
++int OV5640_set_AE_target(int target)
++{
++ /* stable in high */
++ int fast_high, fast_low;
++ AE_low = target * 23 / 25; /* 0.92 */
++ AE_high = target * 27 / 25; /* 1.08 */
++
++ fast_high = AE_high<<1;
++ if (fast_high > 255)
++ fast_high = 255;
++
++ fast_low = AE_low >> 1;
++
++ ov5640_write_reg(0x3a0f, AE_high);
++ ov5640_write_reg(0x3a10, AE_low);
++ ov5640_write_reg(0x3a1b, AE_high);
++ ov5640_write_reg(0x3a1e, AE_low);
++ ov5640_write_reg(0x3a11, fast_high);
++ ov5640_write_reg(0x3a1f, fast_low);
++
++ return 0;
++}
++
++void OV5640_turn_on_AE_AG(int enable)
++{
++ u8 ae_ag_ctrl;
++
++ ov5640_read_reg(0x3503, &ae_ag_ctrl);
++ if (enable) {
++ /* turn on auto AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl & ~(0x03);
++ } else {
++ /* turn off AE/AG */
++ ae_ag_ctrl = ae_ag_ctrl | 0x03;
++ }
++ ov5640_write_reg(0x3503, ae_ag_ctrl);
++}
++
++bool binning_on(void)
++{
++ u8 temp;
++ ov5640_read_reg(0x3821, &temp);
++ temp &= 0xfe;
++ if (temp)
++ return true;
++ else
++ return false;
++}
++
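++/* The MIPI CSI-2 virtual channel this sensor transmits on is selected by
++ * bits [7:6] of register 0x4814; the channel argument comes from the csi
++ * index configured for the sensor (see ov5640_data.csi in ov5640_init_mode).
++ */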
++static void ov5640_set_virtual_channel(int channel)
++{
++ u8 channel_id;
++
++ ov5640_read_reg(0x4814, &channel_id);
++ channel_id &= ~(3 << 6);
++ ov5640_write_reg(0x4814, channel_id | (channel << 6));
++}
++
++/* download ov5640 settings to sensor through i2c */
++static int ov5640_download_firmware(struct reg_value *pModeSetting, s32 ArySize)
++{
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int i, retval = 0;
++
++ for (i = 0; i < ArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5640_read_reg(RegAddr, &RegVal);
++ if (retval < 0)
++ goto err;
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5640_write_reg(RegAddr, Val);
++ if (retval < 0)
++ goto err;
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++/* sensor changes between scaling and subsampling
++ * go through exposure calculation
++ */
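++/* Exposure transfer used below: the gain * shutter product is rescaled by
++ * (cap_sysclk / prev_sysclk) * (prev_HTS / cap_HTS) so the absolute exposure
++ * time is preserved across the timing change, and, when the measured average
++ * is already inside the stable AE window, it is additionally scaled by
++ * AE_Target / average. The product is then split back into shutter and gain,
++ * keeping the shutter a whole multiple of the banding-filter step once it
++ * exceeds one step.
++ */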
++static int ov5640_change_mode_exposure_calc(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ u8 average;
++ int prev_shutter, prev_gain16;
++ int cap_shutter, cap_gain16;
++ int cap_sysclk, cap_HTS, cap_VTS;
++ int light_freq, cap_bandfilt, cap_maxband;
++ long cap_gain16_shutter;
++ int retval = 0;
++
++ /* check if the input mode and frame rate is valid */
++ pModeSetting =
++ ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width =
++ ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height =
++ ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* auto focus */
++ /* OV5640_auto_focus();//if no af function, just skip it */
++
++ /* turn off AE/AG */
++ OV5640_turn_on_AE_AG(0);
++
++ /* read preview shutter */
++ prev_shutter = OV5640_get_shutter();
++ if ((binning_on()) && (mode != ov5640_mode_720P_1280_720)
++ && (mode != ov5640_mode_1080P_1920_1080))
++ prev_shutter *= 2;
++
++ /* read preview gain */
++ prev_gain16 = OV5640_get_gain16();
++
++ /* get average */
++ ov5640_read_reg(0x56a1, &average);
++
++ /* turn off night mode for capture */
++ OV5640_set_night_mode();
++
++ /* turn off overlay */
++ /* ov5640_write_reg(0x3022, 0x06);//if no af function, just skip it */
++
++ OV5640_stream_off();
++
++ /* Write capture setting */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ /* read capture VTS */
++ cap_VTS = OV5640_get_VTS();
++ cap_HTS = OV5640_get_HTS();
++ cap_sysclk = OV5640_get_sysclk();
++
++ /* calculate capture banding filter */
++ light_freq = OV5640_get_light_freq();
++ if (light_freq == 60) {
++ /* 60Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS * 100 / 120;
++ } else {
++ /* 50Hz */
++ cap_bandfilt = cap_sysclk * 100 / cap_HTS;
++ }
++ cap_maxband = (int)((cap_VTS - 4)/cap_bandfilt);
++
++ /* calculate capture shutter/gain16 */
++ if (average > AE_low && average < AE_high) {
++ /* in stable range */
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk
++ * prev_HTS/cap_HTS * AE_Target / average;
++ } else {
++ cap_gain16_shutter =
++ prev_gain16 * prev_shutter * cap_sysclk/prev_sysclk
++ * prev_HTS/cap_HTS;
++ }
++
++ /* gain to shutter */
++ if (cap_gain16_shutter < (cap_bandfilt * 16)) {
++ /* shutter < 1/100 */
++ cap_shutter = cap_gain16_shutter/16;
++ if (cap_shutter < 1)
++ cap_shutter = 1;
++
++ cap_gain16 = cap_gain16_shutter/cap_shutter;
++ if (cap_gain16 < 16)
++ cap_gain16 = 16;
++ } else {
++ if (cap_gain16_shutter >
++ (cap_bandfilt * cap_maxband * 16)) {
++ /* exposure reach max */
++ cap_shutter = cap_bandfilt * cap_maxband;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ } else {
++ /* 1/100 < (cap_shutter = n/100) <= max */
++ cap_shutter =
++ ((int) (cap_gain16_shutter/16 / cap_bandfilt))
++ *cap_bandfilt;
++ cap_gain16 = cap_gain16_shutter / cap_shutter;
++ }
++ }
++
++ /* write capture gain */
++ OV5640_set_gain16(cap_gain16);
++
++ /* write capture shutter */
++ if (cap_shutter > (cap_VTS - 4)) {
++ cap_VTS = cap_shutter + 4;
++ OV5640_set_VTS(cap_VTS);
++ }
++ OV5640_set_shutter(cap_shutter);
++
++ OV5640_stream_on();
++
++err:
++ return retval;
++}
++
++/* if sensor changes inside scaling or subsampling
++ * change mode directly
++ */
++static int ov5640_change_mode_direct(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++
++ /* check if the input mode and frame rate is valid */
++ pModeSetting =
++ ov5640_mode_info_data[frame_rate][mode].init_data_ptr;
++ ArySize =
++ ov5640_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5640_data.pix.width =
++ ov5640_mode_info_data[frame_rate][mode].width;
++ ov5640_data.pix.height =
++ ov5640_mode_info_data[frame_rate][mode].height;
++
++ if (ov5640_data.pix.width == 0 || ov5640_data.pix.height == 0 ||
++ pModeSetting == NULL || ArySize == 0)
++ return -EINVAL;
++
++ /* turn off AE/AG */
++ OV5640_turn_on_AE_AG(0);
++
++ OV5640_stream_off();
++
++ /* Write capture setting */
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ OV5640_stream_on();
++
++ OV5640_turn_on_AE_AG(1);
++
++err:
++ return retval;
++}
++
++static int ov5640_init_mode(enum ov5640_frame_rate frame_rate,
++ enum ov5640_mode mode, enum ov5640_mode orig_mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 ArySize = 0;
++ int retval = 0;
++ void *mipi_csi2_info;
++ u32 mipi_reg, msec_wait4stable = 0;
++ enum ov5640_downsize_mode dn_mode, orig_dn_mode;
++
++ if ((mode > ov5640_mode_MAX || mode < ov5640_mode_MIN)
++ && (mode != ov5640_mode_INIT)) {
++ pr_err("Wrong ov5640 mode detected!\n");
++ return -1;
++ }
++
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ /* initial mipi dphy */
++ if (!mipi_csi2_info) {
++ printk(KERN_ERR "%s() in %s: Fail to get mipi_csi2_info!\n",
++ __func__, __FILE__);
++ return -1;
++ }
++
++ if (!mipi_csi2_get_status(mipi_csi2_info))
++ mipi_csi2_enable(mipi_csi2_info);
++
++ if (!mipi_csi2_get_status(mipi_csi2_info)) {
++ pr_err("Can not enable mipi csi2 driver!\n");
++ return -1;
++ }
++
++ mipi_csi2_set_lanes(mipi_csi2_info);
++
++ /* Only reset the MIPI CSI2 HW at sensor initialisation */
++ if (mode == ov5640_mode_INIT)
++ mipi_csi2_reset(mipi_csi2_info);
++
++ if (ov5640_data.pix.pixelformat == V4L2_PIX_FMT_UYVY)
++ mipi_csi2_set_datatype(mipi_csi2_info, MIPI_DT_YUV422);
++ else if (ov5640_data.pix.pixelformat == V4L2_PIX_FMT_RGB565)
++ mipi_csi2_set_datatype(mipi_csi2_info, MIPI_DT_RGB565);
++ else
++		pr_err("currently this sensor format is not supported!\n");
++
++ dn_mode = ov5640_mode_info_data[frame_rate][mode].dn_mode;
++ orig_dn_mode = ov5640_mode_info_data[frame_rate][orig_mode].dn_mode;
++ if (mode == ov5640_mode_INIT) {
++ pModeSetting = ov5640_init_setting_30fps_VGA;
++ ArySize = ARRAY_SIZE(ov5640_init_setting_30fps_VGA);
++
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ if (retval < 0)
++ goto err;
++
++ pModeSetting = ov5640_setting_30fps_VGA_640_480;
++ ArySize = ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480);
++ retval = ov5640_download_firmware(pModeSetting, ArySize);
++ } else if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) ||
++ (dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) {
++ /* change between subsampling and scaling
++		 * go through exposure calculation */
++ retval = ov5640_change_mode_exposure_calc(frame_rate, mode);
++ } else {
++ /* change inside subsampling or scaling
++ * download firmware directly */
++ retval = ov5640_change_mode_direct(frame_rate, mode);
++ }
++
++ if (retval < 0)
++ goto err;
++
++ OV5640_set_AE_target(AE_Target);
++ OV5640_get_light_freq();
++ OV5640_set_bandingfilter();
++ ov5640_set_virtual_channel(ov5640_data.csi);
++
++ /* add delay to wait for sensor stable */
++ if (mode == ov5640_mode_QSXGA_2592_1944) {
++ /* dump the first two frames: 1/7.5*2
++ * the frame rate of QSXGA is 7.5fps */
++ msec_wait4stable = 267;
++ } else if (frame_rate == ov5640_15_fps) {
++ /* dump the first nine frames: 1/15*9 */
++ msec_wait4stable = 600;
++ } else if (frame_rate == ov5640_30_fps) {
++ /* dump the first nine frames: 1/30*9 */
++ msec_wait4stable = 300;
++ }
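++	/*
++	 * Spelled out, the waits above are frame periods converted to
++	 * milliseconds (values from the comments in the code, rounded):
++	 *   QSXGA @ 7.5 fps: 2 frames -> 2 * 1000 / 7.5 ~= 267 ms
++	 *   15 fps:          9 frames -> 9 * 1000 / 15   = 600 ms
++	 *   30 fps:          9 frames -> 9 * 1000 / 30   = 300 ms
++	 */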
++ msleep(msec_wait4stable);
++
++ if (mipi_csi2_info) {
++ unsigned int i;
++
++ i = 0;
++
++ /* wait for mipi sensor ready */
++ mipi_reg = mipi_csi2_dphy_status(mipi_csi2_info);
++ while ((mipi_reg == 0x200) && (i < 10)) {
++ mipi_reg = mipi_csi2_dphy_status(mipi_csi2_info);
++ i++;
++ msleep(10);
++ }
++
++ if (i >= 10) {
++			pr_err("mipi csi2 cannot receive sensor clock!\n");
++ return -1;
++ }
++
++ i = 0;
++
++ /* wait for mipi stable */
++ mipi_reg = mipi_csi2_get_error1(mipi_csi2_info);
++ while ((mipi_reg != 0x0) && (i < 10)) {
++ mipi_reg = mipi_csi2_get_error1(mipi_csi2_info);
++ i++;
++ msleep(10);
++ }
++
++ if (i >= 10) {
++			pr_err("mipi csi2 cannot receive data correctly!\n");
++ return -1;
++ }
++ }
++err:
++ return retval;
++}
++
++/* --------------- IOCTL functions from v4l2_int_ioctl_desc --------------- */
++
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ memset(p, 0, sizeof(*p));
++ p->u.bt656.clock_curr = ov5640_data.mclk;
++ pr_debug(" clock_curr=mclk=%d\n", ov5640_data.mclk);
++ p->if_type = V4L2_IF_TYPE_BT656;
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.clock_min = OV5640_XCLK_MIN;
++ p->u.bt656.clock_max = OV5640_XCLK_MAX;
++ p->u.bt656.bt_sync_correct = 1; /* Indicate external vsync */
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_power - V4L2 sensor interface handler for VIDIOC_S_POWER ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @on: indicates power mode (on or off)
++ *
++ * Turns the power on or off, depending on the value of on and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor_data *sensor = s->priv;
++
++ if (on && !sensor->on) {
++ if (io_regulator)
++ if (regulator_enable(io_regulator) != 0)
++ return -EIO;
++ if (core_regulator)
++ if (regulator_enable(core_regulator) != 0)
++ return -EIO;
++ if (gpo_regulator)
++ if (regulator_enable(gpo_regulator) != 0)
++ return -EIO;
++ if (analog_regulator)
++ if (regulator_enable(analog_regulator) != 0)
++ return -EIO;
++ /* Make sure power on */
++ ov5640_standby(0);
++ } else if (!on && sensor->on) {
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++ if (core_regulator)
++ regulator_disable(core_regulator);
++ if (io_regulator)
++ regulator_disable(io_regulator);
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ ov5640_standby(1);
++ }
++
++ sensor->on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++ int ret = 0;
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->streamcap.capability;
++ cparm->timeperframe = sensor->streamcap.timeperframe;
++ cparm->capturemode = sensor->streamcap.capturemode;
++ ret = 0;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
++	u32 tgt_fps;	/* target frames per second */
++ enum ov5640_frame_rate frame_rate;
++ enum ov5640_mode orig_mode;
++ int ret = 0;
++
++ /* Make sure power on */
++ ov5640_standby(0);
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ /* Check that the new frame rate is allowed. */
++ if ((timeperframe->numerator == 0) ||
++ (timeperframe->denominator == 0)) {
++ timeperframe->denominator = DEFAULT_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps > MAX_FPS) {
++ timeperframe->denominator = MAX_FPS;
++ timeperframe->numerator = 1;
++ } else if (tgt_fps < MIN_FPS) {
++ timeperframe->denominator = MIN_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ /* Actual frame rate we use */
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else {
++ pr_err(" The camera frame rate is not supported!\n");
++ return -EINVAL;
++ }
++
++ orig_mode = sensor->streamcap.capturemode;
++ ret = ov5640_init_mode(frame_rate,
++ (u32)a->parm.capture.capturemode, orig_mode);
++ if (ret < 0)
++ return ret;
++
++ sensor->streamcap.timeperframe = *timeperframe;
++ sensor->streamcap.capturemode =
++ (u32)a->parm.capture.capturemode;
++
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ pr_debug(" type is not " \
++ "V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
++ a->type);
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
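++
++/*
++ * Minimal user-space sketch of exercising the handler above through the
++ * capture bridge (illustrative only: the device node path, the mode
++ * index and the missing error handling are assumptions, not part of
++ * this driver):
++ *
++ *   int fd = open("/dev/video0", O_RDWR);
++ *   struct v4l2_streamparm parm;
++ *
++ *   memset(&parm, 0, sizeof(parm));
++ *   parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ *   parm.parm.capture.timeperframe.numerator = 1;
++ *   parm.parm.capture.timeperframe.denominator = 30;   (request 30 fps)
++ *   parm.parm.capture.capturemode = 0;                  (mode index)
++ *   ioctl(fd, VIDIOC_S_PARM, &parm);
++ */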
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor_data *sensor = s->priv;
++
++ f->fmt.pix = sensor->pix;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ vc->value = ov5640_data.brightness;
++ break;
++ case V4L2_CID_HUE:
++ vc->value = ov5640_data.hue;
++ break;
++ case V4L2_CID_CONTRAST:
++ vc->value = ov5640_data.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ vc->value = ov5640_data.saturation;
++ break;
++ case V4L2_CID_RED_BALANCE:
++ vc->value = ov5640_data.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ vc->value = ov5640_data.blue;
++ break;
++ case V4L2_CID_EXPOSURE:
++ vc->value = ov5640_data.ae_mode;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++
++ pr_debug("In ov5640:ioctl_s_ctrl %d\n",
++ vc->id);
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ break;
++ case V4L2_CID_CONTRAST:
++ break;
++ case V4L2_CID_SATURATION:
++ break;
++ case V4L2_CID_HUE:
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_RED_BALANCE:
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ break;
++ case V4L2_CID_GAMMA:
++ break;
++ case V4L2_CID_EXPOSURE:
++ break;
++ case V4L2_CID_AUTOGAIN:
++ break;
++ case V4L2_CID_GAIN:
++ break;
++ case V4L2_CID_HFLIP:
++ break;
++ case V4L2_CID_VFLIP:
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fsize->pixel_format = ov5640_data.pix.pixelformat;
++ fsize->discrete.width =
++ max(ov5640_mode_info_data[0][fsize->index].width,
++ ov5640_mode_info_data[1][fsize->index].width);
++ fsize->discrete.height =
++ max(ov5640_mode_info_data[0][fsize->index].height,
++ ov5640_mode_info_data[1][fsize->index].height);
++ return 0;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name,
++ "ov5640_mipi_camera");
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++
++ return 0;
++}
++
++/*!
++ * ioctl_enum_fmt_cap - V4L2 sensor interface handler for VIDIOC_ENUM_FMT
++ * @s: pointer to standard V4L2 device structure
++ * @fmt: pointer to standard V4L2 fmt description structure
++ *
++ * Return 0.
++ */
++static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index > ov5640_mode_MAX)
++ return -EINVAL;
++
++ fmt->pixelformat = ov5640_data.pix.pixelformat;
++
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when the slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ struct sensor_data *sensor = s->priv;
++ u32 tgt_xclk; /* target xclk */
++	u32 tgt_fps;	/* target frames per second */
++ int ret;
++ enum ov5640_frame_rate frame_rate;
++ void *mipi_csi2_info;
++
++ ov5640_data.on = true;
++
++ /* mclk */
++ tgt_xclk = ov5640_data.mclk;
++ tgt_xclk = min(tgt_xclk, (u32)OV5640_XCLK_MAX);
++ tgt_xclk = max(tgt_xclk, (u32)OV5640_XCLK_MIN);
++ ov5640_data.mclk = tgt_xclk;
++
++ pr_debug(" Setting mclk to %d MHz\n", tgt_xclk / 1000000);
++
++ /* Default camera frame rate is set in probe */
++ tgt_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5640_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5640_30_fps;
++ else
++ return -EINVAL; /* Only support 15fps or 30fps now. */
++
++ mipi_csi2_info = mipi_csi2_get_info();
++
++	/* enable mipi csi2 */
++	if (mipi_csi2_info) {
++		mipi_csi2_enable(mipi_csi2_info);
++	} else {
++		printk(KERN_ERR "%s() in %s: Fail to get mipi_csi2_info!\n",
++		       __func__, __FILE__);
++		return -EPERM;
++	}
++
++ ret = ov5640_init_mode(frame_rate, ov5640_mode_INIT, ov5640_mode_INIT);
++
++ return ret;
++}
++
++/*!
++ * ioctl_dev_exit - V4L2 sensor interface handler for vidioc_int_dev_exit_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Deinitialise the device when the slave detaches from the master.
++ */
++static int ioctl_dev_exit(struct v4l2_int_device *s)
++{
++ void *mipi_csi2_info;
++
++ mipi_csi2_info = mipi_csi2_get_info();
++
++ /* disable mipi csi2 */
++ if (mipi_csi2_info)
++ if (mipi_csi2_get_status(mipi_csi2_info))
++ mipi_csi2_disable(mipi_csi2_info);
++
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module and links them to the
++ * enumeration.
++ */
++static struct v4l2_int_ioctl_desc ov5640_ioctl_desc[] = {
++ {vidioc_int_dev_init_num, (v4l2_int_ioctl_func *) ioctl_dev_init},
++ {vidioc_int_dev_exit_num, ioctl_dev_exit},
++ {vidioc_int_s_power_num, (v4l2_int_ioctl_func *) ioctl_s_power},
++ {vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func *) ioctl_g_ifparm},
++/* {vidioc_int_g_needs_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_g_needs_reset}, */
++/* {vidioc_int_reset_num, (v4l2_int_ioctl_func *)ioctl_reset}, */
++ {vidioc_int_init_num, (v4l2_int_ioctl_func *) ioctl_init},
++ {vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *) ioctl_enum_fmt_cap},
++/* {vidioc_int_try_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_try_fmt_cap}, */
++ {vidioc_int_g_fmt_cap_num, (v4l2_int_ioctl_func *) ioctl_g_fmt_cap},
++/* {vidioc_int_s_fmt_cap_num, (v4l2_int_ioctl_func *) ioctl_s_fmt_cap}, */
++ {vidioc_int_g_parm_num, (v4l2_int_ioctl_func *) ioctl_g_parm},
++ {vidioc_int_s_parm_num, (v4l2_int_ioctl_func *) ioctl_s_parm},
++/* {vidioc_int_queryctrl_num, (v4l2_int_ioctl_func *)ioctl_queryctrl}, */
++ {vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func *) ioctl_g_ctrl},
++ {vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func *) ioctl_s_ctrl},
++ {vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *) ioctl_enum_framesizes},
++ {vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *) ioctl_g_chip_ident},
++};
++
++static struct v4l2_int_slave ov5640_slave = {
++ .ioctls = ov5640_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(ov5640_ioctl_desc),
++};
++
++static struct v4l2_int_device ov5640_int_device = {
++ .module = THIS_MODULE,
++ .name = "ov5640",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &ov5640_slave,
++ },
++};
++
++/*!
++ * ov5640 I2C probe function
++ *
++ * @param adapter struct i2c_adapter *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct device *dev = &client->dev;
++ int retval;
++ u8 chip_id_high, chip_id_low;
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++		dev_warn(dev, "no sensor pwdn pin available\n");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_mipi_pwdn");
++ if (retval < 0)
++ return retval;
++
++ /* request reset pin */
++ rst_gpio = of_get_named_gpio(dev->of_node, "rst-gpios", 0);
++ if (!gpio_is_valid(rst_gpio)) {
++		dev_warn(dev, "no sensor reset pin available\n");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5640_mipi_reset");
++ if (retval < 0)
++ return retval;
++
++ /* Set initial values for the sensor struct. */
++ memset(&ov5640_data, 0, sizeof(ov5640_data));
++ ov5640_data.sensor_clk = devm_clk_get(dev, "csi_mclk");
++	if (IS_ERR(ov5640_data.sensor_clk)) {
++		retval = PTR_ERR(ov5640_data.sensor_clk);
++		/* assuming clock enabled by default */
++		ov5640_data.sensor_clk = NULL;
++		dev_err(dev, "clock-frequency missing or invalid\n");
++		return retval;
++	}
++
++ retval = of_property_read_u32(dev->of_node, "mclk",
++ &(ov5640_data.mclk));
++ if (retval) {
++ dev_err(dev, "mclk missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk_source",
++ (u32 *) &(ov5640_data.mclk_source));
++ if (retval) {
++ dev_err(dev, "mclk_source missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "csi_id",
++ &(ov5640_data.csi));
++ if (retval) {
++ dev_err(dev, "csi id missing or invalid\n");
++ return retval;
++ }
++
++ clk_prepare_enable(ov5640_data.sensor_clk);
++
++ ov5640_data.io_init = ov5640_reset;
++ ov5640_data.i2c_client = client;
++ ov5640_data.pix.pixelformat = V4L2_PIX_FMT_UYVY;
++ ov5640_data.pix.width = 640;
++ ov5640_data.pix.height = 480;
++ ov5640_data.streamcap.capability = V4L2_MODE_HIGHQUALITY |
++ V4L2_CAP_TIMEPERFRAME;
++ ov5640_data.streamcap.capturemode = 0;
++ ov5640_data.streamcap.timeperframe.denominator = DEFAULT_FPS;
++ ov5640_data.streamcap.timeperframe.numerator = 1;
++
++ ov5640_power_on(dev);
++
++ ov5640_reset();
++
++ ov5640_standby(0);
++
++ retval = ov5640_read_reg(OV5640_CHIP_ID_HIGH_BYTE, &chip_id_high);
++ if (retval < 0 || chip_id_high != 0x56) {
++ pr_warning("camera ov5640_mipi is not found\n");
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ return -ENODEV;
++ }
++ retval = ov5640_read_reg(OV5640_CHIP_ID_LOW_BYTE, &chip_id_low);
++ if (retval < 0 || chip_id_low != 0x40) {
++ pr_warning("camera ov5640_mipi is not found\n");
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++ return -ENODEV;
++ }
++
++ ov5640_standby(1);
++
++ ov5640_int_device.priv = &ov5640_data;
++ retval = v4l2_int_device_register(&ov5640_int_device);
++
++ clk_disable_unprepare(ov5640_data.sensor_clk);
++
++ pr_info("camera ov5640_mipi is found\n");
++ return retval;
++}
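++
++/*
++ * Example of the device tree node the probe above expects (a sketch
++ * only: the label, unit address, compatible string, clock specifier and
++ * GPIO numbers are board-specific assumptions; just the property names
++ * match what the probe reads):
++ *
++ *   ov5640_mipi: camera@3c {
++ *       compatible = "ovti,ov5640_mipi";
++ *       reg = <0x3c>;
++ *       clocks = <&clks 201>;
++ *       clock-names = "csi_mclk";
++ *       pwn-gpios = <&gpio1 19 1>;
++ *       rst-gpios = <&gpio1 20 0>;
++ *       mclk = <24000000>;
++ *       mclk_source = <0>;
++ *       csi_id = <1>;
++ *   };
++ */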
++
++/*!
++ * ov5640 I2C detach function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5640_remove(struct i2c_client *client)
++{
++ v4l2_int_device_unregister(&ov5640_int_device);
++
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++
++ if (core_regulator)
++ regulator_disable(core_regulator);
++
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ return 0;
++}
++
++/*!
++ * ov5640 init function
++ * Called by insmod ov5640_camera.ko.
++ *
++ * @return Error code indicating success or failure
++ */
++static __init int ov5640_init(void)
++{
++	int err;
++
++ err = i2c_add_driver(&ov5640_i2c_driver);
++ if (err != 0)
++ pr_err("%s:driver registration failed, error=%d\n",
++ __func__, err);
++
++ return err;
++}
++
++/*!
++ * OV5640 cleanup function
++ * Called on rmmod ov5640_camera.ko
++ *
++ * @return Error code indicating success or failure
++ */
++static void __exit ov5640_clean(void)
++{
++ i2c_del_driver(&ov5640_i2c_driver);
++}
++
++module_init(ov5640_init);
++module_exit(ov5640_clean);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("OV5640 MIPI Camera Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.0");
++MODULE_ALIAS("CSI");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/capture/ov5642.c linux-3.14.40/drivers/media/platform/mxc/capture/ov5642.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/capture/ov5642.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/capture/ov5642.c 2015-05-01 14:57:59.267427001 -0500
+@@ -0,0 +1,4252 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++#include <linux/of_device.h>
++#include <linux/i2c.h>
++#include <linux/of_gpio.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/regulator/consumer.h>
++#include <linux/fsl_devices.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-int-device.h>
++#include "mxc_v4l2_capture.h"
++
++#define OV5642_VOLTAGE_ANALOG 2800000
++#define OV5642_VOLTAGE_DIGITAL_CORE 1500000
++#define OV5642_VOLTAGE_DIGITAL_IO 1800000
++
++#define MIN_FPS 15
++#define MAX_FPS 30
++#define DEFAULT_FPS 30
++
++#define OV5642_XCLK_MIN 6000000
++#define OV5642_XCLK_MAX 24000000
++
++#define OV5642_CHIP_ID_HIGH_BYTE 0x300A
++#define OV5642_CHIP_ID_LOW_BYTE 0x300B
++
++enum ov5642_mode {
++ ov5642_mode_MIN = 0,
++ ov5642_mode_VGA_640_480 = 0,
++ ov5642_mode_QVGA_320_240 = 1,
++ ov5642_mode_NTSC_720_480 = 2,
++ ov5642_mode_PAL_720_576 = 3,
++ ov5642_mode_720P_1280_720 = 4,
++ ov5642_mode_1080P_1920_1080 = 5,
++ ov5642_mode_QSXGA_2592_1944 = 6,
++ ov5642_mode_QCIF_176_144 = 7,
++ ov5642_mode_XGA_1024_768 = 8,
++ ov5642_mode_MAX = 8
++};
++
++enum ov5642_frame_rate {
++ ov5642_15_fps,
++ ov5642_30_fps
++};
++
++static int ov5642_framerates[] = {
++ [ov5642_15_fps] = 15,
++ [ov5642_30_fps] = 30,
++};
++
++struct reg_value {
++ u16 u16RegAddr;
++ u8 u8Val;
++ u8 u8Mask;
++ u32 u32Delay_ms;
++};
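++
++/*
++ * Each table below is applied in order: u8Val is written to u16RegAddr,
++ * u8Mask (when non-zero) selects which bits of the register to update,
++ * and u32Delay_ms is an optional settle delay after the write.  A
++ * minimal sketch of such a loop (helper names are assumptions; the real
++ * download routine, defined later in this driver, may differ):
++ *
++ *   for (i = 0; i < size; ++i) {
++ *       u8 val = table[i].u8Val;
++ *
++ *       if (table[i].u8Mask) {
++ *           u8 cur;
++ *
++ *           ov5642_read_reg(table[i].u16RegAddr, &cur);
++ *           val = (cur & ~table[i].u8Mask) | (val & table[i].u8Mask);
++ *       }
++ *       ov5642_write_reg(table[i].u16RegAddr, val);
++ *       if (table[i].u32Delay_ms)
++ *           msleep(table[i].u32Delay_ms);
++ *   }
++ */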
++
++struct ov5642_mode_info {
++ enum ov5642_mode mode;
++ u32 width;
++ u32 height;
++ struct reg_value *init_data_ptr;
++ u32 init_data_size;
++};
++
++/*!
++ * Maintains the information on the current state of the sensor.
++ */
++static struct sensor_data ov5642_data;
++static int pwn_gpio, rst_gpio;
++
++static struct reg_value ov5642_rot_none_VGA[] = {
++ {0x3818, 0xc1, 0x00, 0x00}, {0x3621, 0x87, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_vert_flip_VGA[] = {
++ {0x3818, 0x20, 0xbf, 0x00}, {0x3621, 0x20, 0xff, 0x00},
++};
++
++static struct reg_value ov5642_rot_horiz_flip_VGA[] = {
++ {0x3818, 0x81, 0x00, 0x01}, {0x3621, 0xa7, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_180_VGA[] = {
++ {0x3818, 0x60, 0xff, 0x00}, {0x3621, 0x00, 0xdf, 0x00},
++};
++
++
++static struct reg_value ov5642_rot_none_FULL[] = {
++ {0x3818, 0xc0, 0x00, 0x00}, {0x3621, 0x09, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_vert_flip_FULL[] = {
++ {0x3818, 0x20, 0xbf, 0x01}, {0x3621, 0x20, 0xff, 0x00},
++};
++
++static struct reg_value ov5642_rot_horiz_flip_FULL[] = {
++ {0x3818, 0x80, 0x00, 0x01}, {0x3621, 0x29, 0x00, 0x00},
++};
++
++static struct reg_value ov5642_rot_180_FULL[] = {
++ {0x3818, 0x60, 0xff, 0x00}, {0x3621, 0x00, 0xdf, 0x00},
++};
++
++
++static struct reg_value ov5642_initial_setting[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c00, 0x04, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0},
++ {0x5182, 0x00, 0, 0}, {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5001, 0xff, 0, 0}, {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0},
++ {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0},
++ {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0},
++ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
++ {0x380b, 0xe0, 0, 0}, {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0},
++ {0x350b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x0b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 300},
++};
++
++static struct reg_value ov5642_setting_15fps_QCIF_176_144[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_QCIF_176_144[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x10, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0x90, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_QSXGA_2592_1944[] = {
++ {0x3503, 0x07, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0},
++ {0x3002, 0x00, 0, 0}, {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0},
++ {0x3005, 0xff, 0, 0}, {0x3006, 0xff, 0, 0}, {0x3007, 0x3f, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3818, 0xc0, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3602, 0xe4, 0, 0}, {0x3612, 0xac, 0, 0}, {0x3613, 0x44, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3623, 0x22, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3705, 0xda, 0, 0}, {0x370a, 0x80, 0, 0}, {0x3801, 0x95, 0, 0},
++ {0x3803, 0x0e, 0, 0}, {0x3804, 0x0a, 0, 0}, {0x3805, 0x20, 0, 0},
++ {0x3806, 0x07, 0, 0}, {0x3807, 0x98, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3815, 0x44, 0, 0},
++ {0x3824, 0x11, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x5682, 0x0a, 0, 0}, {0x5683, 0x20, 0, 0}, {0x5686, 0x07, 0, 0},
++ {0x5687, 0x98, 0, 0}, {0x5001, 0xff, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x4407, 0x04, 0, 0}, {0x3008, 0x02, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x460c, 0x22, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4713, 0x03, 0, 0}, {0x471c, 0xd0, 0, 0}, {0x3815, 0x01, 0, 0},
++ {0x501f, 0x00, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3819, 0x80, 0, 0},
++ {0x5002, 0xe0, 0, 0}, {0x530a, 0x01, 0, 0}, {0x530d, 0x10, 0, 0},
++ {0x530c, 0x04, 0, 0}, {0x5312, 0x20, 0, 0}, {0x5282, 0x01, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x3012, 0x00, 0, 0},
++};
++
++
++static struct reg_value ov5642_setting_VGA_2_QVGA[] = {
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x3815, 0x04, 0, 0},
++};
++
++static struct reg_value ov5642_setting_QSXGA_2_VGA[] = {
++ {0x3503, 0x00, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0},
++ {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0},
++ {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x3818, 0xc1, 0, 0}, {0x3621, 0x87, 0, 0},
++ {0x350c, 0x03, 0, 0}, {0x350d, 0xe8, 0, 0}, {0x3602, 0xfc, 0, 0},
++ {0x3612, 0xff, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3622, 0x60, 0, 0},
++ {0x3623, 0x01, 0, 0}, {0x3604, 0x48, 0, 0}, {0x3705, 0xdb, 0, 0},
++ {0x370a, 0x81, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x3806, 0x03, 0, 0},
++ {0x3807, 0xc0, 0, 0}, {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0},
++ {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3810, 0x40, 0, 0}, {0x3815, 0x04, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3825, 0xb4, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0},
++ {0x5001, 0xff, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x4407, 0x0c, 0, 0}, {0x3008, 0x02, 0, 0}, {0x460b, 0x37, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x471d, 0x05, 0, 0}, {0x4713, 0x02, 0, 0},
++ {0x471c, 0xd0, 0, 0}, {0x3815, 0x04, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x3002, 0x5c, 0, 0}, {0x3819, 0x80, 0, 0}, {0x5002, 0xe0, 0, 0},
++ {0x530a, 0x01, 0, 0}, {0x530d, 0x0c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x5312, 0x40, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x3012, 0x02, 0, 0}, {0x3010, 0x00, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_VGA_640_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_VGA_640_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++
++static struct reg_value ov5642_setting_30fps_XGA_1024_768[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
++ {0x380b, 0x00, 0, 0}, {0x3815, 0x02, 0, 0}, {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_XGA_1024_768[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
++ {0x380b, 0x00, 0, 0}, {0x3815, 0x02, 0, 0}, {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_QVGA_320_240[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x08, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3808, 0x01, 0, 0},
++ {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0}, {0x380b, 0xf0, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_NTSC_720_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xb0, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x3c, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0},
++ {0x5683, 0x00, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0x58, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0x58, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_PAL_720_576[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3615, 0xf0, 0, 0}, {0x3000, 0x00, 0, 0},
++ {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0}, {0x3003, 0x00, 0, 0},
++ {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0}, {0x3006, 0x43, 0, 0},
++ {0x3007, 0x37, 0, 0}, {0x3011, 0x09, 0, 0}, {0x3012, 0x02, 0, 0},
++ {0x3010, 0x00, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0}, {0x380b, 0x40, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3825, 0xd8, 0, 0},
++ {0x3501, 0x1e, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0},
++ {0x380c, 0x07, 0, 0}, {0x380d, 0x2a, 0, 0}, {0x380e, 0x03, 0, 0},
++ {0x380f, 0xe8, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x3818, 0xc1, 0, 0}, {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0},
++ {0x3801, 0x80, 0, 0}, {0x3621, 0xc7, 0, 0}, {0x3801, 0x50, 0, 0},
++ {0x3803, 0x08, 0, 0}, {0x3827, 0x3c, 0, 0}, {0x3810, 0x80, 0, 0},
++ {0x3804, 0x04, 0, 0}, {0x3805, 0xb0, 0, 0}, {0x5682, 0x04, 0, 0},
++ {0x5683, 0xb0, 0, 0}, {0x3806, 0x03, 0, 0}, {0x3807, 0x58, 0, 0},
++ {0x5686, 0x03, 0, 0}, {0x5687, 0x58, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a1a, 0x05, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0},
++ {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0},
++ {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0},
++ {0x350d, 0xd0, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0},
++ {0x528c, 0x08, 0, 0}, {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0},
++ {0x528f, 0x10, 0, 0}, {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0},
++ {0x5293, 0x02, 0, 0}, {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0},
++ {0x5296, 0x00, 0, 0}, {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0},
++ {0x5299, 0x02, 0, 0}, {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0},
++ {0x529c, 0x00, 0, 0}, {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0},
++ {0x529f, 0x02, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0},
++ {0x3a1f, 0x10, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0},
++ {0x5193, 0x70, 0, 0}, {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0},
++ {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3621, 0x87, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x302c, 0x60, 0x60, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_720P_1280_720[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x00, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3030, 0x2b, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3604, 0x60, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3621, 0x09, 0, 0}, {0x3709, 0x00, 0, 0},
++ {0x4000, 0x21, 0, 0}, {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0},
++ {0x3605, 0x04, 0, 0}, {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x300d, 0x22, 0, 0}, {0x3623, 0x22, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5500, 0x0a, 0, 0},
++ {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0},
++ {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4708, 0x06, 0, 0}, {0x370c, 0xa0, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x5687, 0x94, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5001, 0xcf, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x460b, 0x35, 0, 0}, {0x471d, 0x00, 0, 0},
++ {0x3002, 0x0c, 0, 0}, {0x3002, 0x00, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x471c, 0x50, 0, 0}, {0x4721, 0x02, 0, 0}, {0x4402, 0x90, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x3815, 0x44, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3818, 0xc8, 0, 0}, {0x3801, 0x88, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x04, 0, 0}, {0x3a13, 0x30, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0},
++ {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0}, {0x3a0d, 0x08, 0, 0},
++ {0x3a0e, 0x06, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x32, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x32, 0, 0}, {0x3a11, 0x80, 0, 0},
++ {0x3a1f, 0x20, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x09, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0},
++ {0x3a0b, 0xd0, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0},
++ {0x401c, 0x06, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x350c, 0x02, 0, 0}, {0x350d, 0xe4, 0, 0}, {0x3621, 0xc9, 0, 0},
++ {0x370a, 0x81, 0, 0}, {0x3803, 0x08, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x3806, 0x02, 0, 0}, {0x3807, 0xd0, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x08, 0, 0}, {0x380d, 0x72, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0xc0, 0, 0},
++ {0x3818, 0xc9, 0, 0}, {0x381c, 0x10, 0, 0}, {0x381d, 0xa0, 0, 0},
++ {0x381e, 0x05, 0, 0}, {0x381f, 0xb0, 0, 0}, {0x3820, 0x00, 0, 0},
++ {0x3821, 0x00, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3a08, 0x1b, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x17, 0, 0}, {0x3a0b, 0x20, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a0e, 0x01, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0}, {0x5686, 0x02, 0, 0},
++ {0x5687, 0xcc, 0, 0}, {0x5001, 0x7f, 0, 0}, {0x589b, 0x06, 0, 0},
++ {0x589a, 0xc5, 0, 0}, {0x3503, 0x00, 0, 0}, {0x3010, 0x10, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x460b, 0x37, 0, 0}, {0x471c, 0xd0, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x3815, 0x01, 0, 0}, {0x3818, 0x00, 0x08, 0},
++ {0x501f, 0x00, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3002, 0x1c, 0, 0},
++ {0x3819, 0x80, 0, 0}, {0x5002, 0xe0, 0, 0}, {0x3010, 0x30, 0, 0},
++ {0x3a08, 0x06, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x05, 0, 0},
++ {0x3a0b, 0x50, 0, 0}, {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x07, 0, 0},
++};
++
++static struct reg_value ov5642_setting_30fps_720P_1280_720[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x00, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3030, 0x2b, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3604, 0x60, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3621, 0x09, 0, 0}, {0x3709, 0x00, 0, 0},
++ {0x4000, 0x21, 0, 0}, {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0},
++ {0x3605, 0x04, 0, 0}, {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x300d, 0x22, 0, 0}, {0x3623, 0x22, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5500, 0x0a, 0, 0},
++ {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0},
++ {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4708, 0x06, 0, 0}, {0x370c, 0xa0, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x5687, 0x94, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5001, 0xcf, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x460b, 0x35, 0, 0}, {0x471d, 0x00, 0, 0},
++ {0x3002, 0x0c, 0, 0}, {0x3002, 0x00, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x471c, 0x50, 0, 0}, {0x4721, 0x02, 0, 0}, {0x4402, 0x90, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x3815, 0x44, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3818, 0xc8, 0, 0}, {0x3801, 0x88, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x04, 0, 0}, {0x3a13, 0x30, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0},
++ {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0}, {0x3a0d, 0x08, 0, 0},
++ {0x3a0e, 0x06, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x32, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x32, 0, 0}, {0x3a11, 0x80, 0, 0},
++ {0x3a1f, 0x20, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x09, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0},
++ {0x3a0b, 0xd0, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0},
++ {0x401c, 0x06, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x350c, 0x02, 0, 0}, {0x350d, 0xe4, 0, 0}, {0x3621, 0xc9, 0, 0},
++ {0x370a, 0x81, 0, 0}, {0x3803, 0x08, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x3806, 0x02, 0, 0}, {0x3807, 0xd0, 0, 0},
++ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
++ {0x380b, 0xd0, 0, 0}, {0x380c, 0x08, 0, 0}, {0x380d, 0x72, 0, 0},
++ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0xc0, 0, 0},
++ {0x3818, 0xc9, 0, 0}, {0x381c, 0x10, 0, 0}, {0x381d, 0xa0, 0, 0},
++ {0x381e, 0x05, 0, 0}, {0x381f, 0xb0, 0, 0}, {0x3820, 0x00, 0, 0},
++ {0x3821, 0x00, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3a08, 0x1b, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x17, 0, 0}, {0x3a0b, 0x20, 0, 0},
++ {0x3a0d, 0x02, 0, 0}, {0x3a0e, 0x01, 0, 0}, {0x401c, 0x04, 0, 0},
++ {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0}, {0x5686, 0x02, 0, 0},
++ {0x5687, 0xcc, 0, 0}, {0x5001, 0x7f, 0, 0}, {0x589b, 0x06, 0, 0},
++ {0x589a, 0xc5, 0, 0}, {0x3503, 0x00, 0, 0}, {0x3010, 0x10, 0, 0},
++ {0x460c, 0x20, 0, 0}, {0x460b, 0x37, 0, 0}, {0x471c, 0xd0, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x3815, 0x01, 0, 0}, {0x3818, 0x00, 0x08, 0},
++ {0x501f, 0x00, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3002, 0x1c, 0, 0},
++ {0x3819, 0x80, 0, 0}, {0x5002, 0xe0, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_1080P_1920_1080[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x00, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3030, 0x2b, 0, 0},
++ {0x3011, 0x08, 0, 0}, {0x3010, 0x10, 0, 0}, {0x3604, 0x60, 0, 0},
++ {0x3622, 0x60, 0, 0}, {0x3621, 0x09, 0, 0}, {0x3709, 0x00, 0, 0},
++ {0x4000, 0x21, 0, 0}, {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0},
++ {0x3605, 0x04, 0, 0}, {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0},
++ {0x300d, 0x22, 0, 0}, {0x3623, 0x22, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5500, 0x0a, 0, 0},
++ {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0}, {0x5080, 0x08, 0, 0},
++ {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x4708, 0x06, 0, 0}, {0x370c, 0xa0, 0, 0}, {0x3808, 0x0a, 0, 0},
++ {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0}, {0x380b, 0x98, 0, 0},
++ {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0}, {0x380e, 0x07, 0, 0},
++ {0x380f, 0xd0, 0, 0}, {0x5687, 0x94, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x5001, 0xcf, 0, 0}, {0x4300, 0x30, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x460b, 0x35, 0, 0}, {0x471d, 0x00, 0, 0},
++ {0x3002, 0x0c, 0, 0}, {0x3002, 0x00, 0, 0}, {0x4713, 0x03, 0, 0},
++ {0x471c, 0x50, 0, 0}, {0x4721, 0x02, 0, 0}, {0x4402, 0x90, 0, 0},
++ {0x460c, 0x22, 0, 0}, {0x3815, 0x44, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3818, 0xc8, 0, 0}, {0x3801, 0x88, 0, 0}, {0x3824, 0x11, 0, 0},
++ {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x04, 0, 0}, {0x3a13, 0x30, 0, 0},
++ {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0}, {0x3a08, 0x12, 0, 0},
++ {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0}, {0x3a0b, 0xa0, 0, 0},
++ {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0}, {0x3a0d, 0x08, 0, 0},
++ {0x3a0e, 0x06, 0, 0}, {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0},
++ {0x3502, 0x00, 0, 0}, {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x32, 0, 0},
++ {0x3a1b, 0x3c, 0, 0}, {0x3a1e, 0x32, 0, 0}, {0x3a11, 0x80, 0, 0},
++ {0x3a1f, 0x20, 0, 0}, {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0},
++ {0x3a03, 0x7d, 0, 0}, {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0},
++ {0x3a15, 0x7d, 0, 0}, {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0},
++ {0x3a08, 0x09, 0, 0}, {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0},
++ {0x3a0b, 0xd0, 0, 0}, {0x3a0d, 0x10, 0, 0}, {0x3a0e, 0x0d, 0, 0},
++ {0x4407, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x589b, 0x00, 0, 0},
++ {0x589a, 0xc0, 0, 0}, {0x401e, 0x20, 0, 0}, {0x4001, 0x42, 0, 0},
++ {0x401c, 0x06, 0, 0}, {0x3825, 0xac, 0, 0}, {0x3827, 0x0c, 0, 0},
++ {0x528a, 0x01, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x10, 0, 0}, {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0},
++ {0x5290, 0x30, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x08, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x28, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0},
++ {0x5282, 0x00, 0, 0}, {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0},
++ {0x5302, 0x00, 0, 0}, {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0},
++ {0x530d, 0x0c, 0, 0}, {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0},
++ {0x5310, 0x20, 0, 0}, {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0},
++ {0x5309, 0x40, 0, 0}, {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0},
++ {0x5306, 0x00, 0, 0}, {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0},
++ {0x5315, 0x20, 0, 0}, {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0},
++ {0x5317, 0x00, 0, 0}, {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0},
++ {0x5381, 0x00, 0, 0}, {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0},
++ {0x5384, 0x00, 0, 0}, {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0},
++ {0x5387, 0x00, 0, 0}, {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0},
++ {0x538a, 0x00, 0, 0}, {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0},
++ {0x538d, 0x00, 0, 0}, {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0},
++ {0x5390, 0x00, 0, 0}, {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0},
++ {0x5393, 0xa2, 0, 0}, {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0},
++ {0x5481, 0x21, 0, 0}, {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0},
++ {0x5484, 0x65, 0, 0}, {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0},
++ {0x5487, 0x87, 0, 0}, {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0},
++ {0x548a, 0xaa, 0, 0}, {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0},
++ {0x548d, 0xdd, 0, 0}, {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0},
++ {0x5490, 0x05, 0, 0}, {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0},
++ {0x5493, 0x20, 0, 0}, {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0},
++ {0x5496, 0x02, 0, 0}, {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0},
++ {0x5499, 0x86, 0, 0}, {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0},
++ {0x549c, 0x02, 0, 0}, {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0},
++ {0x549f, 0x1c, 0, 0}, {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0},
++ {0x54a2, 0x01, 0, 0}, {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0},
++ {0x54a5, 0xc5, 0, 0}, {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0},
++ {0x54a8, 0x01, 0, 0}, {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0},
++ {0x54ab, 0x41, 0, 0}, {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0},
++ {0x54ae, 0x00, 0, 0}, {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0},
++ {0x54b1, 0x20, 0, 0}, {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0},
++ {0x54b4, 0x00, 0, 0}, {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0},
++ {0x54b7, 0xdf, 0, 0}, {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0},
++ {0x3406, 0x00, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0},
++ {0x5182, 0x11, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0},
++ {0x5185, 0x24, 0, 0}, {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0},
++ {0x5188, 0x08, 0, 0}, {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0},
++ {0x518b, 0xb2, 0, 0}, {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0},
++ {0x518e, 0x3d, 0, 0}, {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0},
++ {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0},
++ {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0},
++ {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0},
++ {0x519d, 0x82, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0},
++ {0x3a0f, 0x38, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0},
++ {0x3a1e, 0x2e, 0, 0}, {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x5688, 0xa6, 0, 0}, {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0},
++ {0x568b, 0xae, 0, 0}, {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0},
++ {0x568e, 0x62, 0, 0}, {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0},
++ {0x5584, 0x40, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0},
++ {0x5800, 0x27, 0, 0}, {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0},
++ {0x5803, 0x0f, 0, 0}, {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0},
++ {0x5806, 0x1e, 0, 0}, {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0},
++ {0x5809, 0x0d, 0, 0}, {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0},
++ {0x580c, 0x0a, 0, 0}, {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0},
++ {0x580f, 0x19, 0, 0}, {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0},
++ {0x5812, 0x04, 0, 0}, {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0},
++ {0x5815, 0x06, 0, 0}, {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0},
++ {0x5818, 0x0a, 0, 0}, {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0},
++ {0x581b, 0x00, 0, 0}, {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0},
++ {0x581e, 0x08, 0, 0}, {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0},
++ {0x5821, 0x05, 0, 0}, {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0},
++ {0x5824, 0x00, 0, 0}, {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0},
++ {0x5827, 0x0c, 0, 0}, {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0},
++ {0x582a, 0x06, 0, 0}, {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0},
++ {0x582d, 0x07, 0, 0}, {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0},
++ {0x5830, 0x18, 0, 0}, {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0},
++ {0x5833, 0x0a, 0, 0}, {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0},
++ {0x5836, 0x15, 0, 0}, {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0},
++ {0x5839, 0x1f, 0, 0}, {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0},
++ {0x583c, 0x17, 0, 0}, {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0},
++ {0x583f, 0x53, 0, 0}, {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0},
++ {0x5842, 0x0d, 0, 0}, {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0},
++ {0x5845, 0x09, 0, 0}, {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0},
++ {0x5848, 0x10, 0, 0}, {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0},
++ {0x584b, 0x0e, 0, 0}, {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0},
++ {0x584e, 0x11, 0, 0}, {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0},
++ {0x5851, 0x0c, 0, 0}, {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0},
++ {0x5854, 0x10, 0, 0}, {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0},
++ {0x5857, 0x0b, 0, 0}, {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0},
++ {0x585a, 0x0d, 0, 0}, {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0},
++ {0x585d, 0x0c, 0, 0}, {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0},
++ {0x5860, 0x0c, 0, 0}, {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0},
++ {0x5863, 0x08, 0, 0}, {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0},
++ {0x5866, 0x18, 0, 0}, {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0},
++ {0x5869, 0x19, 0, 0}, {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0},
++ {0x586c, 0x13, 0, 0}, {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0},
++ {0x586f, 0x16, 0, 0}, {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0},
++ {0x5872, 0x10, 0, 0}, {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0},
++ {0x5875, 0x16, 0, 0}, {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0},
++ {0x5878, 0x10, 0, 0}, {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0},
++ {0x587b, 0x14, 0, 0}, {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0},
++ {0x587e, 0x11, 0, 0}, {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0},
++ {0x5881, 0x15, 0, 0}, {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0},
++ {0x5884, 0x15, 0, 0}, {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0},
++ {0x5887, 0x17, 0, 0}, {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0},
++ {0x3702, 0x10, 0, 0}, {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0},
++ {0x370b, 0x40, 0, 0}, {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0},
++ {0x3632, 0x52, 0, 0}, {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0},
++ {0x5785, 0x07, 0, 0}, {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0},
++ {0x3604, 0x48, 0, 0}, {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0},
++ {0x370f, 0xc0, 0, 0}, {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0},
++ {0x5007, 0x00, 0, 0}, {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0},
++ {0x5013, 0x00, 0, 0}, {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0},
++ {0x5087, 0x00, 0, 0}, {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0},
++ {0x302b, 0x00, 0, 0}, {0x3503, 0x07, 0, 0}, {0x3011, 0x07, 0, 0},
++ {0x350c, 0x04, 0, 0}, {0x350d, 0x58, 0, 0}, {0x3801, 0x8a, 0, 0},
++ {0x3803, 0x0a, 0, 0}, {0x3804, 0x07, 0, 0}, {0x3805, 0x80, 0, 0},
++ {0x3806, 0x04, 0, 0}, {0x3807, 0x39, 0, 0}, {0x3808, 0x07, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
++ {0x380c, 0x09, 0, 0}, {0x380d, 0xd6, 0, 0}, {0x380e, 0x04, 0, 0},
++ {0x380f, 0x58, 0, 0}, {0x381c, 0x11, 0, 0}, {0x381d, 0xba, 0, 0},
++ {0x381e, 0x04, 0, 0}, {0x381f, 0x48, 0, 0}, {0x3820, 0x04, 0, 0},
++ {0x3821, 0x18, 0, 0}, {0x3a08, 0x14, 0, 0}, {0x3a09, 0xe0, 0, 0},
++ {0x3a0a, 0x11, 0, 0}, {0x3a0b, 0x60, 0, 0}, {0x3a0d, 0x04, 0, 0},
++ {0x3a0e, 0x03, 0, 0}, {0x5682, 0x07, 0, 0}, {0x5683, 0x60, 0, 0},
++ {0x5686, 0x04, 0, 0}, {0x5687, 0x1c, 0, 0}, {0x5001, 0x7f, 0, 0},
++ {0x3503, 0x00, 0, 0}, {0x3010, 0x10, 0, 0}, {0x460c, 0x20, 0, 0},
++ {0x460b, 0x37, 0, 0}, {0x471c, 0xd0, 0, 0}, {0x471d, 0x05, 0, 0},
++ {0x3815, 0x01, 0, 0}, {0x3818, 0x00, 0x08, 0}, {0x501f, 0x00, 0, 0},
++ {0x4300, 0x30, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3819, 0x80, 0, 0},
++ {0x5002, 0xe0, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_QVGA_320_240[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
++ {0x380b, 0xf0, 0, 0}, {0x3a00, 0x78, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_NTSC_720_480[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3824, 0x11, 0, 0}, {0x3825, 0xb4, 0, 0}, {0x3826, 0x00, 0, 0},
++ {0x3827, 0x3d, 0, 0}, {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380A, 0x01, 0, 0}, {0x380B, 0xe0, 0, 0},
++ {0x3804, 0x05, 0, 0}, {0x3805, 0x00, 0, 0}, {0x3806, 0x03, 0, 0},
++ {0x3807, 0x55, 0, 0}, {0x5686, 0x03, 0, 0}, {0x5687, 0x55, 0, 0},
++ {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++};
++
++static struct reg_value ov5642_setting_15fps_PAL_720_576[] = {
++ {0x3103, 0x93, 0, 0}, {0x3008, 0x82, 0, 0}, {0x3017, 0x7f, 0, 0},
++ {0x3018, 0xfc, 0, 0}, {0x3810, 0xc2, 0, 0}, {0x3615, 0xf0, 0, 0},
++ {0x3000, 0x00, 0, 0}, {0x3001, 0x00, 0, 0}, {0x3002, 0x5c, 0, 0},
++ {0x3003, 0x00, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3005, 0xff, 0, 0},
++ {0x3006, 0x43, 0, 0}, {0x3007, 0x37, 0, 0}, {0x3011, 0x08, 0, 0},
++ {0x3010, 0x10, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3815, 0x04, 0, 0},
++ {0x370c, 0xa0, 0, 0}, {0x3602, 0xfc, 0, 0}, {0x3612, 0xff, 0, 0},
++ {0x3634, 0xc0, 0, 0}, {0x3613, 0x00, 0, 0}, {0x3605, 0x7c, 0, 0},
++ {0x3621, 0x09, 0, 0}, {0x3622, 0x60, 0, 0}, {0x3604, 0x40, 0, 0},
++ {0x3603, 0xa7, 0, 0}, {0x3603, 0x27, 0, 0}, {0x4000, 0x21, 0, 0},
++ {0x401d, 0x22, 0, 0}, {0x3600, 0x54, 0, 0}, {0x3605, 0x04, 0, 0},
++ {0x3606, 0x3f, 0, 0}, {0x3c01, 0x80, 0, 0}, {0x5000, 0x4f, 0, 0},
++ {0x5020, 0x04, 0, 0}, {0x5181, 0x79, 0, 0}, {0x5182, 0x00, 0, 0},
++ {0x5185, 0x22, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5001, 0xff, 0, 0},
++ {0x5500, 0x0a, 0, 0}, {0x5504, 0x00, 0, 0}, {0x5505, 0x7f, 0, 0},
++ {0x5080, 0x08, 0, 0}, {0x300e, 0x18, 0, 0}, {0x4610, 0x00, 0, 0},
++ {0x471d, 0x05, 0, 0}, {0x4708, 0x06, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0}, {0x380b, 0xe0, 0, 0},
++ {0x380e, 0x07, 0, 0}, {0x380f, 0xd0, 0, 0}, {0x501f, 0x00, 0, 0},
++ {0x5000, 0x4f, 0, 0}, {0x4300, 0x30, 0, 0}, {0x3503, 0x07, 0, 0},
++ {0x3501, 0x73, 0, 0}, {0x3502, 0x80, 0, 0}, {0x350b, 0x00, 0, 0},
++ {0x3503, 0x07, 0, 0}, {0x3824, 0x11, 0, 0}, {0x3501, 0x1e, 0, 0},
++ {0x3502, 0x80, 0, 0}, {0x350b, 0x7f, 0, 0}, {0x380c, 0x0c, 0, 0},
++ {0x380d, 0x80, 0, 0}, {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0},
++ {0x3a0d, 0x04, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3818, 0xc1, 0, 0},
++ {0x3705, 0xdb, 0, 0}, {0x370a, 0x81, 0, 0}, {0x3801, 0x80, 0, 0},
++ {0x3621, 0x87, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3803, 0x08, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x3810, 0x40, 0, 0}, {0x3804, 0x05, 0, 0},
++ {0x3805, 0x00, 0, 0}, {0x5682, 0x05, 0, 0}, {0x5683, 0x00, 0, 0},
++ {0x3806, 0x03, 0, 0}, {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0},
++ {0x5687, 0xbc, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a1a, 0x05, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3a18, 0x00, 0, 0}, {0x3a19, 0x7c, 0, 0},
++ {0x3a08, 0x12, 0, 0}, {0x3a09, 0xc0, 0, 0}, {0x3a0a, 0x0f, 0, 0},
++ {0x3a0b, 0xa0, 0, 0}, {0x350c, 0x07, 0, 0}, {0x350d, 0xd0, 0, 0},
++ {0x3500, 0x00, 0, 0}, {0x3501, 0x00, 0, 0}, {0x3502, 0x00, 0, 0},
++ {0x350a, 0x00, 0, 0}, {0x350b, 0x00, 0, 0}, {0x3503, 0x00, 0, 0},
++ {0x528a, 0x02, 0, 0}, {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0},
++ {0x528d, 0x08, 0, 0}, {0x528e, 0x08, 0, 0}, {0x528f, 0x10, 0, 0},
++ {0x5290, 0x10, 0, 0}, {0x5292, 0x00, 0, 0}, {0x5293, 0x02, 0, 0},
++ {0x5294, 0x00, 0, 0}, {0x5295, 0x02, 0, 0}, {0x5296, 0x00, 0, 0},
++ {0x5297, 0x02, 0, 0}, {0x5298, 0x00, 0, 0}, {0x5299, 0x02, 0, 0},
++ {0x529a, 0x00, 0, 0}, {0x529b, 0x02, 0, 0}, {0x529c, 0x00, 0, 0},
++ {0x529d, 0x02, 0, 0}, {0x529e, 0x00, 0, 0}, {0x529f, 0x02, 0, 0},
++ {0x3a0f, 0x3c, 0, 0}, {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3c, 0, 0},
++ {0x3a1e, 0x30, 0, 0}, {0x3a11, 0x70, 0, 0}, {0x3a1f, 0x10, 0, 0},
++ {0x3030, 0x2b, 0, 0}, {0x3a02, 0x00, 0, 0}, {0x3a03, 0x7d, 0, 0},
++ {0x3a04, 0x00, 0, 0}, {0x3a14, 0x00, 0, 0}, {0x3a15, 0x7d, 0, 0},
++ {0x3a16, 0x00, 0, 0}, {0x3a00, 0x78, 0, 0}, {0x3a08, 0x09, 0, 0},
++ {0x3a09, 0x60, 0, 0}, {0x3a0a, 0x07, 0, 0}, {0x3a0b, 0xd0, 0, 0},
++ {0x3a0d, 0x08, 0, 0}, {0x3a0e, 0x06, 0, 0}, {0x5193, 0x70, 0, 0},
++ {0x589b, 0x04, 0, 0}, {0x589a, 0xc5, 0, 0}, {0x401e, 0x20, 0, 0},
++ {0x4001, 0x42, 0, 0}, {0x401c, 0x04, 0, 0}, {0x528a, 0x01, 0, 0},
++ {0x528b, 0x04, 0, 0}, {0x528c, 0x08, 0, 0}, {0x528d, 0x10, 0, 0},
++ {0x528e, 0x20, 0, 0}, {0x528f, 0x28, 0, 0}, {0x5290, 0x30, 0, 0},
++ {0x5292, 0x00, 0, 0}, {0x5293, 0x01, 0, 0}, {0x5294, 0x00, 0, 0},
++ {0x5295, 0x04, 0, 0}, {0x5296, 0x00, 0, 0}, {0x5297, 0x08, 0, 0},
++ {0x5298, 0x00, 0, 0}, {0x5299, 0x10, 0, 0}, {0x529a, 0x00, 0, 0},
++ {0x529b, 0x20, 0, 0}, {0x529c, 0x00, 0, 0}, {0x529d, 0x28, 0, 0},
++ {0x529e, 0x00, 0, 0}, {0x529f, 0x30, 0, 0}, {0x5282, 0x00, 0, 0},
++ {0x5300, 0x00, 0, 0}, {0x5301, 0x20, 0, 0}, {0x5302, 0x00, 0, 0},
++ {0x5303, 0x7c, 0, 0}, {0x530c, 0x00, 0, 0}, {0x530d, 0x0c, 0, 0},
++ {0x530e, 0x20, 0, 0}, {0x530f, 0x80, 0, 0}, {0x5310, 0x20, 0, 0},
++ {0x5311, 0x80, 0, 0}, {0x5308, 0x20, 0, 0}, {0x5309, 0x40, 0, 0},
++ {0x5304, 0x00, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x00, 0, 0},
++ {0x5307, 0x80, 0, 0}, {0x5314, 0x08, 0, 0}, {0x5315, 0x20, 0, 0},
++ {0x5319, 0x30, 0, 0}, {0x5316, 0x10, 0, 0}, {0x5317, 0x00, 0, 0},
++ {0x5318, 0x02, 0, 0}, {0x5380, 0x01, 0, 0}, {0x5381, 0x00, 0, 0},
++ {0x5382, 0x00, 0, 0}, {0x5383, 0x4e, 0, 0}, {0x5384, 0x00, 0, 0},
++ {0x5385, 0x0f, 0, 0}, {0x5386, 0x00, 0, 0}, {0x5387, 0x00, 0, 0},
++ {0x5388, 0x01, 0, 0}, {0x5389, 0x15, 0, 0}, {0x538a, 0x00, 0, 0},
++ {0x538b, 0x31, 0, 0}, {0x538c, 0x00, 0, 0}, {0x538d, 0x00, 0, 0},
++ {0x538e, 0x00, 0, 0}, {0x538f, 0x0f, 0, 0}, {0x5390, 0x00, 0, 0},
++ {0x5391, 0xab, 0, 0}, {0x5392, 0x00, 0, 0}, {0x5393, 0xa2, 0, 0},
++ {0x5394, 0x08, 0, 0}, {0x5480, 0x14, 0, 0}, {0x5481, 0x21, 0, 0},
++ {0x5482, 0x36, 0, 0}, {0x5483, 0x57, 0, 0}, {0x5484, 0x65, 0, 0},
++ {0x5485, 0x71, 0, 0}, {0x5486, 0x7d, 0, 0}, {0x5487, 0x87, 0, 0},
++ {0x5488, 0x91, 0, 0}, {0x5489, 0x9a, 0, 0}, {0x548a, 0xaa, 0, 0},
++ {0x548b, 0xb8, 0, 0}, {0x548c, 0xcd, 0, 0}, {0x548d, 0xdd, 0, 0},
++ {0x548e, 0xea, 0, 0}, {0x548f, 0x1d, 0, 0}, {0x5490, 0x05, 0, 0},
++ {0x5491, 0x00, 0, 0}, {0x5492, 0x04, 0, 0}, {0x5493, 0x20, 0, 0},
++ {0x5494, 0x03, 0, 0}, {0x5495, 0x60, 0, 0}, {0x5496, 0x02, 0, 0},
++ {0x5497, 0xb8, 0, 0}, {0x5498, 0x02, 0, 0}, {0x5499, 0x86, 0, 0},
++ {0x549a, 0x02, 0, 0}, {0x549b, 0x5b, 0, 0}, {0x549c, 0x02, 0, 0},
++ {0x549d, 0x3b, 0, 0}, {0x549e, 0x02, 0, 0}, {0x549f, 0x1c, 0, 0},
++ {0x54a0, 0x02, 0, 0}, {0x54a1, 0x04, 0, 0}, {0x54a2, 0x01, 0, 0},
++ {0x54a3, 0xed, 0, 0}, {0x54a4, 0x01, 0, 0}, {0x54a5, 0xc5, 0, 0},
++ {0x54a6, 0x01, 0, 0}, {0x54a7, 0xa5, 0, 0}, {0x54a8, 0x01, 0, 0},
++ {0x54a9, 0x6c, 0, 0}, {0x54aa, 0x01, 0, 0}, {0x54ab, 0x41, 0, 0},
++ {0x54ac, 0x01, 0, 0}, {0x54ad, 0x20, 0, 0}, {0x54ae, 0x00, 0, 0},
++ {0x54af, 0x16, 0, 0}, {0x54b0, 0x01, 0, 0}, {0x54b1, 0x20, 0, 0},
++ {0x54b2, 0x00, 0, 0}, {0x54b3, 0x10, 0, 0}, {0x54b4, 0x00, 0, 0},
++ {0x54b5, 0xf0, 0, 0}, {0x54b6, 0x00, 0, 0}, {0x54b7, 0xdf, 0, 0},
++ {0x5402, 0x3f, 0, 0}, {0x5403, 0x00, 0, 0}, {0x3406, 0x00, 0, 0},
++ {0x5180, 0xff, 0, 0}, {0x5181, 0x52, 0, 0}, {0x5182, 0x11, 0, 0},
++ {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0},
++ {0x5186, 0x06, 0, 0}, {0x5187, 0x08, 0, 0}, {0x5188, 0x08, 0, 0},
++ {0x5189, 0x7c, 0, 0}, {0x518a, 0x60, 0, 0}, {0x518b, 0xb2, 0, 0},
++ {0x518c, 0xb2, 0, 0}, {0x518d, 0x44, 0, 0}, {0x518e, 0x3d, 0, 0},
++ {0x518f, 0x58, 0, 0}, {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0},
++ {0x5192, 0x04, 0, 0}, {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0},
++ {0x5195, 0xf0, 0, 0}, {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0},
++ {0x5198, 0x04, 0, 0}, {0x5199, 0x12, 0, 0}, {0x519a, 0x04, 0, 0},
++ {0x519b, 0x00, 0, 0}, {0x519c, 0x06, 0, 0}, {0x519d, 0x82, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5025, 0x80, 0, 0}, {0x3a0f, 0x38, 0, 0},
++ {0x3a10, 0x30, 0, 0}, {0x3a1b, 0x3a, 0, 0}, {0x3a1e, 0x2e, 0, 0},
++ {0x3a11, 0x60, 0, 0}, {0x3a1f, 0x10, 0, 0}, {0x5688, 0xa6, 0, 0},
++ {0x5689, 0x6a, 0, 0}, {0x568a, 0xea, 0, 0}, {0x568b, 0xae, 0, 0},
++ {0x568c, 0xa6, 0, 0}, {0x568d, 0x6a, 0, 0}, {0x568e, 0x62, 0, 0},
++ {0x568f, 0x26, 0, 0}, {0x5583, 0x40, 0, 0}, {0x5584, 0x40, 0, 0},
++ {0x5580, 0x02, 0, 0}, {0x5000, 0xcf, 0, 0}, {0x5800, 0x27, 0, 0},
++ {0x5801, 0x19, 0, 0}, {0x5802, 0x12, 0, 0}, {0x5803, 0x0f, 0, 0},
++ {0x5804, 0x10, 0, 0}, {0x5805, 0x15, 0, 0}, {0x5806, 0x1e, 0, 0},
++ {0x5807, 0x2f, 0, 0}, {0x5808, 0x15, 0, 0}, {0x5809, 0x0d, 0, 0},
++ {0x580a, 0x0a, 0, 0}, {0x580b, 0x09, 0, 0}, {0x580c, 0x0a, 0, 0},
++ {0x580d, 0x0c, 0, 0}, {0x580e, 0x12, 0, 0}, {0x580f, 0x19, 0, 0},
++ {0x5810, 0x0b, 0, 0}, {0x5811, 0x07, 0, 0}, {0x5812, 0x04, 0, 0},
++ {0x5813, 0x03, 0, 0}, {0x5814, 0x03, 0, 0}, {0x5815, 0x06, 0, 0},
++ {0x5816, 0x0a, 0, 0}, {0x5817, 0x0f, 0, 0}, {0x5818, 0x0a, 0, 0},
++ {0x5819, 0x05, 0, 0}, {0x581a, 0x01, 0, 0}, {0x581b, 0x00, 0, 0},
++ {0x581c, 0x00, 0, 0}, {0x581d, 0x03, 0, 0}, {0x581e, 0x08, 0, 0},
++ {0x581f, 0x0c, 0, 0}, {0x5820, 0x0a, 0, 0}, {0x5821, 0x05, 0, 0},
++ {0x5822, 0x01, 0, 0}, {0x5823, 0x00, 0, 0}, {0x5824, 0x00, 0, 0},
++ {0x5825, 0x03, 0, 0}, {0x5826, 0x08, 0, 0}, {0x5827, 0x0c, 0, 0},
++ {0x5828, 0x0e, 0, 0}, {0x5829, 0x08, 0, 0}, {0x582a, 0x06, 0, 0},
++ {0x582b, 0x04, 0, 0}, {0x582c, 0x05, 0, 0}, {0x582d, 0x07, 0, 0},
++ {0x582e, 0x0b, 0, 0}, {0x582f, 0x12, 0, 0}, {0x5830, 0x18, 0, 0},
++ {0x5831, 0x10, 0, 0}, {0x5832, 0x0c, 0, 0}, {0x5833, 0x0a, 0, 0},
++ {0x5834, 0x0b, 0, 0}, {0x5835, 0x0e, 0, 0}, {0x5836, 0x15, 0, 0},
++ {0x5837, 0x19, 0, 0}, {0x5838, 0x32, 0, 0}, {0x5839, 0x1f, 0, 0},
++ {0x583a, 0x18, 0, 0}, {0x583b, 0x16, 0, 0}, {0x583c, 0x17, 0, 0},
++ {0x583d, 0x1e, 0, 0}, {0x583e, 0x26, 0, 0}, {0x583f, 0x53, 0, 0},
++ {0x5840, 0x10, 0, 0}, {0x5841, 0x0f, 0, 0}, {0x5842, 0x0d, 0, 0},
++ {0x5843, 0x0c, 0, 0}, {0x5844, 0x0e, 0, 0}, {0x5845, 0x09, 0, 0},
++ {0x5846, 0x11, 0, 0}, {0x5847, 0x10, 0, 0}, {0x5848, 0x10, 0, 0},
++ {0x5849, 0x10, 0, 0}, {0x584a, 0x10, 0, 0}, {0x584b, 0x0e, 0, 0},
++ {0x584c, 0x10, 0, 0}, {0x584d, 0x10, 0, 0}, {0x584e, 0x11, 0, 0},
++ {0x584f, 0x10, 0, 0}, {0x5850, 0x0f, 0, 0}, {0x5851, 0x0c, 0, 0},
++ {0x5852, 0x0f, 0, 0}, {0x5853, 0x10, 0, 0}, {0x5854, 0x10, 0, 0},
++ {0x5855, 0x0f, 0, 0}, {0x5856, 0x0e, 0, 0}, {0x5857, 0x0b, 0, 0},
++ {0x5858, 0x10, 0, 0}, {0x5859, 0x0d, 0, 0}, {0x585a, 0x0d, 0, 0},
++ {0x585b, 0x0c, 0, 0}, {0x585c, 0x0c, 0, 0}, {0x585d, 0x0c, 0, 0},
++ {0x585e, 0x0b, 0, 0}, {0x585f, 0x0c, 0, 0}, {0x5860, 0x0c, 0, 0},
++ {0x5861, 0x0c, 0, 0}, {0x5862, 0x0d, 0, 0}, {0x5863, 0x08, 0, 0},
++ {0x5864, 0x11, 0, 0}, {0x5865, 0x18, 0, 0}, {0x5866, 0x18, 0, 0},
++ {0x5867, 0x19, 0, 0}, {0x5868, 0x17, 0, 0}, {0x5869, 0x19, 0, 0},
++ {0x586a, 0x16, 0, 0}, {0x586b, 0x13, 0, 0}, {0x586c, 0x13, 0, 0},
++ {0x586d, 0x12, 0, 0}, {0x586e, 0x13, 0, 0}, {0x586f, 0x16, 0, 0},
++ {0x5870, 0x14, 0, 0}, {0x5871, 0x12, 0, 0}, {0x5872, 0x10, 0, 0},
++ {0x5873, 0x11, 0, 0}, {0x5874, 0x11, 0, 0}, {0x5875, 0x16, 0, 0},
++ {0x5876, 0x14, 0, 0}, {0x5877, 0x11, 0, 0}, {0x5878, 0x10, 0, 0},
++ {0x5879, 0x0f, 0, 0}, {0x587a, 0x10, 0, 0}, {0x587b, 0x14, 0, 0},
++ {0x587c, 0x13, 0, 0}, {0x587d, 0x12, 0, 0}, {0x587e, 0x11, 0, 0},
++ {0x587f, 0x11, 0, 0}, {0x5880, 0x12, 0, 0}, {0x5881, 0x15, 0, 0},
++ {0x5882, 0x14, 0, 0}, {0x5883, 0x15, 0, 0}, {0x5884, 0x15, 0, 0},
++ {0x5885, 0x15, 0, 0}, {0x5886, 0x13, 0, 0}, {0x5887, 0x17, 0, 0},
++ {0x3710, 0x10, 0, 0}, {0x3632, 0x51, 0, 0}, {0x3702, 0x10, 0, 0},
++ {0x3703, 0xb2, 0, 0}, {0x3704, 0x18, 0, 0}, {0x370b, 0x40, 0, 0},
++ {0x370d, 0x03, 0, 0}, {0x3631, 0x01, 0, 0}, {0x3632, 0x52, 0, 0},
++ {0x3606, 0x24, 0, 0}, {0x3620, 0x96, 0, 0}, {0x5785, 0x07, 0, 0},
++ {0x3a13, 0x30, 0, 0}, {0x3600, 0x52, 0, 0}, {0x3604, 0x48, 0, 0},
++ {0x3606, 0x1b, 0, 0}, {0x370d, 0x0b, 0, 0}, {0x370f, 0xc0, 0, 0},
++ {0x3709, 0x01, 0, 0}, {0x3823, 0x00, 0, 0}, {0x5007, 0x00, 0, 0},
++ {0x5009, 0x00, 0, 0}, {0x5011, 0x00, 0, 0}, {0x5013, 0x00, 0, 0},
++ {0x519e, 0x00, 0, 0}, {0x5086, 0x00, 0, 0}, {0x5087, 0x00, 0, 0},
++ {0x5088, 0x00, 0, 0}, {0x5089, 0x00, 0, 0}, {0x302b, 0x00, 0, 0},
++ {0x3824, 0x11, 0, 0}, {0x3825, 0xdc, 0, 0}, {0x3826, 0x00, 0, 0},
++ {0x3827, 0x08, 0, 0}, {0x380c, 0x0c, 0, 0}, {0x380d, 0x80, 0, 0},
++ {0x380e, 0x03, 0, 0}, {0x380f, 0xe8, 0, 0}, {0x3808, 0x02, 0, 0},
++ {0x3809, 0xd0, 0, 0}, {0x380A, 0x02, 0, 0}, {0x380B, 0x40, 0, 0},
++ {0x3804, 0x04, 0, 0}, {0x3805, 0xb0, 0, 0}, {0x3806, 0x03, 0, 0},
++ {0x3807, 0xc0, 0, 0}, {0x5686, 0x03, 0, 0}, {0x5687, 0xc0, 0, 0},
++ {0x5682, 0x04, 0, 0}, {0x5683, 0xb0, 0, 0},
++};
++
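++/*
++ * Mode setting tables indexed as [frame rate][mode].  The first row holds
++ * the 15 fps register tables, the second row the 30 fps tables; entries
++ * with a NULL init_data_ptr (1080P and QSXGA at 30 fps) are not available
++ * at that frame rate.
++ */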
++static struct ov5642_mode_info ov5642_mode_info_data[2][ov5642_mode_MAX + 1] = {
++ {
++ {ov5642_mode_VGA_640_480, 640, 480,
++ ov5642_setting_15fps_VGA_640_480,
++ ARRAY_SIZE(ov5642_setting_15fps_VGA_640_480)},
++ {ov5642_mode_QVGA_320_240, 320, 240,
++ ov5642_setting_15fps_QVGA_320_240,
++ ARRAY_SIZE(ov5642_setting_15fps_QVGA_320_240)},
++ {ov5642_mode_NTSC_720_480, 720, 480,
++ ov5642_setting_15fps_NTSC_720_480,
++ ARRAY_SIZE(ov5642_setting_15fps_NTSC_720_480)},
++ {ov5642_mode_PAL_720_576, 720, 576,
++ ov5642_setting_15fps_PAL_720_576,
++ ARRAY_SIZE(ov5642_setting_15fps_PAL_720_576)},
++ {ov5642_mode_720P_1280_720, 1280, 720,
++ ov5642_setting_15fps_720P_1280_720,
++ ARRAY_SIZE(ov5642_setting_15fps_720P_1280_720)},
++ {ov5642_mode_1080P_1920_1080, 1920, 1080,
++ ov5642_setting_15fps_1080P_1920_1080,
++ ARRAY_SIZE(ov5642_setting_15fps_1080P_1920_1080)},
++ {ov5642_mode_QSXGA_2592_1944, 2592, 1944,
++ ov5642_setting_15fps_QSXGA_2592_1944,
++ ARRAY_SIZE(ov5642_setting_15fps_QSXGA_2592_1944)},
++ {ov5642_mode_QCIF_176_144, 176, 144,
++ ov5642_setting_15fps_QCIF_176_144,
++ ARRAY_SIZE(ov5642_setting_15fps_QCIF_176_144)},
++ {ov5642_mode_XGA_1024_768, 1024, 768,
++ ov5642_setting_15fps_XGA_1024_768,
++ ARRAY_SIZE(ov5642_setting_15fps_XGA_1024_768)},
++ },
++ {
++ {ov5642_mode_VGA_640_480, 640, 480,
++ ov5642_setting_30fps_VGA_640_480,
++ ARRAY_SIZE(ov5642_setting_30fps_VGA_640_480)},
++ {ov5642_mode_QVGA_320_240, 320, 240,
++ ov5642_setting_30fps_QVGA_320_240,
++ ARRAY_SIZE(ov5642_setting_30fps_QVGA_320_240)},
++ {ov5642_mode_NTSC_720_480, 720, 480,
++ ov5642_setting_30fps_NTSC_720_480,
++ ARRAY_SIZE(ov5642_setting_30fps_NTSC_720_480)},
++ {ov5642_mode_PAL_720_576, 720, 576,
++ ov5642_setting_30fps_PAL_720_576,
++ ARRAY_SIZE(ov5642_setting_30fps_PAL_720_576)},
++ {ov5642_mode_720P_1280_720, 1280, 720,
++ ov5642_setting_30fps_720P_1280_720,
++ ARRAY_SIZE(ov5642_setting_30fps_720P_1280_720)},
++ {ov5642_mode_1080P_1920_1080, 0, 0, NULL, 0},
++ {ov5642_mode_QSXGA_2592_1944, 0, 0, NULL, 0},
++ {ov5642_mode_QCIF_176_144, 176, 144,
++ ov5642_setting_30fps_QCIF_176_144,
++ ARRAY_SIZE(ov5642_setting_30fps_QCIF_176_144)},
++ {ov5642_mode_XGA_1024_768, 1024, 768,
++ ov5642_setting_30fps_XGA_1024_768,
++ ARRAY_SIZE(ov5642_setting_30fps_XGA_1024_768)},
++ },
++};
++
++static struct regulator *io_regulator;
++static struct regulator *core_regulator;
++static struct regulator *analog_regulator;
++static struct regulator *gpo_regulator;
++
++static int ov5642_probe(struct i2c_client *adapter,
++ const struct i2c_device_id *device_id);
++static int ov5642_remove(struct i2c_client *client);
++
++static s32 ov5642_read_reg(u16 reg, u8 *val);
++static s32 ov5642_write_reg(u16 reg, u8 val);
++
++static const struct i2c_device_id ov5642_id[] = {
++ {"ov5642", 0},
++ {"ov564x", 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5642_id);
++
++static struct i2c_driver ov5642_i2c_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "ov5642",
++ },
++ .probe = ov5642_probe,
++ .remove = ov5642_remove,
++ .id_table = ov5642_id,
++};
++
++static void ov5642_standby(s32 enable)
++{
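++	/* PWDN high puts the sensor in standby, low wakes it. */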
++ if (enable)
++ gpio_set_value(pwn_gpio, 1);
++ else
++ gpio_set_value(pwn_gpio, 0);
++
++ msleep(2);
++}
++
++static void ov5642_reset(void)
++{
++ /* camera reset */
++ gpio_set_value(rst_gpio, 1);
++
++ /* camera power down */
++ gpio_set_value(pwn_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 0);
++ msleep(5);
++
++ gpio_set_value(rst_gpio, 0);
++ msleep(1);
++
++ gpio_set_value(rst_gpio, 1);
++ msleep(5);
++
++ gpio_set_value(pwn_gpio, 1);
++}
++
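++/*
++ * Request and enable the sensor supplies (DOVDD, DVDD, AVDD).  A supply
++ * that cannot be obtained is logged and skipped rather than treated as
++ * fatal.
++ */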
++static int ov5642_power_on(struct device *dev)
++{
++ int ret = 0;
++
++ io_regulator = devm_regulator_get(dev, "DOVDD");
++ if (!IS_ERR(io_regulator)) {
++ regulator_set_voltage(io_regulator,
++ OV5642_VOLTAGE_DIGITAL_IO,
++ OV5642_VOLTAGE_DIGITAL_IO);
++ ret = regulator_enable(io_regulator);
++		if (ret) {
++			pr_err("%s: failed to enable io regulator\n", __func__);
++			return ret;
++		} else {
++			dev_dbg(dev,
++				"%s: io regulator enabled\n", __func__);
++		}
++	} else {
++		pr_err("%s: cannot get io regulator\n", __func__);
++		io_regulator = NULL;
++	}
++
++ core_regulator = devm_regulator_get(dev, "DVDD");
++ if (!IS_ERR(core_regulator)) {
++ regulator_set_voltage(core_regulator,
++ OV5642_VOLTAGE_DIGITAL_CORE,
++ OV5642_VOLTAGE_DIGITAL_CORE);
++ ret = regulator_enable(core_regulator);
++		if (ret) {
++			pr_err("%s: failed to enable core regulator\n", __func__);
++			return ret;
++		} else {
++			dev_dbg(dev,
++				"%s: core regulator enabled\n", __func__);
++		}
++	} else {
++		core_regulator = NULL;
++		pr_err("%s: cannot get core regulator\n", __func__);
++	}
++
++ analog_regulator = devm_regulator_get(dev, "AVDD");
++ if (!IS_ERR(analog_regulator)) {
++ regulator_set_voltage(analog_regulator,
++ OV5642_VOLTAGE_ANALOG,
++ OV5642_VOLTAGE_ANALOG);
++ ret = regulator_enable(analog_regulator);
++		if (ret) {
++			pr_err("%s: failed to enable analog regulator\n",
++				__func__);
++			return ret;
++		} else {
++			dev_dbg(dev,
++				"%s: analog regulator enabled\n", __func__);
++		}
++	} else {
++		analog_regulator = NULL;
++		pr_err("%s: cannot get analog regulator\n", __func__);
++	}
++
++ return ret;
++}
++
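++/*
++ * Register access helpers: the OV5642 takes a 16-bit register address,
++ * sent MSB first, followed by an 8-bit value on the I2C bus.
++ */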
++static s32 ov5642_write_reg(u16 reg, u8 val)
++{
++ u8 au8Buf[3] = {0};
++
++ au8Buf[0] = reg >> 8;
++ au8Buf[1] = reg & 0xff;
++ au8Buf[2] = val;
++
++ if (i2c_master_send(ov5642_data.i2c_client, au8Buf, 3) < 0) {
++ pr_err("%s:write reg error:reg=%x,val=%x\n",
++ __func__, reg, val);
++ return -1;
++ }
++
++ return 0;
++}
++
++static s32 ov5642_read_reg(u16 reg, u8 *val)
++{
++ u8 au8RegBuf[2] = {0};
++ u8 u8RdVal = 0;
++
++ au8RegBuf[0] = reg >> 8;
++ au8RegBuf[1] = reg & 0xff;
++
++ if (2 != i2c_master_send(ov5642_data.i2c_client, au8RegBuf, 2)) {
++ pr_err("%s:write reg error:reg=%x\n",
++ __func__, reg);
++ return -1;
++ }
++
++ if (1 != i2c_master_recv(ov5642_data.i2c_client, &u8RdVal, 1)) {
++ pr_err("%s:read reg error:reg=%x,val=%x\n",
++ __func__, reg, u8RdVal);
++ return -1;
++ }
++
++ *val = u8RdVal;
++
++ return u8RdVal;
++}
++
++static int ov5642_set_rot_mode(struct reg_value *rot_mode)
++{
++ s32 i = 0;
++ s32 iModeSettingArySize = 2;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++ for (i = 0; i < iModeSettingArySize; ++i, ++rot_mode) {
++ Delay_ms = rot_mode->u32Delay_ms;
++ RegAddr = rot_mode->u16RegAddr;
++ Val = rot_mode->u8Val;
++ Mask = rot_mode->u8Mask;
++
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0) {
++ pr_err("%s, read reg 0x%x failed\n",
++ __func__, RegAddr);
++ goto err;
++ }
++
++ Val |= RegVal;
++ Val &= Mask;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0) {
++ pr_err("%s, write reg 0x%x failed\n",
++ __func__, RegAddr);
++ goto err;
++ }
++
++ if (Delay_ms)
++ mdelay(Delay_ms);
++ }
++err:
++ return retval;
++}
++static int ov5642_init_mode(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode);
++static int ov5642_write_snapshot_para(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode);
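++
++/*
++ * Switch capture modes.  Two transitions at an unchanged frame rate
++ * (QSXGA -> VGA and VGA -> QVGA) use short dedicated register tables;
++ * every other change goes through ov5642_write_snapshot_para().
++ */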
++static int ov5642_change_mode(enum ov5642_frame_rate new_frame_rate,
++ enum ov5642_frame_rate old_frame_rate,
++ enum ov5642_mode new_mode,
++ enum ov5642_mode orig_mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 i = 0;
++ s32 iModeSettingArySize = 0;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++
++ if (new_mode > ov5642_mode_MAX || new_mode < ov5642_mode_MIN) {
++ pr_err("Wrong ov5642 mode detected!\n");
++ return -1;
++ }
++
++ if ((new_frame_rate == old_frame_rate) &&
++ (new_mode == ov5642_mode_VGA_640_480) &&
++ (orig_mode == ov5642_mode_QSXGA_2592_1944)) {
++ pModeSetting = ov5642_setting_QSXGA_2_VGA;
++ iModeSettingArySize = ARRAY_SIZE(ov5642_setting_QSXGA_2_VGA);
++ ov5642_data.pix.width = 640;
++ ov5642_data.pix.height = 480;
++ } else if ((new_frame_rate == old_frame_rate) &&
++ (new_mode == ov5642_mode_QVGA_320_240) &&
++ (orig_mode == ov5642_mode_VGA_640_480)) {
++ pModeSetting = ov5642_setting_VGA_2_QVGA;
++ iModeSettingArySize = ARRAY_SIZE(ov5642_setting_VGA_2_QVGA);
++ ov5642_data.pix.width = 320;
++ ov5642_data.pix.height = 240;
++ } else {
++ retval = ov5642_write_snapshot_para(new_frame_rate, new_mode);
++ goto err;
++ }
++
++ if (ov5642_data.pix.width == 0 || ov5642_data.pix.height == 0 ||
++ pModeSetting == NULL || iModeSettingArySize == 0)
++ return -EINVAL;
++
++ for (i = 0; i < iModeSettingArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0) {
++ pr_err("read reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0) {
++ pr_err("write reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++static int ov5642_init_mode(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 i = 0;
++ s32 iModeSettingArySize = 0;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++
++ if (mode > ov5642_mode_MAX || mode < ov5642_mode_MIN) {
++ pr_err("Wrong ov5642 mode detected!\n");
++ return -1;
++ }
++
++ pModeSetting = ov5642_mode_info_data[frame_rate][mode].init_data_ptr;
++ iModeSettingArySize =
++ ov5642_mode_info_data[frame_rate][mode].init_data_size;
++
++ ov5642_data.pix.width = ov5642_mode_info_data[frame_rate][mode].width;
++ ov5642_data.pix.height = ov5642_mode_info_data[frame_rate][mode].height;
++
++ if (ov5642_data.pix.width == 0 || ov5642_data.pix.height == 0 ||
++ pModeSetting == NULL || iModeSettingArySize == 0)
++ return -EINVAL;
++
++ for (i = 0; i < iModeSettingArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0) {
++ pr_err("read reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0) {
++ pr_err("write reg error addr=0x%x", RegAddr);
++ goto err;
++ }
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
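++/*
++ * Load the register table for a new mode while preserving exposure: AEC/AGC
++ * is switched to manual, the preview exposure, frame length and gain are
++ * read back, the new mode is programmed via ov5642_init_mode(), and the
++ * exposure/gain pair is rescaled to the capture frame timing and written
++ * back.
++ */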
++static int ov5642_write_snapshot_para(enum ov5642_frame_rate frame_rate,
++ enum ov5642_mode mode)
++{
++ int ret = 0;
++ bool m_60Hz = false;
++ u16 cap_frame_rate = 50;
++ u16 g_prev_frame_rate = 225;
++
++ u8 ev_low, ev_mid, ev_high;
++ u8 ret_l, ret_m, ret_h, gain, lines_10ms;
++ u16 ulcap_ev, icap_gain, prev_maxlines;
++ u32 ulcap_ev_gain, cap_maxlines, g_prev_ev;
++
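++	/* Switch AEC/AGC to manual before the mode change. */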
++ ov5642_write_reg(0x3503, 0x07);
++
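++	/* Read back the preview exposure (registers 0x3500-0x3502). */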
++ ret_h = ret_m = ret_l = 0;
++ g_prev_ev = 0;
++ ov5642_read_reg(0x3500, &ret_h);
++ ov5642_read_reg(0x3501, &ret_m);
++ ov5642_read_reg(0x3502, &ret_l);
++ g_prev_ev = (ret_h << 12) + (ret_m << 4) + (ret_l >> 4);
++
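++	/* Read back the preview frame length from 0x380e/0x380f. */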
++ ret_h = ret_m = ret_l = 0;
++ prev_maxlines = 0;
++ ov5642_read_reg(0x380e, &ret_h);
++ ov5642_read_reg(0x380f, &ret_l);
++ prev_maxlines = (ret_h << 8) + ret_l;
++	/* Read back AGC gain for preview */
++ gain = 0;
++ ov5642_read_reg(0x350b, &gain);
++
++ ret = ov5642_init_mode(frame_rate, mode);
++ if (ret < 0)
++ return ret;
++
++ ret_h = ret_m = ret_l = 0;
++ ov5642_read_reg(0x380e, &ret_h);
++ ov5642_read_reg(0x380f, &ret_l);
++ cap_maxlines = (ret_h << 8) + ret_l;
++ if (m_60Hz == true)
++ lines_10ms = cap_frame_rate * cap_maxlines/12000;
++ else
++ lines_10ms = cap_frame_rate * cap_maxlines/10000;
++
++ if (prev_maxlines == 0)
++ prev_maxlines = 1;
++
++ ulcap_ev = (g_prev_ev*(cap_frame_rate)*(cap_maxlines))/
++ (((prev_maxlines)*(g_prev_frame_rate)));
++ icap_gain = (gain & 0x0f) + 16;
++ if (gain & 0x10)
++ icap_gain = icap_gain << 1;
++
++ if (gain & 0x20)
++ icap_gain = icap_gain << 1;
++
++ if (gain & 0x40)
++ icap_gain = icap_gain << 1;
++
++ if (gain & 0x80)
++ icap_gain = icap_gain << 1;
++
++ ulcap_ev_gain = 2 * ulcap_ev * icap_gain;
++
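++	/* Split the combined exposure*gain budget: if it fits within the
++	 * sensor's maximum line count, spend it on exposure, rounded down to
++	 * a multiple of lines_10ms (derived above from the 50/60 Hz setting);
++	 * otherwise cap exposure at cap_maxlines and push the rest into gain. */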
++ if (ulcap_ev_gain < cap_maxlines*16) {
++ ulcap_ev = ulcap_ev_gain/16;
++ if (ulcap_ev > lines_10ms) {
++ ulcap_ev /= lines_10ms;
++ ulcap_ev *= lines_10ms;
++ }
++ } else
++ ulcap_ev = cap_maxlines;
++
++ if (ulcap_ev == 0)
++ ulcap_ev = 1;
++
++ icap_gain = (ulcap_ev_gain*2/ulcap_ev + 1)/2;
++ ev_low = ((unsigned char)ulcap_ev)<<4;
++ ev_mid = (unsigned char)(ulcap_ev >> 4) & 0xff;
++ ev_high = (unsigned char)(ulcap_ev >> 12);
++
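++	/* Re-encode the capture gain into the register format: each stage bit
++	 * (0x10..0x80) halves the residual gain, and the low nibble stores the
++	 * remaining (gain - 16) fractional steps. */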
++ gain = 0;
++ if (icap_gain > 31) {
++ gain |= 0x10;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 31) {
++ gain |= 0x20;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 31) {
++ gain |= 0x40;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 31) {
++ gain |= 0x80;
++ icap_gain = icap_gain >> 1;
++ }
++ if (icap_gain > 16)
++ gain |= ((icap_gain - 16) & 0x0f);
++
++ if (gain == 0x10)
++ gain = 0x11;
++
++ ov5642_write_reg(0x350b, gain);
++ ov5642_write_reg(0x3502, ev_low);
++ ov5642_write_reg(0x3501, ev_mid);
++ ov5642_write_reg(0x3500, ev_high);
++ msleep(500);
++
++	return ret;
++}
++
++
++/* --------------- IOCTL functions from v4l2_int_ioctl_desc --------------- */
++
++static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
++{
++ if (s == NULL) {
++ pr_err(" ERROR!! no slave device set!\n");
++ return -1;
++ }
++
++ memset(p, 0, sizeof(*p));
++ p->u.bt656.clock_curr = ov5642_data.mclk;
++ pr_debug(" clock_curr=mclk=%d\n", ov5642_data.mclk);
++ p->if_type = V4L2_IF_TYPE_BT656;
++ p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
++ p->u.bt656.clock_min = OV5642_XCLK_MIN;
++ p->u.bt656.clock_max = OV5642_XCLK_MAX;
++ p->u.bt656.bt_sync_correct = 1; /* Indicate external vsync */
++
++ return 0;
++}
++
++/*!
++ * ioctl_s_power - V4L2 sensor interface handler for VIDIOC_S_POWER ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @on: indicates power mode (on or off)
++ *
++ * Turns the power on or off, depending on the value of on and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_power(struct v4l2_int_device *s, int on)
++{
++ struct sensor_data *sensor = s->priv;
++
++ if (on && !sensor->on) {
++ if (io_regulator)
++ if (regulator_enable(io_regulator) != 0)
++ return -EIO;
++ if (core_regulator)
++ if (regulator_enable(core_regulator) != 0)
++ return -EIO;
++ if (gpo_regulator)
++ if (regulator_enable(gpo_regulator) != 0)
++ return -EIO;
++ if (analog_regulator)
++ if (regulator_enable(analog_regulator) != 0)
++ return -EIO;
++ /* Make sure power on */
++ ov5642_standby(0);
++ } else if (!on && sensor->on) {
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++ if (core_regulator)
++ regulator_disable(core_regulator);
++ if (io_regulator)
++ regulator_disable(io_regulator);
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ ov5642_standby(1);
++ }
++
++ sensor->on = on;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
++ *
++ * Returns the sensor's video CAPTURE parameters.
++ */
++static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_captureparm *cparm = &a->parm.capture;
++ int ret = 0;
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ memset(a, 0, sizeof(*a));
++ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ cparm->capability = sensor->streamcap.capability;
++ cparm->timeperframe = sensor->streamcap.timeperframe;
++ cparm->capturemode = sensor->streamcap.capturemode;
++ ret = 0;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
++ *
++ * Configures the sensor to use the input parameters, if possible. If
++ * not possible, reverts to the old parameters and returns the
++ * appropriate error code.
++ */
++static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
++{
++ struct sensor_data *sensor = s->priv;
++ struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
++	u32 tgt_fps, old_fps;	/* target frames per second */
++ enum ov5642_frame_rate new_frame_rate, old_frame_rate;
++ int ret = 0;
++
++ /* Make sure power on */
++ ov5642_standby(0);
++
++ switch (a->type) {
++ /* This is the only case currently handled. */
++ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
++ /* Check that the new frame rate is allowed. */
++ if ((timeperframe->numerator == 0) ||
++ (timeperframe->denominator == 0)) {
++ timeperframe->denominator = DEFAULT_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps > MAX_FPS) {
++ timeperframe->denominator = MAX_FPS;
++ timeperframe->numerator = 1;
++ } else if (tgt_fps < MIN_FPS) {
++ timeperframe->denominator = MIN_FPS;
++ timeperframe->numerator = 1;
++ }
++
++ /* Actual frame rate we use */
++ tgt_fps = timeperframe->denominator /
++ timeperframe->numerator;
++
++ if (tgt_fps == 15)
++ new_frame_rate = ov5642_15_fps;
++ else if (tgt_fps == 30)
++ new_frame_rate = ov5642_30_fps;
++ else {
++ pr_err(" The camera frame rate is not supported!\n");
++ return -EINVAL;
++ }
++
++ if (sensor->streamcap.timeperframe.numerator != 0)
++ old_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++ else
++ old_fps = 30;
++
++ if (old_fps == 15)
++ old_frame_rate = ov5642_15_fps;
++ else if (old_fps == 30)
++ old_frame_rate = ov5642_30_fps;
++ else {
++ pr_warning(" No valid frame rate set!\n");
++ old_frame_rate = ov5642_30_fps;
++ }
++
++ ret = ov5642_change_mode(new_frame_rate, old_frame_rate,
++ a->parm.capture.capturemode,
++ sensor->streamcap.capturemode);
++ if (ret < 0)
++ return ret;
++
++ sensor->streamcap.timeperframe = *timeperframe;
++ sensor->streamcap.capturemode =
++ (u32)a->parm.capture.capturemode;
++ break;
++
++ /* These are all the possible cases. */
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++ case V4L2_BUF_TYPE_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_VBI_OUTPUT:
++ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
++ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
++ pr_debug(" type is not " \
++ "V4L2_BUF_TYPE_VIDEO_CAPTURE but %d\n",
++ a->type);
++ ret = -EINVAL;
++ break;
++
++ default:
++ pr_debug(" type is unknown - %d\n", a->type);
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
++ * @s: pointer to standard V4L2 device structure
++ * @f: pointer to standard V4L2 v4l2_format structure
++ *
++ * Returns the sensor's current pixel format in the v4l2_format
++ * parameter.
++ */
++static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
++{
++ struct sensor_data *sensor = s->priv;
++
++ f->fmt.pix = sensor->pix;
++
++ return 0;
++}
++
++/*!
++ * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
++ *
++ * If the requested control is supported, returns the control's current
++ * value from the video_control[] array. Otherwise, returns -EINVAL
++ * if the control is not supported.
++ */
++static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int ret = 0;
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ vc->value = ov5642_data.brightness;
++ break;
++ case V4L2_CID_HUE:
++ vc->value = ov5642_data.hue;
++ break;
++ case V4L2_CID_CONTRAST:
++ vc->value = ov5642_data.contrast;
++ break;
++ case V4L2_CID_SATURATION:
++ vc->value = ov5642_data.saturation;
++ break;
++ case V4L2_CID_RED_BALANCE:
++ vc->value = ov5642_data.red;
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ vc->value = ov5642_data.blue;
++ break;
++ case V4L2_CID_EXPOSURE:
++ vc->value = ov5642_data.ae_mode;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/*!
++ * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
++ *
++ * If the requested control is supported, sets the control's current
++ * value in HW (and updates the video_control[] array). Otherwise,
++ * returns -EINVAL if the control is not supported.
++ */
++static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
++{
++ int retval = 0;
++ struct sensor_data *sensor = s->priv;
++ __u32 captureMode = sensor->streamcap.capturemode;
++ struct reg_value *rot_mode = NULL;
++
++ pr_debug("In ov5642:ioctl_s_ctrl %d\n",
++ vc->id);
++
++ switch (vc->id) {
++ case V4L2_CID_BRIGHTNESS:
++ break;
++ case V4L2_CID_CONTRAST:
++ break;
++ case V4L2_CID_SATURATION:
++ break;
++ case V4L2_CID_HUE:
++ break;
++ case V4L2_CID_AUTO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_DO_WHITE_BALANCE:
++ break;
++ case V4L2_CID_RED_BALANCE:
++ break;
++ case V4L2_CID_BLUE_BALANCE:
++ break;
++ case V4L2_CID_GAMMA:
++ break;
++ case V4L2_CID_EXPOSURE:
++ break;
++ case V4L2_CID_AUTOGAIN:
++ break;
++ case V4L2_CID_GAIN:
++ break;
++ case V4L2_CID_HFLIP:
++ break;
++ case V4L2_CID_VFLIP:
++ break;
++ case V4L2_CID_MXC_ROT:
++ case V4L2_CID_MXC_VF_ROT:
++ switch (vc->value) {
++ case V4L2_MXC_ROTATE_NONE:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_none_FULL;
++ else
++ rot_mode = ov5642_rot_none_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ case V4L2_MXC_ROTATE_VERT_FLIP:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_vert_flip_FULL;
++ else
++				rot_mode = ov5642_rot_vert_flip_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ case V4L2_MXC_ROTATE_HORIZ_FLIP:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_horiz_flip_FULL;
++ else
++ rot_mode = ov5642_rot_horiz_flip_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ case V4L2_MXC_ROTATE_180:
++ if (captureMode == ov5642_mode_QSXGA_2592_1944)
++ rot_mode = ov5642_rot_180_FULL;
++ else
++ rot_mode = ov5642_rot_180_VGA;
++
++ if (ov5642_set_rot_mode(rot_mode))
++ retval = -EPERM;
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++ break;
++ default:
++ retval = -EPERM;
++ break;
++ }
++
++ return retval;
++}
++
++/*!
++ * ioctl_enum_framesizes - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMESIZES ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_framesizes(struct v4l2_int_device *s,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if (fsize->index > ov5642_mode_MAX)
++ return -EINVAL;
++
++ fsize->pixel_format = ov5642_data.pix.pixelformat;
++ fsize->discrete.width =
++ max(ov5642_mode_info_data[0][fsize->index].width,
++ ov5642_mode_info_data[1][fsize->index].width);
++ fsize->discrete.height =
++ max(ov5642_mode_info_data[0][fsize->index].height,
++ ov5642_mode_info_data[1][fsize->index].height);
++ return 0;
++}
++
++/*!
++ * ioctl_enum_frameintervals - V4L2 sensor interface handler for
++ * VIDIOC_ENUM_FRAMEINTERVALS ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @fival: standard V4L2 VIDIOC_ENUM_FRAMEINTERVALS ioctl structure
++ *
++ * Return 0 if successful, otherwise -EINVAL.
++ */
++static int ioctl_enum_frameintervals(struct v4l2_int_device *s,
++ struct v4l2_frmivalenum *fival)
++{
++ int i, j, count;
++
++ if (fival->index < 0 || fival->index > ov5642_mode_MAX)
++ return -EINVAL;
++
++ if (fival->pixel_format == 0 || fival->width == 0 ||
++ fival->height == 0) {
++ pr_warning("Please assign pixelformat, width and height.\n");
++ return -EINVAL;
++ }
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++
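++	/* Walk every (frame rate, mode) table entry; the index-th entry that
++	 * matches the requested format and frame size selects which frame
++	 * rate is reported as the interval denominator. */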
++ count = 0;
++ for (i = 0; i < ARRAY_SIZE(ov5642_mode_info_data); i++) {
++ for (j = 0; j < (ov5642_mode_MAX + 1); j++) {
++ if (fival->pixel_format == ov5642_data.pix.pixelformat
++ && fival->width == ov5642_mode_info_data[i][j].width
++ && fival->height == ov5642_mode_info_data[i][j].height
++ && ov5642_mode_info_data[i][j].init_data_ptr != NULL) {
++ count++;
++ }
++ if (fival->index == (count - 1)) {
++ fival->discrete.denominator =
++ ov5642_framerates[i];
++ return 0;
++ }
++ }
++ }
++
++ return -EINVAL;
++}
++
++/*!
++ * ioctl_g_chip_ident - V4L2 sensor interface handler for
++ * VIDIOC_DBG_G_CHIP_IDENT ioctl
++ * @s: pointer to standard V4L2 device structure
++ * @id: pointer to int
++ *
++ * Return 0.
++ */
++static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
++{
++ ((struct v4l2_dbg_chip_ident *)id)->match.type =
++ V4L2_CHIP_MATCH_I2C_DRIVER;
++ strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name, "ov5642_camera");
++
++ return 0;
++}
++
++/*!
++ * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
++ * @s: pointer to standard V4L2 device structure
++ */
++static int ioctl_init(struct v4l2_int_device *s)
++{
++
++ return 0;
++}
++
++/*!
++ * ioctl_enum_fmt_cap - V4L2 sensor interface handler for VIDIOC_ENUM_FMT
++ * @s: pointer to standard V4L2 device structure
++ * @fmt: pointer to standard V4L2 fmt description structure
++ *
++ * Return 0.
++ */
++static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index > 0) /* only 1 pixelformat support so far */
++ return -EINVAL;
++
++ fmt->pixelformat = ov5642_data.pix.pixelformat;
++
++ return 0;
++}
++
++/*!
++ * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Initialise the device when the slave attaches to the master.
++ */
++static int ioctl_dev_init(struct v4l2_int_device *s)
++{
++ struct reg_value *pModeSetting = NULL;
++ s32 i = 0;
++ s32 iModeSettingArySize = 0;
++ register u32 Delay_ms = 0;
++ register u16 RegAddr = 0;
++ register u8 Mask = 0;
++ register u8 Val = 0;
++ u8 RegVal = 0;
++ int retval = 0;
++
++ struct sensor_data *sensor = s->priv;
++ u32 tgt_xclk; /* target xclk */
++	u32 tgt_fps;	/* target frames per second */
++ enum ov5642_frame_rate frame_rate;
++
++ ov5642_data.on = true;
++
++ /* mclk */
++ tgt_xclk = ov5642_data.mclk;
++ tgt_xclk = min(tgt_xclk, (u32)OV5642_XCLK_MAX);
++ tgt_xclk = max(tgt_xclk, (u32)OV5642_XCLK_MIN);
++ ov5642_data.mclk = tgt_xclk;
++
++ pr_debug(" Setting mclk to %d MHz\n", tgt_xclk / 1000000);
++
++ /* Default camera frame rate is set in probe */
++ tgt_fps = sensor->streamcap.timeperframe.denominator /
++ sensor->streamcap.timeperframe.numerator;
++
++ if (tgt_fps == 15)
++ frame_rate = ov5642_15_fps;
++ else if (tgt_fps == 30)
++ frame_rate = ov5642_30_fps;
++ else
++ return -EINVAL; /* Only support 15fps or 30fps now. */
++
++ pModeSetting = ov5642_initial_setting;
++ iModeSettingArySize = ARRAY_SIZE(ov5642_initial_setting);
++
++ for (i = 0; i < iModeSettingArySize; ++i, ++pModeSetting) {
++ Delay_ms = pModeSetting->u32Delay_ms;
++ RegAddr = pModeSetting->u16RegAddr;
++ Val = pModeSetting->u8Val;
++ Mask = pModeSetting->u8Mask;
++ if (Mask) {
++ retval = ov5642_read_reg(RegAddr, &RegVal);
++ if (retval < 0)
++ goto err;
++
++ RegVal &= ~(u8)Mask;
++ Val &= Mask;
++ Val |= RegVal;
++ }
++
++ retval = ov5642_write_reg(RegAddr, Val);
++ if (retval < 0)
++ goto err;
++
++ if (Delay_ms)
++ msleep(Delay_ms);
++ }
++err:
++ return retval;
++}
++
++/*!
++ * ioctl_dev_exit - V4L2 sensor interface handler for vidioc_int_dev_exit_num
++ * @s: pointer to standard V4L2 device structure
++ *
++ * Deinitialise the device when the slave detaches from the master.
++ */
++static int ioctl_dev_exit(struct v4l2_int_device *s)
++{
++ return 0;
++}
++
++/*!
++ * This structure defines all the ioctls for this module and links them to the
++ * enumeration.
++ */
++static struct v4l2_int_ioctl_desc ov5642_ioctl_desc[] = {
++ { vidioc_int_dev_init_num,
++ (v4l2_int_ioctl_func *)ioctl_dev_init },
++ { vidioc_int_dev_exit_num, ioctl_dev_exit},
++ { vidioc_int_s_power_num,
++ (v4l2_int_ioctl_func *)ioctl_s_power },
++ { vidioc_int_g_ifparm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ifparm },
++/* { vidioc_int_g_needs_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_g_needs_reset }, */
++/* { vidioc_int_reset_num,
++ (v4l2_int_ioctl_func *)ioctl_reset }, */
++ { vidioc_int_init_num,
++ (v4l2_int_ioctl_func *)ioctl_init },
++ { vidioc_int_enum_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
++/* { vidioc_int_try_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_try_fmt_cap }, */
++ { vidioc_int_g_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
++/* { vidioc_int_s_fmt_cap_num,
++ (v4l2_int_ioctl_func *)ioctl_s_fmt_cap }, */
++ { vidioc_int_g_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_g_parm },
++ { vidioc_int_s_parm_num,
++ (v4l2_int_ioctl_func *)ioctl_s_parm },
++/* { vidioc_int_queryctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_queryctrl }, */
++ { vidioc_int_g_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_g_ctrl },
++ { vidioc_int_s_ctrl_num,
++ (v4l2_int_ioctl_func *)ioctl_s_ctrl },
++ { vidioc_int_enum_framesizes_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_framesizes },
++ { vidioc_int_enum_frameintervals_num,
++ (v4l2_int_ioctl_func *)ioctl_enum_frameintervals },
++ { vidioc_int_g_chip_ident_num,
++ (v4l2_int_ioctl_func *)ioctl_g_chip_ident },
++};
++
++static struct v4l2_int_slave ov5642_slave = {
++ .ioctls = ov5642_ioctl_desc,
++ .num_ioctls = ARRAY_SIZE(ov5642_ioctl_desc),
++};
++
++static struct v4l2_int_device ov5642_int_device = {
++ .module = THIS_MODULE,
++ .name = "ov5642",
++ .type = v4l2_int_type_slave,
++ .u = {
++ .slave = &ov5642_slave,
++ },
++};
++
++/*!
++ * ov5642 I2C probe function
++ *
++ * @param adapter struct i2c_adapter *
++ * @return Error code indicating success or failure
++ */
++static int ov5642_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct pinctrl *pinctrl;
++ struct device *dev = &client->dev;
++ int retval;
++ u8 chip_id_high, chip_id_low;
++
++ /* ov5642 pinctrl */
++ pinctrl = devm_pinctrl_get_select_default(dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(dev, "ov5642 setup pinctrl failed!");
++ return PTR_ERR(pinctrl);
++ }
++
++ /* request power down pin */
++ pwn_gpio = of_get_named_gpio(dev->of_node, "pwn-gpios", 0);
++ if (!gpio_is_valid(pwn_gpio)) {
++ dev_warn(dev, "no sensor pwdn pin available");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, pwn_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5642_pwdn");
++ if (retval < 0)
++ return retval;
++
++ /* request reset pin */
++ rst_gpio = of_get_named_gpio(dev->of_node, "rst-gpios", 0);
++ if (!gpio_is_valid(rst_gpio)) {
++ dev_warn(dev, "no sensor reset pin available");
++ return -EINVAL;
++ }
++ retval = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
++ "ov5642_reset");
++ if (retval < 0)
++ return retval;
++
++ /* Set initial values for the sensor struct. */
++ memset(&ov5642_data, 0, sizeof(ov5642_data));
++ ov5642_data.sensor_clk = devm_clk_get(dev, "csi_mclk");
++	if (IS_ERR(ov5642_data.sensor_clk)) {
++		dev_err(dev, "clock-frequency missing or invalid\n");
++		/* capture the error code before the handle is cleared */
++		retval = PTR_ERR(ov5642_data.sensor_clk);
++		/* assuming clock enabled by default */
++		ov5642_data.sensor_clk = NULL;
++		return retval;
++	}
++
++ retval = of_property_read_u32(dev->of_node, "mclk",
++ (u32 *) &(ov5642_data.mclk));
++ if (retval) {
++ dev_err(dev, "mclk missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "mclk_source",
++ (u32 *) &(ov5642_data.mclk_source));
++ if (retval) {
++ dev_err(dev, "mclk_source missing or invalid\n");
++ return retval;
++ }
++
++ retval = of_property_read_u32(dev->of_node, "csi_id",
++ &(ov5642_data.csi));
++ if (retval) {
++ dev_err(dev, "csi_id missing or invalid\n");
++ return retval;
++ }
++
++ clk_prepare_enable(ov5642_data.sensor_clk);
++
++ ov5642_data.io_init = ov5642_reset;
++ ov5642_data.i2c_client = client;
++ ov5642_data.pix.pixelformat = V4L2_PIX_FMT_YUYV;
++ ov5642_data.pix.width = 640;
++ ov5642_data.pix.height = 480;
++ ov5642_data.streamcap.capability = V4L2_MODE_HIGHQUALITY |
++ V4L2_CAP_TIMEPERFRAME;
++ ov5642_data.streamcap.capturemode = 0;
++ ov5642_data.streamcap.timeperframe.denominator = DEFAULT_FPS;
++ ov5642_data.streamcap.timeperframe.numerator = 1;
++
++ ov5642_power_on(&client->dev);
++
++ ov5642_reset();
++
++ ov5642_standby(0);
++
++ retval = ov5642_read_reg(OV5642_CHIP_ID_HIGH_BYTE, &chip_id_high);
++ if (retval < 0 || chip_id_high != 0x56) {
++ pr_warning("camera ov5642 is not found\n");
++ clk_disable_unprepare(ov5642_data.sensor_clk);
++ return -ENODEV;
++ }
++ retval = ov5642_read_reg(OV5642_CHIP_ID_LOW_BYTE, &chip_id_low);
++ if (retval < 0 || chip_id_low != 0x42) {
++ pr_warning("camera ov5642 is not found\n");
++ clk_disable_unprepare(ov5642_data.sensor_clk);
++ return -ENODEV;
++ }
++
++ ov5642_standby(1);
++
++ ov5642_int_device.priv = &ov5642_data;
++ retval = v4l2_int_device_register(&ov5642_int_device);
++
++ clk_disable_unprepare(ov5642_data.sensor_clk);
++
++ pr_info("camera ov5642 is found\n");
++ return retval;
++}
++
++/*!
++ * ov5642 I2C detach function
++ *
++ * @param client struct i2c_client *
++ * @return Error code indicating success or failure
++ */
++static int ov5642_remove(struct i2c_client *client)
++{
++ v4l2_int_device_unregister(&ov5642_int_device);
++
++ if (gpo_regulator)
++ regulator_disable(gpo_regulator);
++
++ if (analog_regulator)
++ regulator_disable(analog_regulator);
++
++ if (core_regulator)
++ regulator_disable(core_regulator);
++
++ if (io_regulator)
++ regulator_disable(io_regulator);
++
++ return 0;
++}
++
++/*!
++ * ov5642 init function
++ * Called by insmod ov5642_camera.ko.
++ *
++ * @return Error code indicating success or failure
++ */
++static __init int ov5642_init(void)
++{
++	int err;
++
++ err = i2c_add_driver(&ov5642_i2c_driver);
++ if (err != 0)
++ pr_err("%s:driver registration failed, error=%d\n",
++ __func__, err);
++
++ return err;
++}
++
++/*!
++ * OV5642 cleanup function
++ * Called on rmmod ov5642_camera.ko
++ *
++ * @return Error code indicating success or failure
++ */
++static void __exit ov5642_clean(void)
++{
++ i2c_del_driver(&ov5642_i2c_driver);
++}
++
++module_init(ov5642_init);
++module_exit(ov5642_clean);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("OV5642 Camera Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("1.0");
++MODULE_ALIAS("CSI");
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/output/Kconfig linux-3.14.40/drivers/media/platform/mxc/output/Kconfig
+--- linux-3.14.40.orig/drivers/media/platform/mxc/output/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/output/Kconfig 2015-05-01 14:57:59.267427001 -0500
+@@ -0,0 +1,5 @@
++config VIDEO_MXC_IPU_OUTPUT
++ tristate "IPU v4l2 output support"
++ depends on VIDEO_MXC_OUTPUT && MXC_IPU
++ ---help---
++ This is the video4linux2 driver for IPU post processing video output.
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/output/Makefile linux-3.14.40/drivers/media/platform/mxc/output/Makefile
+--- linux-3.14.40.orig/drivers/media/platform/mxc/output/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/output/Makefile 2015-05-01 14:57:59.267427001 -0500
+@@ -0,0 +1 @@
++obj-$(CONFIG_VIDEO_MXC_IPU_OUTPUT) += mxc_vout.o
+diff -Nur linux-3.14.40.orig/drivers/media/platform/mxc/output/mxc_vout.c linux-3.14.40/drivers/media/platform/mxc/output/mxc_vout.c
+--- linux-3.14.40.orig/drivers/media/platform/mxc/output/mxc_vout.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/media/platform/mxc/output/mxc_vout.c 2015-05-01 14:57:59.271427001 -0500
+@@ -0,0 +1,2265 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/console.h>
++#include <linux/dma-mapping.h>
++#include <linux/init.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/mxc_v4l2.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/videodev2.h>
++#include <linux/vmalloc.h>
++
++#include <media/videobuf-dma-contig.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-ioctl.h>
++
++#define UYVY_BLACK (0x00800080)
++#define RGB_BLACK (0x0)
++#define UV_BLACK (0x80)
++#define Y_BLACK (0x0)
++
++#define MAX_FB_NUM 6
++#define FB_BUFS 3
++#define VDOA_FB_BUFS (FB_BUFS - 1)
++#define VALID_HEIGHT_1080P (1080)
++#define FRAME_HEIGHT_1080P (1088)
++#define FRAME_WIDTH_1080P (1920)
++#define CHECK_TILED_1080P_DISPLAY(vout) \
++ ((((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12) || \
++ ((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12F)) &&\
++ ((vout)->task.input.width == FRAME_WIDTH_1080P) && \
++ ((vout)->task.input.height == FRAME_HEIGHT_1080P) && \
++ ((vout)->task.input.crop.w == FRAME_WIDTH_1080P) && \
++ (((vout)->task.input.crop.h == FRAME_HEIGHT_1080P) || \
++ ((vout)->task.input.crop.h == VALID_HEIGHT_1080P)) && \
++ ((vout)->task.output.width == FRAME_WIDTH_1080P) && \
++ ((vout)->task.output.height == VALID_HEIGHT_1080P) && \
++ ((vout)->task.output.crop.w == FRAME_WIDTH_1080P) && \
++ ((vout)->task.output.crop.h == VALID_HEIGHT_1080P))
++#define CHECK_TILED_1080P_STREAM(vout) \
++ ((((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12) || \
++ ((vout)->task.input.format == IPU_PIX_FMT_TILED_NV12F)) &&\
++ ((vout)->task.input.width == FRAME_WIDTH_1080P) && \
++ ((vout)->task.input.crop.w == FRAME_WIDTH_1080P) && \
++ ((vout)->task.input.height == FRAME_HEIGHT_1080P) && \
++ ((vout)->task.input.crop.h == FRAME_HEIGHT_1080P))
++#define IS_PLANAR_PIXEL_FORMAT(format) \
++ (format == IPU_PIX_FMT_NV12 || \
++ format == IPU_PIX_FMT_YUV420P2 || \
++ format == IPU_PIX_FMT_YUV420P || \
++ format == IPU_PIX_FMT_YVU420P || \
++ format == IPU_PIX_FMT_YUV422P || \
++ format == IPU_PIX_FMT_YVU422P || \
++ format == IPU_PIX_FMT_YUV444P)
++
++#define NSEC_PER_FRAME_30FPS (33333333)
++
++struct mxc_vout_fb {
++ char *name;
++ int ipu_id;
++ struct v4l2_rect crop_bounds;
++ unsigned int disp_fmt;
++ bool disp_support_csc;
++ bool disp_support_windows;
++};
++
++struct dma_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ size_t size;
++};
++
++struct mxc_vout_output {
++ int open_cnt;
++ struct fb_info *fbi;
++ unsigned long fb_smem_start;
++ unsigned long fb_smem_len;
++ struct video_device *vfd;
++ struct mutex mutex;
++ struct mutex task_lock;
++ enum v4l2_buf_type type;
++
++ struct videobuf_queue vbq;
++ spinlock_t vbq_lock;
++
++ struct list_head queue_list;
++ struct list_head active_list;
++
++ struct v4l2_rect crop_bounds;
++ unsigned int disp_fmt;
++ struct mxcfb_pos win_pos;
++ bool disp_support_windows;
++ bool disp_support_csc;
++
++ bool fmt_init;
++ bool release;
++ bool linear_bypass_pp;
++ bool vdoa_1080p;
++ bool tiled_bypass_pp;
++ struct v4l2_rect in_rect;
++ struct ipu_task task;
++ struct ipu_task vdoa_task;
++ struct dma_mem vdoa_work;
++ struct dma_mem vdoa_output[VDOA_FB_BUFS];
++
++ bool timer_stop;
++ struct hrtimer timer;
++ struct workqueue_struct *v4l_wq;
++ struct work_struct disp_work;
++ unsigned long frame_count;
++ unsigned long vdi_frame_cnt;
++ ktime_t start_ktime;
++
++ int ctrl_rotate;
++ int ctrl_vflip;
++ int ctrl_hflip;
++
++ dma_addr_t disp_bufs[FB_BUFS];
++
++ struct videobuf_buffer *pre1_vb;
++ struct videobuf_buffer *pre2_vb;
++};
++
++struct mxc_vout_dev {
++ struct device *dev;
++ struct v4l2_device v4l2_dev;
++ struct mxc_vout_output *out[MAX_FB_NUM];
++ int out_num;
++};
++
++/* Driver Configuration macros */
++#define VOUT_NAME "mxc_vout"
++
++/* Variables configurable through module params*/
++static int debug;
++static int vdi_rate_double;
++static int video_nr = 16;
++
++/* Module parameters */
++module_param(video_nr, int, S_IRUGO);
++MODULE_PARM_DESC(video_nr, "video device numbers");
++module_param(debug, int, 0600);
++MODULE_PARM_DESC(debug, "Debug level (0-1)");
++module_param(vdi_rate_double, int, 0600);
++MODULE_PARM_DESC(vdi_rate_double, "vdi frame rate double on/off");
++
++static const struct v4l2_fmtdesc mxc_formats[] = {
++ {
++ .description = "RGB565",
++ .pixelformat = V4L2_PIX_FMT_RGB565,
++ },
++ {
++ .description = "BGR24",
++ .pixelformat = V4L2_PIX_FMT_BGR24,
++ },
++ {
++ .description = "RGB24",
++ .pixelformat = V4L2_PIX_FMT_RGB24,
++ },
++ {
++ .description = "RGB32",
++ .pixelformat = V4L2_PIX_FMT_RGB32,
++ },
++ {
++ .description = "BGR32",
++ .pixelformat = V4L2_PIX_FMT_BGR32,
++ },
++ {
++ .description = "NV12",
++ .pixelformat = V4L2_PIX_FMT_NV12,
++ },
++ {
++ .description = "UYVY",
++ .pixelformat = V4L2_PIX_FMT_UYVY,
++ },
++ {
++ .description = "YUYV",
++ .pixelformat = V4L2_PIX_FMT_YUYV,
++ },
++ {
++ .description = "YUV422 planar",
++ .pixelformat = V4L2_PIX_FMT_YUV422P,
++ },
++ {
++ .description = "YUV444",
++ .pixelformat = V4L2_PIX_FMT_YUV444,
++ },
++ {
++ .description = "YUV420",
++ .pixelformat = V4L2_PIX_FMT_YUV420,
++ },
++ {
++ .description = "YVU420",
++ .pixelformat = V4L2_PIX_FMT_YVU420,
++ },
++ {
++ .description = "TILED NV12P",
++ .pixelformat = IPU_PIX_FMT_TILED_NV12,
++ },
++ {
++ .description = "TILED NV12F",
++ .pixelformat = IPU_PIX_FMT_TILED_NV12F,
++ },
++ {
++ .description = "YUV444 planar",
++ .pixelformat = IPU_PIX_FMT_YUV444P,
++ },
++};
++
++#define NUM_MXC_VOUT_FORMATS (ARRAY_SIZE(mxc_formats))
++
++#define DEF_INPUT_WIDTH 320
++#define DEF_INPUT_HEIGHT 240
++
++static int mxc_vidioc_streamoff(struct file *file, void *fh,
++ enum v4l2_buf_type i);
++
++static struct mxc_vout_fb g_fb_setting[MAX_FB_NUM];
++static int config_disp_output(struct mxc_vout_output *vout);
++static void release_disp_output(struct mxc_vout_output *vout);
++
++static unsigned int get_frame_size(struct mxc_vout_output *vout)
++{
++ unsigned int size;
++
++ if (IPU_PIX_FMT_TILED_NV12 == vout->task.input.format)
++ size = TILED_NV12_FRAME_SIZE(vout->task.input.width,
++ vout->task.input.height);
++ else if (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format) {
++ size = TILED_NV12_FRAME_SIZE(vout->task.input.width,
++ vout->task.input.height/2);
++ size *= 2;
++ } else
++ size = vout->task.input.width * vout->task.input.height *
++ fmt_to_bpp(vout->task.input.format)/8;
++
++ return size;
++}
++
++static void free_dma_buf(struct mxc_vout_output *vout, struct dma_mem *buf)
++{
++ dma_free_coherent(vout->vbq.dev, buf->size, buf->vaddr, buf->paddr);
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "free dma size:0x%x, paddr:0x%x\n",
++ buf->size, buf->paddr);
++ memset(buf, 0, sizeof(*buf));
++}
++
++static int alloc_dma_buf(struct mxc_vout_output *vout, struct dma_mem *buf)
++{
++
++ buf->vaddr = dma_alloc_coherent(vout->vbq.dev, buf->size, &buf->paddr,
++ GFP_DMA | GFP_KERNEL);
++ if (!buf->vaddr) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "cannot get dma buf size:0x%x\n", buf->size);
++ return -ENOMEM;
++ }
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "alloc dma buf size:0x%x, paddr:0x%x\n", buf->size, buf->paddr);
++ return 0;
++}
++
++static ipu_channel_t get_ipu_channel(struct fb_info *fbi)
++{
++ ipu_channel_t ipu_ch = CHAN_NONE;
++ mm_segment_t old_fs;
++
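++	/* Call the framebuffer ioctl from kernel space: temporarily lift the
++	 * address-space check with set_fs(KERNEL_DS) since the argument is a
++	 * kernel pointer rather than a userspace one. */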
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_GET_FB_IPU_CHAN,
++ (unsigned long)&ipu_ch);
++ set_fs(old_fs);
++ }
++
++ return ipu_ch;
++}
++
++static unsigned int get_ipu_fmt(struct fb_info *fbi)
++{
++ mm_segment_t old_fs;
++ unsigned int fb_fmt;
++
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_GET_DIFMT,
++ (unsigned long)&fb_fmt);
++ set_fs(old_fs);
++ }
++
++ return fb_fmt;
++}
++
++static void update_display_setting(void)
++{
++ int i;
++ struct fb_info *fbi;
++ struct v4l2_rect bg_crop_bounds[2];
++
++ for (i = 0; i < num_registered_fb; i++) {
++ fbi = registered_fb[i];
++
++ memset(&g_fb_setting[i], 0, sizeof(struct mxc_vout_fb));
++
++ if (!strncmp(fbi->fix.id, "DISP3", 5))
++ g_fb_setting[i].ipu_id = 0;
++ else
++ g_fb_setting[i].ipu_id = 1;
++
++ g_fb_setting[i].name = fbi->fix.id;
++ g_fb_setting[i].crop_bounds.left = 0;
++ g_fb_setting[i].crop_bounds.top = 0;
++ g_fb_setting[i].crop_bounds.width = fbi->var.xres;
++ g_fb_setting[i].crop_bounds.height = fbi->var.yres;
++ g_fb_setting[i].disp_fmt = get_ipu_fmt(fbi);
++
++ if (get_ipu_channel(fbi) == MEM_BG_SYNC) {
++ bg_crop_bounds[g_fb_setting[i].ipu_id] =
++ g_fb_setting[i].crop_bounds;
++ g_fb_setting[i].disp_support_csc = true;
++ } else if (get_ipu_channel(fbi) == MEM_FG_SYNC) {
++ g_fb_setting[i].disp_support_csc = true;
++ g_fb_setting[i].disp_support_windows = true;
++ }
++ }
++
++ for (i = 0; i < num_registered_fb; i++) {
++ fbi = registered_fb[i];
++
++ if (get_ipu_channel(fbi) == MEM_FG_SYNC)
++ g_fb_setting[i].crop_bounds =
++ bg_crop_bounds[g_fb_setting[i].ipu_id];
++ }
++}
++
++/* called after g_fb_setting filled by update_display_setting */
++static int update_setting_from_fbi(struct mxc_vout_output *vout,
++ struct fb_info *fbi)
++{
++ int i;
++ bool found = false;
++
++ for (i = 0; i < MAX_FB_NUM; i++) {
++ if (g_fb_setting[i].name) {
++ if (!strcmp(fbi->fix.id, g_fb_setting[i].name)) {
++ vout->crop_bounds = g_fb_setting[i].crop_bounds;
++ vout->disp_fmt = g_fb_setting[i].disp_fmt;
++ vout->disp_support_csc =
++ g_fb_setting[i].disp_support_csc;
++ vout->disp_support_windows =
++ g_fb_setting[i].disp_support_windows;
++ found = true;
++ break;
++ }
++ }
++ }
++
++ if (!found) {
++ v4l2_err(vout->vfd->v4l2_dev, "can not find output\n");
++ return -EINVAL;
++ }
++ strlcpy(vout->vfd->name, fbi->fix.id, sizeof(vout->vfd->name));
++
++ memset(&vout->task, 0, sizeof(struct ipu_task));
++
++ vout->task.input.width = DEF_INPUT_WIDTH;
++ vout->task.input.height = DEF_INPUT_HEIGHT;
++ vout->task.input.crop.pos.x = 0;
++ vout->task.input.crop.pos.y = 0;
++ vout->task.input.crop.w = DEF_INPUT_WIDTH;
++ vout->task.input.crop.h = DEF_INPUT_HEIGHT;
++
++ vout->task.output.width = vout->crop_bounds.width;
++ vout->task.output.height = vout->crop_bounds.height;
++ vout->task.output.crop.pos.x = 0;
++ vout->task.output.crop.pos.y = 0;
++ vout->task.output.crop.w = vout->crop_bounds.width;
++ vout->task.output.crop.h = vout->crop_bounds.height;
++ if (colorspaceofpixel(vout->disp_fmt) == YUV_CS)
++ vout->task.output.format = IPU_PIX_FMT_UYVY;
++ else
++ vout->task.output.format = IPU_PIX_FMT_RGB565;
++
++ return 0;
++}
++
++static inline unsigned long get_jiffies(struct timeval *t)
++{
++ struct timeval cur;
++
++ if (t->tv_usec >= 1000000) {
++ t->tv_sec += t->tv_usec / 1000000;
++ t->tv_usec = t->tv_usec % 1000000;
++ }
++
++ do_gettimeofday(&cur);
++ if ((t->tv_sec < cur.tv_sec)
++ || ((t->tv_sec == cur.tv_sec) && (t->tv_usec < cur.tv_usec)))
++ return jiffies;
++
++ if (t->tv_usec < cur.tv_usec) {
++ cur.tv_sec = t->tv_sec - cur.tv_sec - 1;
++ cur.tv_usec = t->tv_usec + 1000000 - cur.tv_usec;
++ } else {
++ cur.tv_sec = t->tv_sec - cur.tv_sec;
++ cur.tv_usec = t->tv_usec - cur.tv_usec;
++ }
++
++ return jiffies + timeval_to_jiffies(&cur);
++}
++
++static bool deinterlace_3_field(struct mxc_vout_output *vout)
++{
++ return (vout->task.input.deinterlace.enable &&
++ (vout->task.input.deinterlace.motion != HIGH_MOTION));
++}
++
++static int set_field_fmt(struct mxc_vout_output *vout, enum v4l2_field field)
++{
++ struct ipu_deinterlace *deinterlace = &vout->task.input.deinterlace;
++
++ switch (field) {
++ /* Images are in progressive format, not interlaced */
++ case V4L2_FIELD_NONE:
++ case V4L2_FIELD_ANY:
++ deinterlace->enable = false;
++ deinterlace->field_fmt = 0;
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "Progressive frame.\n");
++ break;
++ case V4L2_FIELD_INTERLACED_TB:
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "Enable deinterlace TB.\n");
++ deinterlace->enable = true;
++ deinterlace->field_fmt = IPU_DEINTERLACE_FIELD_TOP;
++ break;
++ case V4L2_FIELD_INTERLACED_BT:
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "Enable deinterlace BT.\n");
++ deinterlace->enable = true;
++ deinterlace->field_fmt = IPU_DEINTERLACE_FIELD_BOTTOM;
++ break;
++ default:
++ v4l2_err(vout->vfd->v4l2_dev,
++ "field format:%d not supported yet!\n", field);
++ return -EINVAL;
++ }
++
++ if (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format) {
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "tiled fmt enable deinterlace.\n");
++ deinterlace->enable = true;
++ }
++
++ if (deinterlace->enable && vdi_rate_double)
++ deinterlace->field_fmt |= IPU_DEINTERLACE_RATE_EN;
++
++ return 0;
++}
++
++static bool is_pp_bypass(struct mxc_vout_output *vout)
++{
++ if ((IPU_PIX_FMT_TILED_NV12 == vout->task.input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format))
++ return false;
++ if ((vout->task.input.width == vout->task.output.width) &&
++ (vout->task.input.height == vout->task.output.height) &&
++ (vout->task.input.crop.w == vout->task.output.crop.w) &&
++ (vout->task.input.crop.h == vout->task.output.crop.h) &&
++ (vout->task.output.rotate < IPU_ROTATE_HORIZ_FLIP) &&
++ !vout->task.input.deinterlace.enable) {
++ if (vout->disp_support_csc)
++ return true;
++ else if (!need_csc(vout->task.input.format, vout->disp_fmt))
++ return true;
++ /*
++ * input crop show to full output which can show based on
++ * xres_virtual/yres_virtual
++ */
++ } else if ((vout->task.input.crop.w == vout->task.output.crop.w) &&
++ (vout->task.output.crop.w == vout->task.output.width) &&
++ (vout->task.input.crop.h == vout->task.output.crop.h) &&
++ (vout->task.output.crop.h ==
++ vout->task.output.height) &&
++ (vout->task.output.rotate < IPU_ROTATE_HORIZ_FLIP) &&
++ !vout->task.input.deinterlace.enable) {
++ if (vout->disp_support_csc)
++ return true;
++ else if (!need_csc(vout->task.input.format, vout->disp_fmt))
++ return true;
++ }
++ return false;
++}
++
++static void setup_buf_timer(struct mxc_vout_output *vout,
++ struct videobuf_buffer *vb)
++{
++ ktime_t expiry_time, now;
++
++ /* if timestamp is 0, then default to 30fps */
++ if ((vb->ts.tv_sec == 0) && (vb->ts.tv_usec == 0))
++ expiry_time = ktime_add_ns(vout->start_ktime,
++ NSEC_PER_FRAME_30FPS * vout->frame_count);
++ else
++ expiry_time = timeval_to_ktime(vb->ts);
++
++ now = hrtimer_cb_get_time(&vout->timer);
++ if ((now.tv64 > expiry_time.tv64)) {
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "warning: timer timeout already expired.\n");
++ expiry_time = now;
++ }
++
++ hrtimer_start(&vout->timer, expiry_time, HRTIMER_MODE_ABS);
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "timer handler next "
++ "schedule: %lldnsecs\n", expiry_time.tv64);
++}
++
++static int show_buf(struct mxc_vout_output *vout, int idx,
++ struct ipu_pos *ipos)
++{
++ struct fb_info *fbi = vout->fbi;
++ struct fb_var_screeninfo var;
++ int ret;
++ u32 fb_base = 0;
++
++ memcpy(&var, &fbi->var, sizeof(var));
++
++ if (vout->linear_bypass_pp || vout->tiled_bypass_pp) {
++ /*
++ * crack fb base
++ * NOTE: should not do other fb operation during v4l2
++ */
++ console_lock();
++ fb_base = fbi->fix.smem_start;
++ fbi->fix.smem_start = vout->task.output.paddr;
++ fbi->var.yoffset = ipos->y + 1;
++ var.xoffset = ipos->x;
++ var.yoffset = ipos->y;
++ var.vmode |= FB_VMODE_YWRAP;
++ ret = fb_pan_display(fbi, &var);
++ fbi->fix.smem_start = fb_base;
++ console_unlock();
++ } else {
++ console_lock();
++ var.yoffset = idx * fbi->var.yres;
++ var.vmode &= ~FB_VMODE_YWRAP;
++ ret = fb_pan_display(fbi, &var);
++ console_unlock();
++ }
++
++ return ret;
++}
++
++static void disp_work_func(struct work_struct *work)
++{
++ struct mxc_vout_output *vout =
++ container_of(work, struct mxc_vout_output, disp_work);
++ struct videobuf_queue *q = &vout->vbq;
++ struct videobuf_buffer *vb, *vb_next = NULL;
++ unsigned long flags = 0;
++ struct ipu_pos ipos;
++ int ret = 0;
++ u32 in_fmt = 0;
++ u32 vdi_cnt = 0;
++ u32 vdi_frame;
++ u32 index = 0;
++ u32 ocrop_h = 0;
++ u32 o_height = 0;
++ u32 tiled_interlaced = 0;
++ bool tiled_fmt = false;
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "disp work begin one frame\n");
++
++ spin_lock_irqsave(q->irqlock, flags);
++
++ if (list_empty(&vout->active_list)) {
++ v4l2_warn(vout->vfd->v4l2_dev,
++ "no entry in active_list, should not be here\n");
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return;
++ }
++
++ vb = list_first_entry(&vout->active_list,
++ struct videobuf_buffer, queue);
++ ret = set_field_fmt(vout, vb->field);
++ if (ret < 0) {
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return;
++ }
++ if (deinterlace_3_field(vout)) {
++ if (list_is_singular(&vout->active_list)) {
++ if (list_empty(&vout->queue_list)) {
++ vout->timer_stop = true;
++ spin_unlock_irqrestore(q->irqlock, flags);
++ v4l2_warn(vout->vfd->v4l2_dev,
++					"not enough entries for the 3-field "
++					"deinterlacer\n");
++ return;
++ }
++
++ /*
++ * We need to use the next vb even if it is
++ * not on the active list.
++ */
++ vb_next = list_first_entry(&vout->queue_list,
++ struct videobuf_buffer, queue);
++ } else
++ vb_next = list_first_entry(vout->active_list.next,
++ struct videobuf_buffer, queue);
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "cur field_fmt:%d, next field_fmt:%d.\n",
++ vb->field, vb_next->field);
++ /* repeat the last field during field format changing */
++ if ((vb->field != vb_next->field) &&
++ (vb_next->field != V4L2_FIELD_NONE))
++ vb_next = vb;
++ }
++
++ spin_unlock_irqrestore(q->irqlock, flags);
++
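++	/* When VDI rate doubling is enabled, the same queued frame is run
++	 * through the task again with the FRAME1 field flag toggled; the goto
++	 * at the bottom of this function returns here for each extra pass. */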
++vdi_frame_rate_double:
++ mutex_lock(&vout->task_lock);
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "v4l2 frame_cnt:%ld, vb_field:%d, fmt:%d\n",
++ vout->frame_count, vb->field,
++ vout->task.input.deinterlace.field_fmt);
++ if (vb->memory == V4L2_MEMORY_USERPTR)
++ vout->task.input.paddr = vb->baddr;
++ else
++ vout->task.input.paddr = videobuf_to_dma_contig(vb);
++
++ if (vout->task.input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN)
++ index = vout->vdi_frame_cnt % FB_BUFS;
++ else
++ index = vout->frame_count % FB_BUFS;
++ if (vout->linear_bypass_pp) {
++ vout->task.output.paddr = vout->task.input.paddr;
++ ipos.x = vout->task.input.crop.pos.x;
++ ipos.y = vout->task.input.crop.pos.y;
++ } else {
++ if (deinterlace_3_field(vout)) {
++ if (vb->memory == V4L2_MEMORY_USERPTR)
++ vout->task.input.paddr_n = vb_next->baddr;
++ else
++ vout->task.input.paddr_n =
++ videobuf_to_dma_contig(vb_next);
++ }
++ vout->task.output.paddr = vout->disp_bufs[index];
++ if (vout->vdoa_1080p) {
++ o_height = vout->task.output.height;
++ ocrop_h = vout->task.output.crop.h;
++ vout->task.output.height = FRAME_HEIGHT_1080P;
++ vout->task.output.crop.h = FRAME_HEIGHT_1080P;
++ }
++ tiled_fmt =
++ (IPU_PIX_FMT_TILED_NV12 == vout->task.input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == vout->task.input.format);
++ if (vout->tiled_bypass_pp) {
++ ipos.x = vout->task.input.crop.pos.x;
++ ipos.y = vout->task.input.crop.pos.y;
++ } else if (tiled_fmt) {
++ vout->vdoa_task.input.paddr = vout->task.input.paddr;
++ if (deinterlace_3_field(vout))
++ vout->vdoa_task.input.paddr_n =
++ vout->task.input.paddr_n;
++ vout->vdoa_task.output.paddr = vout->vdoa_work.paddr;
++ ret = ipu_queue_task(&vout->vdoa_task);
++ if (ret < 0) {
++ mutex_unlock(&vout->task_lock);
++ goto err;
++ }
++ vout->task.input.paddr = vout->vdoa_task.output.paddr;
++ in_fmt = vout->task.input.format;
++ vout->task.input.format = vout->vdoa_task.output.format;
++ if (vout->task.input.deinterlace.enable) {
++ tiled_interlaced = 1;
++ vout->task.input.deinterlace.enable = 0;
++ }
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "tiled queue task\n");
++ }
++ ret = ipu_queue_task(&vout->task);
++ if ((!vout->tiled_bypass_pp) && tiled_fmt)
++ vout->task.input.format = in_fmt;
++ if (tiled_interlaced)
++ vout->task.input.deinterlace.enable = 1;
++ if (ret < 0) {
++ mutex_unlock(&vout->task_lock);
++ goto err;
++ }
++ if (vout->vdoa_1080p) {
++ vout->task.output.crop.h = ocrop_h;
++ vout->task.output.height = o_height;
++ }
++ }
++
++ mutex_unlock(&vout->task_lock);
++
++ ret = show_buf(vout, index, &ipos);
++ if (ret < 0)
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "show buf with ret %d\n", ret);
++
++ if (vout->task.input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN) {
++ vdi_frame = vout->task.input.deinterlace.field_fmt
++ & IPU_DEINTERLACE_RATE_FRAME1;
++ if (vdi_frame)
++ vout->task.input.deinterlace.field_fmt &=
++ ~IPU_DEINTERLACE_RATE_FRAME1;
++ else
++ vout->task.input.deinterlace.field_fmt |=
++ IPU_DEINTERLACE_RATE_FRAME1;
++ vout->vdi_frame_cnt++;
++ vdi_cnt++;
++ if (vdi_cnt < IPU_DEINTERLACE_MAX_FRAME)
++ goto vdi_frame_rate_double;
++ }
++ spin_lock_irqsave(q->irqlock, flags);
++
++ list_del(&vb->queue);
++
++ /*
++ * The videobuf before the last one has been shown. Set
++ * VIDEOBUF_DONE state here to avoid tearing issue in ic bypass
++ * case, which makes sure a buffer being shown will not be
++ * dequeued to be overwritten. It also brings side-effect that
++ * the last 2 buffers can not be dequeued correctly, apps need
++ * to take care of it.
++ */
++ if (vout->pre2_vb) {
++ vout->pre2_vb->state = VIDEOBUF_DONE;
++ wake_up_interruptible(&vout->pre2_vb->done);
++ vout->pre2_vb = NULL;
++ }
++
++ if (vout->linear_bypass_pp) {
++ vout->pre2_vb = vout->pre1_vb;
++ vout->pre1_vb = vb;
++ } else {
++ if (vout->pre1_vb) {
++ vout->pre1_vb->state = VIDEOBUF_DONE;
++ wake_up_interruptible(&vout->pre1_vb->done);
++ vout->pre1_vb = NULL;
++ }
++ vb->state = VIDEOBUF_DONE;
++ wake_up_interruptible(&vb->done);
++ }
++
++ vout->frame_count++;
++
++ /* pick next queue buf to setup timer */
++ if (list_empty(&vout->queue_list))
++ vout->timer_stop = true;
++ else {
++ vb = list_first_entry(&vout->queue_list,
++ struct videobuf_buffer, queue);
++ setup_buf_timer(vout, vb);
++ }
++
++ spin_unlock_irqrestore(q->irqlock, flags);
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev, "disp work finish one frame\n");
++
++ return;
++err:
++ v4l2_err(vout->vfd->v4l2_dev, "display work fail ret = %d\n", ret);
++ vout->timer_stop = true;
++ vb->state = VIDEOBUF_ERROR;
++ return;
++}
++
++static enum hrtimer_restart mxc_vout_timer_handler(struct hrtimer *timer)
++{
++ struct mxc_vout_output *vout = container_of(timer,
++ struct mxc_vout_output,
++ timer);
++ struct videobuf_queue *q = &vout->vbq;
++ struct videobuf_buffer *vb;
++ unsigned long flags = 0;
++
++ spin_lock_irqsave(q->irqlock, flags);
++
++ /*
++ * put first queued entry into active, if previous entry did not
++ * finish, setup current entry's timer again.
++ */
++ if (list_empty(&vout->queue_list)) {
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return HRTIMER_NORESTART;
++ }
++
++ /* move videobuf from queued list to active list */
++ vb = list_first_entry(&vout->queue_list,
++ struct videobuf_buffer, queue);
++ list_del(&vb->queue);
++ list_add_tail(&vb->queue, &vout->active_list);
++
++ if (queue_work(vout->v4l_wq, &vout->disp_work) == 0) {
++ v4l2_warn(vout->vfd->v4l2_dev,
++ "disp work was in queue already, queue buf again next time\n");
++ list_del(&vb->queue);
++ list_add(&vb->queue, &vout->queue_list);
++ spin_unlock_irqrestore(q->irqlock, flags);
++ return HRTIMER_NORESTART;
++ }
++
++ vb->state = VIDEOBUF_ACTIVE;
++
++ spin_unlock_irqrestore(q->irqlock, flags);
++
++ return HRTIMER_NORESTART;
++}
++
++/* Video buffer call backs */
++
++/*
++ * The buffer setup function is called by the videobuf layer when the REQBUF
++ * ioctl is called. It is used to set up buffers and return the size and
++ * count of the buffers allocated. After this call, the videobuf layer will
++ * set up the buffer queue depending on the size and count of buffers.
++ */
++static int mxc_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
++ unsigned int *size)
++{
++ struct mxc_vout_output *vout = q->priv_data;
++ unsigned int frame_size;
++
++ if (!vout)
++ return -EINVAL;
++
++ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
++ return -EINVAL;
++
++ frame_size = get_frame_size(vout);
++ *size = PAGE_ALIGN(frame_size);
++
++ return 0;
++}
++
++/*
++ * This function will be called when the VIDIOC_QBUF ioctl is called.
++ * It prepares buffers before they are handed out for display. This function
++ * converts the user-space virtual address into a physical address if the
++ * userptr memory exchange mechanism is used.
++ */
++static int mxc_vout_buffer_prepare(struct videobuf_queue *q,
++ struct videobuf_buffer *vb,
++ enum v4l2_field field)
++{
++ vb->state = VIDEOBUF_PREPARED;
++ return 0;
++}
++
++/*
++ * The buffer queue function will be called from the videobuf layer when the
++ * _QBUF ioctl is called. It is used to enqueue a buffer which is ready to be
++ * displayed.
++ * This function is protected by q->irqlock.
++ */
++static void mxc_vout_buffer_queue(struct videobuf_queue *q,
++ struct videobuf_buffer *vb)
++{
++ struct mxc_vout_output *vout = q->priv_data;
++ struct videobuf_buffer *active_vb;
++
++ list_add_tail(&vb->queue, &vout->queue_list);
++ vb->state = VIDEOBUF_QUEUED;
++
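++	/* If the display timer has run dry, rearm it: for the 3-field
++	 * deinterlacer the still-active buffer drives the schedule, otherwise
++	 * the freshly queued buffer does. */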
++ if (vout->timer_stop) {
++ if (deinterlace_3_field(vout) &&
++ !list_empty(&vout->active_list)) {
++ active_vb = list_first_entry(&vout->active_list,
++ struct videobuf_buffer, queue);
++ setup_buf_timer(vout, active_vb);
++ } else {
++ setup_buf_timer(vout, vb);
++ }
++ vout->timer_stop = false;
++ }
++}
++
++/*
++ * The buffer release function is called from the videobuf layer to release
++ * buffers which were already allocated.
++ */
++static void mxc_vout_buffer_release(struct videobuf_queue *q,
++ struct videobuf_buffer *vb)
++{
++ vb->state = VIDEOBUF_NEEDS_INIT;
++}
++
++static int mxc_vout_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int ret;
++ struct mxc_vout_output *vout = file->private_data;
++
++ if (!vout)
++ return -ENODEV;
++
++ ret = videobuf_mmap_mapper(&vout->vbq, vma);
++ if (ret < 0)
++ v4l2_err(vout->vfd->v4l2_dev,
++ "offset invalid [offset=0x%lx]\n",
++ (vma->vm_pgoff << PAGE_SHIFT));
++
++ return ret;
++}
++
++static int mxc_vout_release(struct file *file)
++{
++ unsigned int ret = 0;
++ struct videobuf_queue *q;
++ struct mxc_vout_output *vout = file->private_data;
++
++ if (!vout)
++ return 0;
++
++ if (--vout->open_cnt == 0) {
++ q = &vout->vbq;
++ if (q->streaming)
++ mxc_vidioc_streamoff(file, vout, vout->type);
++ else {
++ release_disp_output(vout);
++ videobuf_queue_cancel(q);
++ }
++ destroy_workqueue(vout->v4l_wq);
++ ret = videobuf_mmap_free(q);
++ }
++
++ return ret;
++}
++
++static int mxc_vout_open(struct file *file)
++{
++ struct mxc_vout_output *vout = NULL;
++ int ret = 0;
++
++ vout = video_drvdata(file);
++
++ if (vout == NULL)
++ return -ENODEV;
++
++ if (vout->open_cnt++ == 0) {
++ vout->ctrl_rotate = 0;
++ vout->ctrl_vflip = 0;
++ vout->ctrl_hflip = 0;
++ update_display_setting();
++ ret = update_setting_from_fbi(vout, vout->fbi);
++ if (ret < 0)
++ goto err;
++
++ vout->v4l_wq = create_singlethread_workqueue("v4l2q");
++ if (!vout->v4l_wq) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Could not create work queue\n");
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ INIT_WORK(&vout->disp_work, disp_work_func);
++
++ INIT_LIST_HEAD(&vout->queue_list);
++ INIT_LIST_HEAD(&vout->active_list);
++
++ vout->fmt_init = false;
++ vout->frame_count = 0;
++ vout->vdi_frame_cnt = 0;
++
++ vout->win_pos.x = 0;
++ vout->win_pos.y = 0;
++ vout->release = true;
++ }
++
++ file->private_data = vout;
++
++err:
++ return ret;
++}
++
++/*
++ * V4L2 ioctls
++ */
++static int mxc_vidioc_querycap(struct file *file, void *fh,
++ struct v4l2_capability *cap)
++{
++ struct mxc_vout_output *vout = fh;
++
++ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
++ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
++ cap->bus_info[0] = '\0';
++ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
++
++ return 0;
++}
++
++static int mxc_vidioc_enum_fmt_vid_out(struct file *file, void *fh,
++ struct v4l2_fmtdesc *fmt)
++{
++ if (fmt->index >= NUM_MXC_VOUT_FORMATS)
++ return -EINVAL;
++
++ strlcpy(fmt->description, mxc_formats[fmt->index].description,
++ sizeof(fmt->description));
++ fmt->pixelformat = mxc_formats[fmt->index].pixelformat;
++
++ return 0;
++}
++
++static int mxc_vidioc_g_fmt_vid_out(struct file *file, void *fh,
++ struct v4l2_format *f)
++{
++ struct mxc_vout_output *vout = fh;
++ struct v4l2_rect rect;
++
++ f->fmt.pix.width = vout->task.input.width;
++ f->fmt.pix.height = vout->task.input.height;
++ f->fmt.pix.pixelformat = vout->task.input.format;
++ f->fmt.pix.sizeimage = get_frame_size(vout);
++
++ if (f->fmt.pix.priv) {
++ rect.left = vout->task.input.crop.pos.x;
++ rect.top = vout->task.input.crop.pos.y;
++ rect.width = vout->task.input.crop.w;
++ rect.height = vout->task.input.crop.h;
++ if (copy_to_user((void __user *)f->fmt.pix.priv,
++ &rect, sizeof(rect)))
++ return -EFAULT;
++ }
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "frame_size:0x%x, pix_fmt:0x%x\n",
++ f->fmt.pix.sizeimage,
++ vout->task.input.format);
++
++ return 0;
++}
++
++static inline int ipu_try_task(struct mxc_vout_output *vout)
++{
++ int ret;
++ struct ipu_task *task = &vout->task;
++
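++	/* ipu_check_task() reports which dimension violates the split or
++	 * downsize limits; shrink that crop (or output window) by 8 pixels
++	 * and retry until the task is accepted or the error is not one we can
++	 * work around. */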
++again:
++ ret = ipu_check_task(task);
++ if (ret != IPU_CHECK_OK) {
++ if (ret > IPU_CHECK_ERR_MIN) {
++ if (ret == IPU_CHECK_ERR_SPLIT_INPUTW_OVER ||
++ ret == IPU_CHECK_ERR_W_DOWNSIZE_OVER) {
++ task->input.crop.w -= 8;
++ goto again;
++ }
++ if (ret == IPU_CHECK_ERR_SPLIT_INPUTH_OVER ||
++ ret == IPU_CHECK_ERR_H_DOWNSIZE_OVER) {
++ task->input.crop.h -= 8;
++ goto again;
++ }
++ if (ret == IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER) {
++ if (vout->disp_support_windows) {
++ task->output.width -= 8;
++ task->output.crop.w =
++ task->output.width;
++ } else
++ task->output.crop.w -= 8;
++ goto again;
++ }
++ if (ret == IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER) {
++ if (vout->disp_support_windows) {
++ task->output.height -= 8;
++ task->output.crop.h =
++ task->output.height;
++ } else
++ task->output.crop.h -= 8;
++ goto again;
++ }
++ ret = -EINVAL;
++ }
++ } else
++ ret = 0;
++
++ return ret;
++}
++
++static inline int vdoaipu_try_task(struct mxc_vout_output *vout)
++{
++ int ret;
++ int is_1080p_stream;
++ size_t size;
++ struct ipu_task *ipu_task = &vout->task;
++ struct ipu_crop *icrop = &ipu_task->input.crop;
++ struct ipu_task *vdoa_task = &vout->vdoa_task;
++ u32 deinterlace = 0;
++ u32 in_fmt;
++
++ if (vout->task.input.deinterlace.enable)
++ deinterlace = 1;
++
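++	/* The VDOA pre-pass untiles the input: TILED_NV12(F) is converted to
++	 * linear NV12 at a macroblock-aligned size, and the main IPU task is
++	 * then checked against that linear intermediate buffer. */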
++ memset(vdoa_task, 0, sizeof(*vdoa_task));
++ vdoa_task->output.format = IPU_PIX_FMT_NV12;
++ memcpy(&vdoa_task->input, &ipu_task->input,
++ sizeof(ipu_task->input));
++ if ((icrop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (icrop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN)) {
++ vdoa_task->input.crop.w =
++ ALIGN(icrop->w, IPU_PIX_FMT_TILED_NV12_MBALIGN);
++ vdoa_task->input.crop.h =
++ ALIGN(icrop->h, IPU_PIX_FMT_TILED_NV12_MBALIGN);
++ }
++ vdoa_task->output.width = vdoa_task->input.crop.w;
++ vdoa_task->output.height = vdoa_task->input.crop.h;
++ vdoa_task->output.crop.w = vdoa_task->input.crop.w;
++ vdoa_task->output.crop.h = vdoa_task->input.crop.h;
++
++ size = PAGE_ALIGN(vdoa_task->input.crop.w *
++ vdoa_task->input.crop.h *
++ fmt_to_bpp(vdoa_task->output.format)/8);
++ if (size > vout->vdoa_work.size) {
++ if (vout->vdoa_work.vaddr)
++ free_dma_buf(vout, &vout->vdoa_work);
++ vout->vdoa_work.size = size;
++ ret = alloc_dma_buf(vout, &vout->vdoa_work);
++ if (ret < 0)
++ return ret;
++ }
++ ret = ipu_check_task(vdoa_task);
++ if (ret != IPU_CHECK_OK)
++ return -EINVAL;
++
++ is_1080p_stream = CHECK_TILED_1080P_STREAM(vout);
++ if (is_1080p_stream)
++ ipu_task->input.crop.h = VALID_HEIGHT_1080P;
++ in_fmt = ipu_task->input.format;
++ ipu_task->input.format = vdoa_task->output.format;
++ ipu_task->input.height = vdoa_task->output.height;
++ ipu_task->input.width = vdoa_task->output.width;
++ if (deinterlace)
++ ipu_task->input.deinterlace.enable = 0;
++ ret = ipu_try_task(vout);
++ if (deinterlace)
++ ipu_task->input.deinterlace.enable = 1;
++ ipu_task->input.format = in_fmt;
++
++ return ret;
++}
++
++static int mxc_vout_try_task(struct mxc_vout_output *vout)
++{
++ int ret = 0;
++ struct ipu_output *output = &vout->task.output;
++ struct ipu_input *input = &vout->task.input;
++ struct ipu_crop *crop = &input->crop;
++ u32 o_height = 0;
++ u32 ocrop_h = 0;
++ bool tiled_fmt = false;
++ bool tiled_need_pp = false;
++
++ vout->vdoa_1080p = CHECK_TILED_1080P_DISPLAY(vout);
++ if (vout->vdoa_1080p) {
++ input->crop.h = FRAME_HEIGHT_1080P;
++ o_height = output->height;
++ ocrop_h = output->crop.h;
++ output->height = FRAME_HEIGHT_1080P;
++ output->crop.h = FRAME_HEIGHT_1080P;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == input->format) ||
++ (IPU_PIX_FMT_TILED_NV12F == input->format)) {
++ if ((input->width % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (input->height % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (crop->pos.x % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (crop->pos.y % IPU_PIX_FMT_TILED_NV12_MBALIGN)) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "ERR: tiled fmt needs 16 pixel align.\n");
++ return -EINVAL;
++ }
++ if ((crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN) ||
++ (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ tiled_need_pp = true;
++ } else {
++ crop->w -= crop->w % 8;
++ crop->h -= crop->h % 8;
++ }
++ /* assume task.output already set by S_CROP */
++ vout->linear_bypass_pp = is_pp_bypass(vout);
++ if (vout->linear_bypass_pp) {
++ v4l2_info(vout->vfd->v4l2_dev, "Bypass IC.\n");
++ output->format = input->format;
++ } else {
++ /* if need CSC, choose IPU-DP or IPU_IC do it */
++ if (vout->disp_support_csc) {
++ if (colorspaceofpixel(input->format) == YUV_CS)
++ output->format = IPU_PIX_FMT_UYVY;
++ else
++ output->format = IPU_PIX_FMT_RGB565;
++ } else {
++ if (colorspaceofpixel(vout->disp_fmt) == YUV_CS)
++ output->format = IPU_PIX_FMT_UYVY;
++ else
++ output->format = IPU_PIX_FMT_RGB565;
++ }
++
++ vout->tiled_bypass_pp = false;
++ if ((IPU_PIX_FMT_TILED_NV12 == input->format) ||
++ (IPU_PIX_FMT_TILED_NV12F == input->format)) {
++ /* check resize/rotate/flip, or csc task */
++ if (!(tiled_need_pp ||
++ (IPU_ROTATE_NONE != output->rotate) ||
++ (input->crop.w != output->crop.w) ||
++ (input->crop.h != output->crop.h) ||
++ (!vout->disp_support_csc &&
++ (colorspaceofpixel(vout->disp_fmt) == RGB_CS)))
++ ) {
++ /* IC bypass */
++ output->format = IPU_PIX_FMT_NV12;
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "tiled bypass pp\n");
++ vout->tiled_bypass_pp = true;
++ }
++ tiled_fmt = true;
++ }
++
++ if ((!vout->tiled_bypass_pp) && tiled_fmt)
++ ret = vdoaipu_try_task(vout);
++ else
++ ret = ipu_try_task(vout);
++ }
++
++ if (vout->vdoa_1080p) {
++ output->height = o_height;
++ output->crop.h = ocrop_h;
++ }
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++		"icrop.w:%u, icrop.h:%u, iw:%u, ih:%u, "
++ "ocrop.w:%u, ocrop.h:%u, ow:%u, oh:%u\n",
++ input->crop.w, input->crop.h,
++ input->width, input->height,
++ output->crop.w, output->crop.h,
++ output->width, output->height);
++ return ret;
++}
++
++static int mxc_vout_try_format(struct mxc_vout_output *vout,
++ struct v4l2_format *f)
++{
++ int ret = 0;
++ struct v4l2_rect rect;
++
++ if ((f->fmt.pix.field != V4L2_FIELD_NONE) &&
++ (IPU_PIX_FMT_TILED_NV12 == vout->task.input.format)) {
++ v4l2_err(vout->vfd->v4l2_dev,
++			"progressive tiled fmt should use V4L2_FIELD_NONE!\n");
++ return -EINVAL;
++ }
++
++ if (f->fmt.pix.priv && copy_from_user(&rect,
++ (void __user *)f->fmt.pix.priv, sizeof(rect)))
++ return -EFAULT;
++
++ vout->task.input.width = f->fmt.pix.width;
++ vout->task.input.height = f->fmt.pix.height;
++ vout->task.input.format = f->fmt.pix.pixelformat;
++
++ ret = set_field_fmt(vout, f->fmt.pix.field);
++ if (ret < 0)
++ return ret;
++
++ if (f->fmt.pix.priv) {
++ vout->task.input.crop.pos.x = rect.left;
++ vout->task.input.crop.pos.y = rect.top;
++ vout->task.input.crop.w = rect.width;
++ vout->task.input.crop.h = rect.height;
++ } else {
++ vout->task.input.crop.pos.x = 0;
++ vout->task.input.crop.pos.y = 0;
++ vout->task.input.crop.w = f->fmt.pix.width;
++ vout->task.input.crop.h = f->fmt.pix.height;
++ }
++ memcpy(&vout->in_rect, &vout->task.input.crop, sizeof(vout->in_rect));
++
++ ret = mxc_vout_try_task(vout);
++ if (!ret) {
++ if (f->fmt.pix.priv) {
++ rect.width = vout->task.input.crop.w;
++ rect.height = vout->task.input.crop.h;
++ if (copy_to_user((void __user *)f->fmt.pix.priv,
++ &rect, sizeof(rect)))
++ ret = -EFAULT;
++ } else {
++ f->fmt.pix.width = vout->task.input.crop.w;
++ f->fmt.pix.height = vout->task.input.crop.h;
++ }
++ }
++
++ return ret;
++}
++
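++/*
++ * Decide whether the display fb must be reconfigured: only relevant while
++ * streaming, and needed whenever the bypass mode, the cropped output
++ * format/size or the window/crop position differs from the previous setup.
++ */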
++static bool mxc_vout_need_fb_reconfig(struct mxc_vout_output *vout,
++ struct mxc_vout_output *pre_vout)
++{
++ if (!vout->vbq.streaming)
++ return false;
++
++ if (vout->tiled_bypass_pp)
++ return true;
++
++ if (vout->linear_bypass_pp != pre_vout->linear_bypass_pp)
++ return true;
++
++	/* cropped output resolution or format has changed */
++ if (vout->task.output.format != pre_vout->task.output.format ||
++ vout->task.output.crop.w != pre_vout->task.output.crop.w ||
++ vout->task.output.crop.h != pre_vout->task.output.crop.h)
++ return true;
++
++	/* overlay: window position or resolution has changed */
++ if (vout->disp_support_windows &&
++ (vout->win_pos.x != pre_vout->win_pos.x ||
++ vout->win_pos.y != pre_vout->win_pos.y ||
++ vout->task.output.width != pre_vout->task.output.width ||
++ vout->task.output.height != pre_vout->task.output.height))
++ return true;
++
++	/* background: cropped position has changed */
++ if (!vout->disp_support_windows &&
++ (vout->task.output.crop.pos.x !=
++ pre_vout->task.output.crop.pos.x ||
++ vout->task.output.crop.pos.y !=
++ pre_vout->task.output.crop.pos.y))
++ return true;
++
++ return false;
++}
++
++static int mxc_vidioc_s_fmt_vid_out(struct file *file, void *fh,
++ struct v4l2_format *f)
++{
++ struct mxc_vout_output *vout = fh;
++ int ret = 0;
++
++ if (vout->vbq.streaming)
++ return -EBUSY;
++
++ mutex_lock(&vout->task_lock);
++ ret = mxc_vout_try_format(vout, f);
++ if (ret >= 0)
++ vout->fmt_init = true;
++ mutex_unlock(&vout->task_lock);
++
++ return ret;
++}
++
++static int mxc_vidioc_cropcap(struct file *file, void *fh,
++ struct v4l2_cropcap *cropcap)
++{
++ struct mxc_vout_output *vout = fh;
++
++ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ cropcap->bounds = vout->crop_bounds;
++ cropcap->defrect = vout->crop_bounds;
++
++ return 0;
++}
++
++static int mxc_vidioc_g_crop(struct file *file, void *fh,
++ struct v4l2_crop *crop)
++{
++ struct mxc_vout_output *vout = fh;
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ if (vout->disp_support_windows) {
++ crop->c.left = vout->win_pos.x;
++ crop->c.top = vout->win_pos.y;
++ crop->c.width = vout->task.output.width;
++ crop->c.height = vout->task.output.height;
++ } else {
++ if (vout->task.output.crop.w && vout->task.output.crop.h) {
++ crop->c.left = vout->task.output.crop.pos.x;
++ crop->c.top = vout->task.output.crop.pos.y;
++ crop->c.width = vout->task.output.crop.w;
++ crop->c.height = vout->task.output.crop.h;
++ } else {
++ crop->c.left = 0;
++ crop->c.top = 0;
++ crop->c.width = vout->task.output.width;
++ crop->c.height = vout->task.output.height;
++ }
++ }
++
++ return 0;
++}
++
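++/*
++ * Clamp the requested crop rectangle to the display bounds, round it down
++ * to the 8-pixel stride limit, and rebuild the IPU task and display output
++ * only if the resulting rectangle differs from the current one.
++ */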
++static int mxc_vidioc_s_crop(struct file *file, void *fh,
++ const struct v4l2_crop *crop)
++{
++ struct mxc_vout_output *vout = fh, *pre_vout;
++ struct v4l2_rect *b = &vout->crop_bounds;
++ struct v4l2_crop fix_up_crop;
++ int ret = 0;
++
++ memcpy(&fix_up_crop, crop, sizeof(*crop));
++
++ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++ if (crop->c.width < 0 || crop->c.height < 0)
++ return -EINVAL;
++
++ if (crop->c.width == 0)
++ fix_up_crop.c.width = b->width - b->left;
++ if (crop->c.height == 0)
++ fix_up_crop.c.height = b->height - b->top;
++
++ if (crop->c.top < b->top)
++ fix_up_crop.c.top = b->top;
++ if (crop->c.top >= b->top + b->height)
++ fix_up_crop.c.top = b->top + b->height - 1;
++ if (crop->c.height > b->top - crop->c.top + b->height)
++ fix_up_crop.c.height =
++ b->top - fix_up_crop.c.top + b->height;
++
++ if (crop->c.left < b->left)
++ fix_up_crop.c.left = b->left;
++ if (crop->c.left >= b->left + b->width)
++ fix_up_crop.c.left = b->left + b->width - 1;
++ if (crop->c.width > b->left - crop->c.left + b->width)
++ fix_up_crop.c.width =
++ b->left - fix_up_crop.c.left + b->width;
++
++ /* stride line limitation */
++ fix_up_crop.c.height -= fix_up_crop.c.height % 8;
++ fix_up_crop.c.width -= fix_up_crop.c.width % 8;
++ if ((fix_up_crop.c.width <= 0) || (fix_up_crop.c.height <= 0) ||
++ ((fix_up_crop.c.left + fix_up_crop.c.width) >
++ (b->left + b->width)) ||
++ ((fix_up_crop.c.top + fix_up_crop.c.height) >
++ (b->top + b->height))) {
++ v4l2_err(vout->vfd->v4l2_dev, "s_crop err: %d, %d, %d, %d",
++ fix_up_crop.c.left, fix_up_crop.c.top,
++ fix_up_crop.c.width, fix_up_crop.c.height);
++ return -EINVAL;
++ }
++
++ /* the same setting, return */
++ if (vout->disp_support_windows) {
++ if ((vout->win_pos.x == fix_up_crop.c.left) &&
++ (vout->win_pos.y == fix_up_crop.c.top) &&
++ (vout->task.output.crop.w == fix_up_crop.c.width) &&
++ (vout->task.output.crop.h == fix_up_crop.c.height))
++ return 0;
++ } else {
++ if ((vout->task.output.crop.pos.x == fix_up_crop.c.left) &&
++ (vout->task.output.crop.pos.y == fix_up_crop.c.top) &&
++ (vout->task.output.crop.w == fix_up_crop.c.width) &&
++ (vout->task.output.crop.h == fix_up_crop.c.height))
++ return 0;
++ }
++
++ pre_vout = vmalloc(sizeof(*pre_vout));
++ if (!pre_vout)
++ return -ENOMEM;
++
++	/* wait for the current work to finish */
++ if (vout->vbq.streaming)
++ flush_workqueue(vout->v4l_wq);
++
++ mutex_lock(&vout->task_lock);
++
++ memcpy(pre_vout, vout, sizeof(*vout));
++
++ if (vout->disp_support_windows) {
++ vout->task.output.crop.pos.x = 0;
++ vout->task.output.crop.pos.y = 0;
++ vout->win_pos.x = fix_up_crop.c.left;
++ vout->win_pos.y = fix_up_crop.c.top;
++ vout->task.output.width = fix_up_crop.c.width;
++ vout->task.output.height = fix_up_crop.c.height;
++ } else {
++ vout->task.output.crop.pos.x = fix_up_crop.c.left;
++ vout->task.output.crop.pos.y = fix_up_crop.c.top;
++ }
++
++ vout->task.output.crop.w = fix_up_crop.c.width;
++ vout->task.output.crop.h = fix_up_crop.c.height;
++
++ /*
++	 * S_CROP must come before S_FMT; the first S_CROP does not check the
++	 * IPU task (S_FMT does that), but after S_FMT every S_CROP must
++	 * check the IPU task again.
++ */
++ if (vout->fmt_init) {
++ memcpy(&vout->task.input.crop, &vout->in_rect,
++ sizeof(vout->in_rect));
++ ret = mxc_vout_try_task(vout);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "vout check task failed\n");
++ memcpy(vout, pre_vout, sizeof(*vout));
++ goto done;
++ }
++
++ if (mxc_vout_need_fb_reconfig(vout, pre_vout)) {
++ ret = config_disp_output(vout);
++ if (ret < 0)
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Config display output failed\n");
++ }
++ }
++
++done:
++ vfree(pre_vout);
++ mutex_unlock(&vout->task_lock);
++
++ return ret;
++}
++
++static int mxc_vidioc_queryctrl(struct file *file, void *fh,
++ struct v4l2_queryctrl *ctrl)
++{
++ int ret = 0;
++
++ switch (ctrl->id) {
++ case V4L2_CID_ROTATE:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
++ break;
++ case V4L2_CID_VFLIP:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
++ break;
++ case V4L2_CID_HFLIP:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
++ break;
++ case V4L2_CID_MXC_MOTION:
++ ret = v4l2_ctrl_query_fill(ctrl, 0, 2, 1, 0);
++ break;
++ default:
++ ctrl->name[0] = '\0';
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int mxc_vidioc_g_ctrl(struct file *file, void *fh,
++ struct v4l2_control *ctrl)
++{
++ int ret = 0;
++ struct mxc_vout_output *vout = fh;
++
++ switch (ctrl->id) {
++ case V4L2_CID_ROTATE:
++ ctrl->value = vout->ctrl_rotate;
++ break;
++ case V4L2_CID_VFLIP:
++ ctrl->value = vout->ctrl_vflip;
++ break;
++ case V4L2_CID_HFLIP:
++ ctrl->value = vout->ctrl_hflip;
++ break;
++ case V4L2_CID_MXC_MOTION:
++ if (vout->task.input.deinterlace.enable)
++ ctrl->value = vout->task.input.deinterlace.motion;
++ else
++ ctrl->value = 0;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
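++/*
++ * Fold the 0/90/180/270 rotation control and the H/V flip controls into a
++ * single IPU rotation mode for the output task.
++ */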
++static void setup_task_rotation(struct mxc_vout_output *vout)
++{
++ if (vout->ctrl_rotate == 0) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_180;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_VERT_FLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_HORIZ_FLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_NONE;
++ } else if (vout->ctrl_rotate == 90) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_LEFT;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_VFLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_HFLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT;
++ } else if (vout->ctrl_rotate == 180) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_NONE;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_HORIZ_FLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_VERT_FLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_180;
++ } else if (vout->ctrl_rotate == 270) {
++ if (vout->ctrl_vflip && vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT;
++ else if (vout->ctrl_vflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_HFLIP;
++ else if (vout->ctrl_hflip)
++ vout->task.output.rotate = IPU_ROTATE_90_RIGHT_VFLIP;
++ else
++ vout->task.output.rotate = IPU_ROTATE_90_LEFT;
++ }
++}
++
++static int mxc_vidioc_s_ctrl(struct file *file, void *fh,
++ struct v4l2_control *ctrl)
++{
++ int ret = 0;
++ struct mxc_vout_output *vout = fh, *pre_vout;
++
++ pre_vout = vmalloc(sizeof(*pre_vout));
++ if (!pre_vout)
++ return -ENOMEM;
++
++	/* wait for the current work to finish */
++ if (vout->vbq.streaming)
++ flush_workqueue(vout->v4l_wq);
++
++ mutex_lock(&vout->task_lock);
++
++ memcpy(pre_vout, vout, sizeof(*vout));
++
++ switch (ctrl->id) {
++ case V4L2_CID_ROTATE:
++ {
++ vout->ctrl_rotate = (ctrl->value/90) * 90;
++ if (vout->ctrl_rotate > 270)
++ vout->ctrl_rotate = 270;
++ setup_task_rotation(vout);
++ break;
++ }
++ case V4L2_CID_VFLIP:
++ {
++ vout->ctrl_vflip = ctrl->value;
++ setup_task_rotation(vout);
++ break;
++ }
++ case V4L2_CID_HFLIP:
++ {
++ vout->ctrl_hflip = ctrl->value;
++ setup_task_rotation(vout);
++ break;
++ }
++ case V4L2_CID_MXC_MOTION:
++ {
++ vout->task.input.deinterlace.motion = ctrl->value;
++ break;
++ }
++ default:
++ ret = -EINVAL;
++ goto done;
++ }
++
++ if (vout->fmt_init) {
++ memcpy(&vout->task.input.crop, &vout->in_rect,
++ sizeof(vout->in_rect));
++ ret = mxc_vout_try_task(vout);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "vout check task failed\n");
++ memcpy(vout, pre_vout, sizeof(*vout));
++ goto done;
++ }
++
++ if (mxc_vout_need_fb_reconfig(vout, pre_vout)) {
++ ret = config_disp_output(vout);
++ if (ret < 0)
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Config display output failed\n");
++ }
++ }
++
++done:
++ vfree(pre_vout);
++ mutex_unlock(&vout->task_lock);
++
++ return ret;
++}
++
++static int mxc_vidioc_reqbufs(struct file *file, void *fh,
++ struct v4l2_requestbuffers *req)
++{
++ int ret = 0;
++ struct mxc_vout_output *vout = fh;
++ struct videobuf_queue *q = &vout->vbq;
++
++ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
++ return -EINVAL;
++
++	/* must not get here while streaming; videobuf_reqbufs enforces that */
++ mutex_lock(&vout->task_lock);
++
++ ret = videobuf_reqbufs(q, req);
++
++ mutex_unlock(&vout->task_lock);
++ return ret;
++}
++
++static int mxc_vidioc_querybuf(struct file *file, void *fh,
++ struct v4l2_buffer *b)
++{
++ int ret;
++ struct mxc_vout_output *vout = fh;
++
++ ret = videobuf_querybuf(&vout->vbq, b);
++ if (!ret) {
++ /* return physical address */
++ struct videobuf_buffer *vb = vout->vbq.bufs[b->index];
++ if (b->flags & V4L2_BUF_FLAG_MAPPED)
++ b->m.offset = videobuf_to_dma_contig(vb);
++ }
++
++ return ret;
++}
++
++static int mxc_vidioc_qbuf(struct file *file, void *fh,
++ struct v4l2_buffer *buffer)
++{
++ struct mxc_vout_output *vout = fh;
++
++ return videobuf_qbuf(&vout->vbq, buffer);
++}
++
++static int mxc_vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
++{
++ struct mxc_vout_output *vout = fh;
++
++ if (!vout->vbq.streaming)
++ return -EINVAL;
++
++ if (file->f_flags & O_NONBLOCK)
++ return videobuf_dqbuf(&vout->vbq, (struct v4l2_buffer *)b, 1);
++ else
++ return videobuf_dqbuf(&vout->vbq, (struct v4l2_buffer *)b, 0);
++}
++
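++/*
++ * Call the fb driver's ioctl from kernel context; the temporary KERNEL_DS
++ * segment lets fb_ioctl() accept a kernel pointer as the position argument.
++ */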
++static int set_window_position(struct mxc_vout_output *vout,
++ struct mxcfb_pos *pos)
++{
++ struct fb_info *fbi = vout->fbi;
++ mm_segment_t old_fs;
++ int ret = 0;
++
++ if (vout->disp_support_windows) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ ret = fbi->fbops->fb_ioctl(fbi, MXCFB_SET_OVERLAY_POS,
++ (unsigned long)pos);
++ set_fs(old_fs);
++ }
++
++ return ret;
++}
++
++static int config_disp_output(struct mxc_vout_output *vout)
++{
++ struct dma_mem *buf = NULL;
++ struct fb_info *fbi = vout->fbi;
++ struct fb_var_screeninfo var;
++ struct mxcfb_pos pos;
++ int i, fb_num, ret;
++ u32 fb_base;
++ u32 size;
++ u32 display_buf_size;
++ u32 *pixel = NULL;
++ u32 color;
++ int j;
++
++ memcpy(&var, &fbi->var, sizeof(var));
++ fb_base = fbi->fix.smem_start;
++
++ var.xres = vout->task.output.width;
++ var.yres = vout->task.output.height;
++ if (vout->linear_bypass_pp || vout->tiled_bypass_pp) {
++ fb_num = 1;
++ /* input crop */
++ if (vout->task.input.width > vout->task.output.width)
++ var.xres_virtual = vout->task.input.width;
++ else
++ var.xres_virtual = var.xres;
++ if (vout->task.input.height > vout->task.output.height)
++ var.yres_virtual = vout->task.input.height;
++ else
++ var.yres_virtual = var.yres;
++ var.rotate = vout->task.output.rotate;
++ var.vmode |= FB_VMODE_YWRAP;
++ } else {
++ fb_num = FB_BUFS;
++ var.xres_virtual = var.xres;
++ var.yres_virtual = fb_num * var.yres;
++ var.vmode &= ~FB_VMODE_YWRAP;
++ }
++ var.bits_per_pixel = fmt_to_bpp(vout->task.output.format);
++ var.nonstd = vout->task.output.format;
++
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "set display fb to %d %d\n",
++ var.xres, var.yres);
++
++ /*
++	 * To set up the overlay fb from scratch, without being affected by
++	 * the previous overlay fb position or resolution, we take the
++	 * following steps:
++ * - blank fb
++ * - set fb position to the starting point
++ * - reconfigure fb
++ * - set fb position to a specific point
++ * - unblank fb
++ * This procedure applies to non-overlay fbs as well.
++ */
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ pos.x = 0;
++ pos.y = 0;
++ ret = set_window_position(vout, &pos);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev, "failed to set fb position "
++ "to starting point\n");
++ return ret;
++ }
++
++ /* Init display channel through fb API */
++ var.yoffset = 0;
++ var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_set_var(fbi, &var);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "ERR:%s fb_set_var ret:%d\n", __func__, ret);
++ return ret;
++ }
++
++ ret = set_window_position(vout, &vout->win_pos);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev, "failed to set fb position\n");
++ return ret;
++ }
++
++ if (vout->linear_bypass_pp || vout->tiled_bypass_pp)
++ display_buf_size = fbi->fix.line_length * fbi->var.yres_virtual;
++ else
++ display_buf_size = fbi->fix.line_length * fbi->var.yres;
++ for (i = 0; i < fb_num; i++)
++ vout->disp_bufs[i] = fbi->fix.smem_start + i * display_buf_size;
++ if (vout->tiled_bypass_pp) {
++ size = PAGE_ALIGN(vout->task.input.crop.w *
++ vout->task.input.crop.h *
++ fmt_to_bpp(vout->task.output.format)/8);
++ if (size > vout->vdoa_output[0].size) {
++ for (i = 0; i < VDOA_FB_BUFS; i++) {
++ buf = &vout->vdoa_output[i];
++ if (buf->vaddr)
++ free_dma_buf(vout, buf);
++ buf->size = size;
++ ret = alloc_dma_buf(vout, buf);
++ if (ret < 0)
++ goto err;
++ }
++ }
++ for (i = fb_num; i < (fb_num + VDOA_FB_BUFS); i++)
++ vout->disp_bufs[i] =
++ vout->vdoa_output[i - fb_num].paddr;
++ }
++ vout->fb_smem_len = fbi->fix.smem_len;
++ vout->fb_smem_start = fbi->fix.smem_start;
++ if (fb_base != fbi->fix.smem_start) {
++ v4l2_dbg(1, debug, vout->vfd->v4l2_dev,
++ "realloc fb mem size:0x%x@0x%lx,old paddr @0x%x\n",
++ fbi->fix.smem_len, fbi->fix.smem_start, fb_base);
++ }
++
++	/* fill with black when the video config changes */
++ color = colorspaceofpixel(vout->task.output.format) == YUV_CS ?
++ UYVY_BLACK : RGB_BLACK;
++ if (IS_PLANAR_PIXEL_FORMAT(vout->task.output.format)) {
++ size = display_buf_size * 8 /
++ fmt_to_bpp(vout->task.output.format);
++ memset(fbi->screen_base, Y_BLACK, size);
++ memset(fbi->screen_base + size, UV_BLACK,
++ display_buf_size - size);
++ } else {
++ pixel = (u32 *)fbi->screen_base;
++ for (i = 0; i < (display_buf_size >> 2); i++)
++ *pixel++ = color;
++ }
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_blank(fbi, FB_BLANK_UNBLANK);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ vout->release = false;
++
++ return ret;
++err:
++ for (j = i - 1; j >= 0; j--) {
++ buf = &vout->vdoa_output[j];
++ if (buf->vaddr)
++ free_dma_buf(vout, buf);
++ }
++ return ret;
++}
++
++static inline void wait_for_vsync(struct mxc_vout_output *vout)
++{
++ struct fb_info *fbi = vout->fbi;
++ mm_segment_t old_fs;
++
++ if (fbi->fbops->fb_ioctl) {
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ fbi->fbops->fb_ioctl(fbi, MXCFB_WAIT_FOR_VSYNC,
++ (unsigned long)NULL);
++ set_fs(old_fs);
++ }
++
++ return;
++}
++
++static void release_disp_output(struct mxc_vout_output *vout)
++{
++ struct fb_info *fbi = vout->fbi;
++ struct mxcfb_pos pos;
++
++ if (vout->release)
++ return;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++	/* restore position to 0,0 to avoid a possible fb pan display hang */
++ pos.x = 0;
++ pos.y = 0;
++ set_window_position(vout, &pos);
++
++ if (get_ipu_channel(fbi) == MEM_BG_SYNC) {
++ console_lock();
++ fbi->fix.smem_start = vout->disp_bufs[0];
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_blank(fbi, FB_BLANK_UNBLANK);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ }
++
++ vout->release = true;
++}
++
++static int mxc_vidioc_streamon(struct file *file, void *fh,
++ enum v4l2_buf_type i)
++{
++ struct mxc_vout_output *vout = fh;
++ struct videobuf_queue *q = &vout->vbq;
++ int ret;
++
++ if (q->streaming) {
++ v4l2_err(vout->vfd->v4l2_dev,
++				"video output already running\n");
++ ret = -EBUSY;
++ goto done;
++ }
++
++ if (deinterlace_3_field(vout) && list_is_singular(&q->stream)) {
++ v4l2_err(vout->vfd->v4l2_dev,
++			"deinterlacing: need to queue 2 frames before streamon\n");
++ ret = -EINVAL;
++ goto done;
++ }
++
++ ret = config_disp_output(vout);
++ if (ret < 0) {
++ v4l2_err(vout->vfd->v4l2_dev,
++ "Config display output failed\n");
++ goto done;
++ }
++
++ hrtimer_init(&vout->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
++ vout->timer.function = mxc_vout_timer_handler;
++ vout->timer_stop = true;
++
++ vout->start_ktime = hrtimer_cb_get_time(&vout->timer);
++
++ vout->pre1_vb = NULL;
++ vout->pre2_vb = NULL;
++
++ ret = videobuf_streamon(q);
++done:
++ return ret;
++}
++
++static int mxc_vidioc_streamoff(struct file *file, void *fh,
++ enum v4l2_buf_type i)
++{
++ struct mxc_vout_output *vout = fh;
++ struct videobuf_queue *q = &vout->vbq;
++ int ret = 0;
++
++ if (q->streaming) {
++ flush_workqueue(vout->v4l_wq);
++
++ hrtimer_cancel(&vout->timer);
++
++ /*
++		 * Wait for 2 vsyncs to make sure the
++		 * frames are drained from the triple
++		 * buffer.
++ */
++ wait_for_vsync(vout);
++ wait_for_vsync(vout);
++
++ release_disp_output(vout);
++
++ ret = videobuf_streamoff(&vout->vbq);
++ }
++ INIT_LIST_HEAD(&vout->queue_list);
++ INIT_LIST_HEAD(&vout->active_list);
++
++ return ret;
++}
++
++static const struct v4l2_ioctl_ops mxc_vout_ioctl_ops = {
++ .vidioc_querycap = mxc_vidioc_querycap,
++ .vidioc_enum_fmt_vid_out = mxc_vidioc_enum_fmt_vid_out,
++ .vidioc_g_fmt_vid_out = mxc_vidioc_g_fmt_vid_out,
++ .vidioc_s_fmt_vid_out = mxc_vidioc_s_fmt_vid_out,
++ .vidioc_cropcap = mxc_vidioc_cropcap,
++ .vidioc_g_crop = mxc_vidioc_g_crop,
++ .vidioc_s_crop = mxc_vidioc_s_crop,
++ .vidioc_queryctrl = mxc_vidioc_queryctrl,
++ .vidioc_g_ctrl = mxc_vidioc_g_ctrl,
++ .vidioc_s_ctrl = mxc_vidioc_s_ctrl,
++ .vidioc_reqbufs = mxc_vidioc_reqbufs,
++ .vidioc_querybuf = mxc_vidioc_querybuf,
++ .vidioc_qbuf = mxc_vidioc_qbuf,
++ .vidioc_dqbuf = mxc_vidioc_dqbuf,
++ .vidioc_streamon = mxc_vidioc_streamon,
++ .vidioc_streamoff = mxc_vidioc_streamoff,
++};
++
++static const struct v4l2_file_operations mxc_vout_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = video_ioctl2,
++ .mmap = mxc_vout_mmap,
++ .open = mxc_vout_open,
++ .release = mxc_vout_release,
++};
++
++static struct video_device mxc_vout_template = {
++ .name = "MXC Video Output",
++ .fops = &mxc_vout_fops,
++ .ioctl_ops = &mxc_vout_ioctl_ops,
++ .release = video_device_release,
++};
++
++static struct videobuf_queue_ops mxc_vout_vbq_ops = {
++ .buf_setup = mxc_vout_buffer_setup,
++ .buf_prepare = mxc_vout_buffer_prepare,
++ .buf_release = mxc_vout_buffer_release,
++ .buf_queue = mxc_vout_buffer_queue,
++};
++
++static void mxc_vout_free_output(struct mxc_vout_dev *dev)
++{
++ int i;
++ int j;
++ struct mxc_vout_output *vout;
++ struct video_device *vfd;
++
++ for (i = 0; i < dev->out_num; i++) {
++ vout = dev->out[i];
++ vfd = vout->vfd;
++ if (vout->vdoa_work.vaddr)
++ free_dma_buf(vout, &vout->vdoa_work);
++ for (j = 0; j < VDOA_FB_BUFS; j++) {
++ if (vout->vdoa_output[j].vaddr)
++ free_dma_buf(vout, &vout->vdoa_output[j]);
++ }
++ if (vfd) {
++ if (!video_is_registered(vfd))
++ video_device_release(vfd);
++ else
++ video_unregister_device(vfd);
++ }
++ kfree(vout);
++ }
++}
++
++static int mxc_vout_setup_output(struct mxc_vout_dev *dev)
++{
++ struct videobuf_queue *q;
++ struct fb_info *fbi;
++ struct mxc_vout_output *vout;
++ int i, ret = 0;
++
++ update_display_setting();
++
++	/* all outputs/overlays are based on fb devices */
++ for (i = 0; i < num_registered_fb; i++) {
++ fbi = registered_fb[i];
++
++ vout = kzalloc(sizeof(struct mxc_vout_output), GFP_KERNEL);
++ if (!vout) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ dev->out[dev->out_num] = vout;
++ dev->out_num++;
++
++ vout->fbi = fbi;
++ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
++ vout->vfd = video_device_alloc();
++ if (!vout->vfd) {
++ ret = -ENOMEM;
++ break;
++ }
++
++ *vout->vfd = mxc_vout_template;
++ vout->vfd->debug = debug;
++ vout->vfd->v4l2_dev = &dev->v4l2_dev;
++ vout->vfd->lock = &vout->mutex;
++ vout->vfd->vfl_dir = VFL_DIR_TX;
++
++ mutex_init(&vout->mutex);
++ mutex_init(&vout->task_lock);
++
++ strlcpy(vout->vfd->name, fbi->fix.id, sizeof(vout->vfd->name));
++
++ video_set_drvdata(vout->vfd, vout);
++
++ if (video_register_device(vout->vfd,
++ VFL_TYPE_GRABBER, video_nr + i) < 0) {
++ ret = -ENODEV;
++ break;
++ }
++
++ q = &vout->vbq;
++ q->dev = dev->dev;
++ spin_lock_init(&vout->vbq_lock);
++ videobuf_queue_dma_contig_init(q, &mxc_vout_vbq_ops, q->dev,
++ &vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
++ sizeof(struct videobuf_buffer), vout, NULL);
++
++ v4l2_info(vout->vfd->v4l2_dev, "V4L2 device registered as %s\n",
++ video_device_node_name(vout->vfd));
++
++ }
++
++ return ret;
++}
++
++static int mxc_vout_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct mxc_vout_dev *dev;
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev)
++ return -ENOMEM;
++
++ dev->dev = &pdev->dev;
++ dev->dev->dma_mask = kmalloc(sizeof(*dev->dev->dma_mask), GFP_KERNEL);
++ *dev->dev->dma_mask = DMA_BIT_MASK(32);
++ dev->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ ret = v4l2_device_register(dev->dev, &dev->v4l2_dev);
++ if (ret) {
++ dev_err(dev->dev, "v4l2_device_register failed\n");
++ goto free_dev;
++ }
++		/* if CSC is needed, let IPU-DP or IPU-IC do it */
++ ret = mxc_vout_setup_output(dev);
++ if (ret < 0)
++ goto rel_vdev;
++
++ return 0;
++
++rel_vdev:
++ mxc_vout_free_output(dev);
++ v4l2_device_unregister(&dev->v4l2_dev);
++free_dev:
++ kfree(dev);
++ return ret;
++}
++
++static int mxc_vout_remove(struct platform_device *pdev)
++{
++ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
++ struct mxc_vout_dev *dev = container_of(v4l2_dev, struct
++ mxc_vout_dev, v4l2_dev);
++
++ mxc_vout_free_output(dev);
++ v4l2_device_unregister(v4l2_dev);
++ kfree(dev);
++ return 0;
++}
++
++static const struct of_device_id mxc_v4l2_dt_ids[] = {
++ { .compatible = "fsl,mxc_v4l2_output", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_vout_driver = {
++ .driver = {
++ .name = "mxc_v4l2_output",
++ .of_match_table = mxc_v4l2_dt_ids,
++ },
++ .probe = mxc_vout_probe,
++ .remove = mxc_vout_remove,
++};
++
++static int __init mxc_vout_init(void)
++{
++ if (platform_driver_register(&mxc_vout_driver) != 0) {
++		printk(KERN_ERR VOUT_NAME ": Could not register video driver\n");
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void mxc_vout_cleanup(void)
++{
++ platform_driver_unregister(&mxc_vout_driver);
++}
++
++module_init(mxc_vout_init);
++module_exit(mxc_vout_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("V4L2-driver for MXC video output");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/media/v4l2-core/videobuf2-dma-contig.c linux-3.14.40/drivers/media/v4l2-core/videobuf2-dma-contig.c
+--- linux-3.14.40.orig/drivers/media/v4l2-core/videobuf2-dma-contig.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/media/v4l2-core/videobuf2-dma-contig.c 2015-05-01 14:57:59.295427001 -0500
+@@ -719,7 +719,7 @@
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+- if (IS_ERR_OR_NULL(sgt)) {
++ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+diff -Nur linux-3.14.40.orig/drivers/media/v4l2-core/videobuf-dma-contig.c linux-3.14.40/drivers/media/v4l2-core/videobuf-dma-contig.c
+--- linux-3.14.40.orig/drivers/media/v4l2-core/videobuf-dma-contig.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/media/v4l2-core/videobuf-dma-contig.c 2015-05-01 14:57:59.295427001 -0500
+@@ -304,7 +304,7 @@
+
+ /* Try to remap memory */
+ size = vma->vm_end - vma->vm_start;
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ retval = vm_iomap_memory(vma, vma->vm_start, size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+diff -Nur linux-3.14.40.orig/drivers/mfd/ab8500-core.c linux-3.14.40/drivers/mfd/ab8500-core.c
+--- linux-3.14.40.orig/drivers/mfd/ab8500-core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/ab8500-core.c 2015-05-01 14:57:59.315427001 -0500
+@@ -592,7 +592,7 @@
+
+ /* If ->irq_base is zero this will give a linear mapping */
+ ab8500->domain = irq_domain_add_simple(NULL,
+- num_irqs, ab8500->irq_base,
++ num_irqs, 0,
+ &ab8500_irq_ops, ab8500);
+
+ if (!ab8500->domain) {
+@@ -1583,14 +1583,13 @@
+ if (!ab8500)
+ return -ENOMEM;
+
+- if (plat)
+- ab8500->irq_base = plat->irq_base;
+-
+ ab8500->dev = &pdev->dev;
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+- if (!resource)
++ if (!resource) {
++ dev_err(&pdev->dev, "no IRQ resource\n");
+ return -ENODEV;
++ }
+
+ ab8500->irq = resource->start;
+
+@@ -1612,8 +1611,10 @@
+ else {
+ ret = get_register_interruptible(ab8500, AB8500_MISC,
+ AB8500_IC_NAME_REG, &value);
+- if (ret < 0)
++ if (ret < 0) {
++ dev_err(&pdev->dev, "could not probe HW\n");
+ return ret;
++ }
+
+ ab8500->version = value;
+ }
+@@ -1759,30 +1760,30 @@
+ if (is_ab9540(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+ ARRAY_SIZE(ab9540_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ else if (is_ab8540(ab8500)) {
+ ret = mfd_add_devices(ab8500->dev, 0, ab8540_devs,
+ ARRAY_SIZE(ab8540_devs), NULL,
+- ab8500->irq_base, NULL);
++ 0, ab8500->domain);
+ if (ret)
+ return ret;
+
+ if (is_ab8540_1p2_or_earlier(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab8540_cut1_devs,
+ ARRAY_SIZE(ab8540_cut1_devs), NULL,
+- ab8500->irq_base, NULL);
++ 0, ab8500->domain);
+ else /* ab8540 >= cut2 */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8540_cut2_devs,
+ ARRAY_SIZE(ab8540_cut2_devs), NULL,
+- ab8500->irq_base, NULL);
++ 0, ab8500->domain);
+ } else if (is_ab8505(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab8505_devs,
+ ARRAY_SIZE(ab8505_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ else
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ if (ret)
+ return ret;
+
+@@ -1790,7 +1791,7 @@
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+- ab8500->irq_base, ab8500->domain);
++ 0, ab8500->domain);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
+diff -Nur linux-3.14.40.orig/drivers/mfd/db8500-prcmu.c linux-3.14.40/drivers/mfd/db8500-prcmu.c
+--- linux-3.14.40.orig/drivers/mfd/db8500-prcmu.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/db8500-prcmu.c 2015-05-01 14:57:59.323427001 -0500
+@@ -25,6 +25,7 @@
+ #include <linux/bitops.h>
+ #include <linux/fs.h>
+ #include <linux/of.h>
++#include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/uaccess.h>
+ #include <linux/mfd/core.h>
+@@ -2678,16 +2679,12 @@
+ .xlate = irq_domain_xlate_twocell,
+ };
+
+-static int db8500_irq_init(struct device_node *np, int irq_base)
++static int db8500_irq_init(struct device_node *np)
+ {
+ int i;
+
+- /* In the device tree case, just take some IRQs */
+- if (np)
+- irq_base = 0;
+-
+ db8500_irq_domain = irq_domain_add_simple(
+- np, NUM_PRCMU_WAKEUPS, irq_base,
++ np, NUM_PRCMU_WAKEUPS, 0,
+ &db8500_irq_ops, NULL);
+
+ if (!db8500_irq_domain) {
+@@ -3114,10 +3111,10 @@
+ }
+
+ static int db8500_prcmu_register_ab8500(struct device *parent,
+- struct ab8500_platform_data *pdata,
+- int irq)
++ struct ab8500_platform_data *pdata)
+ {
+- struct resource ab8500_resource = DEFINE_RES_IRQ(irq);
++ struct device_node *np;
++ struct resource ab8500_resource;
+ struct mfd_cell ab8500_cell = {
+ .name = "ab8500-core",
+ .of_compatible = "stericsson,ab8500",
+@@ -3128,6 +3125,20 @@
+ .num_resources = 1,
+ };
+
++ if (!parent->of_node)
++ return -ENODEV;
++
++ /* Look up the device node, sneak the IRQ out of it */
++ for_each_child_of_node(parent->of_node, np) {
++ if (of_device_is_compatible(np, ab8500_cell.of_compatible))
++ break;
++ }
++ if (!np) {
++ dev_info(parent, "could not find AB8500 node in the device tree\n");
++ return -ENODEV;
++ }
++ of_irq_to_resource_table(np, &ab8500_resource, 1);
++
+ return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
+ }
+
+@@ -3180,7 +3191,7 @@
+ goto no_irq_return;
+ }
+
+- db8500_irq_init(np, pdata->irq_base);
++ db8500_irq_init(np);
+
+ prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+
+@@ -3205,8 +3216,7 @@
+ }
+ }
+
+- err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata,
+- pdata->ab_irq);
++ err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata);
+ if (err) {
+ mfd_remove_devices(&pdev->dev);
+ pr_err("prcmu: Failed to add ab8500 subdevice\n");
+diff -Nur linux-3.14.40.orig/drivers/mfd/Kconfig linux-3.14.40/drivers/mfd/Kconfig
+--- linux-3.14.40.orig/drivers/mfd/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/Kconfig 2015-05-01 14:57:59.331427001 -0500
+@@ -163,6 +163,14 @@
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
++config MFD_MXC_HDMI
++ tristate "Freescale HDMI Core"
++ select MFD_CORE
++ help
++ This is the core driver for the Freescale i.MX6 on-chip HDMI.
++ This MFD driver connects with the video and audio drivers for HDMI.
++
++
+ config MFD_MC13XXX
+ tristate
+ depends on (SPI_MASTER || I2C)
+@@ -1226,3 +1234,4 @@
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
++
+diff -Nur linux-3.14.40.orig/drivers/mfd/Makefile linux-3.14.40/drivers/mfd/Makefile
+--- linux-3.14.40.orig/drivers/mfd/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/Makefile 2015-05-01 14:57:59.351427001 -0500
+@@ -166,3 +166,4 @@
+ obj-$(CONFIG_MFD_AS3711) += as3711.o
+ obj-$(CONFIG_MFD_AS3722) += as3722.o
+ obj-$(CONFIG_MFD_STW481X) += stw481x.o
++obj-$(CONFIG_MFD_MXC_HDMI) += mxc-hdmi-core.o
+diff -Nur linux-3.14.40.orig/drivers/mfd/mxc-hdmi-core.c linux-3.14.40/drivers/mfd/mxc-hdmi-core.c
+--- linux-3.14.40.orig/drivers/mfd/mxc-hdmi-core.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mfd/mxc-hdmi-core.c 2015-05-01 14:57:59.351427001 -0500
+@@ -0,0 +1,798 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++
++#include <linux/platform_device.h>
++#include <linux/regulator/machine.h>
++#include <asm/mach-types.h>
++
++#include <video/mxc_hdmi.h>
++#include <linux/ipu-v3.h>
++#include <video/mxc_edid.h>
++#include "../mxc/ipu3/ipu_prv.h"
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
++
++struct mxc_hdmi_data {
++ struct platform_device *pdev;
++ unsigned long __iomem *reg_base;
++ unsigned long reg_phys_base;
++ struct device *dev;
++};
++
++static void __iomem *hdmi_base;
++static struct clk *isfr_clk;
++static struct clk *iahb_clk;
++static spinlock_t irq_spinlock;
++static spinlock_t edid_spinlock;
++static unsigned int sample_rate;
++static unsigned long pixel_clk_rate;
++static struct clk *pixel_clk;
++static int hdmi_ratio;
++int mxc_hdmi_ipu_id;
++int mxc_hdmi_disp_id;
++static int hdmi_core_edid_status;
++static struct mxc_edid_cfg hdmi_core_edid_cfg;
++static int hdmi_core_init;
++static unsigned int hdmi_dma_running;
++static struct snd_pcm_substream *hdmi_audio_stream_playback;
++static unsigned int hdmi_cable_state;
++static unsigned int hdmi_blank_state;
++static unsigned int hdmi_abort_state;
++static spinlock_t hdmi_audio_lock, hdmi_blank_state_lock, hdmi_cable_state_lock;
++
++void hdmi_set_dvi_mode(unsigned int state)
++{
++ if (state) {
++ mxc_hdmi_abort_stream();
++ hdmi_cec_stop_device();
++ } else {
++ hdmi_cec_start_device();
++ }
++}
++EXPORT_SYMBOL(hdmi_set_dvi_mode);
++
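++/*
++ * Track the cable plug state; once both the cable and the unblank state say
++ * HDMI is usable again, restart an audio stream that was aborted earlier.
++ */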
++unsigned int hdmi_set_cable_state(unsigned int state)
++{
++ unsigned long flags;
++ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
++ hdmi_cable_state = state;
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
++
++ if (check_hdmi_state() && substream && hdmi_abort_state) {
++ hdmi_abort_state = 0;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(hdmi_set_cable_state);
++
++unsigned int hdmi_set_blank_state(unsigned int state)
++{
++ unsigned long flags;
++ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
++
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
++ hdmi_blank_state = state;
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
++
++ if (check_hdmi_state() && substream && hdmi_abort_state) {
++ hdmi_abort_state = 0;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(hdmi_set_blank_state);
++
++static void hdmi_audio_abort_stream(struct snd_pcm_substream *substream)
++{
++ unsigned long flags;
++
++ snd_pcm_stream_lock_irqsave(substream, flags);
++
++ if (snd_pcm_running(substream)) {
++ hdmi_abort_state = 1;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
++ }
++
++ snd_pcm_stream_unlock_irqrestore(substream, flags);
++}
++
++int mxc_hdmi_abort_stream(void)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ if (hdmi_audio_stream_playback)
++ hdmi_audio_abort_stream(hdmi_audio_stream_playback);
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_hdmi_abort_stream);
++
++int check_hdmi_state(void)
++{
++ unsigned long flags1, flags2;
++ unsigned int ret;
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags1);
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags2);
++
++ ret = hdmi_cable_state && hdmi_blank_state;
++
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags2);
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags1);
++
++ return ret;
++}
++EXPORT_SYMBOL(check_hdmi_state);
++
++int mxc_hdmi_register_audio(struct snd_pcm_substream *substream)
++{
++ unsigned long flags, flags1;
++ int ret = 0;
++
++ snd_pcm_stream_lock_irqsave(substream, flags);
++
++ if (substream && check_hdmi_state()) {
++ spin_lock_irqsave(&hdmi_audio_lock, flags1);
++ if (hdmi_audio_stream_playback) {
++			pr_err("%s inconsistent hdmi audio stream!\n", __func__);
++ ret = -EINVAL;
++ }
++ hdmi_audio_stream_playback = substream;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags1);
++ } else
++ ret = -EINVAL;
++
++ snd_pcm_stream_unlock_irqrestore(substream, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(mxc_hdmi_register_audio);
++
++void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ hdmi_audio_stream_playback = NULL;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++}
++EXPORT_SYMBOL(mxc_hdmi_unregister_audio);
++
++u8 hdmi_readb(unsigned int reg)
++{
++ u8 value;
++
++ value = __raw_readb(hdmi_base + reg);
++
++ return value;
++}
++EXPORT_SYMBOL(hdmi_readb);
++
++#ifdef DEBUG
++static bool overflow_lo;
++static bool overflow_hi;
++
++bool hdmi_check_overflow(void)
++{
++ u8 val, lo, hi;
++
++ val = hdmi_readb(HDMI_IH_FC_STAT2);
++ lo = (val & HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW) != 0;
++ hi = (val & HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW) != 0;
++
++ if ((lo != overflow_lo) || (hi != overflow_hi)) {
++ pr_debug("%s LowPriority=%d HighPriority=%d <=======================\n",
++ __func__, lo, hi);
++ overflow_lo = lo;
++ overflow_hi = hi;
++ return true;
++ }
++ return false;
++}
++#else
++bool hdmi_check_overflow(void)
++{
++ return false;
++}
++#endif
++EXPORT_SYMBOL(hdmi_check_overflow);
++
++void hdmi_writeb(u8 value, unsigned int reg)
++{
++ hdmi_check_overflow();
++ __raw_writeb(value, hdmi_base + reg);
++ hdmi_check_overflow();
++}
++EXPORT_SYMBOL(hdmi_writeb);
++
++void hdmi_mask_writeb(u8 data, unsigned int reg, u8 shift, u8 mask)
++{
++ u8 value = hdmi_readb(reg) & ~mask;
++ value |= (data << shift) & mask;
++ hdmi_writeb(value, reg);
++}
++EXPORT_SYMBOL(hdmi_mask_writeb);
++
++unsigned int hdmi_read4(unsigned int reg)
++{
++	/* read a four-byte value from consecutive byte registers */
++ return (hdmi_readb(reg + 3) << 24) |
++ (hdmi_readb(reg + 2) << 16) |
++ (hdmi_readb(reg + 1) << 8) |
++ hdmi_readb(reg);
++}
++EXPORT_SYMBOL(hdmi_read4);
++
++void hdmi_write4(unsigned int value, unsigned int reg)
++{
++	/* write a four-byte value to consecutive hdmi byte regs */
++ hdmi_writeb(value & 0xff, reg);
++ hdmi_writeb((value >> 8) & 0xff, reg + 1);
++ hdmi_writeb((value >> 16) & 0xff, reg + 2);
++ hdmi_writeb((value >> 24) & 0xff, reg + 3);
++}
++EXPORT_SYMBOL(hdmi_write4);
++
++static void initialize_hdmi_ih_mutes(void)
++{
++ u8 ih_mute;
++
++ /*
++ * Boot up defaults are:
++ * HDMI_IH_MUTE = 0x03 (disabled)
++ * HDMI_IH_MUTE_* = 0x00 (enabled)
++ */
++
++ /* Disable top level interrupt bits in HDMI block */
++ ih_mute = hdmi_readb(HDMI_IH_MUTE) |
++ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT;
++
++ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
++
++ /* by default mask all interrupts */
++ hdmi_writeb(0xff, HDMI_VP_MASK);
++ hdmi_writeb(0xff, HDMI_FC_MASK0);
++ hdmi_writeb(0xff, HDMI_FC_MASK1);
++ hdmi_writeb(0xff, HDMI_FC_MASK2);
++ hdmi_writeb(0xff, HDMI_PHY_MASK0);
++ hdmi_writeb(0xff, HDMI_PHY_I2CM_INT_ADDR);
++ hdmi_writeb(0xff, HDMI_PHY_I2CM_CTLINT_ADDR);
++ hdmi_writeb(0xff, HDMI_AUD_INT);
++ hdmi_writeb(0xff, HDMI_AUD_SPDIFINT);
++ hdmi_writeb(0xff, HDMI_AUD_HBR_MASK);
++ hdmi_writeb(0xff, HDMI_GP_MASK);
++ hdmi_writeb(0xff, HDMI_A_APIINTMSK);
++ hdmi_writeb(0xff, HDMI_CEC_MASK);
++ hdmi_writeb(0xff, HDMI_I2CM_INT);
++ hdmi_writeb(0xff, HDMI_I2CM_CTLINT);
++
++ /* Disable interrupts in the IH_MUTE_* registers */
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT1);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT2);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AS_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_PHY_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CM_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_VP_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CMPHY_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++
++ /* Enable top level interrupt bits in HDMI block */
++ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT);
++ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
++}
++
++static void hdmi_set_clock_regenerator_n(unsigned int value)
++{
++ u8 val;
++
++ if (!hdmi_dma_running) {
++ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
++ hdmi_writeb(0, HDMI_AUD_N2);
++ hdmi_writeb(0, HDMI_AUD_N3);
++ }
++
++ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
++ hdmi_writeb((value >> 8) & 0xff, HDMI_AUD_N2);
++ hdmi_writeb((value >> 16) & 0x0f, HDMI_AUD_N3);
++
++ /* nshift factor = 0 */
++ val = hdmi_readb(HDMI_AUD_CTS3);
++ val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
++ hdmi_writeb(val, HDMI_AUD_CTS3);
++}
++
++static void hdmi_set_clock_regenerator_cts(unsigned int cts)
++{
++ u8 val;
++
++ if (!hdmi_dma_running) {
++ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
++ hdmi_writeb(0, HDMI_AUD_CTS2);
++ hdmi_writeb(0, HDMI_AUD_CTS3);
++ }
++
++	/* CTS_MANUAL must be cleared before the new CTS value is written */
++ val = hdmi_readb(HDMI_AUD_CTS3);
++ val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
++ hdmi_writeb(val, HDMI_AUD_CTS3);
++
++ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
++ hdmi_writeb((cts >> 8) & 0xff, HDMI_AUD_CTS2);
++ hdmi_writeb(((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
++ HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
++}
++
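++/*
++ * Audio clock regeneration: pick the N value for the given audio sample
++ * rate and TMDS (pixel) clock. The generic fallback is 128*fs/1000, with
++ * special-cased values for particular pixel clocks.
++ */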
++static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
++ unsigned int ratio)
++{
++ unsigned int n = (128 * freq) / 1000;
++
++ switch (freq) {
++ case 32000:
++ if (pixel_clk == 25174000)
++ n = (ratio == 150) ? 9152 : 4576;
++ else if (pixel_clk == 27020000)
++ n = (ratio == 150) ? 8192 : 4096;
++ else if (pixel_clk == 74170000 || pixel_clk == 148350000)
++ n = 11648;
++ else if (pixel_clk == 297000000)
++ n = (ratio == 150) ? 6144 : 3072;
++ else
++ n = 4096;
++ break;
++
++ case 44100:
++ if (pixel_clk == 25174000)
++ n = 7007;
++ else if (pixel_clk == 74170000)
++ n = 17836;
++ else if (pixel_clk == 148350000)
++ n = (ratio == 150) ? 17836 : 8918;
++ else if (pixel_clk == 297000000)
++ n = (ratio == 150) ? 9408 : 4704;
++ else
++ n = 6272;
++ break;
++
++ case 48000:
++ if (pixel_clk == 25174000)
++ n = (ratio == 150) ? 9152 : 6864;
++ else if (pixel_clk == 27020000)
++ n = (ratio == 150) ? 8192 : 6144;
++ else if (pixel_clk == 74170000)
++ n = 11648;
++ else if (pixel_clk == 148350000)
++ n = (ratio == 150) ? 11648 : 5824;
++ else if (pixel_clk == 297000000)
++ n = (ratio == 150) ? 10240 : 5120;
++ else
++ n = 6144;
++ break;
++
++ case 88200:
++ n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
++ break;
++
++ case 96000:
++ n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
++ break;
++
++ case 176400:
++ n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
++ break;
++
++ case 192000:
++ n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
++ break;
++
++ default:
++ break;
++ }
++
++ return n;
++}
++
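++/*
++ * Matching CTS value for the chosen N: the sink recovers the audio clock
++ * from 128*fs = f_TMDS * N / CTS, so CTS tracks the TMDS clock and is
++ * scaled by the ratio argument when it is not 100.
++ */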
++static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
++ unsigned int ratio)
++{
++ unsigned int cts = 0;
++ switch (freq) {
++ case 32000:
++ if (pixel_clk == 297000000) {
++ cts = 222750;
++ break;
++ } else if (pixel_clk == 25174000) {
++ cts = 28125;
++ break;
++ }
++ case 48000:
++ case 96000:
++ case 192000:
++ switch (pixel_clk) {
++ case 25200000:
++ case 27000000:
++ case 54000000:
++ case 74250000:
++ case 148500000:
++ cts = pixel_clk / 1000;
++ break;
++ case 297000000:
++ cts = 247500;
++ break;
++ case 25174000:
++ cts = 28125l;
++ break;
++ /*
++		 * All other TMDS clocks are not supported by
++		 * DWC_hdmi_tx. TMDS clocks divided or
++		 * multiplied by the 1.001 coefficient are
++		 * not supported either.
++ */
++ default:
++ break;
++ }
++ break;
++ case 44100:
++ case 88200:
++ case 176400:
++ switch (pixel_clk) {
++ case 25200000:
++ cts = 28000;
++ break;
++ case 25174000:
++ cts = 31250;
++ break;
++ case 27000000:
++ cts = 30000;
++ break;
++ case 54000000:
++ cts = 60000;
++ break;
++ case 74250000:
++ cts = 82500;
++ break;
++ case 148500000:
++ cts = 165000;
++ break;
++ case 297000000:
++ cts = 247500;
++ break;
++ default:
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++ if (ratio == 100)
++ return cts;
++ else
++ return (cts * ratio) / 100;
++}
++
++static void hdmi_set_clk_regenerator(void)
++{
++ unsigned int clk_n, clk_cts;
++
++ clk_n = hdmi_compute_n(sample_rate, pixel_clk_rate, hdmi_ratio);
++ clk_cts = hdmi_compute_cts(sample_rate, pixel_clk_rate, hdmi_ratio);
++
++ if (clk_cts == 0) {
++ pr_debug("%s: pixel clock not supported: %d\n",
++ __func__, (int)pixel_clk_rate);
++ return;
++ }
++
++ pr_debug("%s: samplerate=%d ratio=%d pixelclk=%d N=%d cts=%d\n",
++ __func__, sample_rate, hdmi_ratio, (int)pixel_clk_rate,
++ clk_n, clk_cts);
++
++ hdmi_set_clock_regenerator_cts(clk_cts);
++ hdmi_set_clock_regenerator_n(clk_n);
++}
++
++static int hdmi_core_get_of_property(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ int ipu_id, disp_id;
++
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++
++ mxc_hdmi_ipu_id = ipu_id;
++ mxc_hdmi_disp_id = disp_id;
++
++ return err;
++}
++
++/* Need to run this before the phy is enabled for the first time to prevent
++ * an overflow condition in HDMI_IH_FC_STAT2 */
++void hdmi_init_clk_regenerator(void)
++{
++ if (pixel_clk_rate == 0) {
++ pixel_clk_rate = 74250000;
++ hdmi_set_clk_regenerator();
++ }
++}
++EXPORT_SYMBOL(hdmi_init_clk_regenerator);
++
++void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock)
++{
++
++	/* Translate the pixel clock from ps (picoseconds) to Hz */
++ pixel_clk_rate = PICOS2KHZ(pixclock) * 1000UL;
++ hdmi_set_clk_regenerator();
++}
++EXPORT_SYMBOL(hdmi_clk_regenerator_update_pixel_clock);
++
++void hdmi_set_dma_mode(unsigned int dma_running)
++{
++ hdmi_dma_running = dma_running;
++ hdmi_set_clk_regenerator();
++}
++EXPORT_SYMBOL(hdmi_set_dma_mode);
++
++void hdmi_set_sample_rate(unsigned int rate)
++{
++ sample_rate = rate;
++}
++EXPORT_SYMBOL(hdmi_set_sample_rate);
++
++void hdmi_set_edid_cfg(int edid_status, struct mxc_edid_cfg *cfg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&edid_spinlock, flags);
++ hdmi_core_edid_status = edid_status;
++ memcpy(&hdmi_core_edid_cfg, cfg, sizeof(struct mxc_edid_cfg));
++ spin_unlock_irqrestore(&edid_spinlock, flags);
++}
++EXPORT_SYMBOL(hdmi_set_edid_cfg);
++
++int hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&edid_spinlock, flags);
++ memcpy(cfg, &hdmi_core_edid_cfg, sizeof(struct mxc_edid_cfg));
++ spin_unlock_irqrestore(&edid_spinlock, flags);
++
++ return hdmi_core_edid_status;
++}
++EXPORT_SYMBOL(hdmi_get_edid_cfg);
++
++void hdmi_set_registered(int registered)
++{
++ hdmi_core_init = registered;
++}
++EXPORT_SYMBOL(hdmi_set_registered);
++
++int hdmi_get_registered(void)
++{
++ return hdmi_core_init;
++}
++EXPORT_SYMBOL(hdmi_get_registered);
++
++static int mxc_hdmi_core_probe(struct platform_device *pdev)
++{
++ struct mxc_hdmi_data *hdmi_data;
++ struct resource *res;
++ unsigned long flags;
++ int ret = 0;
++
++#ifdef DEBUG
++ overflow_lo = false;
++ overflow_hi = false;
++#endif
++
++ hdmi_core_init = 0;
++ hdmi_dma_running = 0;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ ret = hdmi_core_get_of_property(pdev);
++ if (ret < 0) {
++		dev_err(&pdev->dev, "failed to get hdmi of properties\n");
++ return -ENOENT;
++ }
++
++ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(struct mxc_hdmi_data), GFP_KERNEL);
++ if (!hdmi_data) {
++ dev_err(&pdev->dev, "Couldn't allocate mxc hdmi mfd device\n");
++ return -ENOMEM;
++ }
++ hdmi_data->pdev = pdev;
++
++ pixel_clk = NULL;
++ sample_rate = 48000;
++ pixel_clk_rate = 0;
++ hdmi_ratio = 100;
++
++ spin_lock_init(&irq_spinlock);
++ spin_lock_init(&edid_spinlock);
++
++
++ spin_lock_init(&hdmi_cable_state_lock);
++ spin_lock_init(&hdmi_blank_state_lock);
++ spin_lock_init(&hdmi_audio_lock);
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
++ hdmi_cable_state = 0;
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
++
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
++ hdmi_blank_state = 0;
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
++
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ hdmi_audio_stream_playback = NULL;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++
++ isfr_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_isfr");
++ if (IS_ERR(isfr_clk)) {
++ ret = PTR_ERR(isfr_clk);
++ dev_err(&hdmi_data->pdev->dev,
++ "Unable to get HDMI isfr clk: %d\n", ret);
++ goto eclkg;
++ }
++
++ ret = clk_prepare_enable(isfr_clk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
++ goto eclke;
++ }
++
++ pr_debug("%s isfr_clk:%d\n", __func__,
++ (int)clk_get_rate(isfr_clk));
++
++ iahb_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_iahb");
++ if (IS_ERR(iahb_clk)) {
++ ret = PTR_ERR(iahb_clk);
++ dev_err(&hdmi_data->pdev->dev,
++ "Unable to get HDMI iahb clk: %d\n", ret);
++ goto eclkg2;
++ }
++
++ ret = clk_prepare_enable(iahb_clk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
++ goto eclke2;
++ }
++
++ hdmi_data->reg_phys_base = res->start;
++ if (!request_mem_region(res->start, resource_size(res),
++ dev_name(&pdev->dev))) {
++ dev_err(&pdev->dev, "request_mem_region failed\n");
++ ret = -EBUSY;
++ goto emem;
++ }
++
++ hdmi_data->reg_base = ioremap(res->start, resource_size(res));
++ if (!hdmi_data->reg_base) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto eirq;
++ }
++ hdmi_base = hdmi_data->reg_base;
++
++ pr_debug("\n%s hdmi hw base = 0x%08x\n\n", __func__, (int)res->start);
++
++ initialize_hdmi_ih_mutes();
++
++ /* Disable HDMI clocks until video/audio sub-drivers are initialized */
++ clk_disable_unprepare(isfr_clk);
++ clk_disable_unprepare(iahb_clk);
++
++ /* Replace platform data coming in with a local struct */
++ platform_set_drvdata(pdev, hdmi_data);
++
++ return ret;
++
++eirq:
++ release_mem_region(res->start, resource_size(res));
++emem:
++ clk_disable_unprepare(iahb_clk);
++eclke2:
++ clk_put(iahb_clk);
++eclkg2:
++ clk_disable_unprepare(isfr_clk);
++eclke:
++ clk_put(isfr_clk);
++eclkg:
++ return ret;
++}
++
++
++static int __exit mxc_hdmi_core_remove(struct platform_device *pdev)
++{
++ struct mxc_hdmi_data *hdmi_data = platform_get_drvdata(pdev);
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ iounmap(hdmi_data->reg_base);
++ release_mem_region(res->start, resource_size(res));
++
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx6q-hdmi-core", },
++ { .compatible = "fsl,imx6dl-hdmi-core", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_core_driver = {
++ .driver = {
++ .name = "mxc_hdmi_core",
++ .of_match_table = imx_hdmi_dt_ids,
++ .owner = THIS_MODULE,
++ },
++ .remove = __exit_p(mxc_hdmi_core_remove),
++};
++
++static int __init mxc_hdmi_core_init(void)
++{
++ return platform_driver_probe(&mxc_hdmi_core_driver,
++ mxc_hdmi_core_probe);
++}
++
++static void __exit mxc_hdmi_core_exit(void)
++{
++ platform_driver_unregister(&mxc_hdmi_core_driver);
++}
++
++subsys_initcall(mxc_hdmi_core_init);
++module_exit(mxc_hdmi_core_exit);
++
++MODULE_DESCRIPTION("Core driver for Freescale i.Mx on-chip HDMI");
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/mfd/si476x-cmd.c linux-3.14.40/drivers/mfd/si476x-cmd.c
+--- linux-3.14.40.orig/drivers/mfd/si476x-cmd.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/si476x-cmd.c 2015-05-01 14:57:59.359427001 -0500
+@@ -303,13 +303,13 @@
+ * possible racing conditions when working in polling mode */
+ atomic_set(&core->cts, 0);
+
+- /* if (unlikely(command == CMD_POWER_DOWN) */
+- if (!wait_event_timeout(core->command,
+- atomic_read(&core->cts),
+- usecs_to_jiffies(usecs) + 1))
+- dev_warn(&core->client->dev,
+- "(%s) [CMD 0x%02x] Answer timeout.\n",
+- __func__, command);
++ if (!(command == CMD_POWER_DOWN))
++ if (!wait_event_timeout(core->command,
++ atomic_read(&core->cts),
++ usecs_to_jiffies(usecs) + 1))
++ dev_warn(&core->client->dev,
++ "(%s) [CMD 0x%02x] Answer timeout.\n",
++ __func__, command);
+
+ /*
+ When working in polling mode, for some reason the tuner will
+diff -Nur linux-3.14.40.orig/drivers/mfd/si476x-i2c.c linux-3.14.40/drivers/mfd/si476x-i2c.c
+--- linux-3.14.40.orig/drivers/mfd/si476x-i2c.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/si476x-i2c.c 2015-05-01 14:57:59.371427001 -0500
+@@ -303,7 +303,7 @@
+ */
+ udelay(100);
+
+- err = si476x_core_start(core, false);
++ err = si476x_core_start(core, true);
+ if (err < 0)
+ goto disable_regulators;
+
+@@ -312,7 +312,7 @@
+
+ case SI476X_POWER_DOWN:
+ core->power_state = next_state;
+- err = si476x_core_stop(core, false);
++ err = si476x_core_stop(core, true);
+ if (err < 0)
+ core->power_state = SI476X_POWER_INCONSISTENT;
+ disable_regulators:
+@@ -740,8 +740,15 @@
+ memcpy(&core->pinmux, &pdata->pinmux,
+ sizeof(struct si476x_pinmux));
+ } else {
+- dev_err(&client->dev, "No platform data provided\n");
+- return -EINVAL;
++ dev_warn(&client->dev, "Using default platform data.\n");
++ core->power_up_parameters.xcload = 0x28;
++ core->power_up_parameters.func = SI476X_FUNC_FM_RECEIVER;
++ core->power_up_parameters.freq = SI476X_FREQ_37P209375_MHZ;
++ core->diversity_mode = SI476X_PHDIV_DISABLED;
++ core->pinmux.dclk = SI476X_DCLK_DAUDIO;
++ core->pinmux.dfs = SI476X_DFS_DAUDIO;
++ core->pinmux.dout = SI476X_DOUT_I2S_OUTPUT;
++ core->pinmux.xout = SI476X_XOUT_TRISTATE;
+ }
+
+ core->supplies[0].supply = "vd";
+@@ -799,6 +806,10 @@
+
+ core->chip_id = id->driver_data;
+
++ /* Power down si476x first */
++ core->power_state = SI476X_POWER_UP_FULL;
++ si476x_core_set_power_state(core, SI476X_POWER_DOWN);
++
+ rval = si476x_core_get_revision_info(core);
+ if (rval < 0) {
+ rval = -ENODEV;
+diff -Nur linux-3.14.40.orig/drivers/mfd/si476x-prop.c linux-3.14.40/drivers/mfd/si476x-prop.c
+--- linux-3.14.40.orig/drivers/mfd/si476x-prop.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mfd/si476x-prop.c 2015-05-01 14:57:59.383427001 -0500
+@@ -217,15 +217,36 @@
+ return 0;
+ }
+
++static bool si476x_core_regmap_volatile_register(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case SI476X_PROP_DIGITAL_IO_OUTPUT_SAMPLE_RATE:
++ case SI476X_PROP_DIGITAL_IO_OUTPUT_FORMAT:
++ return false;
++ default:
++ return true;
++ }
++
++ return true;
++}
++
++/* These two registers are used by the codec, so add reg_default entries here */
++static struct reg_default si476x_core_reg[] = {
++ { 0x202, 0xBB80 },
++ { 0x203, 0x1700 },
++};
+
+ static const struct regmap_config si476x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .max_register = 0x4003,
++ .reg_defaults = si476x_core_reg,
++ .num_reg_defaults = ARRAY_SIZE(si476x_core_reg),
+
+ .writeable_reg = si476x_core_regmap_writable_register,
+ .readable_reg = si476x_core_regmap_readable_register,
++ .volatile_reg = si476x_core_regmap_volatile_register,
+
+ .reg_read = si476x_core_regmap_read,
+ .reg_write = si476x_core_regmap_write,
+diff -Nur linux-3.14.40.orig/drivers/misc/sram.c linux-3.14.40/drivers/misc/sram.c
+--- linux-3.14.40.orig/drivers/misc/sram.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/misc/sram.c 2015-05-01 14:57:59.383427001 -0500
+@@ -29,7 +29,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/genalloc.h>
+
+-#define SRAM_GRANULARITY 32
++#define SRAM_GRANULARITY 4096
+
+ struct sram_dev {
+ struct gen_pool *pool;
+diff -Nur linux-3.14.40.orig/drivers/mmc/core/core.c linux-3.14.40/drivers/mmc/core/core.c
+--- linux-3.14.40.orig/drivers/mmc/core/core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/core/core.c 2015-05-01 14:57:59.395427001 -0500
+@@ -13,11 +13,13 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+ #include <linux/delay.h>
+ #include <linux/pagemap.h>
+ #include <linux/err.h>
++#include <linux/gpio.h>
+ #include <linux/leds.h>
+ #include <linux/scatterlist.h>
+ #include <linux/log2.h>
+@@ -1519,6 +1521,43 @@
+ mmc_host_clk_release(host);
+ }
+
++static void mmc_card_power_up(struct mmc_host *host)
++{
++ int i;
++ struct gpio_desc **gds = host->card_reset_gpios;
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ if (gds[i]) {
++ dev_dbg(host->parent, "Asserting reset line %d", i);
++ gpiod_set_value(gds[i], 1);
++ }
++ }
++
++ if (host->card_regulator) {
++ dev_dbg(host->parent, "Enabling external regulator");
++ if (regulator_enable(host->card_regulator))
++ dev_err(host->parent, "Failed to enable external regulator");
++ }
++
++ if (host->card_clk) {
++ dev_dbg(host->parent, "Enabling external clock");
++ clk_prepare_enable(host->card_clk);
++ }
++
++	/* 20ms delay to let clocks and power settle */
++ mmc_delay(20);
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ if (gds[i]) {
++ dev_dbg(host->parent, "Deasserting reset line %d", i);
++ gpiod_set_value(gds[i], 0);
++ }
++ }
++
++	/* 20ms delay after reset release */
++ mmc_delay(20);
++}
++
+ /*
+ * Apply power to the MMC stack. This is a two-stage process.
+ * First, we enable power to the card without the clock running.
+@@ -1535,6 +1574,9 @@
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
++ /* Power up the card/module first, if needed */
++ mmc_card_power_up(host);
++
+ mmc_host_clk_hold(host);
+
+ host->ios.vdd = fls(ocr) - 1;
+diff -Nur linux-3.14.40.orig/drivers/mmc/core/host.c linux-3.14.40/drivers/mmc/core/host.c
+--- linux-3.14.40.orig/drivers/mmc/core/host.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/core/host.c 2015-05-01 14:57:59.407427001 -0500
+@@ -12,14 +12,18 @@
+ * MMC host class device management
+ */
+
++#include <linux/kernel.h>
++#include <linux/clk.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/idr.h>
+ #include <linux/of.h>
+ #include <linux/of_gpio.h>
+ #include <linux/pagemap.h>
+ #include <linux/export.h>
+ #include <linux/leds.h>
++#include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
+
+@@ -439,6 +443,66 @@
+
+ EXPORT_SYMBOL(mmc_of_parse);
+
++static int mmc_of_parse_child(struct mmc_host *host)
++{
++ struct device_node *np;
++ struct clk *clk;
++ int i;
++
++ if (!host->parent || !host->parent->of_node)
++ return 0;
++
++ np = host->parent->of_node;
++
++ host->card_regulator = regulator_get(host->parent, "card-external-vcc");
++ if (IS_ERR(host->card_regulator)) {
++ if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER)
++ return PTR_ERR(host->card_regulator);
++ host->card_regulator = NULL;
++ }
++
++ /* Parse card power/reset/clock control */
++ if (of_find_property(np, "card-reset-gpios", NULL)) {
++ struct gpio_desc *gpd;
++ int level = 0;
++
++ /*
++ * If the regulator is enabled, then we can hold the
++		 * card in reset with active-high resets. Otherwise,
++ * hold the resets low.
++ */
++ if (host->card_regulator && regulator_is_enabled(host->card_regulator))
++ level = 1;
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ gpd = devm_gpiod_get_index(host->parent, "card-reset", i);
++ if (IS_ERR(gpd)) {
++ if (PTR_ERR(gpd) == -EPROBE_DEFER)
++ return PTR_ERR(gpd);
++ break;
++ }
++ gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level);
++ host->card_reset_gpios[i] = gpd;
++ }
++
++ gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios));
++ if (!IS_ERR(gpd)) {
++ dev_warn(host->parent, "More reset gpios than we can handle");
++ gpiod_put(gpd);
++ }
++ }
++
++ clk = of_clk_get_by_name(np, "card_ext_clock");
++ if (IS_ERR(clk)) {
++ if (PTR_ERR(clk) == -EPROBE_DEFER)
++ return PTR_ERR(clk);
++ clk = NULL;
++ }
++ host->card_clk = clk;
++
++ return 0;
++}
++
+ /**
+ * mmc_alloc_host - initialise the per-host structure.
+ * @extra: sizeof private data structure
+@@ -518,6 +582,10 @@
+ {
+ int err;
+
++ err = mmc_of_parse_child(host);
++ if (err)
++ return err;
++
+ WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
+ !host->ops->enable_sdio_irq);
+
+diff -Nur linux-3.14.40.orig/drivers/mmc/core/mmc.c linux-3.14.40/drivers/mmc/core/mmc.c
+--- linux-3.14.40.orig/drivers/mmc/core/mmc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/core/mmc.c 2015-05-01 14:57:59.407427001 -0500
+@@ -317,6 +317,11 @@
+ mmc_card_set_blockaddr(card);
+ }
+
++ card->ext_csd.boot_info = ext_csd[EXT_CSD_BOOT_INFO];
++ card->ext_csd.boot_config = ext_csd[EXT_CSD_PART_CONFIG];
++ card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT];
++ card->ext_csd.boot_bus_width = ext_csd[EXT_CSD_BOOT_BUS_WIDTH];
++
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
+ mmc_select_card_type(card);
+
+@@ -655,6 +660,372 @@
+ return err;
+ }
+
++static ssize_t mmc_boot_info_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ char *boot_partition[8] = {
++ "Device not boot enabled",
++ "Boot partition 1 enabled",
++ "Boot partition 2 enabled",
++ "Reserved",
++ "Reserved",
++ "Reserved",
++ "Reserved",
++ "User area enabled for boot"};
++
++ char *bus_width[4] = {
++ "x1 (sdr) or x4 (ddr) bus width in boot operation mode",
++ "x4 (sdr/ddr) bus width in boot operation mode",
++ "x8 (sdr/ddr) bus width in boot operation mode",
++ "Reserved"};
++
++ char *boot_mode[4] = {
++ "Use single data rate + backward compatible timings in boot operation",
++ "Use single data rate + high speed timings in boot operation mode",
++ "Use dual data rate in boot operation",
++ "Reserved"};
++
++ int partition;
++ int width;
++ int mode;
++ int err;
++ u8 *ext_csd = NULL;
++ struct mmc_card *card = container_of(dev, struct mmc_card, dev);
++
++ /* read it again because user may change it */
++ mmc_claim_host(card->host);
++ err = mmc_get_ext_csd(card, &ext_csd);
++ mmc_release_host(card->host);
++ if (err || !ext_csd) {
++ pr_err("%s: failed to get ext_csd, err=%d\n",
++ mmc_hostname(card->host),
++ err);
++ return err;
++ }
++
++ mmc_read_ext_csd(card, ext_csd);
++ mmc_free_ext_csd(ext_csd);
++
++ partition = (card->ext_csd.boot_config >> 3) & 0x7;
++ width = card->ext_csd.boot_bus_width & 0x3;
++ mode = (card->ext_csd.boot_bus_width >> 3) & 0x3;
++
++ return sprintf(buf,
++ "boot_info:0x%02x;\n"
++ " ALT_BOOT_MODE:%x - %s\n"
++ " DDR_BOOT_MODE:%x - %s\n"
++ " HS_BOOTMODE:%x - %s\n"
++ "boot_size:%04dKB\n"
++ "boot_partition:0x%02x;\n"
++ " BOOT_ACK:%x - %s\n"
++ " BOOT_PARTITION-ENABLE: %x - %s\n"
++ "boot_bus:0x%02x\n"
++ " BOOT_MODE:%x - %s\n"
++ " RESET_BOOT_BUS_WIDTH:%x - %s\n"
++ " BOOT_BUS_WIDTH:%x - %s\n",
++
++ card->ext_csd.boot_info,
++ !!(card->ext_csd.boot_info & 0x1),
++ (card->ext_csd.boot_info & 0x1) ?
++ "Supports alternate boot method" :
++ "Does not support alternate boot method",
++ !!(card->ext_csd.boot_info & 0x2),
++ (card->ext_csd.boot_info & 0x2) ?
++ "Supports alternate dual data rate during boot" :
++ "Does not support dual data rate during boot",
++ !!(card->ext_csd.boot_info & 0x4),
++ (card->ext_csd.boot_info & 0x4) ?
++ "Supports high speed timing during boot" :
++ "Does not support high speed timing during boot",
++
++ card->ext_csd.boot_size * 128,
++
++ card->ext_csd.boot_config,
++ !!(card->ext_csd.boot_config & 0x40),
++ (card->ext_csd.boot_config & 0x40) ?
++ "Boot acknowledge sent during boot operation" :
++ "No boot acknowledge sent",
++ partition,
++ boot_partition[partition],
++
++ card->ext_csd.boot_bus_width,
++ mode,
++ boot_mode[mode],
++ !!(card->ext_csd.boot_bus_width & 0x4),
++ (card->ext_csd.boot_bus_width & 0x4) ?
++ "Retain boot bus width and boot mode after boot operation" :
++ "Reset bus width to x1, single data rate and backward"
++ "compatible timings after boot operation",
++ width,
++ bus_width[width]);
++}
++
++/* set up boot partitions */
++static ssize_t
++setup_boot_partitions(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err, busy = 0;
++ u32 part;
++ u8 *ext_csd, boot_config;
++ struct mmc_command cmd;
++ struct mmc_card *card = container_of(dev, struct mmc_card, dev);
++
++ BUG_ON(!card);
++
++ sscanf(buf, "%d\n", &part);
++
++ if (card->csd.mmca_vsn < CSD_SPEC_VER_4) {
++		pr_err("%s: invalid mmc version:" \
++			" mmc version is below version 4!\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++	/* it's a normal SD/MMC but the user requested to configure boot partition */
++ if (card->ext_csd.boot_size <= 0) {
++		pr_err("%s: this is a normal SD/MMC card but you requested" \
++			" to configure the boot partition!\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ /*
++ * partition must be -
++ * 0 - user area
++ * 1 - boot partition 1
++ * 2 - boot partition 2
++ * DO NOT switch the partitions that used to be accessed
++ * in OS layer HERE
++ */
++ if (part & EXT_CSD_BOOT_PARTITION_ACCESS_MASK) {
++ pr_err("%s: DO NOT switch the partitions that used to be\n" \
++			" accessed in OS layer HERE. Please follow the\n" \
++ " guidance of Documentation/mmc/mmc-dev-parts.txt.\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ ext_csd = kmalloc(512, GFP_KERNEL);
++ if (!ext_csd) {
++ pr_err("%s: could not allocate a buffer to " \
++ "receive the ext_csd.\n", mmc_hostname(card->host));
++ return -ENOMEM;
++ }
++
++ mmc_claim_host(card->host);
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: unable to read EXT_CSD.\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* enable the boot partition in boot mode */
++	/* boot enable can be -
++ * 0x00 - disable boot enable.
++ * 0x08 - boot partition 1 is enabled for boot.
++ * 0x10 - boot partition 2 is enabled for boot.
++ * 0x38 - User area is enabled for boot.
++ */
++ switch (part & EXT_CSD_BOOT_PARTITION_ENABLE_MASK) {
++ case 0:
++ boot_config = (ext_csd[EXT_CSD_PART_CONFIG]
++ & ~EXT_CSD_BOOT_PARTITION_ENABLE_MASK
++ & ~EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ case EXT_CSD_BOOT_PARTITION_PART1:
++ boot_config = ((ext_csd[EXT_CSD_PART_CONFIG]
++ & ~EXT_CSD_BOOT_PARTITION_ENABLE_MASK)
++ | EXT_CSD_BOOT_PARTITION_PART1
++ | EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ case EXT_CSD_BOOT_PARTITION_PART2:
++ boot_config = ((ext_csd[EXT_CSD_PART_CONFIG]
++ & ~EXT_CSD_BOOT_PARTITION_ENABLE_MASK)
++ | EXT_CSD_BOOT_PARTITION_PART2
++ | EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ case EXT_CSD_BOOT_PARTITION_ENABLE_MASK:
++ boot_config = ((ext_csd[EXT_CSD_PART_CONFIG]
++ | EXT_CSD_BOOT_PARTITION_ENABLE_MASK)
++ & ~EXT_CSD_BOOT_ACK_ENABLE);
++ break;
++ default:
++ pr_err("%s: wrong boot config parameter" \
++ " 00 (disable boot), 08 (enable boot1)," \
++ "16 (enable boot2), 56 (User area)\n",
++ mmc_hostname(card->host));
++ err = -EINVAL;
++ goto err_rtn;
++ }
++
++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
++ EXT_CSD_PART_CONFIG, boot_config, card->ext_csd.part_time);
++ if (err) {
++ pr_err("%s: fail to send SWITCH command to card " \
++ "to update boot_config of the EXT_CSD!\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* waiting for the card to finish the busy state */
++ do {
++ memset(&cmd, 0, sizeof(struct mmc_command));
++
++ cmd.opcode = MMC_SEND_STATUS;
++ cmd.arg = card->rca << 16;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
++
++ err = mmc_wait_for_cmd(card->host, &cmd, 0);
++ if (err || busy > 100) {
++			pr_err("%s: failed to wait for " \
++ "the busy state to end.\n",
++ mmc_hostname(card->host));
++ break;
++ }
++
++ if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
++			pr_info("%s: card is in busy state," \
++				" please wait for the busy state to end.\n",
++ mmc_hostname(card->host));
++ }
++ busy++;
++ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
++
++ /* Now check whether it works */
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: %d unable to re-read EXT_CSD.\n",
++ mmc_hostname(card->host), err);
++ goto err_rtn;
++ }
++
++ card->ext_csd.boot_config = ext_csd[EXT_CSD_PART_CONFIG];
++
++err_rtn:
++ mmc_release_host(card->host);
++ kfree(ext_csd);
++ if (err)
++ return err;
++ else
++ return count;
++}
++
++/* configure the boot bus */
++static ssize_t
++setup_boot_bus(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err, busy = 0;
++ u32 boot_bus, new_bus;
++ u8 *ext_csd;
++ struct mmc_command cmd;
++ struct mmc_card *card = container_of(dev, struct mmc_card, dev);
++
++ BUG_ON(!card);
++
++ sscanf(buf, "%d\n", &boot_bus);
++
++ if (card->csd.mmca_vsn < CSD_SPEC_VER_4) {
++		pr_err("%s: invalid mmc version:" \
++			" mmc version is below version 4!\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++	/* it's a normal SD/MMC but the user requested to configure boot bus */
++ if (card->ext_csd.boot_size <= 0) {
++ pr_err("%s: this is a normal SD/MMC card" \
++			" but you requested to configure the boot bus!\n",
++ mmc_hostname(card->host));
++ return -EINVAL;
++ }
++
++ ext_csd = kmalloc(512, GFP_KERNEL);
++ if (!ext_csd) {
++ pr_err("%s: could not allocate a buffer to " \
++ "receive the ext_csd.\n", mmc_hostname(card->host));
++ return -ENOMEM;
++ }
++
++ mmc_claim_host(card->host);
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: unable to read EXT_CSD.\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* Configure the boot bus width when boot partition is enabled */
++ if (((boot_bus & EXT_CSD_BOOT_BUS_WIDTH_MODE_MASK) >> 3) > 2
++ || (boot_bus & EXT_CSD_BOOT_BUS_WIDTH_WIDTH_MASK) > 2
++ || (boot_bus & ~EXT_CSD_BOOT_BUS_WIDTH_MASK) > 0) {
++ pr_err("%s: Invalid inputs!\n",
++ mmc_hostname(card->host));
++ err = -EINVAL;
++ goto err_rtn;
++ }
++
++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
++ EXT_CSD_BOOT_BUS_WIDTH, boot_bus, card->ext_csd.part_time);
++ if (err) {
++ pr_err("%s: fail to send SWITCH command to card " \
++ "to update boot_config of the EXT_CSD!\n",
++ mmc_hostname(card->host));
++ goto err_rtn;
++ }
++
++ /* waiting for the card to finish the busy state */
++ do {
++ memset(&cmd, 0, sizeof(struct mmc_command));
++
++ cmd.opcode = MMC_SEND_STATUS;
++ cmd.arg = card->rca << 16;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
++
++ err = mmc_wait_for_cmd(card->host, &cmd, 0);
++ if (err || busy > 100) {
++			pr_err("%s: failed to wait for " \
++ "the busy state to end.\n",
++ mmc_hostname(card->host));
++ break;
++ }
++
++ if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
++			pr_info("%s: card is in busy state," \
++				" please wait for the busy state to end.\n",
++ mmc_hostname(card->host));
++ }
++ busy++;
++ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
++
++ /* Now check whether it works */
++ err = mmc_send_ext_csd(card, ext_csd);
++ if (err) {
++ pr_err("%s: %d unable to re-read EXT_CSD.\n",
++ mmc_hostname(card->host), err);
++ goto err_rtn;
++ }
++
++ new_bus = ext_csd[EXT_CSD_BOOT_BUS_WIDTH];
++ if (boot_bus != new_bus) {
++ pr_err("%s: after SWITCH, current boot bus mode %d" \
++			" is not the same as the requested bus mode %d!\n",
++ mmc_hostname(card->host), new_bus, boot_bus);
++ goto err_rtn;
++ }
++ card->ext_csd.boot_bus_width = ext_csd[EXT_CSD_BOOT_BUS_WIDTH];
++
++err_rtn:
++ mmc_release_host(card->host);
++ mmc_free_ext_csd(ext_csd);
++ if (err)
++ return err;
++ else
++ return count;
++}
++
+ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+@@ -674,6 +1045,9 @@
+ MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
+ MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+ MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
++DEVICE_ATTR(boot_info, S_IRUGO, mmc_boot_info_show, NULL);
++DEVICE_ATTR(boot_config, S_IWUGO, NULL, setup_boot_partitions);
++DEVICE_ATTR(boot_bus_config, S_IWUGO, NULL, setup_boot_bus);
+
+ static struct attribute *mmc_std_attrs[] = {
+ &dev_attr_cid.attr,
+@@ -692,6 +1066,9 @@
+ &dev_attr_enhanced_area_size.attr,
+ &dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_rel_sectors.attr,
++ &dev_attr_boot_info.attr,
++ &dev_attr_boot_config.attr,
++ &dev_attr_boot_bus_config.attr,
+ NULL,
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/mmc/core/sdio_irq.c linux-3.14.40/drivers/mmc/core/sdio_irq.c
+--- linux-3.14.40.orig/drivers/mmc/core/sdio_irq.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/core/sdio_irq.c 2015-05-01 14:57:59.407427001 -0500
+@@ -90,6 +90,15 @@
+ return ret;
+ }
+
++void sdio_run_irqs(struct mmc_host *host)
++{
++ mmc_claim_host(host);
++ host->sdio_irq_pending = true;
++ process_sdio_pending_irqs(host);
++ mmc_release_host(host);
++}
++EXPORT_SYMBOL_GPL(sdio_run_irqs);
++
+ static int sdio_irq_thread(void *_host)
+ {
+ struct mmc_host *host = _host;
+@@ -189,14 +198,20 @@
+ WARN_ON(!host->claimed);
+
+ if (!host->sdio_irqs++) {
+- atomic_set(&host->sdio_irq_thread_abort, 0);
+- host->sdio_irq_thread =
+- kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
+- mmc_hostname(host));
+- if (IS_ERR(host->sdio_irq_thread)) {
+- int err = PTR_ERR(host->sdio_irq_thread);
+- host->sdio_irqs--;
+- return err;
++ if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
++ atomic_set(&host->sdio_irq_thread_abort, 0);
++ host->sdio_irq_thread =
++ kthread_run(sdio_irq_thread, host,
++ "ksdioirqd/%s", mmc_hostname(host));
++ if (IS_ERR(host->sdio_irq_thread)) {
++ int err = PTR_ERR(host->sdio_irq_thread);
++ host->sdio_irqs--;
++ return err;
++ }
++ } else {
++ mmc_host_clk_hold(host);
++ host->ops->enable_sdio_irq(host, 1);
++ mmc_host_clk_release(host);
+ }
+ }
+
+@@ -211,8 +226,14 @@
+ BUG_ON(host->sdio_irqs < 1);
+
+ if (!--host->sdio_irqs) {
+- atomic_set(&host->sdio_irq_thread_abort, 1);
+- kthread_stop(host->sdio_irq_thread);
++ if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
++ atomic_set(&host->sdio_irq_thread_abort, 1);
++ kthread_stop(host->sdio_irq_thread);
++ } else {
++ mmc_host_clk_hold(host);
++ host->ops->enable_sdio_irq(host, 0);
++ mmc_host_clk_release(host);
++ }
+ }
+
+ return 0;
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/dw_mmc.c linux-3.14.40/drivers/mmc/host/dw_mmc.c
+--- linux-3.14.40.orig/drivers/mmc/host/dw_mmc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/dw_mmc.c 2015-05-01 14:57:59.415427001 -0500
+@@ -2147,6 +2147,8 @@
+ if (!mmc)
+ return -ENOMEM;
+
++ mmc_of_parse(mmc);
++
+ slot = mmc_priv(mmc);
+ slot->id = id;
+ slot->mmc = mmc;
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/Kconfig linux-3.14.40/drivers/mmc/host/Kconfig
+--- linux-3.14.40.orig/drivers/mmc/host/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/Kconfig 2015-05-01 14:57:59.415427001 -0500
+@@ -25,8 +25,7 @@
+ If unsure, say N.
+
+ config MMC_SDHCI
+- tristate "Secure Digital Host Controller Interface support"
+- depends on HAS_DMA
++ tristate
+ help
+ This selects the generic Secure Digital Host Controller Interface.
+ It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
+@@ -59,7 +58,8 @@
+
+ config MMC_SDHCI_PCI
+ tristate "SDHCI support on PCI bus"
+- depends on MMC_SDHCI && PCI
++ depends on PCI && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects the PCI Secure Digital Host Controller Interface.
+ Most controllers found today are PCI devices.
+@@ -83,7 +83,8 @@
+
+ config MMC_SDHCI_ACPI
+ tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+- depends on MMC_SDHCI && ACPI
++ depends on ACPI && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects support for ACPI enumerated SDHCI controllers,
+ identified by ACPI Compatibility ID PNP0D40 or specific
+@@ -94,8 +95,8 @@
+ If unsure, say N.
+
+ config MMC_SDHCI_PLTFM
+- tristate "SDHCI platform and OF driver helper"
+- depends on MMC_SDHCI
++ tristate
++ select MMC_SDHCI
+ help
+ This selects the common helper functions support for Secure Digital
+ Host Controller Interface based platform and OF drivers.
+@@ -106,8 +107,8 @@
+
+ config MMC_SDHCI_OF_ARASAN
+ tristate "SDHCI OF support for the Arasan SDHCI controllers"
+- depends on MMC_SDHCI_PLTFM
+- depends on OF
++ depends on OF && HAS_DMA
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Arasan Secure Digital Host Controller Interface
+ (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
+@@ -118,9 +119,9 @@
+
+ config MMC_SDHCI_OF_ESDHC
+ tristate "SDHCI OF support for the Freescale eSDHC controller"
+- depends on MMC_SDHCI_PLTFM
+- depends on PPC_OF
++ depends on PPC_OF && HAS_DMA
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Freescale eSDHC controller support.
+
+@@ -130,9 +131,9 @@
+
+ config MMC_SDHCI_OF_HLWD
+ tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+- depends on MMC_SDHCI_PLTFM
+- depends on PPC_OF
++ depends on PPC_OF && HAS_DMA
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the "Hollywood" chipset of the Nintendo Wii video game
+@@ -144,8 +145,8 @@
+
+ config MMC_SDHCI_CNS3XXX
+ tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
+- depends on ARCH_CNS3XXX
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_CNS3XXX && HAS_DMA
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+
+@@ -155,9 +156,9 @@
+
+ config MMC_SDHCI_ESDHC_IMX
+ tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
+- depends on ARCH_MXC
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_MXC && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Freescale eSDHC/uSDHC controller support
+ found on i.MX25, i.MX35 i.MX5x and i.MX6x.
+@@ -168,9 +169,9 @@
+
+ config MMC_SDHCI_DOVE
+ tristate "SDHCI support on Marvell's Dove SoC"
+- depends on ARCH_DOVE
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_DOVE && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface in
+ Marvell's Dove SoC.
+@@ -181,9 +182,9 @@
+
+ config MMC_SDHCI_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+- depends on ARCH_TEGRA
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_TEGRA && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the Tegra SD/MMC controller. If you have a Tegra
+ platform with SD or MMC devices, say Y or M here.
+@@ -192,7 +193,8 @@
+
+ config MMC_SDHCI_S3C
+ tristate "SDHCI support on Samsung S3C SoC"
+- depends on MMC_SDHCI && PLAT_SAMSUNG
++ depends on PLAT_SAMSUNG && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referrered to as the HSMMC block in some of the Samsung S3C
+@@ -204,8 +206,8 @@
+
+ config MMC_SDHCI_SIRF
+ tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
+- depends on ARCH_SIRF
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_SIRF && HAS_DMA
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for SiRF System-on-Chip devices.
+
+@@ -215,8 +217,7 @@
+
+ config MMC_SDHCI_PXAV3
+ tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
+- depends on CLKDEV_LOOKUP
+- select MMC_SDHCI
++ depends on CLKDEV_LOOKUP && HAS_DMA
+ select MMC_SDHCI_PLTFM
+ default CPU_MMP2
+ help
+@@ -228,8 +229,7 @@
+
+ config MMC_SDHCI_PXAV2
+ tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
+- depends on CLKDEV_LOOKUP
+- select MMC_SDHCI
++ depends on CLKDEV_LOOKUP && HAS_DMA
+ select MMC_SDHCI_PLTFM
+ default CPU_PXA910
+ help
+@@ -241,7 +241,8 @@
+
+ config MMC_SDHCI_SPEAR
+ tristate "SDHCI support on ST SPEAr platform"
+- depends on MMC_SDHCI && PLAT_SPEAR
++ depends on PLAT_SPEAR && HAS_DMA
++ select MMC_SDHCI
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referrered to as the HSMMC block in some of the ST SPEAR range
+@@ -263,7 +264,7 @@
+
+ config MMC_SDHCI_BCM_KONA
+ tristate "SDHCI support on Broadcom KONA platform"
+- depends on ARCH_BCM
++ depends on ARCH_BCM && HAS_DMA
+ select MMC_SDHCI_PLTFM
+ help
+ This selects the Broadcom Kona Secure Digital Host Controller
+@@ -274,9 +275,9 @@
+
+ config MMC_SDHCI_BCM2835
+ tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
+- depends on ARCH_BCM2835
+- depends on MMC_SDHCI_PLTFM
++ depends on ARCH_BCM2835 && HAS_DMA
+ select MMC_SDHCI_IO_ACCESSORS
++ select MMC_SDHCI_PLTFM
+ help
+ This selects the BCM2835 SD/MMC controller. If you have a BCM2835
+ platform with SD or MMC devices, say Y or M here.
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-acpi.c linux-3.14.40/drivers/mmc/host/sdhci-acpi.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-acpi.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-acpi.c 2015-05-01 14:57:59.423427001 -0500
+@@ -101,11 +101,19 @@
+ }
+
+ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
++ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_acpi_enable_dma,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_ops sdhci_acpi_ops_int = {
++ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_acpi_enable_dma,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_acpi_int_hw_reset,
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-bcm2835.c linux-3.14.40/drivers/mmc/host/sdhci-bcm2835.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-bcm2835.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-bcm2835.c 2015-05-01 14:57:59.423427001 -0500
+@@ -131,8 +131,12 @@
+ .read_l = bcm2835_sdhci_readl,
+ .read_w = bcm2835_sdhci_readw,
+ .read_b = bcm2835_sdhci_readb,
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = bcm2835_sdhci_get_min_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-bcm-kona.c linux-3.14.40/drivers/mmc/host/sdhci-bcm-kona.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-bcm-kona.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-bcm-kona.c 2015-05-01 14:57:59.427427001 -0500
+@@ -205,9 +205,13 @@
+ }
+
+ static struct sdhci_ops sdhci_bcm_kona_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_bcm_kona_get_max_clk,
+ .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock,
+ .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .card_event = sdhci_bcm_kona_card_event,
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci.c linux-3.14.40/drivers/mmc/host/sdhci.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci.c 2015-05-01 15:20:07.579427001 -0500
+@@ -44,6 +44,8 @@
+
+ #define MAX_TUNING_LOOP 40
+
++#define ADMA_SIZE ((128 * 2 + 1) * 4)
++
+ static unsigned int debug_quirks = 0;
+ static unsigned int debug_quirks2;
+
+@@ -131,43 +133,28 @@
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
+-{
+- u32 ier;
+-
+- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+- ier &= ~clear;
+- ier |= set;
+- sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+- sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+-}
+-
+-static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
+-{
+- sdhci_clear_set_irqs(host, 0, irqs);
+-}
+-
+-static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
+-{
+- sdhci_clear_set_irqs(host, irqs, 0);
+-}
+-
+ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
+ {
+- u32 present, irqs;
++ u32 present;
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+- (host->mmc->caps & MMC_CAP_NONREMOVABLE))
++ (host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
++ !IS_ERR_VALUE(gpio_cd))
+ return;
+
+- present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+- SDHCI_CARD_PRESENT;
+- irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
++ if (enable) {
++ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
+
+- if (enable)
+- sdhci_unmask_irqs(host, irqs);
+- else
+- sdhci_mask_irqs(host, irqs);
++ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
++ SDHCI_INT_CARD_INSERT;
++ } else {
++ host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
++ }
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ static void sdhci_enable_card_detection(struct sdhci_host *host)
+@@ -180,22 +167,9 @@
+ sdhci_set_card_detection(host, false);
+ }
+
+-static void sdhci_reset(struct sdhci_host *host, u8 mask)
++void sdhci_reset(struct sdhci_host *host, u8 mask)
+ {
+ unsigned long timeout;
+- u32 uninitialized_var(ier);
+-
+- if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
+- if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
+- SDHCI_CARD_PRESENT))
+- return;
+- }
+-
+- if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
+- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+-
+- if (host->ops->platform_reset_enter)
+- host->ops->platform_reset_enter(host, mask);
+
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+
+@@ -220,16 +194,27 @@
+ timeout--;
+ mdelay(1);
+ }
++}
++EXPORT_SYMBOL_GPL(sdhci_reset);
+
+- if (host->ops->platform_reset_exit)
+- host->ops->platform_reset_exit(host, mask);
++static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
++{
++ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
++ if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT))
++ return;
++ }
+
+- if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
+- sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
++ host->ops->reset(host, mask);
+
+- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+- if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
+- host->ops->enable_dma(host);
++ if (mask & SDHCI_RESET_ALL) {
++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
++ if (host->ops->enable_dma)
++ host->ops->enable_dma(host);
++ }
++
++ /* Resetting the controller clears many */
++ host->preset_enabled = false;
+ }
+ }
+
+@@ -238,15 +223,18 @@
+ static void sdhci_init(struct sdhci_host *host, int soft)
+ {
+ if (soft)
+- sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+ else
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+- sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
+- SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+- SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
+- SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+- SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
++ host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
++ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
++ SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
++ SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
++ SDHCI_INT_RESPONSE;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (soft) {
+ /* force clock reconfiguration */
+@@ -502,11 +490,6 @@
+ else
+ direction = DMA_TO_DEVICE;
+
+- /*
+- * The ADMA descriptor table is mapped further down as we
+- * need to fill it with data first.
+- */
+-
+ host->align_addr = dma_map_single(mmc_dev(host->mmc),
+ host->align_buffer, 128 * 4, direction);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+@@ -567,7 +550,7 @@
+ * If this triggers then we have a calculation bug
+ * somewhere. :/
+ */
+- WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
++ WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
+ }
+
+ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+@@ -595,17 +578,8 @@
+ host->align_addr, 128 * 4, direction);
+ }
+
+- host->adma_addr = dma_map_single(mmc_dev(host->mmc),
+- host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+- if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
+- goto unmap_entries;
+- BUG_ON(host->adma_addr & 0x3);
+-
+ return 0;
+
+-unmap_entries:
+- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+- data->sg_len, direction);
+ unmap_align:
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ 128 * 4, direction);
+@@ -623,19 +597,25 @@
+ u8 *align;
+ char *buffer;
+ unsigned long flags;
++ bool has_unaligned;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+- dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
+- (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+-
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ 128 * 4, direction);
+
+- if (data->flags & MMC_DATA_READ) {
++ /* Do a quick scan of the SG list for any unaligned mappings */
++ has_unaligned = false;
++ for_each_sg(data->sg, sg, host->sg_count, i)
++ if (sg_dma_address(sg) & 3) {
++ has_unaligned = true;
++ break;
++ }
++
++ if (has_unaligned && data->flags & MMC_DATA_READ) {
+ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+
+@@ -721,9 +701,12 @@
+ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+- sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
++ host->ier = (host->ier & ~pio_irqs) | dma_irqs;
+ else
+- sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
++ host->ier = (host->ier & ~dma_irqs) | pio_irqs;
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+@@ -976,8 +959,8 @@
+ * upon error conditions.
+ */
+ if (data->error) {
+- sdhci_reset(host, SDHCI_RESET_CMD);
+- sdhci_reset(host, SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
+ sdhci_send_command(host, data->stop);
+@@ -1107,24 +1090,23 @@
+
+ static u16 sdhci_get_preset_value(struct sdhci_host *host)
+ {
+- u16 ctrl, preset = 0;
+-
+- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ u16 preset = 0;
+
+- switch (ctrl & SDHCI_CTRL_UHS_MASK) {
+- case SDHCI_CTRL_UHS_SDR12:
++ switch (host->timing) {
++ case MMC_TIMING_UHS_SDR12:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+- case SDHCI_CTRL_UHS_SDR25:
++ case MMC_TIMING_UHS_SDR25:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
+ break;
+- case SDHCI_CTRL_UHS_SDR50:
++ case MMC_TIMING_UHS_SDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
+ break;
+- case SDHCI_CTRL_UHS_SDR104:
++ case MMC_TIMING_UHS_SDR104:
++ case MMC_TIMING_MMC_HS200:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ break;
+- case SDHCI_CTRL_UHS_DDR50:
++ case MMC_TIMING_UHS_DDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ break;
+ default:
+@@ -1136,32 +1118,22 @@
+ return preset;
+ }
+
+-static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
++void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+ int div = 0; /* Initialized for compiler warning */
+ int real_div = div, clk_mul = 1;
+ u16 clk = 0;
+ unsigned long timeout;
+
+- if (clock && clock == host->clock)
+- return;
+-
+ host->mmc->actual_clock = 0;
+
+- if (host->ops->set_clock) {
+- host->ops->set_clock(host, clock);
+- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
+- return;
+- }
+-
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+- goto out;
++ return;
+
+ if (host->version >= SDHCI_SPEC_300) {
+- if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
+- SDHCI_CTRL_PRESET_VAL_ENABLE) {
++ if (host->preset_enabled) {
+ u16 pre_val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+@@ -1247,26 +1219,16 @@
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+-
+-out:
+- host->clock = clock;
+-}
+-
+-static inline void sdhci_update_clock(struct sdhci_host *host)
+-{
+- unsigned int clock;
+-
+- clock = host->clock;
+- host->clock = 0;
+- sdhci_set_clock(host, clock);
+ }
++EXPORT_SYMBOL_GPL(sdhci_set_clock);
+
+-static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
++static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
+ {
+ u8 pwr = 0;
+
+- if (power != (unsigned short)-1) {
+- switch (1 << power) {
++ if (mode != MMC_POWER_OFF) {
++ switch (1 << vdd) {
+ case MMC_VDD_165_195:
+ pwr = SDHCI_POWER_180;
+ break;
+@@ -1284,7 +1246,7 @@
+ }
+
+ if (host->pwr == pwr)
+- return -1;
++ return;
+
+ host->pwr = pwr;
+
+@@ -1292,38 +1254,43 @@
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ sdhci_runtime_pm_bus_off(host);
+- return 0;
+- }
+-
+- /*
+- * Spec says that we should clear the power reg before setting
+- * a new value. Some controllers don't seem to like this though.
+- */
+- if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+- sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++ vdd = 0;
++ } else {
++ /*
++ * Spec says that we should clear the power reg before setting
++ * a new value. Some controllers don't seem to like this though.
++ */
++ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
++ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+- /*
+- * At least the Marvell CaFe chip gets confused if we set the voltage
+- * and set turn on power at the same time, so set the voltage first.
+- */
+- if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
++ /*
++ * At least the Marvell CaFe chip gets confused if we set the
++ * voltage and set turn on power at the same time, so set the
++ * voltage first.
++ */
++ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
++ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+- pwr |= SDHCI_POWER_ON;
++ pwr |= SDHCI_POWER_ON;
+
+- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
++ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+- if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+- sdhci_runtime_pm_bus_on(host);
++ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
++ sdhci_runtime_pm_bus_on(host);
+
+- /*
+- * Some controllers need an extra 10ms delay of 10ms before they
+- * can apply clock after applying power
+- */
+- if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+- mdelay(10);
++ /*
++		 * Some controllers need an extra 10ms delay before
++ * they can apply clock after applying power
++ */
++ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
++ mdelay(10);
++ }
+
+- return power;
++ if (host->vmmc) {
++ spin_unlock_irq(&host->lock);
++ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd);
++ spin_lock_irq(&host->lock);
++ }
+ }
+
+ /*****************************************************************************\
+@@ -1428,10 +1395,52 @@
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
++void sdhci_set_bus_width(struct sdhci_host *host, int width)
++{
++ u8 ctrl;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++ if (width == MMC_BUS_WIDTH_8) {
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ if (host->version >= SDHCI_SPEC_300)
++ ctrl |= SDHCI_CTRL_8BITBUS;
++ } else {
++ if (host->version >= SDHCI_SPEC_300)
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
++ if (width == MMC_BUS_WIDTH_4)
++ ctrl |= SDHCI_CTRL_4BITBUS;
++ else
++ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ }
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
++
++void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
++{
++ u16 ctrl_2;
++
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ /* Select Bus Speed Mode for host */
++ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
++ if ((timing == MMC_TIMING_MMC_HS200) ||
++ (timing == MMC_TIMING_UHS_SDR104))
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
++ else if (timing == MMC_TIMING_UHS_SDR12)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
++ else if (timing == MMC_TIMING_UHS_SDR25)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
++ else if (timing == MMC_TIMING_UHS_SDR50)
++ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
++ else if (timing == MMC_TIMING_UHS_DDR50)
++ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
++ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
++}
++EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
++
+ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ {
+ unsigned long flags;
+- int vdd_bit = -1;
+ u8 ctrl;
+
+ spin_lock_irqsave(&host->lock, flags);
+@@ -1457,45 +1466,17 @@
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
+ sdhci_enable_preset_value(host, false);
+
+- sdhci_set_clock(host, ios->clock);
+-
+- if (ios->power_mode == MMC_POWER_OFF)
+- vdd_bit = sdhci_set_power(host, -1);
+- else
+- vdd_bit = sdhci_set_power(host, ios->vdd);
+-
+- if (host->vmmc && vdd_bit != -1) {
+- spin_unlock_irqrestore(&host->lock, flags);
+- mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+- spin_lock_irqsave(&host->lock, flags);
++ if (!ios->clock || ios->clock != host->clock) {
++ host->ops->set_clock(host, ios->clock);
++ host->clock = ios->clock;
+ }
+
++ sdhci_set_power(host, ios->power_mode, ios->vdd);
++
+ if (host->ops->platform_send_init_74_clocks)
+ host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+- /*
+- * If your platform has 8-bit width support but is not a v3 controller,
+- * or if it requires special setup code, you should implement that in
+- * platform_bus_width().
+- */
+- if (host->ops->platform_bus_width) {
+- host->ops->platform_bus_width(host, ios->bus_width);
+- } else {
+- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+- if (ios->bus_width == MMC_BUS_WIDTH_8) {
+- ctrl &= ~SDHCI_CTRL_4BITBUS;
+- if (host->version >= SDHCI_SPEC_300)
+- ctrl |= SDHCI_CTRL_8BITBUS;
+- } else {
+- if (host->version >= SDHCI_SPEC_300)
+- ctrl &= ~SDHCI_CTRL_8BITBUS;
+- if (ios->bus_width == MMC_BUS_WIDTH_4)
+- ctrl |= SDHCI_CTRL_4BITBUS;
+- else
+- ctrl &= ~SDHCI_CTRL_4BITBUS;
+- }
+- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+- }
++ host->ops->set_bus_width(host, ios->bus_width);
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+@@ -1517,13 +1498,13 @@
+ (ios->timing == MMC_TIMING_UHS_SDR25))
+ ctrl |= SDHCI_CTRL_HISPD;
+
+- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+- if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
++ if (!host->preset_enabled) {
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ /*
+ * We only need to set Driver Strength if the
+ * preset value enable is not set.
+ */
++ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+ if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+@@ -1547,34 +1528,16 @@
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ /* Re-enable SD Clock */
+- sdhci_update_clock(host);
++ host->ops->set_clock(host, host->clock);
+ }
+
+-
+ /* Reset SD Clock Enable */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+- if (host->ops->set_uhs_signaling)
+- host->ops->set_uhs_signaling(host, ios->timing);
+- else {
+- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+- /* Select Bus Speed Mode for host */
+- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+- if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+- (ios->timing == MMC_TIMING_UHS_SDR104))
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+- else if (ios->timing == MMC_TIMING_UHS_SDR12)
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+- else if (ios->timing == MMC_TIMING_UHS_SDR25)
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+- else if (ios->timing == MMC_TIMING_UHS_SDR50)
+- ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+- else if (ios->timing == MMC_TIMING_UHS_DDR50)
+- ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+- sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+- }
++ host->ops->set_uhs_signaling(host, ios->timing);
++ host->timing = ios->timing;
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ ((ios->timing == MMC_TIMING_UHS_SDR12) ||
+@@ -1590,8 +1553,7 @@
+ >> SDHCI_PRESET_DRV_SHIFT;
+ }
+
+- /* Re-enable SD Clock */
+- sdhci_update_clock(host);
++ host->ops->set_clock(host, host->clock);
+ } else
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+@@ -1601,7 +1563,7 @@
+ * it on each ios seems to solve the problem.
+ */
+ if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+- sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+@@ -1710,24 +1672,16 @@
+
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+- if (host->flags & SDHCI_DEVICE_DEAD)
+- goto out;
+-
+- if (enable)
+- host->flags |= SDHCI_SDIO_IRQ_ENABLED;
+- else
+- host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
+-
+- /* SDIO IRQ will be enabled as appropriate in runtime resume */
+- if (host->runtime_suspended)
+- goto out;
++ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
++ if (enable)
++ host->ier |= SDHCI_INT_CARD_INT;
++ else
++ host->ier &= ~SDHCI_INT_CARD_INT;
+
+- if (enable)
+- sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
+- else
+- sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
+-out:
+- mmiowb();
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++ mmiowb();
++ }
+ }
+
+ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+@@ -1735,9 +1689,18 @@
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
++ sdhci_runtime_pm_get(host);
++
+ spin_lock_irqsave(&host->lock, flags);
++ if (enable)
++ host->flags |= SDHCI_SDIO_IRQ_ENABLED;
++ else
++ host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
++
+ sdhci_enable_sdio_irq_nolock(host, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
++
++ sdhci_runtime_pm_put(host);
+ }
+
+ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+@@ -1856,22 +1819,16 @@
+
+ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+- struct sdhci_host *host;
++ struct sdhci_host *host = mmc_priv(mmc);
+ u16 ctrl;
+- u32 ier;
+ int tuning_loop_counter = MAX_TUNING_LOOP;
+ unsigned long timeout;
+ int err = 0;
+- bool requires_tuning_nonuhs = false;
+ unsigned long flags;
+
+- host = mmc_priv(mmc);
+-
+ sdhci_runtime_pm_get(host);
+ spin_lock_irqsave(&host->lock, flags);
+
+- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+-
+ /*
+ * The Host Controller needs tuning only in case of SDR104 mode
+ * and for SDR50 mode when Use Tuning for SDR50 is set in the
+@@ -1879,15 +1836,18 @@
+ * If the Host Controller supports the HS200 mode then the
+ * tuning function has to be executed.
+ */
+- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+- (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+- host->flags & SDHCI_SDR104_NEEDS_TUNING))
+- requires_tuning_nonuhs = true;
+-
+- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
+- requires_tuning_nonuhs)
+- ctrl |= SDHCI_CTRL_EXEC_TUNING;
+- else {
++ switch (host->timing) {
++ case MMC_TIMING_MMC_HS200:
++ case MMC_TIMING_UHS_SDR104:
++ break;
++
++ case MMC_TIMING_UHS_SDR50:
++ if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
++ host->flags & SDHCI_SDR104_NEEDS_TUNING)
++ break;
++ /* FALLTHROUGH */
++
++ default:
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+ return 0;
+@@ -1900,6 +1860,8 @@
+ return err;
+ }
+
++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ /*
+@@ -1912,8 +1874,8 @@
+ * to make sure we don't hit a controller bug, we _only_
+ * enable Buffer Read Ready interrupt here.
+ */
+- ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+- sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
++ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
++ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+
+ /*
+ * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
+@@ -2046,7 +2008,8 @@
+ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
+ err = 0;
+
+- sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+
+@@ -2056,26 +2019,30 @@
+
+ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
+ {
+- u16 ctrl;
+-
+ /* Host Controller v3.00 defines preset value registers */
+ if (host->version < SDHCI_SPEC_300)
+ return;
+
+- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+-
+ /*
+ * We only enable or disable Preset Value if they are not already
+ * enabled or disabled respectively. Otherwise, we bail out.
+ */
+- if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+- ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
+- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+- host->flags |= SDHCI_PV_ENABLED;
+- } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+- ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
++ if (host->preset_enabled != enable) {
++ u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++ if (enable)
++ ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
++ else
++ ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
++
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+- host->flags &= ~SDHCI_PV_ENABLED;
++
++ if (enable)
++ host->flags |= SDHCI_PV_ENABLED;
++ else
++ host->flags &= ~SDHCI_PV_ENABLED;
++
++ host->preset_enabled = enable;
+ }
+ }
+
+@@ -2100,8 +2067,8 @@
+ pr_err("%s: Resetting controller.\n",
+ mmc_hostname(host->mmc));
+
+- sdhci_reset(host, SDHCI_RESET_CMD);
+- sdhci_reset(host, SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+@@ -2129,15 +2096,6 @@
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_tasklet_card(unsigned long param)
+-{
+- struct sdhci_host *host = (struct sdhci_host*)param;
+-
+- sdhci_card_event(host->mmc);
+-
+- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+-}
+-
+ static void sdhci_tasklet_finish(unsigned long param)
+ {
+ struct sdhci_host *host;
+@@ -2174,12 +2132,12 @@
+ /* Some controllers need this kick or reset won't work here */
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+ /* This is to force an update */
+- sdhci_update_clock(host);
++ host->ops->set_clock(host, host->clock);
+
+ /* Spec says we should do both at the same time, but Ricoh
+ controllers do not like that. */
+- sdhci_reset(host, SDHCI_RESET_CMD);
+- sdhci_reset(host, SDHCI_RESET_DATA);
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
+ host->mrq = NULL;
+@@ -2429,14 +2387,14 @@
+
+ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ {
+- irqreturn_t result;
++ irqreturn_t result = IRQ_NONE;
+ struct sdhci_host *host = dev_id;
+- u32 intmask, unexpected = 0;
+- int cardint = 0, max_loops = 16;
++ u32 intmask, mask, unexpected = 0;
++ int max_loops = 16;
+
+ spin_lock(&host->lock);
+
+- if (host->runtime_suspended) {
++ if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
+ spin_unlock(&host->lock);
+ pr_warning("%s: got irq while runtime suspended\n",
+ mmc_hostname(host->mmc));
+@@ -2444,88 +2402,81 @@
+ }
+
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+-
+ if (!intmask || intmask == 0xffffffff) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+-again:
+- DBG("*** %s got interrupt: 0x%08x\n",
+- mmc_hostname(host->mmc), intmask);
+-
+- if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+- u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+- SDHCI_CARD_PRESENT;
+-
+- /*
+- * There is a observation on i.mx esdhc. INSERT bit will be
+- * immediately set again when it gets cleared, if a card is
+- * inserted. We have to mask the irq to prevent interrupt
+- * storm which will freeze the system. And the REMOVE gets
+- * the same situation.
+- *
+- * More testing are needed here to ensure it works for other
+- * platforms though.
+- */
+- sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+- SDHCI_INT_CARD_REMOVE);
+- sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+- SDHCI_INT_CARD_INSERT);
+-
+- sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+- SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+- intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+- tasklet_schedule(&host->card_tasklet);
+- }
+-
+- if (intmask & SDHCI_INT_CMD_MASK) {
+- sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
+- SDHCI_INT_STATUS);
+- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+- }
++ do {
++ /* Clear selected interrupts. */
++ mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
++ SDHCI_INT_BUS_POWER);
++ sdhci_writel(host, mask, SDHCI_INT_STATUS);
++
++ DBG("*** %s got interrupt: 0x%08x\n",
++ mmc_hostname(host->mmc), intmask);
++
++ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
++ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
++ SDHCI_CARD_PRESENT;
+
+- if (intmask & SDHCI_INT_DATA_MASK) {
+- sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
+- SDHCI_INT_STATUS);
+- sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+- }
++ /*
++			 * There is an observation on i.mx esdhc. INSERT
++ * bit will be immediately set again when it gets
++ * cleared, if a card is inserted. We have to mask
++ * the irq to prevent interrupt storm which will
++ * freeze the system. And the REMOVE gets the
++ * same situation.
++ *
++			 * More testing is needed here to ensure it works
++ * for other platforms though.
++ */
++ host->ier &= ~(SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE);
++ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
++ SDHCI_INT_CARD_INSERT;
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+- intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
++ sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+
+- intmask &= ~SDHCI_INT_ERROR;
++ host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
++ SDHCI_INT_CARD_REMOVE);
++ result = IRQ_WAKE_THREAD;
++ }
+
+- if (intmask & SDHCI_INT_BUS_POWER) {
+- pr_err("%s: Card is consuming too much power!\n",
+- mmc_hostname(host->mmc));
+- sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
+- }
++ if (intmask & SDHCI_INT_CMD_MASK)
++ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+
+- intmask &= ~SDHCI_INT_BUS_POWER;
++ if (intmask & SDHCI_INT_DATA_MASK)
++ sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+
+- if (intmask & SDHCI_INT_CARD_INT)
+- cardint = 1;
++ if (intmask & SDHCI_INT_BUS_POWER)
++ pr_err("%s: Card is consuming too much power!\n",
++ mmc_hostname(host->mmc));
+
+- intmask &= ~SDHCI_INT_CARD_INT;
++ if (intmask & SDHCI_INT_CARD_INT) {
++ sdhci_enable_sdio_irq_nolock(host, false);
++ host->thread_isr |= SDHCI_INT_CARD_INT;
++ result = IRQ_WAKE_THREAD;
++ }
+
+- if (intmask) {
+- unexpected |= intmask;
+- sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+- }
++ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
++ SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
++ SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
++ SDHCI_INT_CARD_INT);
+
+- result = IRQ_HANDLED;
++ if (intmask) {
++ unexpected |= intmask;
++ sdhci_writel(host, intmask, SDHCI_INT_STATUS);
++ }
+
+- intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ if (result == IRQ_NONE)
++ result = IRQ_HANDLED;
+
+- /*
+- * If we know we'll call the driver to signal SDIO IRQ, disregard
+- * further indications of Card Interrupt in the status to avoid a
+- * needless loop.
+- */
+- if (cardint)
+- intmask &= ~SDHCI_INT_CARD_INT;
+- if (intmask && --max_loops)
+- goto again;
++ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ } while (intmask && --max_loops);
+ out:
+ spin_unlock(&host->lock);
+
+@@ -2534,15 +2485,38 @@
+ mmc_hostname(host->mmc), unexpected);
+ sdhci_dumpregs(host);
+ }
+- /*
+- * We have to delay this as it calls back into the driver.
+- */
+- if (cardint && host->mmc->sdio_irqs)
+- mmc_signal_sdio_irq(host->mmc);
+
+ return result;
+ }
+
++static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
++{
++ struct sdhci_host *host = dev_id;
++ unsigned long flags;
++ u32 isr;
++
++ spin_lock_irqsave(&host->lock, flags);
++ isr = host->thread_isr;
++ host->thread_isr = 0;
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
++ sdhci_card_event(host->mmc);
++ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
++ }
++
++ if (isr & SDHCI_INT_CARD_INT) {
++ sdio_run_irqs(host->mmc);
++
++ spin_lock_irqsave(&host->lock, flags);
++ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
++ sdhci_enable_sdio_irq_nolock(host, true);
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++
++ return isr ? IRQ_HANDLED : IRQ_NONE;
++}
++
+ /*****************************************************************************\
+ * *
+ * Suspend/resume *
+@@ -2552,6 +2526,7 @@
+ #ifdef CONFIG_PM
+ void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+ {
++ int gpio_cd = mmc_gpio_get_cd(host->mmc);
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+@@ -2559,7 +2534,8 @@
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val |= mask ;
+ /* Avoid fake wake up */
+- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
++ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION ||
++ !IS_ERR_VALUE(gpio_cd))
+ val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+ }
+@@ -2579,9 +2555,6 @@
+
+ int sdhci_suspend_host(struct sdhci_host *host)
+ {
+- if (host->ops->platform_suspend)
+- host->ops->platform_suspend(host);
+-
+ sdhci_disable_card_detection(host);
+
+ /* Disable tuning since we are suspending */
+@@ -2591,7 +2564,9 @@
+ }
+
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ host->ier = 0;
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+ } else {
+ sdhci_enable_irq_wakeups(host);
+@@ -2612,8 +2587,9 @@
+ }
+
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+- mmc_hostname(host->mmc), host);
++ ret = request_threaded_irq(host->irq, sdhci_irq,
++ sdhci_thread_irq, IRQF_SHARED,
++ mmc_hostname(host->mmc), host);
+ if (ret)
+ return ret;
+ } else {
+@@ -2635,9 +2611,6 @@
+
+ sdhci_enable_card_detection(host);
+
+- if (host->ops->platform_resume)
+- host->ops->platform_resume(host);
+-
+ /* Set the re-tuning expiration flag */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
+ host->flags |= SDHCI_NEEDS_RETUNING;
+@@ -2689,10 +2662,12 @@
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ host->ier &= SDHCI_INT_CARD_INT;
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+- synchronize_irq(host->irq);
++ synchronize_hardirq(host->irq);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+@@ -2736,7 +2711,7 @@
+ host->runtime_suspended = false;
+
+ /* Enable SDIO IRQ */
+- if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
++ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+
+ /* Enable Card Detection */
+@@ -2795,7 +2770,7 @@
+ if (debug_quirks2)
+ host->quirks2 = debug_quirks2;
+
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+@@ -2855,15 +2830,29 @@
+ * (128) and potentially one alignment transfer for
+ * each of those entries.
+ */
+- host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
++ host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
++ ADMA_SIZE, &host->adma_addr,
++ GFP_KERNEL);
+ host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+ if (!host->adma_desc || !host->align_buffer) {
+- kfree(host->adma_desc);
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
+ kfree(host->align_buffer);
+ pr_warning("%s: Unable to allocate ADMA "
+ "buffers. Falling back to standard DMA.\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
++ } else if (host->adma_addr & 3) {
++ pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
++ mmc_hostname(mmc));
++ host->flags &= ~SDHCI_USE_ADMA;
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
++ kfree(host->align_buffer);
++ host->adma_desc = NULL;
++ host->align_buffer = NULL;
+ }
+ }
+
+@@ -2945,9 +2934,22 @@
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ host->timeout_clk = mmc->f_max / 1000;
+
+- mmc->max_discard_to = (1 << 27) / host->timeout_clk;
++ if (host->quirks2 & SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER) {
++ if (host->ops->get_max_timeout_counter) {
++ mmc->max_discard_to =
++ host->ops->get_max_timeout_counter(host)
++ / host->timeout_clk;
++ } else {
++ pr_err("%s: Hardware doesn't specify max timeout "
++ "counter\n", mmc_hostname(mmc));
++ return -ENODEV;
++ }
++ } else {
++ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
++ }
+
+ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
++ mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
+
+ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+ host->flags |= SDHCI_AUTO_CMD12;
+@@ -3218,8 +3220,6 @@
+ /*
+ * Init tasklets.
+ */
+- tasklet_init(&host->card_tasklet,
+- sdhci_tasklet_card, (unsigned long)host);
+ tasklet_init(&host->finish_tasklet,
+ sdhci_tasklet_finish, (unsigned long)host);
+
+@@ -3236,8 +3236,8 @@
+
+ sdhci_init(host, 0);
+
+- ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+- mmc_hostname(mmc), host);
++ ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
++ IRQF_SHARED, mmc_hostname(mmc), host);
+ if (ret) {
+ pr_err("%s: Failed to request IRQ %d: %d\n",
+ mmc_hostname(mmc), host->irq, ret);
+@@ -3279,12 +3279,12 @@
+
+ #ifdef SDHCI_USE_LEDS_CLASS
+ reset:
+- sdhci_reset(host, SDHCI_RESET_ALL);
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+ #endif
+ untasklet:
+- tasklet_kill(&host->card_tasklet);
+ tasklet_kill(&host->finish_tasklet);
+
+ return ret;
+@@ -3321,14 +3321,14 @@
+ #endif
+
+ if (!dead)
+- sdhci_reset(host, SDHCI_RESET_ALL);
++ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+- sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
++ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
++ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+
+ del_timer_sync(&host->timer);
+
+- tasklet_kill(&host->card_tasklet);
+ tasklet_kill(&host->finish_tasklet);
+
+ if (host->vmmc) {
+@@ -3341,7 +3341,9 @@
+ regulator_put(host->vqmmc);
+ }
+
+- kfree(host->adma_desc);
++ if (host->adma_desc)
++ dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
++ host->adma_desc, host->adma_addr);
+ kfree(host->align_buffer);
+
+ host->adma_desc = NULL;
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-cns3xxx.c linux-3.14.40/drivers/mmc/host/sdhci-cns3xxx.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-cns3xxx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-cns3xxx.c 2015-05-01 14:57:59.427427001 -0500
+@@ -30,13 +30,12 @@
+ u16 clk;
+ unsigned long timeout;
+
+- if (clock == host->clock)
+- return;
++ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+- goto out;
++ return;
+
+ while (host->max_clk / div > clock) {
+ /*
+@@ -75,13 +74,14 @@
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+-out:
+- host->clock = clock;
+ }
+
+ static const struct sdhci_ops sdhci_cns3xxx_ops = {
+ .get_max_clock = sdhci_cns3xxx_get_max_clk,
+ .set_clock = sdhci_cns3xxx_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+@@ -90,8 +90,7 @@
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+- SDHCI_QUIRK_NONSTANDARD_CLOCK,
++ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+
+ static int sdhci_cns3xxx_probe(struct platform_device *pdev)
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-dove.c linux-3.14.40/drivers/mmc/host/sdhci-dove.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-dove.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-dove.c 2015-05-01 14:57:59.447427001 -0500
+@@ -86,6 +86,10 @@
+ static const struct sdhci_ops sdhci_dove_ops = {
+ .read_w = sdhci_dove_readw,
+ .read_l = sdhci_dove_readl,
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_dove_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-esdhc.h linux-3.14.40/drivers/mmc/host/sdhci-esdhc.h
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-esdhc.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-esdhc.h 2015-05-01 14:57:59.447427001 -0500
+@@ -20,12 +20,11 @@
+
+ #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+ SDHCI_QUIRK_NO_BUSY_IRQ | \
+- SDHCI_QUIRK_NONSTANDARD_CLOCK | \
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
+- SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
++ SDHCI_QUIRK_PIO_NEEDS_DELAY)
+
+ #define ESDHC_SYSTEM_CONTROL 0x2c
++#define ESDHC_SYS_CTRL_RSTA (1 << 24)
+ #define ESDHC_CLOCK_MASK 0x0000fff0
+ #define ESDHC_PREDIV_SHIFT 8
+ #define ESDHC_DIVIDER_SHIFT 4
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-esdhc-imx.c linux-3.14.40/drivers/mmc/host/sdhci-esdhc-imx.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-esdhc-imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-esdhc-imx.c 2015-05-01 14:57:59.447427001 -0500
+@@ -11,6 +11,7 @@
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
++#include <linux/busfreq-imx6.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+@@ -114,6 +115,10 @@
+ #define ESDHC_FLAG_STD_TUNING BIT(5)
+ /* The IP has SDHCI_CAPABILITIES_1 register */
+ #define ESDHC_FLAG_HAVE_CAP1 BIT(6)
++/* The IP has errata ERR004536 */
++#define ESDHC_FLAG_ERR004536 BIT(7)
++/* need request bus freq during low power */
++#define ESDHC_FLAG_BUSFREQ BIT(8)
+
+ struct esdhc_soc_data {
+ u32 flags;
+@@ -141,7 +146,8 @@
+
+ static struct esdhc_soc_data usdhc_imx6sl_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+- | ESDHC_FLAG_HAVE_CAP1,
++ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
++ | ESDHC_FLAG_BUSFREQ,
+ };
+
+ struct pltfm_imx_data {
+@@ -160,7 +166,6 @@
+ MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
+ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
+ } multiblock_status;
+- u32 uhs_mode;
+ u32 is_ddr;
+ };
+
+@@ -382,7 +387,6 @@
+ if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
+ ret |= SDHCI_CTRL_TUNED_CLK;
+
+- ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
+ ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
+ return ret;
+@@ -429,7 +433,6 @@
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+- imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK)
+@@ -600,12 +603,14 @@
+ u32 temp, val;
+
+ if (clock == 0) {
++ host->mmc->actual_clock = 0;
++
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+- goto out;
++ return;
+ }
+
+ if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
+@@ -645,8 +650,6 @@
+ }
+
+ mdelay(1);
+-out:
+- host->clock = clock;
+ }
+
+ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+@@ -668,7 +671,7 @@
+ return -ENOSYS;
+ }
+
+-static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
++static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u32 ctrl;
+
+@@ -686,17 +689,56 @@
+
+ esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
+ SDHCI_HOST_CONTROL);
++}
+
+- return 0;
++static void esdhc_tuning_reset(struct sdhci_host *host, u32 rst_bits)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct pltfm_imx_data *imx_data = pltfm_host->priv;
++ u32 timeout;
++ u32 reg;
++
++ reg = readl(host->ioaddr + ESDHC_SYSTEM_CONTROL);
++ reg |= rst_bits;
++ writel(reg, host->ioaddr + ESDHC_SYSTEM_CONTROL);
++
++ /* Wait for max 100ms */
++ timeout = 100;
++
++ /* hw clears the bit when it's done */
++ while (readl(host->ioaddr + ESDHC_SYSTEM_CONTROL) & rst_bits) {
++ if (timeout == 0) {
++ dev_err(mmc_dev(host->mmc),
++ "Reset never completes!\n");
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++
++ /*
++ * The RSTA, reset all, on usdhc will not clear following regs:
++ * > SDHCI_MIX_CTRL
++ * > SDHCI_TUNE_CTRL_STATUS
++ *
++ * Do it manually here.
++ */
++ if ((rst_bits & ESDHC_SYS_CTRL_RSTA) && is_imx6q_usdhc(imx_data)) {
++ writel(0, host->ioaddr + ESDHC_MIX_CTRL);
++ writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
++		/* FIXME: delay to let the tuning status clear, or some cards may not work */
++ mdelay(1);
++ }
+ }
+
+ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
+ {
+ u32 reg;
+
+- /* FIXME: delay a bit for card to be ready for next tuning due to errors */
+- mdelay(1);
++ /* reset controller before tuning or it may fail on some cards */
++ esdhc_tuning_reset(host, ESDHC_SYS_CTRL_RSTA);
+
++ /* This is balanced by the runtime put in sdhci_tasklet_finish */
+ pm_runtime_get_sync(host->mmc->parent);
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
+@@ -713,13 +755,12 @@
+ complete(&mrq->completion);
+ }
+
+-static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
++static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
++ struct scatterlist *sg)
+ {
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+ struct mmc_data data = {0};
+- struct scatterlist sg;
+- char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+@@ -728,11 +769,9 @@
+ data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+- data.sg = &sg;
++ data.sg = sg;
+ data.sg_len = 1;
+
+- sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
+-
+ mrq.cmd = &cmd;
+ mrq.cmd->mrq = &mrq;
+ mrq.data = &data;
+@@ -742,14 +781,12 @@
+ mrq.done = esdhc_request_done;
+ init_completion(&(mrq.completion));
+
+- disable_irq(host->irq);
+- spin_lock(&host->lock);
++ spin_lock_irq(&host->lock);
+ host->mrq = &mrq;
+
+ sdhci_send_command(host, mrq.cmd);
+
+- spin_unlock(&host->lock);
+- enable_irq(host->irq);
++ spin_unlock_irq(&host->lock);
+
+ wait_for_completion(&mrq.completion);
+
+@@ -772,13 +809,21 @@
+
+ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+ {
++ struct scatterlist sg;
++ char *tuning_pattern;
+ int min, max, avg, ret;
+
++ tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
++ if (!tuning_pattern)
++ return -ENOMEM;
++
++ sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
++
+ /* find the mininum delay first which can pass tuning */
+ min = ESDHC_TUNE_CTRL_MIN;
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+- if (!esdhc_send_tuning_cmd(host, opcode))
++ if (!esdhc_send_tuning_cmd(host, opcode, &sg))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
+ }
+@@ -787,7 +832,7 @@
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+- if (esdhc_send_tuning_cmd(host, opcode)) {
++ if (esdhc_send_tuning_cmd(host, opcode, &sg)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+@@ -797,9 +842,11 @@
+ /* use average delay to get the best timing */
+ avg = (min + max) / 2;
+ esdhc_prepare_tuning(host, avg);
+- ret = esdhc_send_tuning_cmd(host, opcode);
++ ret = esdhc_send_tuning_cmd(host, opcode, &sg);
+ esdhc_post_tuning(host);
+
++ kfree(tuning_pattern);
++
+ dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+@@ -837,28 +884,20 @@
+ return pinctrl_select_state(imx_data->pinctrl, pinctrl);
+ }
+
+-static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
++static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+- switch (uhs) {
++ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
+- break;
+ case MMC_TIMING_UHS_SDR25:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
+- break;
+ case MMC_TIMING_UHS_SDR50:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
+- break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+- imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
+ writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
+ ESDHC_MIX_CTRL_DDREN,
+ host->ioaddr + ESDHC_MIX_CTRL);
+@@ -875,7 +914,20 @@
+ break;
+ }
+
+- return esdhc_change_pinstate(host, uhs);
++ esdhc_change_pinstate(host, timing);
++}
++
++static void esdhc_reset(struct sdhci_host *host, u8 mask)
++{
++ sdhci_reset(host, mask);
++
++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++}
++
++static unsigned int esdhc_get_max_timeout_counter(struct sdhci_host *host)
++{
++ return 1 << 28;
+ }
+
+ static struct sdhci_ops sdhci_esdhc_ops = {
+@@ -888,8 +940,9 @@
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+ .get_ro = esdhc_pltfm_get_ro,
+- .platform_bus_width = esdhc_pltfm_bus_width,
++ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .set_uhs_signaling = esdhc_set_uhs_signaling,
++ .reset = esdhc_reset,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+@@ -906,6 +959,7 @@
+ struct esdhc_platform_data *boarddata)
+ {
+ struct device_node *np = pdev->dev.of_node;
++ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ if (!np)
+ return -ENODEV;
+@@ -939,6 +993,12 @@
+ if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
+ boarddata->delay_line = 0;
+
++ if (of_find_property(np, "keep-power-in-suspend", NULL))
++ host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
++
++ if (of_find_property(np, "enable-sdio-wakeup", NULL))
++ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
++
+ return 0;
+ }
+ #else
+@@ -994,6 +1054,9 @@
+ goto free_sdhci;
+ }
+
++ if (imx_data->socdata->flags & ESDHC_FLAG_BUSFREQ)
++ request_bus_freq(BUS_FREQ_HIGH);
++
+ pltfm_host->clk = imx_data->clk_per;
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
+ clk_prepare_enable(imx_data->clk_per);
+@@ -1027,8 +1090,17 @@
+ */
+ if (esdhc_is_usdhc(imx_data)) {
+ writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+- host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
++ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++ SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER;
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
++
++ /*
++ * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
++ * TO1.1, it's harmless for MX6SL
++ */
++ writel(readl(host->ioaddr + 0x6c) | BIT(7), host->ioaddr + 0x6c);
++ sdhci_esdhc_ops.get_max_timeout_counter =
++ esdhc_get_max_timeout_counter;
+ }
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+@@ -1040,6 +1112,9 @@
+ ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
+ host->ioaddr + ESDHC_TUNING_CTRL);
+
++ if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
++ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
++
+ boarddata = &imx_data->boarddata;
+ if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
+ if (!host->mmc->parent->platform_data) {
+@@ -1116,6 +1191,10 @@
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
++ if (host->mmc->pm_caps & MMC_PM_KEEP_POWER &&
++ host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
++ device_init_wakeup(&pdev->dev, 1);
++
+ err = sdhci_add_host(host);
+ if (err)
+ goto disable_clk;
+@@ -1132,6 +1211,8 @@
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
++ if (imx_data->socdata->flags & ESDHC_FLAG_BUSFREQ)
++ release_bus_freq(BUS_FREQ_HIGH);
+ free_sdhci:
+ sdhci_pltfm_free(pdev);
+ return err;
+@@ -1170,10 +1251,15 @@
+
+ ret = sdhci_runtime_suspend_host(host);
+
+- clk_disable_unprepare(imx_data->clk_per);
+- clk_disable_unprepare(imx_data->clk_ipg);
++ if (!sdhci_sdio_irq_enabled(host)) {
++ clk_disable_unprepare(imx_data->clk_per);
++ clk_disable_unprepare(imx_data->clk_ipg);
++ }
+ clk_disable_unprepare(imx_data->clk_ahb);
+
++ if (imx_data->socdata->flags & ESDHC_FLAG_BUSFREQ)
++ release_bus_freq(BUS_FREQ_HIGH);
++
+ return ret;
+ }
+
+@@ -1183,8 +1269,10 @@
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+- clk_prepare_enable(imx_data->clk_per);
+- clk_prepare_enable(imx_data->clk_ipg);
++ if (!sdhci_sdio_irq_enabled(host)) {
++ clk_prepare_enable(imx_data->clk_per);
++ clk_prepare_enable(imx_data->clk_ipg);
++ }
+ clk_prepare_enable(imx_data->clk_ahb);
+
+ return sdhci_runtime_resume_host(host);
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci.h linux-3.14.40/drivers/mmc/host/sdhci.h
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci.h 2015-05-01 14:57:59.463427001 -0500
+@@ -281,18 +281,15 @@
+ unsigned int (*get_max_clock)(struct sdhci_host *host);
+ unsigned int (*get_min_clock)(struct sdhci_host *host);
+ unsigned int (*get_timeout_clock)(struct sdhci_host *host);
+- int (*platform_bus_width)(struct sdhci_host *host,
+- int width);
++ unsigned int (*get_max_timeout_counter)(struct sdhci_host *host);
++ void (*set_bus_width)(struct sdhci_host *host, int width);
+ void (*platform_send_init_74_clocks)(struct sdhci_host *host,
+ u8 power_mode);
+ unsigned int (*get_ro)(struct sdhci_host *host);
+- void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
+- void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
++ void (*reset)(struct sdhci_host *host, u8 mask);
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+- int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
++ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
+- void (*platform_suspend)(struct sdhci_host *host);
+- void (*platform_resume)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*platform_init)(struct sdhci_host *host);
+ void (*card_event)(struct sdhci_host *host);
+@@ -397,6 +394,16 @@
+ extern void sdhci_send_command(struct sdhci_host *host,
+ struct mmc_command *cmd);
+
++static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
++{
++ return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
++}
++
++void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
++void sdhci_set_bus_width(struct sdhci_host *host, int width);
++void sdhci_reset(struct sdhci_host *host, u8 mask);
++void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
++
+ #ifdef CONFIG_PM
+ extern int sdhci_suspend_host(struct sdhci_host *host);
+ extern int sdhci_resume_host(struct sdhci_host *host);
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-of-arasan.c linux-3.14.40/drivers/mmc/host/sdhci-of-arasan.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-of-arasan.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-of-arasan.c 2015-05-01 14:57:59.467427001 -0500
+@@ -52,8 +52,12 @@
+ }
+
+ static struct sdhci_ops sdhci_arasan_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_arasan_get_timeout_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static struct sdhci_pltfm_data sdhci_arasan_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-of-esdhc.c linux-3.14.40/drivers/mmc/host/sdhci-of-esdhc.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-of-esdhc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-of-esdhc.c 2015-05-01 14:57:59.467427001 -0500
+@@ -199,13 +199,14 @@
+
+ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+-
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
++ host->mmc->actual_clock = 0;
++
+ if (clock == 0)
+- goto out;
++ return;
+
+ /* Workaround to reduce the clock frequency for p1010 esdhc */
+ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
+@@ -238,24 +239,8 @@
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(1);
+-out:
+- host->clock = clock;
+ }
+
+-#ifdef CONFIG_PM
+-static u32 esdhc_proctl;
+-static void esdhc_of_suspend(struct sdhci_host *host)
+-{
+- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
+-}
+-
+-static void esdhc_of_resume(struct sdhci_host *host)
+-{
+- esdhc_of_enable_dma(host);
+- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+-}
+-#endif
+-
+ static void esdhc_of_platform_init(struct sdhci_host *host)
+ {
+ u32 vvn;
+@@ -269,7 +254,7 @@
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ }
+
+-static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
++static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u32 ctrl;
+
+@@ -289,8 +274,6 @@
+
+ clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
+ ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
+-
+- return 0;
+ }
+
+ static const struct sdhci_ops sdhci_esdhc_ops = {
+@@ -305,13 +288,46 @@
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+ .platform_init = esdhc_of_platform_init,
+-#ifdef CONFIG_PM
+- .platform_suspend = esdhc_of_suspend,
+- .platform_resume = esdhc_of_resume,
+-#endif
+ .adma_workaround = esdhci_of_adma_workaround,
+- .platform_bus_width = esdhc_pltfm_bus_width,
++ .set_bus_width = esdhc_pltfm_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
++};
++
++#ifdef CONFIG_PM
++
++static u32 esdhc_proctl;
++static int esdhc_of_suspend(struct device *dev)
++{
++ struct sdhci_host *host = dev_get_drvdata(dev);
++
++ esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
++
++ return sdhci_suspend_host(host);
++}
++
++static int esdhc_of_resume(struct device *dev)
++{
++ struct sdhci_host *host = dev_get_drvdata(dev);
++ int ret = sdhci_resume_host(host);
++
++ if (ret == 0) {
++ /* Isn't this already done by sdhci_resume_host() ? --rmk */
++ esdhc_of_enable_dma(host);
++ sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
++ }
++
++ return ret;
++}
++
++static const struct dev_pm_ops esdhc_pmops = {
++	.suspend	= esdhc_of_suspend,
++	.resume		= esdhc_of_resume,
+ };
++#define ESDHC_PMOPS (&esdhc_pmops)
++#else
++#define ESDHC_PMOPS NULL
++#endif
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
+ /*
+@@ -374,7 +390,7 @@
+ .name = "sdhci-esdhc",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_esdhc_of_match,
+- .pm = SDHCI_PLTFM_PMOPS,
++ .pm = ESDHC_PMOPS,
+ },
+ .probe = sdhci_esdhc_probe,
+ .remove = sdhci_esdhc_remove,
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-of-hlwd.c linux-3.14.40/drivers/mmc/host/sdhci-of-hlwd.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-of-hlwd.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-of-hlwd.c 2015-05-01 14:57:59.467427001 -0500
+@@ -58,6 +58,10 @@
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-pci.c linux-3.14.40/drivers/mmc/host/sdhci-pci.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-pci.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-pci.c 2015-05-01 14:57:59.471427001 -0500
+@@ -1023,7 +1023,7 @@
+ return 0;
+ }
+
+-static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
++static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u8 ctrl;
+
+@@ -1044,8 +1044,6 @@
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+-
+- return 0;
+ }
+
+ static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
+@@ -1072,8 +1070,11 @@
+ }
+
+ static const struct sdhci_ops sdhci_pci_ops = {
++ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+- .platform_bus_width = sdhci_pci_bus_width,
++ .set_bus_width = sdhci_pci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-pltfm.c linux-3.14.40/drivers/mmc/host/sdhci-pltfm.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-pltfm.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-pltfm.c 2015-05-01 14:57:59.471427001 -0500
+@@ -45,6 +45,10 @@
+ EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
+
+ static const struct sdhci_ops sdhci_pltfm_ops = {
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ #ifdef CONFIG_OF
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-pxav2.c linux-3.14.40/drivers/mmc/host/sdhci-pxav2.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-pxav2.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-pxav2.c 2015-05-01 14:57:59.471427001 -0500
+@@ -51,11 +51,13 @@
+ #define MMC_CARD 0x1000
+ #define MMC_WIDTH 0x0100
+
+-static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
++static void pxav2_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
++ sdhci_reset(host, mask);
++
+ if (mask == SDHCI_RESET_ALL) {
+ u16 tmp = 0;
+
+@@ -88,7 +90,7 @@
+ }
+ }
+
+-static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
++static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u8 ctrl;
+ u16 tmp;
+@@ -107,14 +109,14 @@
+ }
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+-
+- return 0;
+ }
+
+ static const struct sdhci_ops pxav2_sdhci_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+- .platform_reset_exit = pxav2_set_private_registers,
+- .platform_bus_width = pxav2_mmc_set_width,
++ .set_bus_width = pxav2_mmc_set_bus_width,
++ .reset = pxav2_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ #ifdef CONFIG_OF
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-pxav3.c linux-3.14.40/drivers/mmc/host/sdhci-pxav3.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-pxav3.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-pxav3.c 2015-05-01 14:57:59.471427001 -0500
+@@ -57,11 +57,13 @@
+ #define SDCE_MISC_INT (1<<2)
+ #define SDCE_MISC_INT_EN (1<<1)
+
+-static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
++static void pxav3_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
++ sdhci_reset(host, mask);
++
+ if (mask == SDHCI_RESET_ALL) {
+ /*
+ * tune timing of read data/command when crc error happen
+@@ -129,7 +131,7 @@
+ pxa->power_mode = power_mode;
+ }
+
+-static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
++static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+ {
+ u16 ctrl_2;
+
+@@ -163,15 +165,15 @@
+ dev_dbg(mmc_dev(host->mmc),
+ "%s uhs = %d, ctrl_2 = %04X\n",
+ __func__, uhs, ctrl_2);
+-
+- return 0;
+ }
+
+ static const struct sdhci_ops pxav3_sdhci_ops = {
+- .platform_reset_exit = pxav3_set_private_registers,
++ .set_clock = sdhci_set_clock,
+ .set_uhs_signaling = pxav3_set_uhs_signaling,
+ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = pxav3_reset,
+ };
+
+ static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-s3c.c linux-3.14.40/drivers/mmc/host/sdhci-s3c.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-s3c.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-s3c.c 2015-05-01 14:57:59.483427001 -0500
+@@ -57,6 +57,8 @@
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
++
++ bool no_divider;
+ };
+
+ /**
+@@ -69,6 +71,7 @@
+ */
+ struct sdhci_s3c_drv_data {
+ unsigned int sdhci_quirks;
++ bool no_divider;
+ };
+
+ static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+@@ -153,7 +156,7 @@
+ * If controller uses a non-standard clock division, find the best clock
+ * speed possible with selected clock source and skip the division.
+ */
+- if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
++ if (ourhost->no_divider) {
+ rate = clk_round_rate(clksrc, wanted);
+ return wanted - rate;
+ }
+@@ -188,9 +191,13 @@
+ int src;
+ u32 ctrl;
+
++ host->mmc->actual_clock = 0;
++
+ /* don't bother if the clock is going off. */
+- if (clock == 0)
++ if (clock == 0) {
++ sdhci_set_clock(host, clock);
+ return;
++ }
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = sdhci_s3c_consider_clock(ourhost, src, clock);
+@@ -240,6 +247,8 @@
+ if (clock < 25 * 1000000)
+ ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
++
++ sdhci_set_clock(host, clock);
+ }
+
+ /**
+@@ -296,10 +305,11 @@
+ unsigned long timeout;
+ u16 clk = 0;
+
++ host->mmc->actual_clock = 0;
++
+ /* If the clock is going off, set to 0 at clock control register */
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+- host->clock = clock;
+ return;
+ }
+
+@@ -307,8 +317,6 @@
+
+ clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+- host->clock = clock;
+-
+ clk = SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+@@ -330,14 +338,14 @@
+ }
+
+ /**
+- * sdhci_s3c_platform_bus_width - support 8bit buswidth
++ * sdhci_s3c_set_bus_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * We have 8-bit width support but is not a v3 controller.
+ * So we add platform_bus_width() and support 8bit width.
+ */
+-static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
++static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width)
+ {
+ u8 ctrl;
+
+@@ -359,15 +367,15 @@
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+-
+- return 0;
+ }
+
+ static struct sdhci_ops sdhci_s3c_ops = {
+ .get_max_clock = sdhci_s3c_get_max_clk,
+ .set_clock = sdhci_s3c_set_clock,
+ .get_min_clock = sdhci_s3c_get_min_clock,
+- .platform_bus_width = sdhci_s3c_platform_bus_width,
++ .set_bus_width = sdhci_s3c_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
+@@ -617,8 +625,10 @@
+ /* Setup quirks for the controller */
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+- if (drv_data)
++ if (drv_data) {
+ host->quirks |= drv_data->sdhci_quirks;
++ sc->no_divider = drv_data->no_divider;
++ }
+
+ #ifndef CONFIG_MMC_SDHCI_S3C_DMA
+
+@@ -667,7 +677,7 @@
+ * If controller does not have internal clock divider,
+ * we can use overriding functions instead of default.
+ */
+- if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
++ if (sc->no_divider) {
+ sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+ sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+ sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+@@ -813,7 +823,7 @@
+
+ #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
+ static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+- .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
++ .no_divider = true,
+ };
+ #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
+ #else
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-sirf.c linux-3.14.40/drivers/mmc/host/sdhci-sirf.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-sirf.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-sirf.c 2015-05-01 14:57:59.483427001 -0500
+@@ -28,7 +28,11 @@
+ }
+
+ static struct sdhci_ops sdhci_sirf_ops = {
++ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_sirf_get_max_clk,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static struct sdhci_pltfm_data sdhci_sirf_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-spear.c linux-3.14.40/drivers/mmc/host/sdhci-spear.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-spear.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-spear.c 2015-05-01 14:57:59.483427001 -0500
+@@ -37,7 +37,10 @@
+
+ /* sdhci ops */
+ static const struct sdhci_ops sdhci_pltfm_ops = {
+- /* Nothing to do for now. */
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ /* gpio card detection interrupt handler */
+diff -Nur linux-3.14.40.orig/drivers/mmc/host/sdhci-tegra.c linux-3.14.40/drivers/mmc/host/sdhci-tegra.c
+--- linux-3.14.40.orig/drivers/mmc/host/sdhci-tegra.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mmc/host/sdhci-tegra.c 2015-05-01 14:57:59.483427001 -0500
+@@ -48,19 +48,6 @@
+ int power_gpio;
+ };
+
+-static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+-{
+- u32 val;
+-
+- if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+- /* Use wp_gpio here instead? */
+- val = readl(host->ioaddr + reg);
+- return val | SDHCI_WRITE_PROTECT;
+- }
+-
+- return readl(host->ioaddr + reg);
+-}
+-
+ static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+@@ -108,12 +95,14 @@
+ return mmc_gpio_get_ro(host->mmc);
+ }
+
+-static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
++static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
++ sdhci_reset(host, mask);
++
+ if (!(mask & SDHCI_RESET_ALL))
+ return;
+
+@@ -127,7 +116,7 @@
+ }
+ }
+
+-static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
++static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
+ {
+ u32 ctrl;
+
+@@ -144,16 +133,16 @@
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+- return 0;
+ }
+
+ static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+- .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+- .platform_bus_width = tegra_sdhci_buswidth,
+- .platform_reset_exit = tegra_sdhci_reset_exit,
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = tegra_sdhci_set_bus_width,
++ .reset = tegra_sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
+diff -Nur linux-3.14.40.orig/drivers/mtd/chips/cfi_cmdset_0002.c linux-3.14.40/drivers/mtd/chips/cfi_cmdset_0002.c
+--- linux-3.14.40.orig/drivers/mtd/chips/cfi_cmdset_0002.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mtd/chips/cfi_cmdset_0002.c 2015-05-01 14:57:59.495427001 -0500
+@@ -1058,17 +1058,13 @@
+
+ #define UDELAY(map, chip, adr, usec) \
+ do { \
+- mutex_unlock(&chip->mutex); \
+ cfi_udelay(usec); \
+- mutex_lock(&chip->mutex); \
+ } while (0)
+
+ #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+ do { \
+- mutex_unlock(&chip->mutex); \
+ INVALIDATE_CACHED_RANGE(map, adr, len); \
+ cfi_udelay(usec); \
+- mutex_lock(&chip->mutex); \
+ } while (0)
+
+ #endif
+diff -Nur linux-3.14.40.orig/drivers/mtd/ubi/build.c linux-3.14.40/drivers/mtd/ubi/build.c
+--- linux-3.14.40.orig/drivers/mtd/ubi/build.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/mtd/ubi/build.c 2015-05-01 14:57:59.503427001 -0500
+@@ -640,7 +640,7 @@
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
+- if (ubi->mtd->numeraseregions != 0) {
++ if (ubi->mtd->numeraseregions > 1) {
+ /*
+ * Some flashes have several erase regions. Different regions
+ * may have different eraseblock size and other
+diff -Nur linux-3.14.40.orig/drivers/mxc/asrc/Kconfig linux-3.14.40/drivers/mxc/asrc/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/asrc/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/asrc/Kconfig 2015-05-01 14:57:59.507427001 -0500
+@@ -0,0 +1,14 @@
++#
++# ASRC configuration
++#
++
++menu "MXC Asynchronous Sample Rate Converter support"
++
++config MXC_ASRC
++ tristate "ASRC support"
++ depends on SOC_IMX35 || SOC_IMX53 || SOC_IMX6Q
++ select SND_SOC_FSL_ASRC
++ ---help---
++ Say Y to get the ASRC service.
++
++endmenu
+diff -Nur linux-3.14.40.orig/drivers/mxc/asrc/Makefile linux-3.14.40/drivers/mxc/asrc/Makefile
+--- linux-3.14.40.orig/drivers/mxc/asrc/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/asrc/Makefile 2015-05-01 14:57:59.507427001 -0500
+@@ -0,0 +1,4 @@
++#
++# Makefile for the kernel Asynchronous Sample Rate Converter driver
++#
++obj-$(CONFIG_MXC_ASRC) += mxc_asrc.o
+diff -Nur linux-3.14.40.orig/drivers/mxc/asrc/mxc_asrc.c linux-3.14.40/drivers/mxc/asrc/mxc_asrc.c
+--- linux-3.14.40.orig/drivers/mxc/asrc/mxc_asrc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/asrc/mxc_asrc.c 2015-05-01 14:57:59.507427001 -0500
+@@ -0,0 +1,1957 @@
++/*
++ * Freescale Asynchronous Sample Rate Converter (ASRC) driver
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/clk.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/regmap.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/pagemap.h>
++#include <linux/interrupt.h>
++#include <linux/miscdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/of_platform.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <linux/mxc_asrc.h>
++
++#define ASRC_PROC_PATH "driver/asrc"
++
++#define ASRC_RATIO_DECIMAL_DEPTH 26
++
++#define pair_err(fmt, ...) \
++ dev_err(asrc->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
++
++#define pair_dbg(fmt, ...) \
++ dev_dbg(asrc->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
++
++DEFINE_SPINLOCK(data_lock);
++DEFINE_SPINLOCK(pair_lock);
++
++/* Sample rates are aligned with that defined in pcm.h file */
++static const unsigned char asrc_process_table[][8][2] = {
++ /* 32kHz 44.1kHz 48kHz 64kHz 88.2kHz 96kHz 176kHz 192kHz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 5512Hz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 8kHz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 11025Hz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 16kHz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 22050Hz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0},}, /* 32kHz */
++ {{0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0},}, /* 44.1kHz */
++ {{0, 2}, {0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0},}, /* 48kHz */
++ {{1, 2}, {0, 2}, {0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0},}, /* 64kHz */
++ {{1, 2}, {1, 2}, {1, 2}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1},}, /* 88.2kHz */
++ {{1, 2}, {1, 2}, {1, 2}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1},}, /* 96kHz */
++ {{2, 2}, {2, 2}, {2, 2}, {2, 1}, {2, 1}, {2, 1}, {2, 1}, {2, 1},}, /* 176kHz */
++ {{2, 2}, {2, 2}, {2, 2}, {2, 1}, {2, 1}, {2, 1}, {2, 1}, {2, 1},}, /* 192kHz */
++};
++
++static struct asrc_data *asrc;
++
++/*
++ * The following tables map the relationship between asrc_inclk/asrc_outclk in
++ * mxc_asrc.h and the registers of ASRCSR
++ */
++static unsigned char input_clk_map_v1[] = {
++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
++};
++
++static unsigned char output_clk_map_v1[] = {
++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
++};
++
++/* V2 uses the same map for input and output */
++static unsigned char input_clk_map_v2[] = {
++/* 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf */
++ 0x0, 0x1, 0x2, 0x7, 0x4, 0x5, 0x6, 0x3, 0x8, 0x9, 0xa, 0xb, 0xc, 0xf, 0xe, 0xd,
++};
++
++static unsigned char output_clk_map_v2[] = {
++/* 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf */
++ 0x8, 0x9, 0xa, 0x7, 0xc, 0x5, 0x6, 0xb, 0x0, 0x1, 0x2, 0x3, 0x4, 0xf, 0xe, 0xd,
++};
++
++static unsigned char *input_clk_map, *output_clk_map;
++
++enum mxc_asrc_type {
++ IMX35_ASRC,
++ IMX53_ASRC,
++};
++
++static const struct platform_device_id mxc_asrc_devtype[] = {
++ {
++ .name = "imx35-asrc",
++ .driver_data = IMX35_ASRC,
++ }, {
++ .name = "imx53-asrc",
++ .driver_data = IMX53_ASRC,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, mxc_asrc_devtype);
++
++static const struct of_device_id fsl_asrc_ids[] = {
++ {
++ .compatible = "fsl,imx35-asrc",
++ .data = &mxc_asrc_devtype[IMX35_ASRC],
++ }, {
++ .compatible = "fsl,imx53-asrc",
++ .data = &mxc_asrc_devtype[IMX53_ASRC],
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(of, fsl_asrc_ids);
++
++
++#ifdef DEBUG
++u32 asrc_reg[] = {
++ REG_ASRCTR,
++ REG_ASRIER,
++ REG_ASRCNCR,
++ REG_ASRCFG,
++ REG_ASRCSR,
++ REG_ASRCDR1,
++ REG_ASRCDR2,
++ REG_ASRSTR,
++ REG_ASRRA,
++ REG_ASRRB,
++ REG_ASRRC,
++ REG_ASRPM1,
++ REG_ASRPM2,
++ REG_ASRPM3,
++ REG_ASRPM4,
++ REG_ASRPM5,
++ REG_ASRTFR1,
++ REG_ASRCCR,
++ REG_ASRIDRHA,
++ REG_ASRIDRLA,
++ REG_ASRIDRHB,
++ REG_ASRIDRLB,
++ REG_ASRIDRHC,
++ REG_ASRIDRLC,
++ REG_ASR76K,
++ REG_ASR56K,
++ REG_ASRMCRA,
++ REG_ASRFSTA,
++ REG_ASRMCRB,
++ REG_ASRFSTB,
++ REG_ASRMCRC,
++ REG_ASRFSTC,
++ REG_ASRMCR1A,
++ REG_ASRMCR1B,
++ REG_ASRMCR1C,
++};
++
++static void dump_regs(void)
++{
++ u32 reg, val;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(asrc_reg); i++) {
++ reg = asrc_reg[i];
++ regmap_read(asrc->regmap, reg, &val);
++ dev_dbg(asrc->dev, "REG addr=0x%x val=0x%x\n", reg, val);
++ }
++}
++#else
++static void dump_regs(void) {}
++#endif
++
++/* Only used for Ideal Ratio mode */
++static int asrc_set_clock_ratio(enum asrc_pair_index index,
++ int inrate, int outrate)
++{
++ unsigned long val = 0;
++ int integ, i;
++
++ if (outrate == 0) {
++ dev_err(asrc->dev, "wrong output sample rate: %d\n", outrate);
++ return -EINVAL;
++ }
++
++ /* Formula: r = (1 << ASRC_RATIO_DECIMAL_DEPTH) / outrate * inrate; */
++ for (integ = 0; inrate >= outrate; integ++)
++ inrate -= outrate;
++
++ val |= (integ << ASRC_RATIO_DECIMAL_DEPTH);
++
++ for (i = 1; i <= ASRC_RATIO_DECIMAL_DEPTH; i++) {
++ if ((inrate * 2) >= outrate) {
++ val |= (1 << (ASRC_RATIO_DECIMAL_DEPTH - i));
++ inrate = inrate * 2 - outrate;
++ } else
++ inrate = inrate << 1;
++
++ if (inrate == 0)
++ break;
++ }
++
++ regmap_write(asrc->regmap, REG_ASRIDRL(index), val);
++ regmap_write(asrc->regmap, REG_ASRIDRH(index), (val >> 24));
++
++ return 0;
++}
++
++/* Corresponding to asrc_process_table */
++static int supported_input_rate[] = {
++ 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200,
++ 96000, 176400, 192000,
++};
++
++static int supported_output_rate[] = {
++ 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000,
++};
++
++static int asrc_set_process_configuration(enum asrc_pair_index index,
++ int inrate, int outrate)
++{
++ int in, out;
++
++ for (in = 0; in < ARRAY_SIZE(supported_input_rate); in++) {
++ if (inrate == supported_input_rate[in])
++ break;
++ }
++
++ if (in == ARRAY_SIZE(supported_input_rate)) {
++		dev_err(asrc->dev, "unsupported input sample rate: %d\n", inrate);
++ return -EINVAL;
++ }
++
++ for (out = 0; out < ARRAY_SIZE(supported_output_rate); out++) {
++ if (outrate == supported_output_rate[out])
++ break;
++ }
++
++ if (out == ARRAY_SIZE(supported_output_rate)) {
++		dev_err(asrc->dev, "unsupported output sample rate: %d\n", outrate);
++ return -EINVAL;
++ }
++
++ regmap_update_bits(asrc->regmap, REG_ASRCFG,
++ ASRCFG_PREMODx_MASK(index) | ASRCFG_POSTMODx_MASK(index),
++ ASRCFG_PREMOD(index, asrc_process_table[in][out][0]) |
++ ASRCFG_POSTMOD(index, asrc_process_table[in][out][1]));
++
++ return 0;
++}
++
++static int asrc_get_asrck_clock_divider(int samplerate)
++{
++ unsigned int prescaler, divider, ratio, ra, i;
++ unsigned long bitclk;
++
++ if (samplerate == 0) {
++ dev_err(asrc->dev, "invalid sample rate: %d\n", samplerate);
++ return -EINVAL;
++ }
++
++ bitclk = clk_get_rate(asrc->asrc_clk);
++
++ ra = bitclk / samplerate;
++ ratio = ra;
++
++ /* Calculate the prescaler */
++ for (i = 0; ratio > 8; i++)
++ ratio >>= 1;
++
++ prescaler = i;
++
++ /* Calculate the divider */
++ divider = i ? (((ra + (1 << (i - 1)) - 1) >> i) - 1) : (ra - 1);
++
++	/* The total divider is (2 ^ prescaler) * divider */
++ return (divider << ASRCDRx_AxCPx_WIDTH) + prescaler;
++}
++
++int asrc_req_pair(int chn_num, enum asrc_pair_index *index)
++{
++ int imax = 0, busy = 0, i, ret = 0;
++ unsigned long lock_flags;
++ struct asrc_pair *pair;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ for (i = ASRC_PAIR_A; i < ASRC_PAIR_MAX_NUM; i++) {
++ pair = &asrc->asrc_pair[i];
++ if (chn_num > pair->chn_max) {
++ imax++;
++ continue;
++ } else if (pair->active) {
++ busy++;
++ continue;
++ }
++ /* Save the current qualified pair */
++ *index = i;
++
++ /* Check if this pair is a perfect one */
++ if (chn_num == pair->chn_max)
++ break;
++ }
++
++ if (imax == ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "no pair could afford required channel number\n");
++ ret = -EINVAL;
++ } else if (busy == ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "all pairs are busy now\n");
++ ret = -EBUSY;
++ } else if (busy + imax >= ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "all affordable pairs are busy now\n");
++ ret = -EBUSY;
++ } else {
++ pair = &asrc->asrc_pair[*index];
++ pair->chn_num = chn_num;
++ pair->active = 1;
++ }
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ if (!ret) {
++ clk_enable(asrc->asrc_clk);
++ clk_prepare_enable(asrc->dma_clk);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(asrc_req_pair);
++
++void asrc_release_pair(enum asrc_pair_index index)
++{
++ struct asrc_pair *pair = &asrc->asrc_pair[index];
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ pair->active = 0;
++ pair->overload_error = 0;
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ /* Disable PAIR */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_MASK(index), 0);
++}
++EXPORT_SYMBOL(asrc_release_pair);
++
++int asrc_config_pair(struct asrc_config *config)
++{
++ u32 inrate = config->input_sample_rate, indiv;
++ u32 outrate = config->output_sample_rate, outdiv;
++ int ret, channels, index = config->pair;
++ unsigned long lock_flags;
++
++ /* Set the channel number */
++ spin_lock_irqsave(&data_lock, lock_flags);
++ asrc->asrc_pair[index].chn_num = config->channel_num;
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ if (asrc->channel_bits > 3)
++ channels = config->channel_num;
++ else
++ channels = (config->channel_num + 1) / 2;
++
++ /* Update channel number of current pair */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(index, asrc->channel_bits),
++ ASRCNCR_ANCx_set(index, channels, asrc->channel_bits));
++
++ /* Set the clock source */
++ regmap_update_bits(asrc->regmap, REG_ASRCSR,
++ ASRCSR_AICSx_MASK(index) | ASRCSR_AOCSx_MASK(index),
++ ASRCSR_AICS(index, input_clk_map[config->inclk]) |
++ ASRCSR_AOCS(index, output_clk_map[config->outclk]));
++
++ /* Default setting: Automatic selection for processing mode */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ATSx_MASK(index), ASRCTR_ATS(index));
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_USRx_MASK(index), 0);
++
++ /* Default Input Clock Divider Setting */
++ switch (config->inclk & ASRCSR_AxCSx_MASK) {
++ case INCLK_SPDIF_RX:
++ indiv = ASRC_PRESCALER_SPDIF_RX;
++ break;
++ case INCLK_SPDIF_TX:
++ indiv = ASRC_PRESCALER_SPDIF_TX;
++ break;
++ case INCLK_ASRCK1_CLK:
++ indiv = asrc_get_asrck_clock_divider(inrate);
++ break;
++ default:
++ switch (config->input_word_width) {
++ case ASRC_WIDTH_16_BIT:
++ indiv = ASRC_PRESCALER_I2S_16BIT;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ indiv = ASRC_PRESCALER_I2S_24BIT;
++ break;
++ default:
++ pair_err("unsupported input word width %d\n",
++ config->input_word_width);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* Default Output Clock Divider Setting */
++ switch (config->outclk & ASRCSR_AxCSx_MASK) {
++ case OUTCLK_SPDIF_RX:
++ outdiv = ASRC_PRESCALER_SPDIF_RX;
++ break;
++ case OUTCLK_SPDIF_TX:
++ outdiv = ASRC_PRESCALER_SPDIF_TX;
++ break;
++ case OUTCLK_ASRCK1_CLK:
++ if ((config->inclk & ASRCSR_AxCSx_MASK) == INCLK_NONE)
++ outdiv = ASRC_PRESCALER_IDEAL_RATIO;
++ else
++ outdiv = asrc_get_asrck_clock_divider(outrate);
++ break;
++ default:
++ switch (config->output_word_width) {
++ case ASRC_WIDTH_16_BIT:
++ outdiv = ASRC_PRESCALER_I2S_16BIT;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ outdiv = ASRC_PRESCALER_I2S_24BIT;
++ break;
++ default:
++			pair_err("unsupported output word width %d\n",
++					config->output_word_width);
++ return -EINVAL;
++ }
++ break;
++ }
++
++	/* indiv and outdiv include the prescaler's value, so add its MASK too */
++ regmap_update_bits(asrc->regmap, REG_ASRCDR(index),
++ ASRCDRx_AOCPx_MASK(index) | ASRCDRx_AICPx_MASK(index) |
++ ASRCDRx_AOCDx_MASK(index) | ASRCDRx_AICDx_MASK(index),
++ ASRCDRx_AOCP(index, outdiv) | ASRCDRx_AICP(index, indiv));
++
++ /* Check whether ideal ratio is a must */
++ switch (config->inclk & ASRCSR_AxCSx_MASK) {
++ case INCLK_NONE:
++ /* Clear ASTSx bit to use ideal ratio */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ATSx_MASK(index), 0);
++
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_IDRx_MASK(index) | ASRCTR_USRx_MASK(index),
++ ASRCTR_IDR(index) | ASRCTR_USR(index));
++
++ ret = asrc_set_clock_ratio(index, inrate, outrate);
++ if (ret)
++ return ret;
++
++ ret = asrc_set_process_configuration(index, inrate, outrate);
++ if (ret)
++ return ret;
++
++ break;
++ case INCLK_ASRCK1_CLK:
++		/* This case and the default one are both retained for v1 */
++ if (inrate == 44100 || inrate == 88200) {
++ pair_err("unsupported sample rate %d by selected clock\n",
++ inrate);
++ return -EINVAL;
++ }
++ break;
++ default:
++ if ((config->outclk & ASRCSR_AxCSx_MASK) != OUTCLK_ASRCK1_CLK)
++ break;
++
++ if (outrate == 44100 || outrate == 88200) {
++ pair_err("unsupported sample rate %d by selected clock\n",
++ outrate);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* Config input and output wordwidth */
++ if (config->output_word_width == ASRC_WIDTH_8_BIT) {
++ pair_err("unsupported wordwidth for output: 8bit\n");
++ pair_err("output only support: 16bit or 24bit\n");
++ return -EINVAL;
++ }
++
++ regmap_update_bits(asrc->regmap, REG_ASRMCR1(index),
++ ASRMCR1x_OW16_MASK | ASRMCR1x_IWD_MASK,
++ ASRMCR1x_OW16(config->output_word_width) |
++ ASRMCR1x_IWD(config->input_word_width));
++
++ /* Enable BUFFER STALL */
++ regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
++ ASRMCRx_BUFSTALLx_MASK, ASRMCRx_BUFSTALLx);
++
++ /* Set Threshold for input and output FIFO */
++ return asrc_set_watermark(index, ASRC_INPUTFIFO_THRESHOLD,
++ ASRC_INPUTFIFO_THRESHOLD);
++}
++EXPORT_SYMBOL(asrc_config_pair);
++
++int asrc_set_watermark(enum asrc_pair_index index, u32 in_wm, u32 out_wm)
++{
++ if (in_wm > ASRC_FIFO_THRESHOLD_MAX || out_wm > ASRC_FIFO_THRESHOLD_MAX) {
++ pair_err("invalid watermark!\n");
++ return -EINVAL;
++ }
++
++ return regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
++ ASRMCRx_EXTTHRSHx_MASK | ASRMCRx_INFIFO_THRESHOLD_MASK |
++ ASRMCRx_OUTFIFO_THRESHOLD_MASK,
++ ASRMCRx_EXTTHRSHx | ASRMCRx_INFIFO_THRESHOLD(in_wm) |
++ ASRMCRx_OUTFIFO_THRESHOLD(out_wm));
++}
++EXPORT_SYMBOL(asrc_set_watermark);
++
++void asrc_start_conv(enum asrc_pair_index index)
++{
++ int reg, retry, channels, i;
++
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ASRCEx_MASK(index), ASRCTR_ASRCE(index));
++
++	/* Wait for the initialization of this pair to complete */
++ for (retry = 10, reg = 0; !reg && retry; --retry) {
++ udelay(5);
++ regmap_read(asrc->regmap, REG_ASRCFG, &reg);
++ reg &= ASRCFG_INIRQx_MASK(index);
++ }
++
++ /* Set the input fifo to ASRC STALL level */
++ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
++ channels = ASRCNCR_ANCx_get(index, reg, asrc->channel_bits);
++ for (i = 0; i < channels * 4; i++)
++ regmap_write(asrc->regmap, REG_ASRDI(index), 0);
++
++ /* Overload Interrupt Enable */
++ regmap_write(asrc->regmap, REG_ASRIER, ASRIER_AOLIE);
++}
++EXPORT_SYMBOL(asrc_start_conv);
++
++void asrc_stop_conv(enum asrc_pair_index index)
++{
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_MASK(index), 0);
++}
++EXPORT_SYMBOL(asrc_stop_conv);
++
++void asrc_finish_conv(enum asrc_pair_index index)
++{
++ clk_disable_unprepare(asrc->dma_clk);
++ clk_disable(asrc->asrc_clk);
++}
++EXPORT_SYMBOL(asrc_finish_conv);
++
++#define SET_OVERLOAD_ERR(index, err, msg) \
++ do { \
++ asrc->asrc_pair[index].overload_error |= err; \
++ pair_dbg(msg); \
++ } while (0)
++
++static irqreturn_t asrc_isr(int irq, void *dev_id)
++{
++ enum asrc_pair_index index;
++ u32 status;
++
++ regmap_read(asrc->regmap, REG_ASRSTR, &status);
++
++ for (index = ASRC_PAIR_A; index < ASRC_PAIR_MAX_NUM; index++) {
++ if (asrc->asrc_pair[index].active == 0)
++ continue;
++ if (status & ASRSTR_ATQOL)
++ SET_OVERLOAD_ERR(index, ASRC_TASK_Q_OVERLOAD,
++ "Task Queue FIFO overload");
++ if (status & ASRSTR_AOOL(index))
++ SET_OVERLOAD_ERR(index, ASRC_OUTPUT_TASK_OVERLOAD,
++ "Output Task Overload");
++ if (status & ASRSTR_AIOL(index))
++ SET_OVERLOAD_ERR(index, ASRC_INPUT_TASK_OVERLOAD,
++ "Input Task Overload");
++ if (status & ASRSTR_AODO(index))
++ SET_OVERLOAD_ERR(index, ASRC_OUTPUT_BUFFER_OVERFLOW,
++ "Output Data Buffer has overflowed");
++ if (status & ASRSTR_AIDU(index))
++ SET_OVERLOAD_ERR(index, ASRC_INPUT_BUFFER_UNDERRUN,
++ "Input Data Buffer has underflowed");
++ }
++
++ /* Clean overload error */
++ regmap_write(asrc->regmap, REG_ASRSTR, ASRSTR_AOLE);
++
++ return IRQ_HANDLED;
++}
++
++void asrc_get_status(struct asrc_status_flags *flags)
++{
++ enum asrc_pair_index index = flags->index;
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ flags->overload_error = asrc->asrc_pair[index].overload_error;
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++}
++EXPORT_SYMBOL(asrc_get_status);
++
++u32 asrc_get_per_addr(enum asrc_pair_index index, bool in)
++{
++ return asrc->paddr + (in ? REG_ASRDI(index) : REG_ASRDO(index));
++}
++EXPORT_SYMBOL(asrc_get_per_addr);
++
++static int mxc_init_asrc(void)
++{
++ /* Halt ASRC internal FP when input FIFO needs data for pair A, B, C */
++ regmap_write(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEN);
++
++ /* Disable interrupt by default */
++ regmap_write(asrc->regmap, REG_ASRIER, 0x0);
++
++ /* Default 2: 6: 2 channel assignment */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_A, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_A, 2, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_B, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_B, 6, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_C, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_C, 2, asrc->channel_bits));
++
++ /* Parameter Registers recommended settings */
++ regmap_write(asrc->regmap, REG_ASRPM1, 0x7fffff);
++ regmap_write(asrc->regmap, REG_ASRPM2, 0x255555);
++ regmap_write(asrc->regmap, REG_ASRPM3, 0xff7280);
++ regmap_write(asrc->regmap, REG_ASRPM4, 0xff7280);
++ regmap_write(asrc->regmap, REG_ASRPM5, 0xff7280);
++
++ /* Base address for task queue FIFO. Set to 0x7C */
++ regmap_update_bits(asrc->regmap, REG_ASRTFR1,
++ ASRTFR1_TF_BASE_MASK, ASRTFR1_TF_BASE(0xfc));
++
++	/* Set the processing clock for 76kHz, 133MHz */
++ regmap_write(asrc->regmap, REG_ASR76K, 0x06D6);
++
++	/* Set the processing clock for 56kHz, 133MHz */
++ return regmap_write(asrc->regmap, REG_ASR56K, 0x0947);
++}
++
++#define ASRC_xPUT_DMA_CALLBACK(in) \
++ ((in) ? asrc_input_dma_callback : asrc_output_dma_callback)
++
++static void asrc_input_dma_callback(void *data)
++{
++ struct asrc_pair_params *params = (struct asrc_pair_params *)data;
++
++ dma_unmap_sg(NULL, params->input_sg, params->input_sg_nodes,
++ DMA_MEM_TO_DEV);
++
++ complete(&params->input_complete);
++
++ schedule_work(&params->task_output_work);
++}
++
++static void asrc_output_dma_callback(void *data)
++{
++ struct asrc_pair_params *params = (struct asrc_pair_params *)data;
++
++ dma_unmap_sg(NULL, params->output_sg, params->output_sg_nodes,
++ DMA_DEV_TO_MEM);
++
++ complete(&params->output_complete);
++}
++
++static unsigned int asrc_get_output_FIFO_size(enum asrc_pair_index index)
++{
++ u32 val;
++
++ regmap_read(asrc->regmap, REG_ASRFST(index), &val);
++
++ val &= ASRFSTx_OUTPUT_FIFO_MASK;
++
++ return val >> ASRFSTx_OUTPUT_FIFO_SHIFT;
++}
++
++static u32 asrc_read_one_from_output_FIFO(enum asrc_pair_index index)
++{
++ u32 val;
++
++ regmap_read(asrc->regmap, REG_ASRDO(index), &val);
++
++ return val;
++}
++
++static void asrc_read_output_FIFO(struct asrc_pair_params *params)
++{
++ u32 *reg24 = params->output_last_period.dma_vaddr;
++ u16 *reg16 = params->output_last_period.dma_vaddr;
++ enum asrc_pair_index index = params->index;
++ u32 i, j, reg, size, t_size;
++ bool bit24 = false;
++
++ if (params->output_word_width == ASRC_WIDTH_24_BIT)
++ bit24 = true;
++
++ t_size = 0;
++ do {
++ size = asrc_get_output_FIFO_size(index);
++ for (i = 0; i < size; i++) {
++ for (j = 0; j < params->channel_nums; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ if (bit24) {
++ *(reg24) = reg;
++ reg24++;
++ } else {
++ *(reg16) = (u16)reg;
++ reg16++;
++ }
++ }
++ }
++ t_size += size;
++ } while (size);
++
++ if (t_size > params->last_period_sample)
++ t_size = params->last_period_sample;
++
++ params->output_last_period.length = t_size * params->channel_nums * 2;
++ if (bit24)
++ params->output_last_period.length *= 2;
++}
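++
++/*
++ * The length computed above is in bytes: t_size samples per channel times
++ * channel_nums channels times 2 bytes per 16-bit sample, doubled again for
++ * the 24-bit case because each 24-bit sample occupies a 32-bit word.
++ */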
++
++static void asrc_output_task_worker(struct work_struct *w)
++{
++ struct asrc_pair_params *params =
++ container_of(w, struct asrc_pair_params, task_output_work);
++ enum asrc_pair_index index = params->index;
++ unsigned long lock_flags;
++
++ if (!wait_for_completion_interruptible_timeout(&params->output_complete, HZ / 10)) {
++ pair_err("output dma task timeout\n");
++ return;
++ }
++
++ init_completion(&params->output_complete);
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ if (!params->pair_hold) {
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ return;
++ }
++ asrc_read_output_FIFO(params);
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ complete(&params->lastperiod_complete);
++}
++
++static void mxc_free_dma_buf(struct asrc_pair_params *params)
++{
++ if (params->input_dma_total.dma_vaddr != NULL) {
++ kfree(params->input_dma_total.dma_vaddr);
++ params->input_dma_total.dma_vaddr = NULL;
++ }
++
++ if (params->output_dma_total.dma_vaddr != NULL) {
++ kfree(params->output_dma_total.dma_vaddr);
++ params->output_dma_total.dma_vaddr = NULL;
++ }
++
++ if (params->output_last_period.dma_vaddr) {
++ dma_free_coherent(asrc->dev, 1024 * params->last_period_sample,
++ params->output_last_period.dma_vaddr,
++ params->output_last_period.dma_paddr);
++ params->output_last_period.dma_vaddr = NULL;
++ }
++}
++
++static int mxc_allocate_dma_buf(struct asrc_pair_params *params)
++{
++ struct dma_block *input_a, *output_a, *last_period;
++ enum asrc_pair_index index = params->index;
++
++ input_a = &params->input_dma_total;
++ output_a = &params->output_dma_total;
++ last_period = &params->output_last_period;
++
++ input_a->dma_vaddr = kzalloc(input_a->length, GFP_KERNEL);
++ if (!input_a->dma_vaddr) {
++ pair_err("failed to allocate input dma buffer\n");
++ goto exit;
++ }
++ input_a->dma_paddr = virt_to_dma(NULL, input_a->dma_vaddr);
++
++ output_a->dma_vaddr = kzalloc(output_a->length, GFP_KERNEL);
++ if (!output_a->dma_vaddr) {
++ pair_err("failed to allocate output dma buffer\n");
++ goto exit;
++ }
++ output_a->dma_paddr = virt_to_dma(NULL, output_a->dma_vaddr);
++
++ last_period->dma_vaddr = dma_alloc_coherent(asrc->dev,
++ 1024 * params->last_period_sample,
++ &last_period->dma_paddr, GFP_KERNEL);
++ if (!last_period->dma_vaddr) {
++ pair_err("failed to allocate last period buffer\n");
++ goto exit;
++ }
++
++ return 0;
++
++exit:
++ mxc_free_dma_buf(params);
++
++ return -ENOBUFS;
++}
++
++static struct dma_chan *imx_asrc_get_dma_channel(enum asrc_pair_index index, bool in)
++{
++ char name[4];
++
++ sprintf(name, "%cx%c", in ? 'r' : 't', index + 'a');
++
++ return dma_request_slave_channel(asrc->dev, name);
++}
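++
++/*
++ * The channel name built above is "rxa"/"txa" for pair A, "rxb"/"txb" for
++ * pair B, and so on; dma_request_slave_channel() matches it against the
++ * dma-names property of the ASRC node in the device tree.
++ */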
++
++static int imx_asrc_dma_config(struct asrc_pair_params *params,
++ struct dma_chan *chan, u32 dma_addr,
++ void *buf_addr, u32 buf_len, bool in,
++ enum asrc_word_width word_width)
++{
++ enum asrc_pair_index index = params->index;
++ struct dma_async_tx_descriptor *desc;
++ struct dma_slave_config slave_config;
++ enum dma_slave_buswidth buswidth;
++ struct scatterlist *sg;
++ unsigned int sg_nent, i;
++ int ret;
++
++ if (in) {
++ sg = params->input_sg;
++ sg_nent = params->input_sg_nodes;
++ desc = params->desc_in;
++ } else {
++ sg = params->output_sg;
++ sg_nent = params->output_sg_nodes;
++ desc = params->desc_out;
++ }
++
++ switch (word_width) {
++ case ASRC_WIDTH_16_BIT:
++ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ break;
++ default:
++ pair_err("invalid word width\n");
++ return -EINVAL;
++ }
++
++ slave_config.dma_request0 = 0;
++ slave_config.dma_request1 = 0;
++
++ if (in) {
++ slave_config.direction = DMA_MEM_TO_DEV;
++ slave_config.dst_addr = dma_addr;
++ slave_config.dst_addr_width = buswidth;
++ slave_config.dst_maxburst =
++ params->input_wm * params->channel_nums / buswidth;
++ } else {
++ slave_config.direction = DMA_DEV_TO_MEM;
++ slave_config.src_addr = dma_addr;
++ slave_config.src_addr_width = buswidth;
++ slave_config.src_maxburst =
++ params->output_wm * params->channel_nums / buswidth;
++ }
++ ret = dmaengine_slave_config(chan, &slave_config);
++ if (ret) {
++ pair_err("failed to config dmaengine for %sput task: %d\n",
++ in ? "in" : "out", ret);
++ return -EINVAL;
++ }
++
++ sg_init_table(sg, sg_nent);
++ switch (sg_nent) {
++ case 1:
++ sg_init_one(sg, buf_addr, buf_len);
++ break;
++ case 2:
++ case 3:
++ case 4:
++ for (i = 0; i < (sg_nent - 1); i++)
++ sg_set_buf(&sg[i], buf_addr + i * ASRC_MAX_BUFFER_SIZE,
++ ASRC_MAX_BUFFER_SIZE);
++
++ sg_set_buf(&sg[i], buf_addr + i * ASRC_MAX_BUFFER_SIZE,
++ buf_len - ASRC_MAX_BUFFER_SIZE * i);
++ break;
++ default:
++		pair_err("invalid %sput DMA sg node count: %d\n",
++			in ? "in" : "out", sg_nent);
++ return -EINVAL;
++ }
++
++ ret = dma_map_sg(NULL, sg, sg_nent, slave_config.direction);
++ if (ret != sg_nent) {
++ pair_err("failed to map dma sg for %sput task\n",
++ in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ desc = dmaengine_prep_slave_sg(chan, sg, sg_nent,
++ slave_config.direction, DMA_PREP_INTERRUPT);
++ if (!desc) {
++ pair_err("failed to prepare slave sg for %sput task\n",
++ in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ if (in) {
++ params->desc_in = desc;
++ params->desc_in->callback = asrc_input_dma_callback;
++ } else {
++ params->desc_out = desc;
++ params->desc_out->callback = asrc_output_dma_callback;
++ }
++
++ desc->callback = ASRC_xPUT_DMA_CALLBACK(in);
++ desc->callback_param = params;
++
++ return 0;
++}
++
++static int mxc_asrc_prepare_io_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf, bool in)
++{
++ enum asrc_pair_index index = params->index;
++ struct dma_chan *dma_channel;
++ enum asrc_word_width width;
++ unsigned int *dma_len, *sg_nodes, buf_len, wm;
++ void __user *buf_vaddr;
++ void *dma_vaddr;
++ u32 word_size, fifo_addr;
++
++ if (in) {
++ dma_channel = params->input_dma_channel;
++ dma_vaddr = params->input_dma_total.dma_vaddr;
++ dma_len = &params->input_dma_total.length;
++ width = params->input_word_width;
++ sg_nodes = &params->input_sg_nodes;
++ wm = params->input_wm;
++ buf_vaddr = (void __user *)pbuf->input_buffer_vaddr;
++ buf_len = pbuf->input_buffer_length;
++ } else {
++ dma_channel = params->output_dma_channel;
++ dma_vaddr = params->output_dma_total.dma_vaddr;
++ dma_len = &params->output_dma_total.length;
++ width = params->output_word_width;
++ sg_nodes = &params->output_sg_nodes;
++ wm = params->last_period_sample;
++ buf_vaddr = (void __user *)pbuf->output_buffer_vaddr;
++ buf_len = pbuf->output_buffer_length;
++ }
++
++ switch (width) {
++ case ASRC_WIDTH_24_BIT:
++ word_size = 4;
++ break;
++ case ASRC_WIDTH_16_BIT:
++ case ASRC_WIDTH_8_BIT:
++ word_size = 2;
++ break;
++ default:
++ pair_err("invalid %sput word size!\n", in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ if (buf_len < word_size * params->channel_nums * wm) {
++ pair_err("%sput buffer size[%d] is too small!\n",
++ in ? "in" : "out", buf_len);
++ return -EINVAL;
++ }
++
++	/* Copy the original data into the input buffer */
++ if (in && copy_from_user(dma_vaddr, buf_vaddr, buf_len))
++ return -EFAULT;
++
++ *dma_len = buf_len;
++ if (!in)
++ *dma_len -= wm * word_size * params->channel_nums;
++
++ *sg_nodes = *dma_len / ASRC_MAX_BUFFER_SIZE + 1;
++
++ fifo_addr = asrc_get_per_addr(params->index, in);
++
++ return imx_asrc_dma_config(params, dma_channel, fifo_addr, dma_vaddr,
++ *dma_len, in, width);
++}
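++
++/*
++ * Note on the output direction above: wm is last_period_sample rather than
++ * the FIFO watermark, so the output DMA transfer is shortened by one "last
++ * period"; that tail is read back by the CPU in asrc_read_output_FIFO()
++ * from the output task worker once the input DMA has completed.
++ */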
++
++static int mxc_asrc_prepare_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf)
++{
++ enum asrc_pair_index index = params->index;
++ int ret;
++
++ ret = mxc_asrc_prepare_io_buffer(params, pbuf, true);
++ if (ret) {
++ pair_err("failed to prepare input buffer: %d\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_prepare_io_buffer(params, pbuf, false);
++ if (ret) {
++ pair_err("failed to prepare output buffer: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++int mxc_asrc_process_io_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf, bool in)
++{
++ void *last_vaddr = params->output_last_period.dma_vaddr;
++ unsigned int *last_len = &params->output_last_period.length;
++ enum asrc_pair_index index = params->index;
++ unsigned int dma_len, *buf_len;
++ struct completion *complete;
++ void __user *buf_vaddr;
++ void *dma_vaddr;
++
++ if (in) {
++ dma_vaddr = params->input_dma_total.dma_vaddr;
++ dma_len = params->input_dma_total.length;
++ buf_len = &pbuf->input_buffer_length;
++ complete = &params->input_complete;
++ buf_vaddr = (void __user *)pbuf->input_buffer_vaddr;
++ } else {
++ dma_vaddr = params->output_dma_total.dma_vaddr;
++ dma_len = params->output_dma_total.length;
++ buf_len = &pbuf->output_buffer_length;
++ complete = &params->lastperiod_complete;
++ buf_vaddr = (void __user *)pbuf->output_buffer_vaddr;
++ }
++
++ if (!wait_for_completion_interruptible_timeout(complete, 10 * HZ)) {
++ pair_err("%s task timeout\n", in ? "input dma" : "last period");
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pair_err("%sput task forcibly aborted\n", in ? "in" : "out");
++ return -ERESTARTSYS;
++ }
++
++ init_completion(complete);
++
++ *buf_len = dma_len;
++
++	/* Only the output path needs to return data to user space */
++ if (!in) {
++ if (copy_to_user(buf_vaddr, dma_vaddr, dma_len))
++ return -EFAULT;
++
++ *buf_len += *last_len;
++
++ if (copy_to_user(buf_vaddr + dma_len, last_vaddr, *last_len))
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++int mxc_asrc_process_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf)
++{
++ enum asrc_pair_index index = params->index;
++ int ret;
++
++ ret = mxc_asrc_process_io_buffer(params, pbuf, true);
++ if (ret) {
++ pair_err("failed to process input buffer: %d\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_process_io_buffer(params, pbuf, false);
++ if (ret) {
++ pair_err("failed to process output buffer: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++#ifdef ASRC_POLLING_WITHOUT_DMA
++static void asrc_write_one_to_input_FIFO(enum asrc_pair_index index, u32 val)
++{
++ regmap_write(asrc->regmap, REG_ASRDI(index), val);
++}
++
++/* THIS FUNCTION ONLY EXISTS FOR DEBUGGING AND ONLY SUPPORTS TWO CHANNELS */
++static void asrc_polling_debug(struct asrc_pair_params *params)
++{
++ enum asrc_pair_index index = params->index;
++ u32 *in24 = params->input_dma_total.dma_vaddr;
++ u32 dma_len = params->input_dma_total.length / (params->channel_nums * 4);
++ u32 size, i, j, t_size, reg;
++ u32 *reg24 = params->output_dma_total.dma_vaddr;
++
++ t_size = 0;
++
++ for (i = 0; i < dma_len; ) {
++ for (j = 0; j < 2; j++) {
++ asrc_write_one_to_input_FIFO(index, *in24);
++ in24++;
++ asrc_write_one_to_input_FIFO(index, *in24);
++ in24++;
++ i++;
++ }
++ udelay(50);
++ udelay(50 * params->output_sample_rate / params->input_sample_rate);
++
++ size = asrc_get_output_FIFO_size(index);
++ for (j = 0; j < size; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ }
++ t_size += size;
++ }
++
++ mdelay(1);
++ size = asrc_get_output_FIFO_size(index);
++ for (j = 0; j < size; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ }
++ t_size += size;
++
++ params->output_dma_total.length = t_size * params->channel_nums * 4;
++ params->output_last_period.length = 0;
++
++ dma_unmap_sg(NULL, params->input_sg, params->input_sg_nodes,
++ DMA_MEM_TO_DEV);
++ dma_unmap_sg(NULL, params->output_sg, params->output_sg_nodes,
++ DMA_DEV_TO_MEM);
++
++ complete(&params->input_complete);
++ complete(&params->lastperiod_complete);
++}
++#else
++static void mxc_asrc_submit_dma(struct asrc_pair_params *params)
++{
++ enum asrc_pair_index index = params->index;
++ u32 size = asrc_get_output_FIFO_size(params->index);
++ int i, j;
++
++ /* Read all data in OUTPUT FIFO */
++ while (size) {
++ for (j = 0; j < size; j++)
++ for (i = 0; i < params->channel_nums; i++)
++ asrc_read_one_from_output_FIFO(index);
++ /* Fetch the data every 100us */
++ udelay(100);
++
++ size = asrc_get_output_FIFO_size(index);
++ }
++
++ /* Submit dma request */
++ dmaengine_submit(params->desc_in);
++ dma_async_issue_pending(params->desc_in->chan);
++
++ dmaengine_submit(params->desc_out);
++ dma_async_issue_pending(params->desc_out->chan);
++
++	/*
++	 * Clear the DMA requests during the ASRC stall state:
++	 * while stalled, the data left in the input FIFO never drops below
++	 * the input threshold and the output FIFO never rises above the
++	 * output threshold, so the pending DMA requests get cleared.
++	 */
++ asrc_set_watermark(index, ASRC_FIFO_THRESHOLD_MIN, ASRC_FIFO_THRESHOLD_MAX);
++
++ /* Update the real input threshold to raise dma request */
++ asrc_set_watermark(index, params->input_wm, params->output_wm);
++}
++#endif
++
++static long asrc_ioctl_req_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ struct asrc_req req;
++ long ret;
++
++ ret = copy_from_user(&req, user, sizeof(req));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get req from user space: %ld\n", ret);
++ return ret;
++ }
++
++ ret = asrc_req_pair(req.chn_num, &req.index);
++ if (ret) {
++ dev_err(asrc->dev, "failed to request pair: %ld\n", ret);
++ return ret;
++ }
++
++ params->pair_hold = 1;
++ params->index = req.index;
++ params->channel_nums = req.chn_num;
++
++ ret = copy_to_user(user, &req, sizeof(req));
++ if (ret) {
++ dev_err(asrc->dev, "failed to send req to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_config_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ struct asrc_config config;
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&config, user, sizeof(config));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get config from user space: %ld\n", ret);
++ return ret;
++ }
++
++ index = config.pair;
++
++ ret = asrc_config_pair(&config);
++ if (ret) {
++ pair_err("failed to config pair: %ld\n", ret);
++ return ret;
++ }
++
++ params->input_wm = 4;
++ params->output_wm = 2;
++
++ ret = asrc_set_watermark(index, params->input_wm, params->output_wm);
++ if (ret)
++ return ret;
++
++ params->output_buffer_size = config.dma_buffer_size;
++ params->input_buffer_size = config.dma_buffer_size;
++ if (config.buffer_num > ASRC_DMA_BUFFER_NUM)
++ params->buffer_num = ASRC_DMA_BUFFER_NUM;
++ else
++ params->buffer_num = config.buffer_num;
++
++ params->input_dma_total.length = ASRC_DMA_BUFFER_SIZE;
++ params->output_dma_total.length = ASRC_DMA_BUFFER_SIZE;
++
++ params->input_word_width = config.input_word_width;
++ params->output_word_width = config.output_word_width;
++
++ params->input_sample_rate = config.input_sample_rate;
++ params->output_sample_rate = config.output_sample_rate;
++
++ params->last_period_sample = ASRC_OUTPUT_LAST_SAMPLE_DEFAULT;
++
++ ret = mxc_allocate_dma_buf(params);
++ if (ret) {
++ pair_err("failed to allocate dma buffer: %ld\n", ret);
++ return ret;
++ }
++
++ /* Request DMA channel for both input and output */
++ params->input_dma_channel = imx_asrc_get_dma_channel(index, true);
++ if (params->input_dma_channel == NULL) {
++ pair_err("failed to request input task dma channel\n");
++ return -EBUSY;
++ }
++
++ params->output_dma_channel = imx_asrc_get_dma_channel(index, false);
++ if (params->output_dma_channel == NULL) {
++ pair_err("failed to request output task dma channel\n");
++ return -EBUSY;
++ }
++
++ init_completion(&params->input_complete);
++ init_completion(&params->output_complete);
++ init_completion(&params->lastperiod_complete);
++
++ /* Add work struct to receive last period of output data */
++ INIT_WORK(&params->task_output_work, asrc_output_task_worker);
++
++ ret = copy_to_user(user, &config, sizeof(config));
++ if (ret) {
++ pair_err("failed to send config to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_release_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ unsigned long lock_flags;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++	/* The index might not be valid if the application misbehaved */
++ if (index < 0)
++ return -EINVAL;
++
++ params->asrc_active = 0;
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ params->pair_hold = 0;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (params->input_dma_channel)
++ dma_release_channel(params->input_dma_channel);
++ if (params->output_dma_channel)
++ dma_release_channel(params->output_dma_channel);
++ mxc_free_dma_buf(params);
++ asrc_release_pair(index);
++ asrc_finish_conv(index);
++
++ return 0;
++}
++
++static long asrc_ioctl_convert(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ struct asrc_convert_buffer buf;
++ long ret;
++
++ ret = copy_from_user(&buf, user, sizeof(buf));
++ if (ret) {
++ pair_err("failed to get buf from user space: %ld\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_prepare_buffer(params, &buf);
++ if (ret) {
++ pair_err("failed to prepare buffer: %ld\n", ret);
++ return ret;
++ }
++
++#ifdef ASRC_POLLING_WITHOUT_DMA
++ asrc_polling_debug(params);
++#else
++ mxc_asrc_submit_dma(params);
++#endif
++
++ ret = mxc_asrc_process_buffer(params, &buf);
++ if (ret) {
++ pair_err("failed to process buffer: %ld\n", ret);
++ return ret;
++ }
++
++ ret = copy_to_user(user, &buf, sizeof(buf));
++ if (ret) {
++ pair_err("failed to send buf to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_start_conv(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ params->asrc_active = 1;
++ asrc_start_conv(index);
++
++ return 0;
++}
++
++static long asrc_ioctl_stop_conv(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ dmaengine_terminate_all(params->input_dma_channel);
++ dmaengine_terminate_all(params->output_dma_channel);
++
++ asrc_stop_conv(index);
++ params->asrc_active = 0;
++
++ return 0;
++}
++
++static long asrc_ioctl_status(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ struct asrc_status_flags flags;
++ long ret;
++
++ ret = copy_from_user(&flags, user, sizeof(flags));
++ if (ret) {
++ pair_err("failed to get flags from user space: %ld\n", ret);
++ return ret;
++ }
++
++ asrc_get_status(&flags);
++
++ ret = copy_to_user(user, &flags, sizeof(flags));
++ if (ret) {
++ pair_err("failed to send flags to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_flush(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ init_completion(&params->input_complete);
++ init_completion(&params->output_complete);
++ init_completion(&params->lastperiod_complete);
++
++ /* Release DMA and request again */
++ dma_release_channel(params->input_dma_channel);
++ dma_release_channel(params->output_dma_channel);
++
++ params->input_dma_channel = imx_asrc_get_dma_channel(index, true);
++ if (params->input_dma_channel == NULL) {
++ pair_err("failed to request input task dma channel\n");
++ return -EBUSY;
++ }
++
++ params->output_dma_channel = imx_asrc_get_dma_channel(index, false);
++ if (params->output_dma_channel == NULL) {
++ pair_err("failed to request output task dma channel\n");
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct asrc_pair_params *params = file->private_data;
++ void __user *user = (void __user *)arg;
++ long ret = 0;
++
++ switch (cmd) {
++ case ASRC_REQ_PAIR:
++ ret = asrc_ioctl_req_pair(params, user);
++ break;
++ case ASRC_CONFIG_PAIR:
++ ret = asrc_ioctl_config_pair(params, user);
++ break;
++ case ASRC_RELEASE_PAIR:
++ ret = asrc_ioctl_release_pair(params, user);
++ break;
++ case ASRC_CONVERT:
++ ret = asrc_ioctl_convert(params, user);
++ break;
++ case ASRC_START_CONV:
++ ret = asrc_ioctl_start_conv(params, user);
++ dump_regs();
++ break;
++ case ASRC_STOP_CONV:
++ ret = asrc_ioctl_stop_conv(params, user);
++ break;
++ case ASRC_STATUS:
++ ret = asrc_ioctl_status(params, user);
++ break;
++ case ASRC_FLUSH:
++ ret = asrc_ioctl_flush(params, user);
++ break;
++ default:
++ dev_err(asrc->dev, "invalid ioctl cmd!\n");
++ break;
++ }
++
++ return ret;
++}
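++
++/*
++ * Typical sequence expected from user space on /dev/mxc_asrc (a sketch
++ * based on the handlers above, not a formal ABI description):
++ * ASRC_REQ_PAIR -> ASRC_CONFIG_PAIR -> ASRC_START_CONV ->
++ * ASRC_CONVERT (repeated per buffer) -> ASRC_STOP_CONV ->
++ * ASRC_RELEASE_PAIR, with ASRC_STATUS and ASRC_FLUSH usable at any point
++ * while the pair is held.
++ */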
++
++static int mxc_asrc_open(struct inode *inode, struct file *file)
++{
++ struct asrc_pair_params *params;
++ int ret = 0;
++
++ ret = signal_pending(current);
++ if (ret) {
++ dev_err(asrc->dev, "current process has a signal pending\n");
++ return ret;
++ }
++
++ params = kzalloc(sizeof(struct asrc_pair_params), GFP_KERNEL);
++ if (params == NULL) {
++ dev_err(asrc->dev, "failed to allocate pair_params\n");
++ return -ENOBUFS;
++ }
++
++ file->private_data = params;
++
++ return ret;
++}
++
++static int mxc_asrc_close(struct inode *inode, struct file *file)
++{
++ struct asrc_pair_params *params;
++ unsigned long lock_flags;
++
++ params = file->private_data;
++
++ if (!params)
++ return 0;
++
++ if (params->asrc_active) {
++ params->asrc_active = 0;
++
++ dmaengine_terminate_all(params->input_dma_channel);
++ dmaengine_terminate_all(params->output_dma_channel);
++
++ asrc_stop_conv(params->index);
++
++ complete(&params->input_complete);
++ complete(&params->output_complete);
++ complete(&params->lastperiod_complete);
++ }
++
++ if (params->pair_hold) {
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ params->pair_hold = 0;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (params->input_dma_channel)
++ dma_release_channel(params->input_dma_channel);
++ if (params->output_dma_channel)
++ dma_release_channel(params->output_dma_channel);
++
++ mxc_free_dma_buf(params);
++
++ asrc_release_pair(params->index);
++ asrc_finish_conv(params->index);
++ }
++
++ kfree(params);
++ file->private_data = NULL;
++
++ return 0;
++}
++
++static int mxc_asrc_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret;
++
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot);
++ if (ret) {
++ dev_err(asrc->dev, "failed to memory map!\n");
++ return ret;
++ }
++
++ vma->vm_flags &= ~VM_IO;
++
++ return ret;
++}
++
++static const struct file_operations asrc_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = asrc_ioctl,
++ .mmap = mxc_asrc_mmap,
++ .open = mxc_asrc_open,
++ .release = mxc_asrc_close,
++};
++
++static struct miscdevice asrc_miscdev = {
++ .name = "mxc_asrc",
++ .fops = &asrc_fops,
++ .minor = MISC_DYNAMIC_MINOR,
++};
++
++static int asrc_read_proc_attr(struct file *file, char __user *buf,
++ size_t count, loff_t *off)
++{
++ char tmpbuf[80];
++ int len = 0;
++ u32 reg;
++
++ if (*off)
++ return 0;
++
++ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
++
++ len += sprintf(tmpbuf, "ANCA: %d\nANCB: %d\nANCC: %d\n",
++ ASRCNCR_ANCx_get(ASRC_PAIR_A, reg, asrc->channel_bits),
++ ASRCNCR_ANCx_get(ASRC_PAIR_B, reg, asrc->channel_bits),
++ ASRCNCR_ANCx_get(ASRC_PAIR_C, reg, asrc->channel_bits));
++
++ if (len > count)
++ return 0;
++
++ if (copy_to_user(buf, &tmpbuf, len))
++ return -EFAULT;
++
++ *off += len;
++
++ return len;
++}
++
++#define ASRC_MAX_PROC_BUFFER_SIZE 63
++
++static int asrc_write_proc_attr(struct file *file, const char __user *buffer,
++ size_t count, loff_t *data)
++{
++ char buf[ASRC_MAX_PROC_BUFFER_SIZE];
++ int na, nb, nc;
++ int total;
++
++ if (count > ASRC_MAX_PROC_BUFFER_SIZE) {
++ dev_err(asrc->dev, "proc write: the input string was too long\n");
++ return -EINVAL;
++ }
++
++ if (copy_from_user(buf, buffer, count)) {
++ dev_err(asrc->dev, "proc write: failed to copy buffer from user\n");
++ return -EFAULT;
++ }
++
++ sscanf(buf, "ANCA: %d\nANCB: %d\nANCC: %d", &na, &nb, &nc);
++
++ total = asrc->channel_bits > 3 ? 10 : 5;
++
++ if (na + nb + nc > total) {
++		dev_err(asrc->dev, "total channel number must not exceed %d\n", total);
++ return -EINVAL;
++ } else if (na % 2 != 0 || nb % 2 != 0 || nc % 2 != 0) {
++ dev_err(asrc->dev, "please set an even number for each pair\n");
++ return -EINVAL;
++ } else if (na < 0 || nb < 0 || nc < 0) {
++		dev_err(asrc->dev, "please set a non-negative number for each pair\n");
++ return -EINVAL;
++ }
++
++ asrc->asrc_pair[ASRC_PAIR_A].chn_max = na;
++ asrc->asrc_pair[ASRC_PAIR_B].chn_max = nb;
++ asrc->asrc_pair[ASRC_PAIR_C].chn_max = nc;
++
++ /* Update channel number settings */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_A, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_A, na, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_B, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_B, nb, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_C, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_C, nc, asrc->channel_bits));
++
++ return count;
++}
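++
++/*
++ * Example (illustrative; the proc directory name comes from
++ * ASRC_PROC_PATH): writing "ANCA: 4\nANCB: 4\nANCC: 2" to the ChSettings
++ * entry reassigns the channels per pair, provided each value is even,
++ * non-negative, and the sum does not exceed 10 (or 5 on devices with
++ * 3-bit channel fields).
++ */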
++
++static const struct file_operations asrc_proc_fops = {
++ .read = asrc_read_proc_attr,
++ .write = asrc_write_proc_attr,
++};
++
++static void asrc_proc_create(void)
++{
++ struct proc_dir_entry *proc_attr;
++
++ asrc->proc_asrc = proc_mkdir(ASRC_PROC_PATH, NULL);
++ if (!asrc->proc_asrc) {
++ dev_err(asrc->dev, "failed to create proc entry %s\n", ASRC_PROC_PATH);
++ return;
++ }
++
++ proc_attr = proc_create("ChSettings", S_IFREG | S_IRUGO | S_IWUSR,
++ asrc->proc_asrc, &asrc_proc_fops);
++ if (!proc_attr) {
++ remove_proc_entry(ASRC_PROC_PATH, NULL);
++ dev_err(asrc->dev, "failed to create proc attribute entry\n");
++ }
++}
++
++static void asrc_proc_remove(void)
++{
++ remove_proc_entry("ChSettings", asrc->proc_asrc);
++ remove_proc_entry(ASRC_PROC_PATH, NULL);
++}
++
++static bool asrc_readable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRCTR:
++ case REG_ASRIER:
++ case REG_ASRCNCR:
++ case REG_ASRCFG:
++ case REG_ASRCSR:
++ case REG_ASRCDR1:
++ case REG_ASRCDR2:
++ case REG_ASRSTR:
++ case REG_ASRPM1:
++ case REG_ASRPM2:
++ case REG_ASRPM3:
++ case REG_ASRPM4:
++ case REG_ASRPM5:
++ case REG_ASRTFR1:
++ case REG_ASRCCR:
++ case REG_ASRDOA:
++ case REG_ASRDOB:
++ case REG_ASRDOC:
++ case REG_ASRIDRHA:
++ case REG_ASRIDRLA:
++ case REG_ASRIDRHB:
++ case REG_ASRIDRLB:
++ case REG_ASRIDRHC:
++ case REG_ASRIDRLC:
++ case REG_ASR76K:
++ case REG_ASR56K:
++ case REG_ASRMCRA:
++ case REG_ASRFSTA:
++ case REG_ASRMCRB:
++ case REG_ASRFSTB:
++ case REG_ASRMCRC:
++ case REG_ASRFSTC:
++ case REG_ASRMCR1A:
++ case REG_ASRMCR1B:
++ case REG_ASRMCR1C:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool asrc_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRCTR:
++ case REG_ASRIER:
++ case REG_ASRCNCR:
++ case REG_ASRCFG:
++ case REG_ASRCSR:
++ case REG_ASRCDR1:
++ case REG_ASRCDR2:
++ case REG_ASRSTR:
++ case REG_ASRPM1:
++ case REG_ASRPM2:
++ case REG_ASRPM3:
++ case REG_ASRPM4:
++ case REG_ASRPM5:
++ case REG_ASRTFR1:
++ case REG_ASRCCR:
++ case REG_ASRDIA:
++ case REG_ASRDIB:
++ case REG_ASRDIC:
++ case REG_ASRIDRHA:
++ case REG_ASRIDRLA:
++ case REG_ASRIDRHB:
++ case REG_ASRIDRLB:
++ case REG_ASRIDRHC:
++ case REG_ASRIDRLC:
++ case REG_ASR76K:
++ case REG_ASR56K:
++ case REG_ASRMCRA:
++ case REG_ASRMCRB:
++ case REG_ASRMCRC:
++ case REG_ASRMCR1A:
++ case REG_ASRMCR1B:
++ case REG_ASRMCR1C:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static struct regmap_config asrc_regmap_config = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++
++ .max_register = REG_ASRMCR1C,
++ .readable_reg = asrc_readable_reg,
++ .writeable_reg = asrc_writeable_reg,
++};
++
++static int mxc_asrc_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *of_id = of_match_device(fsl_asrc_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
++ enum mxc_asrc_type devtype;
++ struct resource *res;
++ void __iomem *regs;
++ int ret;
++
++	/* Check that the device tree node exists */
++ if (!np)
++ return -ENODEV;
++
++ asrc = devm_kzalloc(&pdev->dev, sizeof(struct asrc_data), GFP_KERNEL);
++ if (!asrc)
++ return -ENOMEM;
++
++ if (of_id) {
++ const struct platform_device_id *id_entry = of_id->data;
++ devtype = id_entry->driver_data;
++ } else {
++ devtype = pdev->id_entry->driver_data;
++ }
++
++ asrc->dev = &pdev->dev;
++ asrc->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ asrc->asrc_pair[ASRC_PAIR_A].chn_max = 2;
++ asrc->asrc_pair[ASRC_PAIR_B].chn_max = 6;
++ asrc->asrc_pair[ASRC_PAIR_C].chn_max = 2;
++ asrc->asrc_pair[ASRC_PAIR_A].overload_error = 0;
++ asrc->asrc_pair[ASRC_PAIR_B].overload_error = 0;
++ asrc->asrc_pair[ASRC_PAIR_C].overload_error = 0;
++
++ /* Map the address */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(&pdev->dev, "could not determine device resources\n");
++		return -ENODEV;
++ }
++
++ regs = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(regs)) {
++ dev_err(&pdev->dev, "could not map device resources\n");
++ return PTR_ERR(regs);
++ }
++ asrc->paddr = res->start;
++
++ /* Register regmap and let it prepare core clock */
++ asrc->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
++ "core", regs, &asrc_regmap_config);
++ if (IS_ERR(asrc->regmap)) {
++ dev_err(&pdev->dev, "regmap init failed\n");
++ return PTR_ERR(asrc->regmap);
++ }
++
++ asrc->irq = platform_get_irq(pdev, 0);
++ if (asrc->irq == NO_IRQ) {
++ dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
++ return asrc->irq;
++ }
++
++ ret = devm_request_irq(&pdev->dev, asrc->irq, asrc_isr, 0, np->name, NULL);
++ if (ret) {
++ dev_err(&pdev->dev, "could not claim irq %u: %d\n", asrc->irq, ret);
++ return ret;
++ }
++
++ asrc->asrc_clk = devm_clk_get(&pdev->dev, "core");
++ if (IS_ERR(asrc->asrc_clk)) {
++ dev_err(&pdev->dev, "failed to get core clock\n");
++ return PTR_ERR(asrc->asrc_clk);
++ }
++
++ asrc->dma_clk = devm_clk_get(&pdev->dev, "dma");
++ if (IS_ERR(asrc->dma_clk)) {
++ dev_err(&pdev->dev, "failed to get dma script clock\n");
++ return PTR_ERR(asrc->dma_clk);
++ }
++
++ switch (devtype) {
++ case IMX35_ASRC:
++ asrc->channel_bits = 3;
++ input_clk_map = input_clk_map_v1;
++ output_clk_map = output_clk_map_v1;
++ break;
++ case IMX53_ASRC:
++ asrc->channel_bits = 4;
++ input_clk_map = input_clk_map_v2;
++ output_clk_map = output_clk_map_v2;
++ break;
++ default:
++ dev_err(&pdev->dev, "unsupported device type\n");
++ return -EINVAL;
++ }
++
++ ret = misc_register(&asrc_miscdev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register char device %d\n", ret);
++ return ret;
++ }
++
++ asrc_proc_create();
++
++ ret = mxc_init_asrc();
++ if (ret) {
++ dev_err(&pdev->dev, "failed to init asrc %d\n", ret);
++ goto err_misc;
++ }
++
++ dev_info(&pdev->dev, "mxc_asrc registered\n");
++
++ return ret;
++
++err_misc:
++ misc_deregister(&asrc_miscdev);
++
++ return ret;
++}
++
++static int mxc_asrc_remove(struct platform_device *pdev)
++{
++ asrc_proc_remove();
++ misc_deregister(&asrc_miscdev);
++
++ return 0;
++}
++
++static struct platform_driver mxc_asrc_driver = {
++ .driver = {
++ .name = "mxc_asrc",
++ .of_match_table = fsl_asrc_ids,
++ },
++ .probe = mxc_asrc_probe,
++ .remove = mxc_asrc_remove,
++};
++
++module_platform_driver(mxc_asrc_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Asynchronous Sample Rate Converter");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_asrc");
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/Kconfig linux-3.14.40/drivers/mxc/gpu-viv/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/Kconfig 2015-05-01 14:57:59.507427001 -0500
+@@ -0,0 +1,20 @@
++menu "MXC Vivante GPU support"
++ depends on SOC_IMX6Q
++
++config MXC_GPU_VIV
++ tristate "MXC Vivante GPU support"
++ ---help---
++ Say Y to get the GPU driver support.
++choice
++ prompt "Galcore Version"
++ default MXC_GPU_VIV_V5
++
++config MXC_GPU_VIV_V5
++ bool "Galcore Version 5.x"
++
++config MXC_GPU_VIV_V4
++ bool "Galcore Version 4.x"
++
++endchoice
++
++endmenu
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c 2015-05-01 14:57:59.507427001 -0500
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_InitializeInfo
++**
++** Initialize architecture dependent command buffer information.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ /* Reset interrupts. */
++ Command->info.feBufferInt = -1;
++ Command->info.tsOverflowInt = -1;
++
++ /* Set command buffer attributes. */
++ Command->info.addressAlignment = 64;
++ Command->info.commandAlignment = 8;
++
++ /* Determine command alignment address mask. */
++ Command->info.addressMask = ((((gctUINT32) (Command->info.addressAlignment - 1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0 ) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Query the number of bytes needed by the STATE command. */
++ gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ Command, 0x0, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.stateCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RESTART command. */
++ gcmkERR_BREAK(gckVGCOMMAND_RestartCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.restartCommandSize
++ ));
++
++ /* Query the number of bytes needed by the FETCH command. */
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.fetchCommandSize
++ ));
++
++ /* Query the number of bytes needed by the CALL command. */
++ gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.callCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RETURN command. */
++ gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++ Command, gcvNULL,
++ &Command->info.returnCommandSize
++ ));
++
++ /* Query the number of bytes needed by the EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, gcvNULL, gcvBLOCK_PIXEL, -1,
++ &Command->info.eventCommandSize
++ ));
++
++ /* Query the number of bytes needed by the END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command, gcvNULL, -1,
++ &Command->info.endCommandSize
++ ));
++
++ /* Determine the tail reserve size. */
++ Command->info.staticTailSize = gcmMAX(
++ Command->info.fetchCommandSize,
++ gcmMAX(
++ Command->info.returnCommandSize,
++ Command->info.endCommandSize
++ )
++ );
++
++ /* Determine the maximum tail size. */
++ Command->info.dynamicTailSize
++ = Command->info.staticTailSize
++ + Command->info.eventCommandSize * gcvBLOCK_COUNT;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_StateCommand
++**
++** Append a STATE command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctUINT32 Pipe
++** Hardware destination pipe.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** STATE command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 Address
++** Starting register address of the state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT32 Count
++** Number of states in state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the STATE command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the STATE command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Pipe=0x%x Logical=0x%x Address=0x%x Count=0x%x Bytes = 0x%x",
++ Command, Pipe, Logical, Address, Count, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) ((gctUINT32) (Pipe) & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LOAD_STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
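++
++/*
++ * Decoding note for the packed header words above: on FE 2.0 hardware the
++ * STATE header carries the opcode (0x3) in bits 31:28, the state count in
++ * bits 27:16, the destination pipe in bits 13:12 and the register address
++ * in bits 11:0; on older front ends the LOAD_STATE header carries the
++ * opcode (0x01) in bits 31:27, the count in bits 25:16 and the address in
++ * bits 15:0.
++ */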
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_RestartCommand
++**
++** Form a RESTART command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RESTART command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this RESTART
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this RESTART command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RESTART command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RESTART command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++ gctUINT32 beginEndMark;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Determine Begin/End flag. */
++ beginEndMark = (FetchCount > 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)));
++
++ /* Append RESTART. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x9 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)))
++ | beginEndMark;
++
++ buffer[1]
++ = FetchAddress;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RESTART command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
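++/* The bit-field macros above reduce to a compact layout.  Worked example of
++** the FE 2.0 RESTART encoding (the FetchCount values are illustrative):
++**
++**     FetchCount = 0x10 (non-zero, so the bit-24 marker stays 0):
++**         buffer[0] = (0x9 << 28) | 0x10        = 0x90000010
++**         buffer[1] = FetchAddress
++**
++**     FetchCount = 0 (end marker, bit 24 set):
++**         buffer[0] = (0x9 << 28) | (0x1 << 24) = 0x91000000
++**         buffer[1] = FetchAddress
++*/
++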
++/*******************************************************************************
++**
++** gckVGCOMMAND_FetchCommand
++**
++** Form a FETCH command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** FETCH command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this FETCH
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this FETCH command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the FETCH command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the FETCH command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append FETCH. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x5 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the FETCH command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LINK. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
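++/* On FE 2.0 hardware the function above forms a FETCH; on older front ends
++** it falls back to an equivalent LINK.  Both take 8 bytes.  Worked example
++** for FetchCount = 4 (the FetchCount value is illustrative):
++**
++**     FE 2.0 FETCH : buffer[0] = (0x5  << 28) | 4 = 0x50000004
++**     legacy LINK  : buffer[0] = (0x08 << 27) | 4 = 0x40000004
++**     either case  : buffer[1] = gcmkFIXADDRESS(FetchAddress)
++*/
++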
++/*******************************************************************************
++**
++** gckVGCOMMAND_CallCommand
++**
++** Append a CALL command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** CALL command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this CALL
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this CALL command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the CALL command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the CALL command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append CALL. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x6 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CALL command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_ReturnCommand
++**
++** Append a RETURN command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RETURN command at or gcvNULL to query the size of the command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RETURN command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RETURN command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Bytes = 0x%x",
++ Command, Logical, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append RETURN. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x7 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RETURN command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EventCommand
++**
++** Form an EVENT command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** EVENT command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gceBLOCK Block
++** Block that will generate the interrupt.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Block=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, Block, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ typedef struct _gcsEVENTSTATES
++ {
++ /* Chips before VG21 use these values. */
++ gctUINT eventFromFE;
++ gctUINT eventFromPE;
++
++ /* VG21 chips and later use SOURCE field. */
++ gctUINT eventSource;
++ }
++ gcsEVENTSTATES;
++
++ static gcsEVENTSTATES states[] =
++ {
++ /* gcvBLOCK_COMMAND */
++ {
++ (gctUINT)~0,
++ (gctUINT)~0,
++ (gctUINT)~0
++ },
++
++ /* gcvBLOCK_TESSELLATOR */
++ {
++ 0x0,
++ 0x1,
++ 0x10
++ },
++
++ /* gcvBLOCK_TESSELLATOR2 */
++ {
++ 0x0,
++ 0x1,
++ 0x12
++ },
++
++ /* gcvBLOCK_TESSELLATOR3 */
++ {
++ 0x0,
++ 0x1,
++ 0x14
++ },
++
++ /* gcvBLOCK_RASTER */
++ {
++ 0x0,
++ 0x1,
++ 0x07,
++ },
++
++ /* gcvBLOCK_VG */
++ {
++ 0x0,
++ 0x1,
++ 0x0F
++ },
++
++ /* gcvBLOCK_VG2 */
++ {
++ 0x0,
++ 0x1,
++ 0x11
++ },
++
++ /* gcvBLOCK_VG3 */
++ {
++ 0x0,
++ 0x1,
++ 0x13
++ },
++
++ /* gcvBLOCK_PIXEL */
++ {
++ 0x0,
++ 0x1,
++ 0x07
++ },
++ };
++
++ /* Verify block ID. */
++ gcmkVERIFY_ARGUMENT(gcmIS_VALID_INDEX(Block, states));
++
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++
++ /* Determine chip version. */
++ if (Command->vg21)
++ {
++ /* Get the event source for the block. */
++ gctUINT eventSource = states[Block].eventSource;
++
++ /* Supported? */
++ if (eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) ((gctUINT32) (eventSource) & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ else
++ {
++ /* Get the event source for the block. */
++ gctUINT eventFromFE = states[Block].eventFromFE;
++ gctUINT eventFromPE = states[Block].eventFromPE;
++
++ /* Supported? */
++ if (eventFromFE == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (eventFromFE) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (eventFromPE) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Make sure the events are directly supported for the block. */
++ if (states[Block].eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine event source. */
++ if (Block == gcvBLOCK_COMMAND)
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++ else
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
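++/* Worked example of the FE 2.0 EVENT payload for Block = gcvBLOCK_PIXEL and
++** InterruptId = 5 (values chosen purely for illustration, using the state
++** table above):
++**
++**     VG 2.1 and later (SOURCE field, eventSource = 0x07):
++**         buffer[1] = 5 | (0x07 << 8)             = 0x00000705
++**
++**     earlier chips (eventFromFE = 0x0, eventFromPE = 0x1):
++**         buffer[1] = 5 | (0x0 << 5) | (0x1 << 6) = 0x00000045
++*/
++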
++/*******************************************************************************
++**
++** gckVGCOMMAND_EndCommand
++**
++** Form an END command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** END command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append END. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR memory;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ memory = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ memory[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ memory[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append END. */
++ memory[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 16;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
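++/* Sizing note for the END path above: the FE 2.0 branch reserves a single
++** 8-byte slot, while the legacy branch reports 16 bytes for
++**
++**     EVENT : 2 x 32-bit words                 = 8 bytes
++**     END   : 1 x 32-bit word, padded to the
++**             next 64-bit fetch boundary       = 8 bytes
++**
++** Since the front end consumes commands in 64-bit quantities (see the
++** FetchCount descriptions above), the odd END word is presumably padded out.
++*/
++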
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h 2015-05-01 14:57:59.507427001 -0500
+@@ -0,0 +1,319 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_command_vg_h_
++#define __gc_hal_kernel_hardware_command_vg_h_
++
++/******************************************************************************\
++******************* Task and Interrupt Management Structures. ******************
++\******************************************************************************/
++
++/* Task storage header. */
++typedef struct _gcsTASK_STORAGE * gcsTASK_STORAGE_PTR;
++typedef struct _gcsTASK_STORAGE
++{
++ /* Next allocated storage buffer. */
++ gcsTASK_STORAGE_PTR next;
++}
++gcsTASK_STORAGE;
++
++/* Task container header. */
++typedef struct _gcsTASK_CONTAINER * gcsTASK_CONTAINER_PTR;
++typedef struct _gcsTASK_CONTAINER
++{
++ /* The number of tasks left to be processed in the container. */
++ gctINT referenceCount;
++
++ /* Size of the buffer. */
++ gctUINT size;
++
++ /* Link to the previous and the next allocated containers. */
++ gcsTASK_CONTAINER_PTR allocPrev;
++ gcsTASK_CONTAINER_PTR allocNext;
++
++ /* Link to the previous and the next containers in the free list. */
++ gcsTASK_CONTAINER_PTR freePrev;
++ gcsTASK_CONTAINER_PTR freeNext;
++}
++gcsTASK_CONTAINER;
++
++/* Kernel space task master table entry. */
++typedef struct _gcsBLOCK_TASK_ENTRY * gcsBLOCK_TASK_ENTRY_PTR;
++typedef struct _gcsBLOCK_TASK_ENTRY
++{
++ /* Pointer to the current task container for the block. */
++ gcsTASK_CONTAINER_PTR container;
++
++ /* Pointer to the current task data within the container. */
++ gcsTASK_HEADER_PTR task;
++
++ /* Pointer to the last link task within the container. */
++ gcsTASK_LINK_PTR link;
++
++ /* Number of interrupts allocated for this block. */
++ gctUINT interruptCount;
++
++ /* The index of the current interrupt. */
++ gctUINT interruptIndex;
++
++ /* Interrupt semaphore. */
++ gctSEMAPHORE interruptSemaphore;
++
++ /* Interrupt value array. */
++ gctINT32 interruptArray[32];
++}
++gcsBLOCK_TASK_ENTRY;
++
++
++/******************************************************************************\
++********************* Command Queue Management Structures. *********************
++\******************************************************************************/
++
++/* Command queue kernel element pointer. */
++typedef struct _gcsKERNEL_CMDQUEUE * gcsKERNEL_CMDQUEUE_PTR;
++
++/* Command queue object handler function type. */
++typedef gceSTATUS (* gctOBJECT_HANDLER) (
++ gckVGKERNEL Kernel,
++ gcsKERNEL_CMDQUEUE_PTR Entry
++ );
++
++/* Command queue kernel element. */
++typedef struct _gcsKERNEL_CMDQUEUE
++{
++ /* Command buffer for this queue entry. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Pointer to the object handler function. */
++ gctOBJECT_HANDLER handler;
++}
++gcsKERNEL_CMDQUEUE;
++
++/* Command queue header. */
++typedef struct _gcsKERNEL_QUEUE_HEADER * gcsKERNEL_QUEUE_HEADER_PTR;
++typedef struct _gcsKERNEL_QUEUE_HEADER
++{
++ /* The size of the buffer in bytes. */
++ gctUINT size;
++
++ /* The number of pending entries to be processed. */
++ volatile gctUINT pending;
++
++ /* The current command queue entry. */
++ gcsKERNEL_CMDQUEUE_PTR currentEntry;
++
++ /* Next buffer. */
++ gcsKERNEL_QUEUE_HEADER_PTR next;
++}
++gcsKERNEL_QUEUE_HEADER;
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++/* gckVGCOMMAND object. */
++struct _gckVGCOMMAND
++{
++ /***************************************************************************
++ ** Object data and pointers.
++ */
++
++ gcsOBJECT object;
++ gckVGKERNEL kernel;
++ gckOS os;
++ gckVGHARDWARE hardware;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++
++ /***************************************************************************
++ ** Enable command queue dumping.
++ */
++
++ gctBOOL enableDumping;
++
++
++ /***************************************************************************
++ ** Bus Error interrupt.
++ */
++
++ gctINT32 busErrorInt;
++
++
++ /***************************************************************************
++ ** Command buffer information.
++ */
++
++ gcsCOMMAND_BUFFER_INFO info;
++
++
++ /***************************************************************************
++ ** Synchronization objects.
++ */
++
++ gctPOINTER queueMutex;
++ gctPOINTER taskMutex;
++ gctPOINTER commitMutex;
++
++
++ /***************************************************************************
++ ** Task management.
++ */
++
++ /* The head of the storage buffer linked list. */
++ gcsTASK_STORAGE_PTR taskStorage;
++
++ /* Allocation size. */
++ gctUINT taskStorageGranularity;
++ gctUINT taskStorageUsable;
++
++ /* The free container list. */
++ gcsTASK_CONTAINER_PTR taskFreeHead;
++ gcsTASK_CONTAINER_PTR taskFreeTail;
++
++ /* Task table */
++ gcsBLOCK_TASK_ENTRY taskTable[gcvBLOCK_COUNT];
++
++
++ /***************************************************************************
++ ** Command queue.
++ */
++
++ /* Pointer to the allocated queue memory. */
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++
++ /* Pointer to the current available queue from which new queue entries
++ will be allocated. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* If different from queueHead, points to the command queue which is
++ currently being executed by the hardware. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++
++ /* Points to the queue to merge the tail with when the tail is processed. */
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++
++ /* Queue overflow counter. */
++ gctUINT queueOverflow;
++
++
++ /***************************************************************************
++ ** Context.
++ */
++
++ /* Context counter used for unique ID. */
++ gctUINT64 contextCounter;
++
++ /* Current context ID. */
++ gctUINT64 currentContext;
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++ gctINT32 powerStallInt;
++ gcsCMDBUFFER_PTR powerStallBuffer;
++ gctSIGNAL powerStallSignal;
++
++};
++
++/******************************************************************************\
++************************ gckVGCOMMAND Object Internal API. ***********************
++\******************************************************************************/
++
++/* Initialize architecture dependent command buffer information. */
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ );
++
++/* Form a STATE command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a RESTART command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a FETCH command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a CALL command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a RETURN command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form an EVENT command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form an END command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++#endif /* __gc_hal_kernel_hardware_command_vg_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c 2015-05-01 14:57:59.511427001 -0500
+@@ -0,0 +1,2114 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++ gcvPOWER_FLAG_NOP = 1 << 11,
++}
++gcePOWER_FLAGS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckOS Os
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ /* Read register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00104,
++ 0x00000000));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32 * ChipFeatures,
++ OUT gctUINT32 * ChipMinorFeatures,
++ OUT gctUINT32 * ChipMinorFeatures2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 chipIdentity;
++
++ do
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG, 0x00018, &chipIdentity));
++
++ /* Special case for older graphic cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ *ChipModel = gcv500;
++ *ChipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00020,
++ (gctUINT32 *) ChipModel));
++
++ /* Read CHIP_REV register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00024,
++ ChipRevision));
++ }
++
++ /* Read chip feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x0001C, ChipFeatures
++ ));
++
++ /* Read chip minor feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00034, ChipMinorFeatures
++ ));
++
++ /* Read chip minor feature register #2. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00074, ChipMinorFeatures2
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_VERBOSE,
++ "ChipModel=0x%08X\n"
++ "ChipRevision=0x%08X\n"
++ "ChipFeatures=0x%08X\n"
++ "ChipMinorFeatures=0x%08X\n"
++ "ChipMinorFeatures2=0x%08X\n",
++ *ChipModel,
++ *ChipRevision,
++ *ChipFeatures,
++ *ChipMinorFeatures,
++ *ChipMinorFeatures2
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
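++/* Decode sketch for the old-style identity register (the value 0x01001000 is
++** illustrative): bits 31:24 are 0x01, so the core predates the dedicated ID
++** registers; the model is reported as gcv500 and the revision comes from
++** bits 15:12, here 0x1.  Newer cores skip the special case and read the
++** model and revision directly from registers 0x00020 and 0x00024.
++*/
++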
++#if gcdPOWEROFF_TIMEOUT
++void
++_VGPowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckVGHARDWARE hardware = (gckVGHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++/******************************************************************************\
++****************************** gckVGHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Construct
++**
++** Construct a new gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** OUTPUT:
++**
++** gckVGHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckVGHARDWARE
++** object.
++*/
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ )
++{
++ gckVGHARDWARE hardware = gcvNULL;
++ gceSTATUS status;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x ", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ status = _ResetGPU(Os);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ /* Identify the hardware. */
++ gcmkERR_BREAK(_IdentifyHardware(Os,
++ &chipModel, &chipRevision,
++ &chipFeatures, &chipMinorFeatures, &chipMinorFeatures2
++ ));
++
++ /* Allocate the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckVGHARDWARE), (gctPOINTER *) &hardware
++ ));
++
++ /* Initialize the gckVGHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++
++ /* Set chip identity. */
++ hardware->chipModel = chipModel;
++ hardware->chipRevision = chipRevision;
++ hardware->chipFeatures = chipFeatures;
++ hardware->chipMinorFeatures = chipMinorFeatures;
++ hardware->chipMinorFeatures2 = chipMinorFeatures2;
++
++ hardware->powerMutex = gcvNULL;
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->chipPowerStateGlobal = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTime = 0;
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _VGPowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ /* Determine whether FE 2.0 is present. */
++ hardware->fe20 = ((((gctUINT32) (hardware->chipFeatures)) >> (0 ? 28:28) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))));
++
++ /* Determine whether VG 2.0 is present. */
++ hardware->vg20 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 13:13) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))));
++
++ /* Determine whether VG 2.1 is present. */
++ hardware->vg21 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++
++ /* Set default event mask. */
++ hardware->eventMask = 0xFFFFFFFF;
++
++ gcmkERR_BREAK(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++ /* Set fast clear to auto. */
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(hardware, -1));
++
++ gcmkERR_BREAK(gckOS_CreateMutex(Os, &hardware->powerMutex));
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Return pointer to the gckVGHARDWARE object. */
++ *Hardware = hardware;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++#if gcdPOWEROFF_TIMEOUT
++ /* 'hardware' is still gcvNULL if the allocation itself failed. */
++ if ((hardware != gcvNULL) && (hardware->powerOffTimer != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if ((hardware != gcvNULL) && (hardware->pageTableDirty != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Free(Os, hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvFALSE, gcvFALSE));
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Destroy
++**
++** Destroy an gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ if (Hardware->powerMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(
++ Hardware->os, Hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ if (Hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++ }
++
++ /* Free the object. */
++ status = gckOS_Free(Hardware->os, Hardware);
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the required alignment for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the required alignment for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x InternalSize=0x%x InternalBaseAddress=0x%x InternalAlignment=0x%x"
++ "ExternalSize=0x%x ExternalBaseAddress=0x%x ExternalAlignment=0x%x HorizontalTileSize=0x%x VerticalTileSize=0x%x",
++ Hardware, InternalSize, InternalBaseAddress, InternalAlignment,
++ ExternalSize, ExternalBaseAddress, ExternalAlignment, HorizontalTileSize, VerticalTileSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gceCHIPMODEL * ChipModel
++** If 'ChipModel' is not gcvNULL, the variable it points to will
++** receive the model of the chip.
++**
++** gctUINT32 * ChipRevision
++** If 'ChipRevision' is not gcvNULL, the variable it points to will
++** receive the revision of the chip.
++**
++** gctUINT32 * ChipFeatures
++** If 'ChipFeatures' is not gcvNULL, the variable it points to will
++** receive the feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures
++** If 'ChipMinorFeatures' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures2
++** If 'ChipMinorFeatures2' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures2
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x ChipModel=0x%x ChipRevision=0x%x ChipFeatures = 0x%x ChipMinorFeatures = 0x%x ChipMinorFeatures2 = 0x%x",
++ Hardware, ChipModel, ChipRevision, ChipFeatures, ChipMinorFeatures, ChipMinorFeatures2);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Return chip model. */
++ if (ChipModel != gcvNULL)
++ {
++ *ChipModel = Hardware->chipModel;
++ }
++
++ /* Return revision number. */
++ if (ChipRevision != gcvNULL)
++ {
++ *ChipRevision = Hardware->chipRevision;
++ }
++
++ /* Return feature set. */
++ if (ChipFeatures != gcvNULL)
++ {
++ gctUINT32 features = Hardware->chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 since it did not have this *\
++ \* bit. */
++ if ((Hardware->chipModel == gcv500)
++ && (Hardware->chipRevision == 0)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ /* Mark 2D pipe as available for GC300 since it did not have this *\
++ \* bit. */
++ if (Hardware->chipModel == gcv300)
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ *ChipFeatures = features;
++ }
++
++ /* Return minor feature set. */
++ if (ChipMinorFeatures != gcvNULL)
++ {
++ *ChipMinorFeatures = Hardware->chipMinorFeatures;
++ }
++
++ /* Return minor feature set #2. */
++ if (ChipMinorFeatures2 != gcvNULL)
++ {
++ *ChipMinorFeatures2 = Hardware->chipMinorFeatures2;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertFormat
++**
++** Convert an API format to hardware parameters.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gceSURF_FORMAT Format
++** API format to convert.
++**
++** OUTPUT:
++**
++** gctUINT32 * BitsPerPixel
++** Pointer to a variable that will hold the number of bits per pixel.
++**
++** gctUINT32 * BytesPerTile
++** Pointer to a variable that will hold the number of bytes per tile.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ )
++{
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bytesPerTile;
++
++ gcmkHEADER_ARG("Hardware=0x%x Format=0x%x BitsPerPixel=0x%x BytesPerTile = 0x%x",
++ Hardware, Format, BitsPerPixel, BytesPerTile);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Dispatch on format. */
++ switch (Format)
++ {
++ case gcvSURF_A1:
++ case gcvSURF_L1:
++ /* 1-bpp format. */
++ bitsPerPixel = 1;
++ bytesPerTile = (1 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_A4:
++ /* 4-bpp format. */
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_INDEX8:
++ case gcvSURF_A8:
++ case gcvSURF_L8:
++ /* 8-bpp format. */
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_YV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_NV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ /* 4444 variations. */
++ case gcvSURF_X4R4G4B4:
++ case gcvSURF_A4R4G4B4:
++ case gcvSURF_R4G4B4X4:
++ case gcvSURF_R4G4B4A4:
++ case gcvSURF_B4G4R4X4:
++ case gcvSURF_B4G4R4A4:
++ case gcvSURF_X4B4G4R4:
++ case gcvSURF_A4B4G4R4:
++
++ /* 1555 variations. */
++ case gcvSURF_X1R5G5B5:
++ case gcvSURF_A1R5G5B5:
++ case gcvSURF_R5G5B5X1:
++ case gcvSURF_R5G5B5A1:
++ case gcvSURF_X1B5G5R5:
++ case gcvSURF_A1B5G5R5:
++ case gcvSURF_B5G5R5X1:
++ case gcvSURF_B5G5R5A1:
++
++ /* 565 variations. */
++ case gcvSURF_R5G6B5:
++ case gcvSURF_B5G6R5:
++
++ case gcvSURF_A8L8:
++ case gcvSURF_YUY2:
++ case gcvSURF_UYVY:
++ case gcvSURF_D16:
++ /* 16-bpp format. */
++ bitsPerPixel = 16;
++ bytesPerTile = (16 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_X8R8G8B8:
++ case gcvSURF_A8R8G8B8:
++ case gcvSURF_X8B8G8R8:
++ case gcvSURF_A8B8G8R8:
++ case gcvSURF_R8G8B8X8:
++ case gcvSURF_R8G8B8A8:
++ case gcvSURF_B8G8R8X8:
++ case gcvSURF_B8G8R8A8:
++ case gcvSURF_D32:
++ /* 32-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_D24S8:
++ /* 24-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT1:
++ case gcvSURF_ETC1:
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT2:
++ case gcvSURF_DXT3:
++ case gcvSURF_DXT4:
++ case gcvSURF_DXT5:
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ default:
++ /* Invalid format. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Set the result. */
++ if (BitsPerPixel != gcvNULL)
++ {
++ * BitsPerPixel = bitsPerPixel;
++ }
++
++ if (BytesPerTile != gcvNULL)
++ {
++ * BytesPerTile = bytesPerTile;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Pool=0x%x Offset = 0x%x",
++ Hardware, Address, Pool, Offset);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 1:0)) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x2:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = ((((gctUINT32) (Address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address of the command buffer.
++**
++** gctSIZE_T Count
++** Number of command-sized data units to be executed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Count=0x%x",
++ Hardware, Address, Count);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ /* Enable all events. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00014,
++ Hardware->eventMask
++ ));
++
++ if (Hardware->fe20)
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00500,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00504,
++ Count
++ ));
++ }
++ else
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00654,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00658,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_AlignToTile
++**
++** Align the specified width and height to tile boundaries.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gceSURF_TYPE Type
++** Type of alignment.
++**
++** gctUINT32 * Width
++** Pointer to the width to be aligned. If 'Width' is gcvNULL, no width
++** will be aligned.
++**
++** gctUINT32 * Height
++** Pointer to the height to be aligned. If 'Height' is gcvNULL, no height
++** will be aligned.
++**
++** OUTPUT:
++**
++** gctUINT32 * Width
++** Pointer to a variable that will receive the aligned width.
++**
++** gctUINT32 * Height
++** Pointer to a variable that will receive the aligned height.
++*/
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32 * Width,
++ IN OUT gctUINT32 * Height
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Type=0x%x Width=0x%x Height=0x%x",
++ Hardware, Type, Width, Height);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Width != gcvNULL)
++ {
++ /* Align the width. */
++ *Width = gcmALIGN(*Width, (Type == gcvSURF_TEXTURE) ? 4 : 16);
++ }
++
++ if (Height != gcvNULL)
++ {
++ /* Special case for VG images. */
++ if ((*Height == 0) && (Type == gcvSURF_IMAGE))
++ {
++ *Height = 4;
++ }
++ else
++ {
++ /* Align the height. */
++ *Height = gcmALIGN(*Height, 4);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Address=0x%x",
++ Hardware, Logical, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ /* Convert logical address into a physical address. */
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(
++ Hardware->os, Logical, &address
++ ));
++
++ /* Return hardware specific address. */
++ *Address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++** Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x SystemSize=0x%x SystemBaseAddress=0x%x",
++ Hardware, SystemSize, SystemBaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = (gctSIZE_T)(1 << 31);
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x",
++ Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ do
++ {
++ /* Convert the logical address into a hardware address. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertLogical(Hardware, Logical, &address) );
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00400,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00404,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00408,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x0040C,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00410,
++ gcmkFIXADDRESS(address)) );
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT32_PTR buffer;
++
++ /* Create a shortcut to the command object. */
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &commandBuffer, (gctPOINTER *) &buffer
++ ));
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Index=0x%x Offset=0x%x Address=0x%x",
++ Hardware, Index, Offset, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ address = (Index << 12) | Offset;
++
++ /* Set virtual type. */
++ address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Set the result. */
++ *Address = address;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x Data=0x%x", Hardware, Data);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* Read register and return. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, Data);
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ )
++{
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ if (!(((((gctUINT32) (Hardware->chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ if (Enable == -1)
++ {
++ Enable = (Hardware->chipModel > gcv500) ||
++ ((Hardware->chipModel == gcv500) && (Hardware->chipRevision >= 3));
++ }
++
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ &debug));
++
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++#ifdef AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION)));
++#endif
++
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ debug));
++
++ Hardware->allowFastClear = Enable;
++
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x IDs=0x%x", Hardware, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IDs != gcvNULL);
++
++ /* Read AQIntrAcknowledge register. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00010,
++ IDs);
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS _CommandStall(
++ gckVGHARDWARE Hardware)
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gctUINT32_PTR buffer;
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &command->powerStallBuffer,
++ (gctPOINTER *) &buffer
++ ));
++
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ command, buffer, gcvBLOCK_PIXEL,
++ command->powerStallInt, gcvNULL));
++
++ gcmkERR_BREAK(gckVGCOMMAND_Execute(
++ command,
++ command->powerStallBuffer
++ ));
++
++ /* Wait the signal. */
++ gcmkERR_BREAK(gckOS_WaitSignal(
++ command->os,
++ command->powerStallSignal,
++ gcdGPU_TIMEOUT));
++
++
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag/*, clock*/;
++
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL commitMutex = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++
++ gctBOOL broadcast = gcvFALSE;
++ gctUINT32 process, thread;
++ gctBOOL global = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_NOP,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_NOP,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d",
++ State);
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckVGCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON for automatic power management. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Acquire the power mutex. */
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE)
++ {
++ /* gcvPOWER_IDLE_BROADCAST is from IST,
++ ** so waiting here would cause a deadlock
++ ** if the lock holder calls gckCOMMAND_Stall(). */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ /*clock = clocks[State];*/
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* internal power control */
++ if (!global)
++ {
++ if (Hardware->chipPowerStateGlobal == gcvPOWER_OFF)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os,
++ command->commitMutex,
++ gcvINFINITE
++ ));
++
++ commitMutex = gcvTRUE;
++
++ gcmkONERROR(_CommandStall(Hardware));
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState)
++ {
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ gcmkONERROR(gckVGHARDWARE_SetMMU(Hardware, Hardware->kernel->mmu->pageTableLogical));
++
++ /* Force the command queue to reload the next context. */
++ command->currentContext = 0;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ gcvCORE_VG,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++ if (global)
++ {
++ /* Save the new power state. */
++ Hardware->chipPowerStateGlobal = State;
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOffTimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++ /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagement
++**
++** Configure the GPU power management function.
++** Only used during the driver initialization stage.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to a gckVGHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power management state.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++#if gcdPOWEROFF_TIMEOUT
++ Hardware->powerOffTimeout = Timeout;
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++#if gcdPOWEROFF_TIMEOUT
++ *Timeout = Hardware->powerOffTimeout;
++#endif
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 8:8)) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 10:10)) & ((gctUINT32) ((((1 ? 10:10) - (0 ? 10:10) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 10:10) - (0 ? 10:10) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ *IsIdle = gcvTRUE;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h 2015-05-01 14:57:59.511427001 -0500
+@@ -0,0 +1,75 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_vg_h_
++#define __gc_hal_kernel_hardware_vg_h_
++
++/* gckHARDWARE object. */
++struct _gckVGHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckVGKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Chip characteristics. */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++ gctBOOL allowFastClear;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++ /* Event mask. */
++ gctUINT32 eventMask;
++
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gceCHIPPOWERSTATE chipPowerStateGlobal;
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++ gctPOINTER pageTableDirty;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctBOOL powerManagement;
++};
++
++#endif /* __gc_hal_kernel_hardware_vg_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c 2015-05-01 14:57:59.511427001 -0500
+@@ -0,0 +1,1735 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++#include "gc_hal_kernel_buffer.h"
++
++/******************************************************************************\
++******************************** Debugging Macro *******************************
++\******************************************************************************/
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++
++/******************************************************************************\
++************************** Context State Buffer Helpers ************************
++\******************************************************************************/
++
++#define _STATE(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT(reg, count) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT_OFFSET(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_MIRROR_COUNT(reg, mirror, count) \
++ _StateMirror(\
++ Context, \
++ reg ## _Address >> 2, \
++ count, \
++ mirror ## _Address >> 2 \
++ )
++
++#define _STATE_HINT(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_HINT_BLOCK(reg, block, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + (block << reg ## _BLK), \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_X(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvTRUE, gcvFALSE \
++ )
++
++#define _CLOSE_RANGE() \
++ _TerminateStateBlock(Context, index)
++
++#define _ENABLE(reg, field) \
++ do \
++ { \
++ if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \
++ { \
++ enable |= gcmFIELDMASK(reg, field); \
++ } \
++ } \
++ while (gcvFALSE)
++
++#define _BLOCK_COUNT(reg) \
++ ((reg ## _Count) >> (reg ## _BLK))
++
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++#define gcdSTATE_MASK \
++ (((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 | 0xC0FFEE & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))))
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_TerminateStateBlock(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T align;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ /* Flush the current state block; make sure no pairing with the states
++ to follow happens. */
++ if (align && (buffer != gcvNULL))
++ {
++ buffer[Index] = 0xDEADDEAD;
++ }
++
++ /* Reset last address. */
++ Context->lastAddress = ~0U;
++
++ /* Return alignment requirement. */
++ return align;
++}
++#endif
++
++
++static gctSIZE_T
++_FlushPipe(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Flush the current pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Flushing 3D pipe takes 6 slots. */
++ return 6;
++}
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_SemaphoreStall(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Semaphore/stall takes 4 slots. */
++ return 4;
++}
++#endif
++
++static gctSIZE_T
++_SwitchPipe(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer
++ = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++ }
++
++ return 2;
++}
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_State(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gctUINT32 Address,
++ IN gctUINT32 Value,
++ IN gctSIZE_T Size,
++ IN gctBOOL FixedPoint,
++ IN gctBOOL Hinted
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T align, i;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ if ((buffer == gcvNULL) && (Address + Size > Context->stateCount))
++ {
++ /* Determine maximum state. */
++ Context->stateCount = Address + Size;
++ }
++
++ /* Do we need a new entry? */
++ if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed))
++ {
++ if (buffer != gcvNULL)
++ {
++ if (align)
++ {
++ /* Add filler. */
++ buffer[Index++] = 0xDEADDEAD;
++ }
++
++ /* LoadState(Address, Count). */
++ gcmkASSERT((Index & 1) == 0);
++
++ if (FixedPoint)
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++ else
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ /* Walk all the states. */
++ for (i = 0; i < Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + 1 + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = Index + 1 + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Save information for this LoadState. */
++ Context->lastIndex = Index;
++ Context->lastAddress = Address + Size;
++ Context->lastSize = Size;
++ Context->lastFixed = FixedPoint;
++
++ /* Return size for load state. */
++ return align + 1 + Size;
++ }
++
++ /* Append this state to the previous one. */
++ if (buffer != gcvNULL)
++ {
++ /* Update last load state. */
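++        /* Widen the count field (bits 25:16) of the previous LOAD_STATE
++           header so it also covers the states appended below. */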
++ buffer[Context->lastIndex] =
++ ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Walk all the states. */
++ for (i = 0; i < Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = Index + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Update last address and size. */
++ Context->lastAddress += Size;
++ Context->lastSize += Size;
++
++ /* Return number of slots required. */
++ return Size;
++}
++
++static gctSIZE_T
++_StateMirror(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Size,
++ IN gctUINT32 AddressMirror
++ )
++{
++ gctSIZE_T i;
++
++ /* Process when buffer is set. */
++ if (Context->buffer != gcvNULL)
++ {
++ /* Walk all states. */
++ for (i = 0; i < Size; i++)
++ {
++ /* Copy the mapping address. */
++ Context->map[Address + i].index =
++ Context->map[AddressMirror + i].index;
++ }
++ }
++
++ /* Return the number of required maps. */
++ return Size;
++}
++#endif
++
++static gceSTATUS
++_InitializeContextBuffer(
++ IN gckCONTEXT Context
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T index;
++
++#if !defined(VIVANTE_NO_3D)
++ gctUINT i;
++ gctUINT vertexUniforms, fragmentUniforms;
++ gctUINT fe2vsCount;
++ gctBOOL halti0;
++#endif
++
++ /* Reset the buffer index. */
++ index = 0;
++
++ /* Reset the last state address. */
++ Context->lastAddress = ~0U;
++
++ /* Get the buffer pointer. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++
++ /**************************************************************************/
++ /* Build 2D states. *******************************************************/
++
++
++#if !defined(VIVANTE_NO_3D)
++ /**************************************************************************/
++ /* Build 3D states. *******************************************************/
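++    /* HALTI0 support is reported in bit 23 of chipMinorFeatures1. */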
++ halti0 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) );
++
++ /* Query shader support. */
++ gcmkVERIFY_OK(gckHARDWARE_QueryShaderCaps(
++ Context->hardware, &vertexUniforms, &fragmentUniforms, gcvNULL));
++
++ /* Store the 3D entry index. */
++ Context->entryOffset3D = index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Current context pointer. */
++#if gcdDEBUG
++ index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ index += _FlushPipe(Context, index, gcvPIPE_3D);
++
++ /* Global states. */
++ index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Front End states. */
++ fe2vsCount = 12;
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++ index += _State(Context, index, 0x00600 >> 2, 0x00000000, fe2vsCount, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE);
++
++ /* Vertex Shader states. */
++ index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00808 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ if (Context->hardware->identity.instructionCount <= 256)
++ {
++ index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE);
++
++ /* Primitive Assembly states. */
++ index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A40 >> 2, 0x00000000, 10, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Setup states. */
++ index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++
++ /* Raster states. */
++ index += _State(Context, index, 0x00E00 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E08 >> 2, 0x00000031, 1, gcvFALSE, gcvFALSE);
++
++ /* Pixel Shader states. */
++ index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE);
++ if (Context->hardware->identity.instructionCount <= 256)
++ {
++ index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE);
++
++ /* Texture states. */
++ index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ))
++ {
++ gctUINT texBlockCount;
++
++ /* New texture block. */
++ index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10100 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ for (i = 0; i < 256 / 16; i += 1)
++ {
++ index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, 14, gcvFALSE, gcvFALSE);
++ }
++ index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 15:15)) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ }
++
++ if ((Context->hardware->identity.chipModel == gcv2000)
++ && (Context->hardware->identity.chipRevision == 0x5108))
++ {
++ texBlockCount = 12;
++ }
++ else
++ {
++ texBlockCount = ((512) >> (4));
++ }
++ for (i = 0; i < texBlockCount; i += 1)
++ {
++ index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ /* YUV. */
++ index += _State(Context, index, 0x01678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0167C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01680 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01684 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01688 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0168C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01690 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01694 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01698 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0169C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Thread walker states. */
++ index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (Context->hardware->identity.instructionCount > 1024)
++ {
++ /* New Shader instruction memory. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ else if (Context->hardware->identity.instructionCount > 256)
++ {
++ /* New Shader instruction memory. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* VX instruction memory. */
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2 , 0x0C000 >> 2);
++ }
++
++ /* Store the index of the "XD" entry. */
++ Context->entryOffsetXDFrom3D = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Pixel Engine states. */
++ index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0145C >> 2, 0x00000010, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Composition states. */
++ index += _State(Context, index, 0x03008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (Context->hardware->identity.pixelPipes == 1)
++ {
++ index += _State(Context, index, 0x01460 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ for (i = 0; i < 2; i++)
++ {
++ index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ if (Context->hardware->identity.pixelPipes > 1 || halti0)
++ {
++ index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ /* Resolve states. */
++ index += _State(Context, index, 0x01604 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01608 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0160C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01610 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01614 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01620 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01630 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01640 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0163C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (Context->hardware->identity.pixelPipes > 1)
++ {
++ index += _State(Context, index, (0x016C0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, (0x016E0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01700 >> 2, 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvFALSE);
++ }
++
++ /* Tile status. */
++ index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Semaphore/stall. */
++ index += _SemaphoreStall(Context, index);
++#endif
++
++ /**************************************************************************/
++ /* Link to another address. ***********************************************/
++
++ Context->linkIndex3D = index;
++
++ if (buffer != gcvNULL)
++ {
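++        /* Reserve a LINK command (opcode 0x08) with a zero count and a zero
++           target address; it is presumably patched later through the link3D
++           pointer recorded in gckCONTEXT_Construct. */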
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++ /* Store the end of the context buffer. */
++ Context->bufferSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /**************************************************************************/
++ /* Pipe switch for the case where neither 2D nor 3D are used. *************/
++
++ /* Store the 3D entry index. */
++ Context->entryOffsetXDFrom2D = index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Store the location of the link. */
++ Context->linkIndexXD = index;
++
++ if (buffer != gcvNULL)
++ {
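++        /* Reserve a LINK command (opcode 0x08); gckCONTEXT_Construct later
++           overwrites it with a real LINK to the XD entry point via
++           gckHARDWARE_Link. */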
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++
++ /**************************************************************************/
++ /* Save size for buffer. **************************************************/
++
++ Context->totalSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_DestroyContext(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ if (Context != gcvNULL)
++ {
++ gcsCONTEXT_PTR bufferHead;
++
++ /* Free context buffers. */
++ for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
++ {
++ /* Get a shortcut to the current buffer. */
++ gcsCONTEXT_PTR buffer = Context->buffer;
++
++ /* Get the next buffer. */
++ gcsCONTEXT_PTR next = buffer->next;
++
++ /* Last item? */
++ if (next == bufferHead)
++ {
++ next = gcvNULL;
++ }
++
++ /* Destroy the signal. */
++ if (buffer->signal != gcvNULL)
++ {
++ gcmkONERROR(gckOS_DestroySignal(
++ Context->os, buffer->signal
++ ));
++
++ buffer->signal = gcvNULL;
++ }
++
++            /* Free the context buffer memory. */
++ if (buffer->logical != gcvNULL)
++ {
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++
++#else
++ gcmkONERROR(gckEVENT_FreeContiguousMemory(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++#endif
++
++ buffer->logical = gcvNULL;
++ }
++
++ /* Free context buffer. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));
++
++ /* Remove from the list. */
++ Context->buffer = next;
++ }
++
++#if gcdSECURE_USER
++ /* Free the hint array. */
++ if (Context->hint != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
++ }
++#endif
++ /* Free record array copy. */
++ if (Context->recordArray != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArray));
++ }
++
++ /* Free the state mapping. */
++ if (Context->map != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->map));
++ }
++
++ /* Mark the gckCONTEXT object as unknown. */
++ Context->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCONTEXT object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
++ }
++
++OnError:
++ return status;
++}
++
++
++/******************************************************************************\
++**************************** Context Management API ****************************
++\******************************************************************************/
++
++/******************************************************************************\
++**
++** gckCONTEXT_Construct
++**
++** Construct a new gckCONTEXT object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gckHARDWARE Hardware
++** Pointer to gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++**          Pointer to a variable that will receive the gckCONTEXT object
++** pointer.
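++**
++** EXAMPLE (hypothetical caller, shown for illustration only):
++**
++**     gckCONTEXT context = gcvNULL;
++**     gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, ProcessID, &context));
++**     ... use the context, e.g. via gckCONTEXT_Update ...
++**     gcmkVERIFY_OK(gckCONTEXT_Destroy(context));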
++*/
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gctSIZE_T allocationSize;
++ gctUINT i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++
++
++ /**************************************************************************/
++ /* Allocate and initialize basic fields of gckCONTEXT. ********************/
++
++ /* The context object size. */
++ allocationSize = gcmSIZEOF(struct _gckCONTEXT);
++
++ /* Allocate the object. */
++ gcmkONERROR(gckOS_Allocate(
++ Os, allocationSize, &pointer
++ ));
++
++ context = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));
++
++ /* Initialize the gckCONTEXT object. */
++ context->object.type = gcvOBJ_CONTEXT;
++ context->os = Os;
++ context->hardware = Hardware;
++
++
++#if defined(VIVANTE_NO_3D)
++ context->entryPipe = gcvPIPE_2D;
++ context->exitPipe = gcvPIPE_2D;
++#elif gcdCMD_NO_2D_CONTEXT
++ context->entryPipe = gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#else
++ context->entryPipe
++ = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
++ ? gcvPIPE_2D
++ : gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#endif
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Hardware,
++ &context->alignment,
++ &context->reservedHead,
++ &context->reservedTail
++ ));
++
++ /* Mark the context as dirty to force loading of the entire state table
++ the first time. */
++ context->dirty = gcvTRUE;
++
++
++ /**************************************************************************/
++ /* Get the size of the context buffer. ************************************/
++
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++
++ /**************************************************************************/
++ /* Compute the size of the record array. **********************************/
++
++ context->recordArraySize
++ = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * context->stateCount;
++
++
++ if (context->stateCount > 0)
++ {
++ /**************************************************************************/
++ /* Allocate and reset the state mapping table. ****************************/
++
++ /* Allocate the state mapping table. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsSTATE_MAP) * context->stateCount,
++ &pointer
++ ));
++
++ context->map = pointer;
++
++ /* Zero the state mapping table. */
++ gcmkONERROR(gckOS_ZeroMemory(
++ context->map, gcmSIZEOF(gcsSTATE_MAP) * context->stateCount
++ ));
++
++
++ /**************************************************************************/
++ /* Allocate the hint array. ***********************************************/
++
++#if gcdSECURE_USER
++ /* Allocate hints. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gctBOOL) * context->stateCount,
++ &pointer
++ ));
++
++ context->hint = pointer;
++#endif
++ }
++
++ /**************************************************************************/
++ /* Allocate the context and state delta buffers. **************************/
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
++ {
++ /* Allocate a context buffer. */
++ gcsCONTEXT_PTR buffer;
++
++ /* Allocate the context buffer structure. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsCONTEXT),
++ &pointer
++ ));
++
++ buffer = pointer;
++
++ /* Reset the context buffer structure. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ buffer, gcmSIZEOF(gcsCONTEXT)
++ ));
++
++ /* Append to the list. */
++ if (context->buffer == gcvNULL)
++ {
++ buffer->next = buffer;
++ context->buffer = buffer;
++ }
++ else
++ {
++ buffer->next = context->buffer->next;
++ context->buffer->next = buffer;
++ }
++
++        /* Set the context buffer number in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ buffer->num = i;
++#endif
++
++ /* Create the busy signal. */
++ gcmkONERROR(gckOS_CreateSignal(
++ Os, gcvFALSE, &buffer->signal
++ ));
++
++        /* Set the signal; the buffer is currently not busy. */
++ gcmkONERROR(gckOS_Signal(
++ Os, buffer->signal, gcvTRUE
++ ));
++
++ /* Create a new physical context buffer. */
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++ context->hardware->kernel,
++ gcvFALSE,
++ &context->totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++#else
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &context->totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++#endif
++
++ buffer->logical = pointer;
++
++ /* Set gckEVENT object pointer. */
++ buffer->eventObj = Hardware->kernel->eventObj;
++
++ /* Set the pointers to the LINK commands. */
++ if (context->linkIndex2D != 0)
++ {
++ buffer->link2D = &buffer->logical[context->linkIndex2D];
++ }
++
++ if (context->linkIndex3D != 0)
++ {
++ buffer->link3D = &buffer->logical[context->linkIndex3D];
++ }
++
++ if (context->linkIndexXD != 0)
++ {
++ gctPOINTER xdLink;
++ gctUINT8_PTR xdEntryLogical;
++ gctSIZE_T xdEntrySize;
++ gctSIZE_T linkBytes;
++
++ /* Determine LINK parameters. */
++ xdLink
++ = &buffer->logical[context->linkIndexXD];
++
++ xdEntryLogical
++ = (gctUINT8_PTR) buffer->logical
++ + context->entryOffsetXDFrom3D;
++
++ xdEntrySize
++ = context->bufferSize
++ - context->entryOffsetXDFrom3D;
++
++ /* Query LINK size. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware, gcvNULL, gcvNULL, 0, &linkBytes
++ ));
++
++ /* Generate a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware,
++ xdLink,
++ xdEntryLogical,
++ xdEntrySize,
++ &linkBytes
++ ));
++ }
++ }
++
++
++ /**************************************************************************/
++ /* Initialize the context buffers. ****************************************/
++
++ /* Initialize the current context buffer. */
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++ /* Make all created contexts equal. */
++ {
++ gcsCONTEXT_PTR currContext, tempContext;
++
++ /* Set the current context buffer. */
++ currContext = context->buffer;
++
++ /* Get the next context buffer. */
++ tempContext = currContext->next;
++
++ /* Loop through all buffers. */
++ while (tempContext != currContext)
++ {
++ if (tempContext == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ /* Copy the current context. */
++ gckOS_MemCopy(
++ tempContext->logical,
++ currContext->logical,
++ context->totalSize
++ );
++
++ /* Get the next context buffer. */
++ tempContext = tempContext->next;
++ }
++ }
++
++ /* Return pointer to the gckCONTEXT object. */
++ *Context = context;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%08X", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back on error. */
++ gcmkVERIFY_OK(_DestroyContext(context));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Destroy
++**
++** Destroy a gckCONTEXT object.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++**          Pointer to a gckCONTEXT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%08X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Destroy the context and all related objects. */
++ status = _DestroyContext(Context);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Update
++**
++** Merge all pending state delta buffers into the current context buffer.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++**          Pointer to a gckCONTEXT object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSTATE_DELTA _stateDelta;
++ gckKERNEL kernel;
++ gcsCONTEXT_PTR buffer;
++ gcsSTATE_MAP_PTR map;
++ gctBOOL needCopy = gcvFALSE;
++ gcsSTATE_DELTA_PTR nDelta;
++ gcsSTATE_DELTA_PTR uDelta = gcvNULL;
++ gcsSTATE_DELTA_PTR kDelta = gcvNULL;
++ gcsSTATE_DELTA_RECORD_PTR record;
++ gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL;
++ gctUINT elementCount;
++ gctUINT address;
++ gctUINT32 mask;
++ gctUINT32 data;
++ gctUINT index;
++ gctUINT i, j;
++
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++
++ gcmkHEADER_ARG(
++ "Context=0x%08X ProcessID=%d StateDelta=0x%08X",
++ Context, ProcessID, StateDelta
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Get a shortcut to the kernel object. */
++ kernel = Context->hardware->kernel;
++
++    /* Check whether we need to copy the structures. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy));
++
++ /* Allocate the copy buffer for the user record array. */
++ if (needCopy && (Context->recordArray == gcvNULL))
++ {
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &Context->recordArray
++ ));
++ }
++
++ /* Get the current context buffer. */
++ buffer = Context->buffer;
++
++ /* Wait until the context buffer becomes available; this will
++ also reset the signal and mark the buffer as busy. */
++ gcmkONERROR(gckOS_WaitSignal(
++ Context->os, buffer->signal, gcvINFINITE
++ ));
++
++#if gcdSECURE_USER
++    /* Get the cache from the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && !defined(VIVANTE_NO_3D)
++ /* Update current context token. */
++ buffer->logical[Context->map[0x0E14].index]
++ = gcmPTR2INT(Context);
++#endif
++
++ /* Are there any pending deltas? */
++ if (buffer->deltaCount != 0)
++ {
++ /* Get the state map. */
++ map = Context->map;
++
++ /* Get the first delta item. */
++ uDelta = buffer->delta;
++
++ /* Reset the vertex stream count. */
++ elementCount = 0;
++
++ /* Merge all pending deltas. */
++ for (i = 0; i < buffer->deltaCount; i += 1)
++ {
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ Context->recordArray,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++
++ /* Merge all pending states. */
++ for (j = 0; j < kDelta->recordCount; j += 1)
++ {
++ if (j >= Context->stateCount)
++ {
++ break;
++ }
++
++ /* Get the current state record. */
++ record = &recordArray[j];
++
++ /* Get the state address. */
++ address = record->address;
++
++ /* Make sure the state is a part of the mapping table. */
++ if (address >= Context->stateCount)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++
++ continue;
++ }
++
++ /* Get the state index. */
++ index = map[address].index;
++
++ /* Skip the state if not mapped. */
++ if (index == 0)
++ {
++#if gcdDEBUG
++ if ((address != 0x0594)
++ && (address != 0x0E00)
++ && (address != 0x0E03)
++ )
++ {
++#endif
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++#if gcdDEBUG
++ }
++#endif
++ continue;
++ }
++
++ /* Get the data mask. */
++ mask = record->mask;
++
++                /* Masked states that are being completely reset, or regular states. */
++ if ((mask == 0) || (mask == ~0U))
++ {
++ /* Get the new data value. */
++ data = record->data;
++
++ /* Process special states. */
++ if (address == 0x0595)
++ {
++ /* Force auto-disable to be disabled. */
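++                        /* Clear bits 5, 4 and 13 of the state value. */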
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13)));
++ }
++
++#if gcdSECURE_USER
++ /* Do we need to convert the logical address? */
++ if (Context->hint[address])
++ {
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) &data
++ ));
++ }
++#endif
++
++ /* Set new data. */
++ buffer->logical[index] = data;
++ }
++
++ /* Masked states that are being set partially. */
++ else
++ {
++ buffer->logical[index]
++ = (~mask & buffer->logical[index])
++ | (mask & record->data);
++ }
++ }
++
++ /* Get the element count. */
++ if (kDelta->elementCount != 0)
++ {
++ elementCount = kDelta->elementCount;
++ }
++
++ /* Dereference delta. */
++ kDelta->refCount -= 1;
++ gcmkASSERT(kDelta->refCount >= 0);
++
++ /* Get the next state delta. */
++ nDelta = gcmUINT64_TO_PTR(kDelta->next);
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Update the user delta pointer. */
++ uDelta = nDelta;
++ }
++
++        /* The hardware disables all input streams when stream 0 is programmed;
++           it then re-enables only those streams that were explicitly programmed
++           by the software. Because of this we cannot program the entire array of
++           values, otherwise all streams would be re-enabled; instead we program
++           only those that are actually needed by the software. */
++ if (elementCount != 0)
++ {
++ gctUINT base;
++ gctUINT nopCount;
++ gctUINT32_PTR nop;
++ gctUINT fe2vsCount = 12;
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++
++ /* Determine the base index of the vertex stream array. */
++ base = map[0x0180].index;
++
++ /* Set the proper state count. */
++ buffer->logical[base - 1]
++ = ((((gctUINT32) (buffer->logical[base - 1])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (elementCount ) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine the number of NOP commands. */
++ nopCount
++ = (fe2vsCount / 2)
++ - (elementCount / 2);
++
++ /* Determine the location of the first NOP. */
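++            /* (elementCount | 1) starts the NOPs at the next even slot past
++               the programmed elements, presumably to keep each two-word NOP
++               64-bit aligned. */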
++ nop = &buffer->logical[base + (elementCount | 1)];
++
++ /* Fill the unused space with NOPs. */
++ for (i = 0; i < nopCount; i += 1)
++ {
++ if (nop >= buffer->logical + Context->totalSize)
++ {
++ break;
++ }
++
++ /* Generate a NOP command. */
++ *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ /* Advance. */
++ nop += 2;
++ }
++ }
++
++ /* Reset pending deltas. */
++ buffer->deltaCount = 0;
++ buffer->delta = gcvNULL;
++ }
++
++ /* Set state delta user pointer. */
++ uDelta = StateDelta;
++
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* State delta cannot be attached to anything yet. */
++ if (kDelta->refCount != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): kDelta->refCount = %d (has to be 0).\n",
++ __FUNCTION__, __LINE__,
++ kDelta->refCount
++ );
++ }
++
++ /* Attach to all contexts. */
++ buffer = Context->buffer;
++
++ do
++ {
++ /* Attach to the context if nothing is attached yet. If a delta
++           is already attached, all we need to do is increment
++ the number of deltas in the context. */
++ if (buffer->delta == gcvNULL)
++ {
++ buffer->delta = uDelta;
++ }
++
++ /* Update reference count. */
++ kDelta->refCount += 1;
++
++ /* Update counters. */
++ buffer->deltaCount += 1;
++
++ /* Get the next context buffer. */
++ buffer = buffer->next;
++
++ if (buffer == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++ }
++ while (Context->buffer != buffer);
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Schedule an event to mark the context buffer as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL
++ ));
++
++ /* Advance to the next context buffer. */
++ Context->buffer = buffer->next;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Get access to the state records. */
++ if (kDelta != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++
++ /* Close access to the current state delta. */
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h 2015-05-01 14:57:59.511427001 -0500
+@@ -0,0 +1,157 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_context_h_
++#define __gc_hal_kernel_context_h_
++
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Maps state locations within the context buffer. */
++typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR;
++typedef struct _gcsSTATE_MAP
++{
++ /* Index of the state in the context buffer. */
++ gctUINT index;
++
++ /* State mask. */
++ gctUINT32 mask;
++}
++gcsSTATE_MAP;
++
++/* Context buffer. */
++typedef struct _gcsCONTEXT * gcsCONTEXT_PTR;
++typedef struct _gcsCONTEXT
++{
++    /* For debugging: the number of the context buffer in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Context busy signal. */
++ gctSIGNAL signal;
++
++ /* Physical address of the context buffer. */
++ gctPHYS_ADDR physical;
++
++ /* Logical address of the context buffer. */
++ gctUINT32_PTR logical;
++
++ /* Pointer to the LINK commands. */
++ gctPOINTER link2D;
++ gctPOINTER link3D;
++
++ /* The number of pending state deltas. */
++ gctUINT deltaCount;
++
++ /* Pointer to the first delta to be applied. */
++ gcsSTATE_DELTA_PTR delta;
++
++ /* Next context buffer. */
++ gcsCONTEXT_PTR next;
++}
++gcsCONTEXT;
++
++/* gckCONTEXT structure that holds the current context. */
++struct _gckCONTEXT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Command buffer alignment. */
++ gctSIZE_T alignment;
++ gctSIZE_T reservedHead;
++ gctSIZE_T reservedTail;
++
++ /* Context buffer metrics. */
++ gctSIZE_T stateCount;
++ gctSIZE_T totalSize;
++ gctSIZE_T bufferSize;
++ gctUINT32 linkIndex2D;
++ gctUINT32 linkIndex3D;
++ gctUINT32 linkIndexXD;
++ gctUINT32 entryOffset3D;
++ gctUINT32 entryOffsetXDFrom2D;
++ gctUINT32 entryOffsetXDFrom3D;
++
++ /* Dirty flags. */
++ gctBOOL dirty;
++ gctBOOL dirty2D;
++ gctBOOL dirty3D;
++ gcsCONTEXT_PTR dirtyBuffer;
++
++ /* State mapping. */
++ gcsSTATE_MAP_PTR map;
++
++ /* List of context buffers. */
++ gcsCONTEXT_PTR buffer;
++
++ /* A copy of the user record array. */
++ gctUINT recordArraySize;
++ gcsSTATE_DELTA_RECORD_PTR recordArray;
++
++ /* Requested pipe select for context. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Variables used for building state buffer. */
++ gctUINT32 lastAddress;
++ gctSIZE_T lastSize;
++ gctUINT32 lastIndex;
++ gctBOOL lastFixed;
++
++ /* Hint array. */
++#if gcdSECURE_USER
++ gctBOOL_PTR hint;
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gcsPROFILER_COUNTERS latestProfiler;
++ gcsPROFILER_COUNTERS histroyProfiler;
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++#endif
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_context_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c 2015-05-01 14:57:59.519427001 -0500
+@@ -0,0 +1,7280 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#if VIVANTE_PROFILER_CONTEXT
++#include "gc_hal_kernel_context.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR;
++typedef struct _gcsiDEBUG_REGISTERS
++{
++ gctSTRING module;
++ gctUINT index;
++ gctUINT shift;
++ gctUINT data;
++ gctUINT count;
++ gctUINT32 signature;
++}
++gcsiDEBUG_REGISTERS;
++
++extern int gpu3DMinClock;
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gceSTATUS status;
++
++ gctUINT32 chipIdentity;
++
++ gctUINT32 streamCount = 0;
++ gctUINT32 registerMax = 0;
++ gctUINT32 threadCount = 0;
++ gctUINT32 shaderCoreCount = 0;
++ gctUINT32 vertexCacheSize = 0;
++ gctUINT32 vertexOutputBufferSize = 0;
++ gctUINT32 pixelPipes = 0;
++ gctUINT32 instructionCount = 0;
++ gctUINT32 numConstants = 0;
++ gctUINT32 bufferSize = 0;
++ gctUINT32 varyingsCount = 0;
++ gctBOOL useHZ;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /***************************************************************************
++ ** Get chip ID and revision.
++ */
++
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00018,
++ &chipIdentity));
++
++    /* Special case for older graphics cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ Identity->chipModel = gcv500;
++ Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00020,
++ (gctUINT32_PTR) &Identity->chipModel));
++
++ /* !!!! HACK ALERT !!!! */
++ /* Because people change device IDs without letting software know
++ ** about it - here is the hack to make it all look the same. Only
++ ** for GC400 family. Next time - TELL ME!!! */
++ if (((Identity->chipModel & 0xFF00) == 0x0400)
++ && (Identity->chipModel != 0x0420))
++ {
++ Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400);
++ }
++
++ /* Read CHIP_REV register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00024,
++ &Identity->chipRevision));
++
++ if ((Identity->chipModel == gcv300)
++ && (Identity->chipRevision == 0x2201)
++ )
++ {
++ gctUINT32 chipDate;
++ gctUINT32 chipTime;
++
++ /* Read date and time registers. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00028,
++ &chipDate));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0002C,
++ &chipTime));
++
++ if ((chipDate == 0x20080814) && (chipTime == 0x12051100))
++ {
++ /* This IP has an ECO; put the correct revision in it. */
++ Identity->chipRevision = 0x1051;
++ }
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipModel=%X",
++ Identity->chipModel);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipRevision=%X",
++ Identity->chipRevision);
++
++
++ /***************************************************************************
++ ** Get chip features.
++ */
++
++ /* Read chip feature register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0001C,
++ &Identity->chipFeatures));
++
++#ifndef VIVANTE_NO_3D
++ /* Disable fast clear on GC700. */
++ if (Identity->chipModel == gcv700)
++ {
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++#endif
++
++ if (((Identity->chipModel == gcv500) && (Identity->chipRevision < 2))
++ || ((Identity->chipModel == gcv300) && (Identity->chipRevision < 0x2000))
++ )
++ {
++ /* GC500 rev 1.x and GC300 rev < 2.0 don't have these registers. */
++ Identity->chipMinorFeatures = 0;
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ }
++ else
++ {
++ /* Read chip minor feature register #0. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00034,
++ &Identity->chipMinorFeatures));
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))))
++ )
++ {
++ /* Read chip minor features register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00074,
++ &Identity->chipMinorFeatures1));
++
++ /* Read chip minor features register #2. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00084,
++ &Identity->chipMinorFeatures2));
++
++ /*Identity->chipMinorFeatures2 &= ~(0x1 << 3);*/
++
++ /* Read chip minor features register #3. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00088,
++ &Identity->chipMinorFeatures3));
++
++ /* The BG2 chip has no supertiled compression, and the GCMinorFeature3BugFixes15 bit is not applicable. */
++ if(Identity->chipModel == gcv1000 && Identity->chipRevision == 0x5036)
++ {
++ Identity->chipMinorFeatures3
++ = ((((gctUINT32) (Identity->chipMinorFeatures3)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ Identity->chipMinorFeatures3
++ = ((((gctUINT32) (Identity->chipMinorFeatures3)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++ }
++
++ /* Read chip minor features register #4. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00094,
++ &Identity->chipMinorFeatures4));
++ }
++ else
++ {
++ /* Chip doesn't have minor feature registers #1, #2, #3 or #4. */
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ }
++ }
++
++ /* Get the Supertile layout in the hardware. */
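++ /* The decoded bit tests below select the layout: bit 26 or bit 8 of
++ ** gcChipMinorFeatures3 selects mode 2, bit 27 of gcChipMinorFeatures
++ ** selects mode 1, and mode 0 is the fallback. */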
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))))
++ {
++ Identity->superTileMode = 2;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))))
++ {
++ Identity->superTileMode = 1;
++ }
++ else
++ {
++ Identity->superTileMode = 0;
++ }
++
++ /* Exception for GC1000, revision 5035 & GC800, revision 4612 */
++ if (((Identity->chipModel == gcv1000) && ((Identity->chipRevision == 0x5035)
++ || (Identity->chipRevision == 0x5036)
++ || (Identity->chipRevision == 0x5037)))
++ || ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4612))
++ || ((Identity->chipModel == gcv860) && (Identity->chipRevision == 0x4647)))
++ {
++ Identity->superTileMode = 1;
++ }
++
++ if (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5245)
++ {
++ useHZ = ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))));
++ }
++ else
++ {
++ useHZ = gcvFALSE;
++ }
++
++ if (useHZ)
++ {
++ /* Disable EZ. */
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ }
++
++ /* Disable HZ when EZ is present for older chips. */
++ else if (!((((gctUINT32) (Identity->chipFeatures)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))))
++ {
++ /* Disable HIERARCHICAL_Z. */
++ Identity->chipMinorFeatures
++ = ((((gctUINT32) (Identity->chipMinorFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++ }
++
++ /* Disable rectangle primitive when chip is gc880_5_1_0_rc6*/
++ if ((Identity->chipModel == gcv880) && (Identity->chipRevision == 0x5106))
++ {
++ /* Disable rectangle primitive. */
++ Identity->chipMinorFeatures2
++ = ((((gctUINT32) (Identity->chipMinorFeatures2)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ if ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4605))
++ {
++ /* Correct feature bit: the RTL does not have such a feature. */
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipFeatures=0x%08X",
++ Identity->chipFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures=0x%08X",
++ Identity->chipMinorFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures1=0x%08X",
++ Identity->chipMinorFeatures1);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures2=0x%08X",
++ Identity->chipMinorFeatures2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures3=0x%08X",
++ Identity->chipMinorFeatures3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures4=0x%08X",
++ Identity->chipMinorFeatures4);
++
++ /***************************************************************************
++ ** Get chip specs.
++ */
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ gctUINT32 specs, specs2, specs3;
++
++ /* Read gcChipSpecs register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00048,
++ &specs));
++
++ /* Extract the fields. */
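++ /* Bit layout of gcChipSpecs as decoded below: streamCount in 3:0,
++ ** registerMax in 7:4, threadCount in 11:8, vertexCacheSize in 16:12,
++ ** shaderCoreCount in 24:20, pixelPipes in 27:25 and
++ ** vertexOutputBufferSize in 31:28. */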
++ streamCount = (((((gctUINT32) (specs)) >> (0 ? 3:0)) & ((gctUINT32) ((((1 ? 3:0) - (0 ? 3:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:0) - (0 ? 3:0) + 1)))))) );
++ registerMax = (((((gctUINT32) (specs)) >> (0 ? 7:4)) & ((gctUINT32) ((((1 ? 7:4) - (0 ? 7:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:4) - (0 ? 7:4) + 1)))))) );
++ threadCount = (((((gctUINT32) (specs)) >> (0 ? 11:8)) & ((gctUINT32) ((((1 ? 11:8) - (0 ? 11:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:8) - (0 ? 11:8) + 1)))))) );
++ shaderCoreCount = (((((gctUINT32) (specs)) >> (0 ? 24:20)) & ((gctUINT32) ((((1 ? 24:20) - (0 ? 24:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:20) - (0 ? 24:20) + 1)))))) );
++ vertexCacheSize = (((((gctUINT32) (specs)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ vertexOutputBufferSize = (((((gctUINT32) (specs)) >> (0 ? 31:28)) & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1)))))) );
++ pixelPipes = (((((gctUINT32) (specs)) >> (0 ? 27:25)) & ((gctUINT32) ((((1 ? 27:25) - (0 ? 27:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:25) - (0 ? 27:25) + 1)))))) );
++
++ /* Read gcChipSpecs2 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00080,
++ &specs2));
++
++ instructionCount = (((((gctUINT32) (specs2)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ numConstants = (((((gctUINT32) (specs2)) >> (0 ? 31:16)) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1)))))) );
++ bufferSize = (((((gctUINT32) (specs2)) >> (0 ? 7:0)) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1)))))) );
++
++ /* Read gcChipSpecs3 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0008C,
++ &specs3));
++
++ varyingsCount = (((((gctUINT32) (specs3)) >> (0 ? 8:4)) & ((gctUINT32) ((((1 ? 8:4) - (0 ? 8:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:4) - (0 ? 8:4) + 1)))))) );
++ }
++
++ /* Get the number of pixel pipes. */
++ Identity->pixelPipes = gcmMAX(pixelPipes, 1);
++
++ /* Get the stream count. */
++ Identity->streamCount = (streamCount != 0)
++ ? streamCount
++ : (Identity->chipModel >= gcv1000) ? 4 : 1;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: streamCount=%u%s",
++ Identity->streamCount,
++ (streamCount == 0) ? " (default)" : "");
++
++ /* Get the vertex output buffer size. */
++ Identity->vertexOutputBufferSize = (vertexOutputBufferSize != 0)
++ ? 1 << vertexOutputBufferSize
++ : (Identity->chipModel == gcv400)
++ ? (Identity->chipRevision < 0x4000) ? 512
++ : (Identity->chipRevision < 0x4200) ? 256
++ : 128
++ : (Identity->chipModel == gcv530)
++ ? (Identity->chipRevision < 0x4200) ? 512
++ : 128
++ : 512;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexOutputBufferSize=%u%s",
++ Identity->vertexOutputBufferSize,
++ (vertexOutputBufferSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of threads. */
++ Identity->threadCount = (threadCount != 0)
++ ? 1 << threadCount
++ : (Identity->chipModel == gcv400) ? 64
++ : (Identity->chipModel == gcv500) ? 128
++ : (Identity->chipModel == gcv530) ? 128
++ : 256;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: threadCount=%u%s",
++ Identity->threadCount,
++ (threadCount == 0) ? " (default)" : "");
++
++ /* Get the number of shader cores. */
++ Identity->shaderCoreCount = (shaderCoreCount != 0)
++ ? shaderCoreCount
++ : (Identity->chipModel >= gcv1000) ? 2
++ : 1;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: shaderCoreCount=%u%s",
++ Identity->shaderCoreCount,
++ (shaderCoreCount == 0) ? " (default)" : "");
++
++ /* Get the vertex cache size. */
++ Identity->vertexCacheSize = (vertexCacheSize != 0)
++ ? vertexCacheSize
++ : 8;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexCacheSize=%u%s",
++ Identity->vertexCacheSize,
++ (vertexCacheSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of temporary registers. */
++ Identity->registerMax = (registerMax != 0)
++ /* Maximum of registerMax/4 registers are accessible to 1 shader */
++ ? 1 << registerMax
++ : (Identity->chipModel == gcv400) ? 32
++ : 64;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: registerMax=%u%s",
++ Identity->registerMax,
++ (registerMax == 0) ? " (default)" : "");
++
++ /* Get the instruction count. */
++ Identity->instructionCount = (instructionCount == 0) ? 256
++ : (instructionCount == 1) ? 1024
++ : (instructionCount == 2) ? 2048
++ : (instructionCount == 0xFF) ? 512
++ : 256;
++
++ if (Identity->instructionCount == 256)
++ {
++ if ((Identity->chipModel == gcv2000 && Identity->chipRevision == 0x5108)
++ || Identity->chipModel == gcv880)
++ {
++ Identity->instructionCount = 512;
++ }
++ }
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ Identity->instructionCount = 512;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: instructionCount=%u%s",
++ Identity->instructionCount,
++ (instructionCount == 0) ? " (default)" : "");
++
++ /* Get the number of constants. */
++ Identity->numConstants = (numConstants == 0) ? 168 : numConstants;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: numConstants=%u%s",
++ Identity->numConstants,
++ (numConstants == 0) ? " (default)" : "");
++
++ /* Get the buffer size. */
++ Identity->bufferSize = bufferSize;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: bufferSize=%u%s",
++ Identity->bufferSize,
++ (bufferSize == 0) ? " (default)" : "");
++
++
++ if (varyingsCount != 0)
++ {
++ /* Bug 4480. */
++ /*Identity->varyingsCount = varyingsCount;*/
++ Identity->varyingsCount = 12;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures1)) >> (0 ? 23:23) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))))
++ {
++ Identity->varyingsCount = 12;
++ }
++ else
++ {
++ Identity->varyingsCount = 8;
++ }
++
++ /* Some cores consume two varyings for the position, so the maximum number of varying vectors must be reduced by one. */
++ if ((Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5222) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5208) ||
++ ((Identity->chipModel == gcv2100 || Identity->chipModel == gcv2000) && Identity->chipRevision == 0x5108) ||
++ (Identity->chipModel == gcv880 && (Identity->chipRevision == 0x5107 || Identity->chipRevision == 0x5106)))
++ {
++ Identity->varyingsCount -= 1;
++ }
++
++ Identity->chip2DControl = 0;
++ if (Identity->chipModel == gcv320)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x0002C,
++ &data));
++
++ if ((data != 33956864) &&
++ ((Identity->chipRevision == 0x5007) ||
++ (Identity->chipRevision == 0x5220)))
++ {
++ Identity->chip2DControl |= 0xFF &
++ (Identity->chipRevision == 0x5220 ? 8 :
++ (Identity->chipRevision == 0x5007 ? 12 : 0));
++ }
++
++ if (Identity->chipRevision == 0x5007)
++ {
++ /* Disable splitting rectangle. */
++ Identity->chip2DControl |= 0x100;
++
++ /* Enable 2D Flush. */
++ Identity->chip2DControl |= 0x200;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_PowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckHARDWARE hardware = (gckHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++static gceSTATUS
++_VerifyDMA(
++ IN gckOS Os,
++ IN gceCORE Core,
++ gctUINT32_PTR Address1,
++ gctUINT32_PTR Address2,
++ gctUINT32_PTR State1,
++ gctUINT32_PTR State2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State1));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address1));
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State2));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address2));
++
++ if (*Address1 != *Address2)
++ {
++ break;
++ }
++
++ if (*State1 != *State2)
++ {
++ break;
++ }
++ }
++
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_DumpDebugRegisters(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gcsiDEBUG_REGISTERS_PTR Descriptor
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 select;
++ gctUINT32 data = 0;
++ gctUINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);
++
++ gcmkPRINT_N(4, " %s debug registers:\n", Descriptor->module);
++
++ for (i = 0; i < Descriptor->count; i += 1)
++ {
++ select = i << Descriptor->shift;
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ gcmkPRINT_N(12, " [0x%02X] 0x%08X\n", i, data);
++ }
++
++ select = 0xF << Descriptor->shift;
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ if (data == Descriptor->signature)
++ {
++ break;
++ }
++ }
++
++ if (i == 500)
++ {
++ gcmkPRINT_N(4, " failed to obtain the signature (read 0x%08X).\n", data);
++ }
++ else
++ {
++ gcmkPRINT_N(8, " signature = 0x%08X (%d read attempt(s))\n", data, i + 1);
++ }
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IsGPUPresent(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ control));
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Hardware->os,
++ Hardware->core,
++ &identity));
++
++ /* Check if these are the same values as saved before. */
++ if ((Hardware->identity.chipModel != identity.chipModel)
++ || (Hardware->identity.chipRevision != identity.chipRevision)
++ || (Hardware->identity.chipFeatures != identity.chipFeatures)
++ || (Hardware->identity.chipMinorFeatures != identity.chipMinorFeatures)
++ || (Hardware->identity.chipMinorFeatures1 != identity.chipMinorFeatures1)
++ || (Hardware->identity.chipMinorFeatures2 != identity.chipMinorFeatures2)
++ )
++ {
++ gcmkPRINT("[galcore]: GPU is not present.");
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_FlushCache(
++ gckHARDWARE Hardware,
++ gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes, requested;
++ gctPOINTER buffer;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(Command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(Command, requested));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/******************************************************************************\
++****************************** gckHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHARDWARE_Construct
++**
++** Construct a new gckHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** OUTPUT:
++**
++** gckHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckHARDWARE
++** object.
++*/
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware = gcvNULL;
++ gctUINT16 data = 0xff00;
++ gctUINT32 axi_ot;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ /* Enable the GPU. */
++ gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE));
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Allocate the gckHARDWARE object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckHARDWARE),
++ &pointer));
++
++ hardware = (gckHARDWARE) pointer;
++
++ /* Initialize the gckHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++ hardware->core = Core;
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Os, Core, &hardware->identity));
++
++ /* Determine the hardware type */
++ switch (hardware->identity.chipModel)
++ {
++ case gcv350:
++ case gcv355:
++ hardware->type = gcvHARDWARE_VG;
++ break;
++
++ case gcv300:
++ case gcv320:
++ case gcv420:
++ hardware->type = gcvHARDWARE_2D;
++ /* Set outstanding limit. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ break;
++
++ default:
++ hardware->type = gcvHARDWARE_3D;
++ if(hardware->identity.chipModel == gcv880)
++ {
++ /* Set outstanding limit. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ }
++
++ if ((((((gctUINT32) (hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ))
++ {
++ hardware->type = (gceHARDWARE_TYPE) (hardware->type | gcvHARDWARE_2D);
++ }
++ }
++
++ hardware->powerBaseAddress
++ = ((hardware->identity.chipModel == gcv300)
++ && (hardware->identity.chipRevision < 0x2000))
++ ? 0x0100
++ : 0x0000;
++
++ /* _ResetGPU needs powerBaseAddress. */
++ status = _ResetGPU(hardware, Os, Core);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ hardware->powerMutex = gcvNULL;
++
++ hardware->mmuVersion
++ = (((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 28:28)) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) );
++
++ /* Determine whether bug fixes #1 are present. */
++ hardware->extraEventStates = ((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ /* Check if big endian */
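++ /* 'data' was initialized to 0xff00 above; if the first byte in memory is
++ ** 0xff, the most significant byte is stored first and the CPU is
++ ** big-endian. */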
++ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff);
++
++ /* Initialize the fast clear. */
++ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1));
++
++#if !gcdENABLE_128B_MERGE
++
++ if (((((gctUINT32) (hardware->identity.chipMinorFeatures2)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ /* 128B merge is turned on by default. Disable it. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0));
++ }
++
++#endif
++
++ /* Set power state to ON. */
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++ hardware->lastWaitLink = ~0U;
++ hardware->globalSemaphore = gcvNULL;
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ hardware->powerOnFscaleVal = 64;
++#endif
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex));
++ gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore));
++ hardware->startIsr = gcvNULL;
++ hardware->stopIsr = gcvNULL;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _PowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++#if gcdLINK_QUEUE_SIZE
++ hardware->linkQueue.front = 0;
++ hardware->linkQueue.rear = 0;
++ hardware->linkQueue.count = 0;
++#endif
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Disable profiler by default */
++ hardware->gpuProfiler = gcvFALSE;
++
++ /* Return pointer to the gckHARDWARE object. */
++ *Hardware = hardware;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE));
++
++ if (hardware->globalSemaphore != gcvNULL)
++ {
++ /* Destroy the global semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Os,
++ hardware->globalSemaphore));
++ }
++
++ if (hardware->powerMutex != gcvNULL)
++ {
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Destroy
++**
++** Destroy a gckHARDWARE object.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Destroy the power semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex));
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_GetType
++**
++** Get the hardware type.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gceHARDWARE_TYPE * Type
++** Pointer to a variable that receives the type of hardware object.
++*/
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkVERIFY_ARGUMENT(Type != gcvNULL);
++
++ *Type = Hardware->type;
++
++ gcmkFOOTER_ARG("*Type=%d", *Type);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_InitializeHardware
++**
++** Initialize the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++ gctUINT32 chipRev;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the chip revision register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00024,
++ &chipRev));
++
++ if (chipRev != Hardware->identity.chipRevision)
++ {
++ /* Chip is not there! */
++ gcmkONERROR(gcvSTATUS_CONTEXT_LOSSED);
++ }
++
++ /* Disable isolate GPU bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)))));
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ /* Reset memory counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ ~0U));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++
++ /* Get the system's physical base address. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Program the base addresses. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0041C,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00418,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00428,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00420,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00424,
++ baseAddress));
++
++#if !VIVANTE_PROFILER
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ /* Enable clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ if ((Hardware->identity.chipRevision == 0x4301)
++ || (Hardware->identity.chipRevision == 0x4302)
++ )
++ {
++ /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2
++ ** revisions. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++
++#ifndef VIVANTE_NO_3D
++ /* Disable PE clock gating on revs < 5.0 when HZ is present without a
++ ** bug fix. */
++ if ((Hardware->identity.chipRevision < 0x5000)
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))))
++ )
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable PE clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++#endif
++ }
++#endif
++
++ /* Special workaround for this core
++ ** Make sure pulse eater kicks in only when SH is idle */
++ if (Hardware->identity.chipModel == gcv4000 &&
++ Hardware->identity.chipRevision == 0x5208)
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)))));
++ }
++
++ if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvFALSE)
++ || (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) && (Hardware->identity.chipRevision < 0x5422))
++ )
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)));
++
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ /* Special workaround for this core
++ ** Make sure FE and TX are on different buses */
++ if ((Hardware->identity.chipModel == gcv2000)
++ && (Hardware->identity.chipRevision == 0x5108))
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ &data));
++
++ /* Set FE bus to one, TX bus to zero */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ data));
++ }
++
++ /* Test if MMU is initialized. */
++ if ((Hardware->kernel != gcvNULL)
++ && (Hardware->kernel->mmu != gcvNULL)
++ )
++ {
++ /* Reset MMU. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Hardware,
++ Hardware->kernel->mmu->pageTableLogical));
++ }
++ }
++
++ if (Hardware->identity.chipModel >= gcv400
++ && Hardware->identity.chipModel != gcv420
++ && (((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 15:15) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) != gcvTRUE)
++ )
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable PA clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++#if gcdHZ_L2_DISALBE
++ /* Disable HZ-L2. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) == gcvTRUE ||
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) == gcvTRUE)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++#endif
++
++ /* Limit 2D outstanding requests. */
++ if(Hardware->identity.chipModel == gcv880)
++ {
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00414, axi_ot));
++ }
++
++ if (Hardware->identity.chip2DControl & 0xFF)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->identity.chip2DControl & 0xFF) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ /* Update GPU AXI cache attribute. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00008,
++ 0x00002200));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the alignment requirement of
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the alignment requirement of
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x "
++ "*InternalAlignment=0x%08x *ExternalSize=%lu "
++ "*ExternalBaseAddress=0x%08x *ExtenalAlignment=0x%08x "
++ "*HorizontalTileSize=%u *VerticalTileSize=%u",
++ gcmOPT_VALUE(InternalSize),
++ gcmOPT_VALUE(InternalBaseAddress),
++ gcmOPT_VALUE(InternalAlignment),
++ gcmOPT_VALUE(ExternalSize),
++ gcmOPT_VALUE(ExternalBaseAddress),
++ gcmOPT_VALUE(ExternalAlignment),
++ gcmOPT_VALUE(HorizontalTileSize),
++ gcmOPT_VALUE(VerticalTileSize));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++** Pointer to the identity structure.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gctUINT32 features;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Identity != gcvNULL);
++
++ /* Return chip model and revision. */
++ Identity->chipModel = Hardware->identity.chipModel;
++ Identity->chipRevision = Hardware->identity.chipRevision;
++
++ /* Return feature set. */
++ features = Hardware->identity.chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Override fast clear by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ if ((((((gctUINT32) (features)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ))
++ {
++ /* Override compression by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (Hardware->allowCompression) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 through GC500.2 and GC300,
++ ** since they did not have this bit. */
++ if (((Hardware->identity.chipModel == gcv500) && (Hardware->identity.chipRevision <= 2))
++ || (Hardware->identity.chipModel == gcv300)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ Identity->chipFeatures = features;
++
++ /* Return minor features. */
++ Identity->chipMinorFeatures = Hardware->identity.chipMinorFeatures;
++ Identity->chipMinorFeatures1 = Hardware->identity.chipMinorFeatures1;
++ Identity->chipMinorFeatures2 = Hardware->identity.chipMinorFeatures2;
++ Identity->chipMinorFeatures3 = Hardware->identity.chipMinorFeatures3;
++ Identity->chipMinorFeatures4 = Hardware->identity.chipMinorFeatures4;
++
++ /* Return chip specs. */
++ Identity->streamCount = Hardware->identity.streamCount;
++ Identity->registerMax = Hardware->identity.registerMax;
++ Identity->threadCount = Hardware->identity.threadCount;
++ Identity->shaderCoreCount = Hardware->identity.shaderCoreCount;
++ Identity->vertexCacheSize = Hardware->identity.vertexCacheSize;
++ Identity->vertexOutputBufferSize = Hardware->identity.vertexOutputBufferSize;
++ Identity->pixelPipes = Hardware->identity.pixelPipes;
++ Identity->instructionCount = Hardware->identity.instructionCount;
++ Identity->numConstants = Hardware->identity.numConstants;
++ Identity->bufferSize = Hardware->identity.bufferSize;
++ Identity->varyingsCount = Hardware->identity.varyingsCount;
++ Identity->superTileMode = Hardware->identity.superTileMode;
++ Identity->chip2DControl = Hardware->identity.chip2DControl;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Addres=0x%08x", Hardware, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ /* Dispatch on memory type. */
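++ /* With MMU v0 the top bit of the hardware address selects the pool
++ ** (0 = system, 1 = virtual) and bits 30:0 carry the offset. */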
++ switch ((((((gctUINT32) (Address)) >> (0 ? 31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x1:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) );
++ }
++ else
++ {
++ *Pool = gcvPOOL_SYSTEM;
++ *Offset = Address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of command buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes for the prefetch unit (until after the first LINK).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Physical,
++ IN gctBOOL PhysicalAddresses,
++#endif
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0, control;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Bytes=%lu",
++ Hardware, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++#ifdef __QNXNTO__
++ if (PhysicalAddresses && (Hardware->mmuVersion == 0))
++ {
++ /* Convert physical into hardware specific address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertPhysical(Hardware, Physical, &address));
++ }
++ else
++ {
++#endif
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, Logical, &address));
++#ifdef __QNXNTO__
++ }
++#endif
++
++ /* Enable all events. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U));
++
++ /* Write address register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, address));
++
++ /* Build control register. */
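++ /* The expanded macros below set bit 16 (presumably the enable/start bit)
++ ** and place the prefetch size in bits 15:0; (Bytes + 7) >> 3 is the
++ ** buffer length rounded up to 64-bit words. */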
++ control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ /* Set big endian */
++ if (Hardware->bigEndian)
++ {
++ control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20)));
++ }
++
++ /* Write control register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Started command buffer @ 0x%08x",
++ address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_WaitLink
++**
++** Append a WAIT/LINK command sequence at the specified location in the command
++** queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT/LINK command sequence at or gcvNULL just to query the size of the
++** WAIT/LINK command sequence.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT/LINK command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitOffset
++** Pointer to a variable that will receive the offset of the WAIT command
++** from the specified logical pointer.
++** If 'WaitOffset' is gcvNULL nothing will be returned.
++**
++** gctSIZE_T * WaitSize
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT command. If 'WaitSize' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctSIZE_T * WaitSize
++ )
++{
++ static const gctUINT waitCount = 200;
++
++ gceSTATUS status;
++ gctUINT32 address;
++ gctUINT32_PTR logical;
++ gctSIZE_T bytes;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu",
++ Hardware, Logical, Offset, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL));
++
++ /* Compute number of bytes required. */
++#if gcd6000_SUPPORT
++ bytes = gcmALIGN(Offset + 96, 8) - Offset;
++#else
++ bytes = gcmALIGN(Offset + 16, 8) - Offset;
++#endif
++
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (logical != gcvNULL)
++ {
++ /* Not enough space? */
++ if (*Bytes < bytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, &address));
++
++ /* Store the WAIT/LINK address. */
++ Hardware->lastWaitLink = address;
++
++ /* Append WAIT(count). */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (waitCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcd6000_SUPPORT
++ /* Send FE-PE semaphore token. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Send FE-PE stall token. */
++ logical[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /*************************************************************/
++ /* Enable chip ID 0. */
++ logical[6] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (1 << 0);
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[8] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[9] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[10] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[11] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /*************************************************************/
++ /* Enable chip ID 1. */
++ logical[12] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (1 << 1);
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[14] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[15] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /* Wait for semaphore from ChipID 0. */
++ logical[16] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[17] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /*************************************************************/
++ /* Enable all chips. */
++ logical[18] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (0xFFFF);
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[20]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[21]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append LINK(2, address). */
++ logical[22]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[23] = address;
++#else
++ /* Append LINK(2, address). */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 8, address, bytes
++ );
++#endif
++
++ if (WaitOffset != gcvNULL)
++ {
++ /* Return the offset pointer to WAIT command. */
++ *WaitOffset = 0;
++ }
++
++ if (WaitSize != gcvNULL)
++ {
++ /* Return number of bytes used by the WAIT command. */
++ *WaitSize = 8;
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT/LINK command
++ ** sequence. */
++ *Bytes = bytes;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu",
++ gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset),
++ gcmOPT_VALUE(WaitSize));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
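++/* Illustrative usage sketch, not part of the patch: the emit helpers in this
++** file share a two-pass pattern -- call once with Logical == gcvNULL to learn
++** the required size, then again with a real pointer to emit.  This helper is
++** only a sketch and is not referenced by the driver. */
++static gceSTATUS
++_SketchAppendWaitLink(
++    IN gckHARDWARE Hardware,
++    IN gctPOINTER Logical,
++    IN gctUINT32 Offset
++    )
++{
++    gceSTATUS status;
++    gctSIZE_T bytes = 0;
++    gctUINT32 waitOffset;
++    gctSIZE_T waitSize;
++
++    /* Pass 1: how many bytes does WAIT/LINK need at this offset? */
++    gcmkONERROR(gckHARDWARE_WaitLink(
++        Hardware, gcvNULL, Offset, &bytes, gcvNULL, gcvNULL));
++
++    /* Pass 2: emit the WAIT/LINK pair into the reserved space. */
++    gcmkONERROR(gckHARDWARE_WaitLink(
++        Hardware, Logical, Offset, &bytes, &waitOffset, &waitSize));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++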
++/*******************************************************************************
++**
++** gckHARDWARE_End
++**
++** Append an END command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** END command at or gcvNULL just to query the size of the END command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append END. */
++ logical[0] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical);
++
++ /* Make sure the CPU writes out the data to memory. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
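++/* Illustrative sketch, not part of the patch: every command emitted in this
++** file starts with a 5-bit opcode in bits 31:27 of its first word.  The
++** values used here are 0x01 (load state), 0x02 (END), 0x03 (NOP), 0x07
++** (WAIT), 0x08 (LINK), 0x09 (semaphore stall) and 0x0D (chip enable).
++** Assuming the driver's gctUINT32 type: */
++static gctUINT32
++_SketchFeOpcode(
++    gctUINT32 Opcode
++    )
++{
++    /* e.g. END is _SketchFeOpcode(0x02) and NOP is _SketchFeOpcode(0x03). */
++    return (Opcode & 0x1F) << 27;
++}
++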
++/*******************************************************************************
++**
++** gckHARDWARE_Nop
++**
++** Append a NOP command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** NOP command at or gcvNULL just to query the size of the NOP command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the NOP command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append NOP. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the NOP command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Wait
++**
++** Append a WAIT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT command at or gcvNULL just to query the size of the WAIT command.
++**
++** gctUINT32 Count
++** Number of cycles to wait.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the WAIT command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Wait(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Count,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Count=%u *Bytes=%lu",
++ Hardware, Logical, Count, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append WAIT. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 address;
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Hardware, logical, &address
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, Count
++ );
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Event
++**
++** Append an EVENT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the EVENT command at or gcvNULL just to query the size of the EVENT
++** command.
++**
++** gctUINT8 Event
++** Event ID to program.
++**
++** gceKERNEL_WHERE FromWhere
++** Location of the pipe to send the event.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT size;
++ gctUINT32 destination = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu",
++ Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++ gcmkVERIFY_ARGUMENT(Event < 32);
++
++ /* Determine the size of the command. */
++
++ size = (Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL))
++ ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */
++ : 8;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < size)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ switch (FromWhere)
++ {
++ case gcvKERNEL_COMMAND:
++ /* From command processor. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ break;
++
++ case gcvKERNEL_PIXEL:
++ /* From pixel engine. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Append EVENT(Event, destination). */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ /* Make sure the event ID gets written out before GPU can access it. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 phys;
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: EVENT %d", phys, Event);
++ }
++#endif
++
++ /* Append the extra states. These are needed for the chips that do not
++ ** support back-to-back events due to the async interface. The extra
++ ** states add the necessary delay to ensure that event IDs do not
++ ** collide. */
++ if (size > 8)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++ logical[3] = 0;
++ logical[4] = 0;
++ logical[5] = 0;
++ logical[6] = 0;
++ logical[7] = 0;
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = size;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
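++/* Illustrative sketch, not part of the patch: the EVENT append above is a
++** single-word load state of the event register at address 0x0E01.  Word 1
++** carries the event ID in bits 4:0 plus a source bit -- bit 5 for the command
++** front end, bit 6 for the pixel engine -- matching the gcvKERNEL_COMMAND and
++** gcvKERNEL_PIXEL cases.  Assuming the driver's gctUINT32/gctBOOL types: */
++static void
++_SketchFeEvent(
++    gctUINT32 * Command,
++    gctUINT32 EventId,
++    gctBOOL FromPixelEngine
++    )
++{
++    Command[0] = (0x01u << 27)          /* LOAD_STATE opcode      */
++               | (1u << 16)             /* one state word follows */
++               | 0x0E01u;               /* event state address    */
++
++    Command[1] = (EventId & 0x1F)
++               | (FromPixelEngine ? (1u << 6) : (1u << 5));
++}
++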
++/*******************************************************************************
++**
++** gckHARDWARE_PipeSelect
++**
++** Append a PIPESELECT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the PIPESELECT command at or gcvNULL just to query the size of the
++** PIPESELECT command.
++**
++** gcePIPE_SELECT Pipe
++** Pipe value to select.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the PIPESELECT command.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu",
++ Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Append a PipeSelect. */
++ if (Logical != gcvNULL)
++ {
++ gctUINT32 flush, stall;
++
++ if (*Bytes < 32)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ flush = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++
++ stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1]
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ /* LoadState(AQSemaphore, 1), stall. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: SEMAPHORE 0x%x", logical + 2, stall);
++
++ /* Stall, stall. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ logical[5] = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: STALL 0x%x", logical + 4, stall);
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ logical[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[7] = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: PIPE %d", logical + 6, Pipe);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the PIPESELECT command. */
++ *Bytes = 32;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
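++/* Illustrative sketch, not part of the patch: every load-state header in this
++** file has the same shape -- opcode 0x01 in bits 31:27, the state-word count
++** in bits 25:16 and the first state address in bits 15:0.  The 32-byte
++** PipeSelect sequence above is therefore: flush (0x0E03), semaphore (0x0E02),
++** STALL, pipe select (0x0E00), which is exactly the 32 reserved head bytes
++** reported by gckHARDWARE_QueryCommandBuffer below.  Assuming gctUINT32: */
++static gctUINT32
++_SketchFeLoadState(
++    gctUINT32 StateAddress,
++    gctUINT32 Count
++    )
++{
++    return (0x01u << 27)              /* LOAD_STATE opcode          */
++         | ((Count & 0x3FFu) << 16)   /* number of state words      */
++         | (StateAddress & 0xFFFFu);  /* address of the first state */
++}
++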
++/*******************************************************************************
++**
++** gckHARDWARE_Link
++**
++** Append a LINK command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the LINK command at or gcvNULL just to query the size of the LINK
++** command.
++**
++** gctPOINTER FetchAddress
++** Logical address of destination of LINK.
++**
++** gctSIZE_T FetchSize
++** Number of bytes in destination of LINK.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the LINK command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER FetchAddress,
++ IN gctSIZE_T FetchSize,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctUINT32 address;
++ gctUINT32 link;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu "
++ "*Bytes=%lu",
++ Hardware, Logical, FetchAddress, FetchSize,
++ gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical address to hardware address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, FetchAddress, &address));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical + 1, address));
++
++ /* Make sure the address got written before the LINK command. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++ /* Compute number of bytes to fetch, rounded up to 64-bit (8-byte) alignment. */
++ bytes = gcmALIGN(address + FetchSize, 8) - address;
++
++ /* Append LINK(bytes / 8), FetchAddress. */
++ link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical, link));
++
++ /* Memory barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical));
++
++#if gcdLINK_QUEUE_SIZE && gcdVIRTUAL_COMMAND_BUFFER
++ if (address >= 0x80000000)
++ {
++ gckLINKQUEUE_Enqueue(&Hardware->linkQueue, address, address + bytes);
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
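++/* Illustrative sketch, not part of the patch: LINK is a two-word command --
++** word 1 is the target GPU address, word 0 is opcode 0x08 with the prefetch
++** size in 64-bit units.  The driver stores the address first and places a
++** memory barrier between the two stores so the front end can never fetch a
++** LINK whose target is still stale.  Sketch assumes an 8-byte aligned target
++** and the driver's gctUINT32 type; __sync_synchronize() stands in for
++** gckOS_MemoryBarrier(). */
++static void
++_SketchFeLink(
++    volatile gctUINT32 * Command,
++    gctUINT32 Target,
++    gctUINT32 FetchBytes
++    )
++{
++    Command[1] = Target;                             /* destination address      */
++    __sync_synchronize();                            /* order address vs. opcode */
++    Command[0] = (0x08u << 27)                       /* LINK opcode              */
++               | (((FetchBytes + 7) >> 3) & 0xFFFF); /* prefetch, 64-bit words   */
++}
++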
++/*******************************************************************************
++**
++** gckHARDWARE_UpdateQueueTail
++**
++** Update the tail of the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the start of the command queue.
++**
++** gctUINT32 Offset
++** Offset into the command queue of the tail (last command).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x",
++ Hardware, Logical, Offset);
++
++ /* Verify the hardware. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Force a barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ /* Notify gckKERNEL object of change. */
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ status = gckKERNEL_GetGPUAddress(Hardware->kernel, Logical, Address);
++
++ if (status == gcvSTATUS_INVALID_ADDRESS)
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
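++/* Illustrative sketch, not part of the patch: with the v0 MMU the hardware
++** address is simply the CPU physical address rebased against the reserved
++** memory base, with bit 31 forced to zero by the macro expression above.
++** Assuming the driver's gctUINT32 type: */
++static gctUINT32
++_SketchGpuAddressV0(
++    gctUINT32 Physical,
++    gctUINT32 BaseAddress
++    )
++{
++    return (Physical - BaseAddress) & 0x7FFFFFFFu;   /* bit 31 = 0, bits 30:0 = offset */
++}
++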
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertPhysical
++**
++** Convert a physical address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPHYS_ADDR Physical
++** Physical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertPhysical(
++ IN gckHARDWARE Hardware,
++ IN gctPHYS_ADDR Physical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x", Hardware, Physical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ address = gcmPTR2INT(Physical);
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkVERIFY_OK(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Interrupt
++**
++** Process an interrupt.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctBOOL InterruptValid
++** If gcvTRUE, this function will read the interrupt acknowledge
++** register, store the data, and return whether or not the interrupt
++** is ours. If gcvFALSE, this function will read the interrupt
++** acknowledge register and combine it with any stored value to handle
++** the event notifications.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL InterruptValid
++ )
++{
++ gckEVENT eventObj;
++ gctUINT32 data;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x InterruptValid=%d", Hardware, InterruptValid);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Extract gckEVENT object. */
++ eventObj = Hardware->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ if (InterruptValid)
++ {
++ /* Read AQIntrAcknowledge register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++
++ if (data == 0)
++ {
++ /* Not our interrupt. */
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ }
++ else
++ {
++ /* Inform gckEVENT of the interrupt. */
++ status = gckEVENT_Interrupt(eventObj, data);
++ }
++ }
++ else
++ {
++ /* Handle events. */
++ status = gckEVENT_Notify(eventObj, 0);
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
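++/* Illustrative sketch, not part of the patch: the InterruptValid flag splits
++** the work between a hard IRQ handler and a deferred handler.  The names
++** below (hw, event_work) are hypothetical; the real hook-up lives in the OS
++** layer, so the sketch is compiled out. */
++#if 0
++static irqreturn_t _SketchIsr(int irq, void *data)
++{
++    gckHARDWARE hw = data;
++
++    /* Top half: read/acknowledge the interrupt and claim it if it is ours. */
++    if (gckHARDWARE_Interrupt(hw, gcvTRUE) != gcvSTATUS_OK)
++    {
++        return IRQ_NONE;
++    }
++
++    /* Bottom half later calls gckHARDWARE_Interrupt(hw, gcvFALSE) to
++    ** dispatch the queued event notifications. */
++    schedule_work(&event_work);
++    return IRQ_HANDLED;
++}
++#endif
++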
++/*******************************************************************************
++**
++** gckHARDWARE_QueryCommandBuffer
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Alignment
++** Pointer to a variable receiving the alignment for each command.
++**
++** gctSIZE_T * ReservedHead
++** Pointer to a variable receiving the number of reserved bytes at the
++** head of each command buffer.
++**
++** gctSIZE_T * ReservedTail
++** Pointer to a variable receiving the number of bytes reserved at the
++** tail of each command buffer.
++*/
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * Alignment,
++ OUT gctSIZE_T * ReservedHead,
++ OUT gctSIZE_T * ReservedTail
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Alignment != gcvNULL)
++ {
++ /* Align every 8 bytes. */
++ *Alignment = 8;
++ }
++
++ if (ReservedHead != gcvNULL)
++ {
++ /* Reserve space for SelectPipe(). */
++ *ReservedHead = 32;
++ }
++
++ if (ReservedTail != gcvNULL)
++ {
++ /* Reserve space for Link(). */
++ *ReservedTail = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu",
++ gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead),
++ gcmOPT_VALUE(ReservedTail));
++ return gcvSTATUS_OK;
++}
++
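++/* Illustrative usage sketch, not part of the patch: sizing a command buffer
++** from these queries.  `PayloadBytes' is a hypothetical input; the helper is
++** not referenced by the driver. */
++static gceSTATUS
++_SketchCommandBufferSize(
++    IN gckHARDWARE Hardware,
++    IN gctSIZE_T PayloadBytes,
++    OUT gctSIZE_T * BufferSize
++    )
++{
++    gceSTATUS status;
++    gctSIZE_T alignment, reservedHead, reservedTail;
++
++    gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++        Hardware, &alignment, &reservedHead, &reservedTail));
++
++    /* 32 head bytes leave room for the PipeSelect prologue, 8 tail bytes
++    ** for the terminating LINK; the payload itself stays 8-byte aligned. */
++    *BufferSize = reservedHead
++                + gcmALIGN(PayloadBytes, alignment)
++                + reservedTail;
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++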
++/*******************************************************************************
++**
++** gckHARDWARE_QuerySystemMemory
++**
++** Query the size and base address of the system memory.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++** Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = 1U << 31;
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu",
++ gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress));
++ return gcvSTATUS_OK;
++}
++
++#ifndef VIVANTE_NO_3D
++/*******************************************************************************
++**
++** gckHARDWARE_QueryShaderCaps
++**
++** Query the shader capabilities.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT * VertexUniforms
++** Pointer to a variable receiving the number of uniforms in the vertex
++** shader.
++**
++** gctUINT * FragmentUniforms
++** Pointer to a variable receiving the number of uniforms in the
++** fragment shader.
++**
++** gctUINT * Varyings
++** Pointer to a variable receiving the maximum number of varyings.
++*/
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ )
++{
++ gctUINT32 vsConstMax;
++ gctUINT32 psConstMax;
++
++ gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x "
++ "FragmentUniforms=0x%x Varyings=0x%x",
++ Hardware, VertexUniforms,
++ FragmentUniforms, Varyings);
++
++ if ((Hardware->identity.chipModel == gcv2000)
++ && (Hardware->identity.chipRevision == 0x5118))
++ {
++ vsConstMax = 256;
++ psConstMax = 64;
++ }
++ else if (Hardware->identity.numConstants > 256)
++ {
++ vsConstMax = 256;
++ psConstMax = 256;
++ }
++ else if (Hardware->identity.numConstants == 256)
++ {
++ vsConstMax = 256;
++ psConstMax = 256;
++ }
++ else
++ {
++ vsConstMax = 168;
++ psConstMax = 64;
++ }
++
++ if (VertexUniforms != gcvNULL)
++ {
++ *VertexUniforms = vsConstMax;
++ }
++
++ if (FragmentUniforms != gcvNULL)
++ {
++ *FragmentUniforms = psConstMax;
++ }
++
++ if (Varyings != gcvNULL)
++ {
++ /* Return the shader varyings count. */
++ *Varyings = Hardware->identity.varyingsCount;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert the logical address into a hardware address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, Logical, &address));
++
++ /* Also get the base address - we need a real physical address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setting page table to 0x%08X",
++ address + baseAddress);
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00400,
++ address + baseAddress));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00410,
++ address + baseAddress));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00404,
++ address + baseAddress));
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00408,
++ address + baseAddress));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0040C,
++ address + baseAddress));
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
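++/* Illustrative sketch, not part of the patch: the five writes above mirror
++** the same physical page-table address into one translation register per
++** memory client.  An equivalent loop, assuming the same register offsets and
++** the driver's types: */
++static gceSTATUS
++_SketchSetPageTables(
++    IN gckHARDWARE Hardware,
++    IN gctUINT32 PageTablePhysical
++    )
++{
++    static const gctUINT32 regs[] =
++    {
++        0x00400,    /* AQMemoryFePageTable  (front end)    */
++        0x00410,    /* AQMemoryRaPageTable  (rasterizer)   */
++        0x00404,    /* AQMemoryTxPageTable  (texture)      */
++        0x00408,    /* AQMemoryPePageTable  (pixel engine) */
++        0x0040C,    /* AQMemoryPezPageTable (depth)        */
++    };
++
++    gceSTATUS status;
++    gctUINT i;
++
++    for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
++    {
++        gcmkONERROR(gckOS_WriteRegisterEx(
++            Hardware->os, Hardware->core, regs[i], PageTablePhysical));
++    }
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++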
++/*******************************************************************************
++**
++** gckHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctSIZE_T bufferSize;
++ gctBOOL commitEntered = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 flushSize;
++ gctUINT32 count;
++ gctUINT32 physical;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Flush the memory controller. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, 8, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(gckCOMMAND_Execute(command, 8));
++ }
++ else
++ {
++ flushSize = 16 * 4;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, flushSize, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ count = (bufferSize - flushSize + 7) >> 3;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(command->os, buffer, &physical));
++
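++ /* Note on the layout (inferred from the step comments below): the 16
++ ** words reserved above are used as two 8-word halves. The first half
++ ** flushes the caches, arms a PE-FE semaphore, stalls the FE until the
++ ** flush lands and LINKs to the second half at physical + 8 words; the
++ ** second half flushes the MMU the same way and LINKs past the
++ ** reservation (physical + flushSize), with count (the number of 64-bit
++ ** slots left in the reserve) placed in that final LINK's low 16-bit
++ ** field. */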
++ /* Flush cache. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[7]
++ = physical + 8 * gcmSIZEOF(gctUINT32);
++
++ /* Flush MMU cache. */
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9]
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[11]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[12]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[13]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[14]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[15]
++ = physical + flushSize;
++
++ gcmkONERROR(gckCOMMAND_Execute(command, flushSize));
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Hardware->kernel->command,
++ gcvFALSE));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMUv2
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gctUINT32 config, address;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctSIZE_T bufferSize;
++ gctBOOL commitEntered = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL config2D;
++ gctSIZE_T configSize;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d", Hardware, Enable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D);
++
++ configSize = 4 * 4;
++
++ if (config2D)
++ {
++ configSize +=
++ /* Pipe Select. */
++ 4 * 4
++ /* Configure MMU States. */
++ + 4 * 4;
++ }
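++
++ /* The first 16 bytes hold two LOAD_STATE pairs for the current pipe:
++ ** the MMU configuration and the safe address. When both 3D and 2D
++ ** pipes are present, the extra 32 bytes select pipe 1, program the
++ ** same two states there and then select pipe 0 again. */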
++
++ /* Convert logical address into physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &config));
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &address));
++
++ if (address & 0x3F)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ switch (Mode)
++ {
++ case gcvMMU_MODE_1K:
++ if (config & 0x3FF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ case gcvMMU_MODE_4K:
++ if (config & 0xFFF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
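++
++ /* Bit 0 of the configuration word selects the page table mode
++ ** (1 = 1 kB mode, 0 = 4 kB mode); the MTLB physical address occupies
++ ** the upper bits, hence the alignment checks above. */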
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, configSize, &pointer, &bufferSize
++ ));
++
++ buffer = pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1] = config;
++
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[3] = address;
++
++ if (config2D)
++ {
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[5] = 0x1;
++
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[7] = config;
++
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9] = address;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[11] = 0x0;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setup MMU: config=%08x, Safe Address=%08x\n.", config, address);
++
++ gcmkONERROR(gckCOMMAND_Execute(command, configSize));
++
++ if (FromPower == gcvFALSE)
++ {
++ /* Acquire the global semaphore to suspend power management until the
++ ** MMU is enabled. Acquire it before gckCOMMAND_ExitCommit to make
++ ** sure the GPU stays ON. */
++ gcmkONERROR(
++ gckOS_AcquireSemaphore(Hardware->os, Hardware->globalSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++ commitEntered = gcvFALSE;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "call gckCOMMAND_Stall to make sure the config is done.\n ");
++
++ gcmkONERROR(gckCOMMAND_Stall(command, FromPower));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Enable MMU through GCREG_MMU_CONTROL.");
++
++ /* Enable MMU. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0018C,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Enable) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ if (FromPower == gcvFALSE)
++ {
++ /* Release the global semaphore. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, Hardware->globalSemaphore));
++
++ acquired = gcvFALSE;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "call gckCOMMAND_Stall to check MMU available.\n");
++
++ gcmkONERROR(gckCOMMAND_Stall(command, FromPower));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "The MMU is available.\n");
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Hardware->kernel->command,
++ FromPower));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, Hardware->globalSemaphore));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)));
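++ /* Bit 31 tags the result as a virtual address; bits 30:0 pack the
++ ** page index shifted by 12 plus the in-page offset, i.e. 4 kB pages. */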
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle = 0;
++ gctINT retry, poll, pollCount;
++
++ gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++
++ /* If we have to wait, try 100 polls per millisecond. */
++ pollCount = Wait ? 100 : 1;
++
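++ /* Bit 0 of the idle register (0x00004) reports FE idle; when Wait is
++ ** set, the loop below polls it with a 1 ms delay between retries. */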
++ /* At most, try for 1 second. */
++ for (retry = 0; retry < 1000; ++retry)
++ {
++ /* If we have to wait, try 100 polls per millisecond. */
++ for (poll = pollCount; poll > 0; --poll)
++ {
++ /* Read register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* See if we have to wait for FE idle. */
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* FE is idle. */
++ break;
++ }
++ }
++
++ /* Check if we need to wait for FE and FE is busy. */
++ if (Wait && !(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Wait a little. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "%s: Waiting for idle: 0x%08X",
++ __FUNCTION__, idle);
++
++ gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ /* Return idle to caller. */
++ *Data = idle;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32 pipe;
++ gctUINT32 flush = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++ gctBOOL fcFlushStall;
++ gctUINT32 reserveBytes = 8;
++
++ gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Flush, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get current pipe. */
++ pipe = Hardware->kernel->command->pipeSelect;
++
++ fcFlushStall
++ = ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 31:31) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))))
++ && (Flush == gcvFLUSH_ALL)
++ ;
++
++ if (fcFlushStall)
++ {
++ reserveBytes += 8;
++ }
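++
++ /* A flush is a single two-word LOAD_STATE to AQFlush (8 bytes); when
++ ** fcFlushStall is set, a second two-word LOAD_STATE is appended below,
++ ** hence the extra 8 bytes. */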
++
++ /* Flush 3D color cache. */
++ if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ /* Flush 3D depth cache. */
++ if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Flush 3D texture cache. */
++ if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++
++ /* Flush 2D cache. */
++ if ((Flush & gcvFLUSH_2D) && (pipe == 0x1))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ }
++
++ /* See if there is a valid flush. */
++ if (flush == 0)
++ {
++ if (Bytes != gcvNULL)
++ {
++ /* No bytes required. */
++ *Bytes = 0;
++ }
++ }
++
++ else
++ {
++ /* Copy to command queue. */
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < reserveBytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append LOAD_STATE to AQFlush. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ if (fcFlushStall)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[3] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical + 3, logical[3]);
++ }
++
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* bytes required. */
++ *Bytes = reserveBytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d",
++ Hardware, Enable, Compression);
++
++ /* Only process if fast clear is available. */
++ if ((((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ if (Enable == -1)
++ {
++ /* Determine automatic value for fast clear. */
++ Enable = ((Hardware->identity.chipModel != gcv500)
++ || (Hardware->identity.chipRevision >= 3)
++ ) ? 1 : 0;
++ }
++
++ if (Compression == -1)
++ {
++ /* Determine automatic value for compression. */
++ Compression = Enable
++ & (((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) );
++ }
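++
++ /* Bits 20 and 21 of AQMemoryDebug are bypass bits: a 1 disables fast
++ ** clear (bit 20) or compression (bit 21), which is why the inverted
++ ** Enable/Compression values are written below. */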
++
++ /* Read AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug));
++
++ /* Set fast clear bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++ if (
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) ||
++ (Hardware->identity.chipModel >= gcv4000))
++ {
++ /* Set compression bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))) | (((gctUINT32) ((gctUINT32) (Compression == 0) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21)));
++ }
++
++ /* Write back AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ debug));
++
++ /* Store the fast clear and compression flags. */
++ Hardware->allowFastClear = Enable;
++ Hardware->allowCompression = Compression;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "FastClear=%d Compression=%d", Enable, Compression);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++}
++gcePOWER_FLAGS;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++static gctCONST_STRING
++_PowerEnum(gceCHIPPOWERSTATE State)
++{
++ const gctCONST_STRING states[] =
++ {
++ gcmSTRING(gcvPOWER_ON),
++ gcmSTRING(gcvPOWER_OFF),
++ gcmSTRING(gcvPOWER_IDLE),
++ gcmSTRING(gcvPOWER_SUSPEND),
++ gcmSTRING(gcvPOWER_SUSPEND_ATPOWERON),
++ gcmSTRING(gcvPOWER_OFF_ATPOWERON),
++ gcmSTRING(gcvPOWER_IDLE_BROADCAST),
++ gcmSTRING(gcvPOWER_SUSPEND_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_RECOVERY),
++ gcmSTRING(gcvPOWER_ON_AUTO)
++ };
++
++ if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO))
++ {
++ return states[State - gcvPOWER_ON];
++ }
++
++ return "unknown";
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag, clock;
++ gctPOINTER buffer;
++ gctSIZE_T bytes, requested;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL broadcast = gcvFALSE;
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++ gctUINT32 process, thread;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL commandStarted = gcvFALSE;
++ gctBOOL isrStarted = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++ gctBOOL global = gcvFALSE;
++ gctBOOL globalAcquired = gcvFALSE;
++ gctBOOL configMmu = gcvFALSE;
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_RELEASE,
++ /* OFF */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
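++
++ /* flags[current][target] lists the actions required for each power
++ ** transition; both indices follow the gcvPOWER_ON / OFF / IDLE /
++ ** SUSPEND order shown in the row and column comments above. */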
++
++ /* Clocks. */
++ static const gctUINT clocks[4] =
++ {
++ /* gcvPOWER_ON */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_OFF */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_IDLE */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_SUSPEND */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++ };
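++
++ /* Each entry is a clock control register value: bits 0 and 1 gate the
++ ** core clocks (0 for ON/IDLE, 1 for OFF/SUSPEND), bits 8:2 hold the
++ ** frequency scale value (64 = full speed for ON, 1 otherwise) and
++ ** bit 9 appears to load the new scale value. */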
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d(%s)",
++ State, _PowerEnum(State));
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Before we grab any locks, see if this is actually a needed change. */
++ if (State == Hardware->chipPowerState)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from the IST, so waiting here would deadlock
++ ** if the lock holder calls gckCOMMAND_Stall(). */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++#if gcdPOWEROFF_TIMEOUT
++ else if(State == gcvPOWER_OFF && timeout == gcvTRUE)
++ {
++ /*
++ ** Try to acquire the mutex with a finite timeout:
++ ** flush_delayed_work may be running with a timeout of its own,
++ ** so waiting here forever would cause a deadlock. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, gcdPOWEROFF_TIMEOUT);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gckOS_Print("GPU Timer deadlock, exit by timeout!!!!\n");
++
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++#endif
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ clock = clocks[State];
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (State == gcvPOWER_ON)
++ {
++ clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
++ }
++#endif
++
++ if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
++ {
++#if gcdPOWER_SUSNPEND_WHEN_IDLE
++ /* Do nothing */
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++#else
++ /* The clock should be on when switching power from off to suspend. */
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
++#endif
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime has been pushed forward; give up. */
++ if (isAfter
++ /* Expect a transition start from IDLE or SUSPEND. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Power Off GPU[%d] at %u [supposed to be at %u]",
++ Hardware->core, currentTime, Hardware->powerOffTime);
++ }
++
++ if (State == gcvPOWER_ON || State == gcvPOWER_OFF)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* If this is an internal power management request, we have to check
++ ** whether we can grab the global power semaphore. If we cannot, we have
++ ** to wait until the external world changes the power management state. */
++ if (!global)
++ {
++ /* Try to acquire the global semaphore. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from thread routine which should NEVER sleep.*/
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Release the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Releasing the power mutex.");
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Wait for the semaphore. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Waiting for global semaphore.");
++ gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvTRUE;
++
++ /* Acquire the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Reacquiring the power mutex.");
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ mutexAcquired = gcvTRUE;
++
++ /* chipPowerState may be changed by external world during the time
++ ** we give up powerMutex, so updating flag now is necessary. */
++ flag = flags[Hardware->chipPowerState][State];
++
++ if (flag == 0)
++ {
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Error. */
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore again. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ else
++ {
++ if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
++ {
++ /* Acquire the global semaphore if it has not been acquired. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_OK)
++ {
++ globalAcquired = gcvTRUE;
++ }
++ else if (status != gcvSTATUS_TIMEOUT)
++ {
++ /* Other errors. */
++ gcmkONERROR(status);
++ }
++ /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
++ ** gcvSTATUS_TIMEOUT means global semaphore has already
++ ** been acquired before this operation, so even if we fail,
++ ** we should not release it in our error handling. It should be
++ ** released by the next successful global gcvPOWER_ON. */
++ }
++
++ /* Global power management can't be aborted, so sync with
++ ** proceeding last commit. */
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ for (;;)
++ {
++ /* Check if GPU is present and awake. */
++ status = _IsGPUPresent(Hardware);
++
++ /* Check if the GPU is not responding. */
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Turn off the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));
++
++ Hardware->clockState = gcvFALSE;
++ Hardware->powerState = gcvFALSE;
++
++ /* Wait a little. */
++ gckOS_Delay(os, 1);
++
++ /* Turn on the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ /* We need to initialize the hardware and start the command
++ * processor. */
++ flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
++ }
++ else
++ {
++ /* Test for error. */
++ gcmkONERROR(status);
++
++ /* Break out of loop. */
++ break;
++ }
++ }
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ gctBOOL idle;
++ gctINT32 atomValue;
++
++        /* For a global operation, all pending commits have already been
++        ** blocked by globalSemaphore or powerSemaphore. */
++ if (!global)
++ {
++ /* Check commit atom. */
++ gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));
++
++ if (atomValue > 0)
++ {
++ /* Commits are pending - abort power management. */
++ status = broadcast ? gcvSTATUS_CHIP_NOT_READY
++ : gcvSTATUS_MORE_DATA;
++ goto OnError;
++ }
++ }
++
++ if (broadcast)
++ {
++ /* Check for idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));
++
++ if (!idle)
++ {
++ status = gcvSTATUS_CHIP_NOT_READY;
++ goto OnError;
++ }
++ }
++
++ else
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvTRUE));
++ commitEntered = gcvTRUE;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ commitEntered = gcvFALSE;
++
++ /* Wait to finish all commands. */
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++ }
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ /* Stop the Isr. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++ }
++
++ /* Flush Cache before Power Off. */
++ if (flag & gcvPOWER_FLAG_POWER_OFF)
++ {
++ if (Hardware->clockState == gcvFALSE)
++ {
++            /* Turn the GPU clock (and power) back on so the cache flush can execute. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ gcvTRUE,
++ gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++
++ if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
++ {
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clocks[0]));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++ }
++
++ gcmkONERROR(gckCOMMAND_Start(command));
++
++ gcmkONERROR(_FlushCache(Hardware, command));
++
++ gckOS_Delay(gcvNULL, 1);
++
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
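++        /* The clock may have been re-enabled above for the cache flush, so
++        ** request that it be gated again further down. */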
++ flag |= gcvPOWER_FLAG_CLOCK_OFF;
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState
++ /* Don't touch clock control if dynamic frequency scaling is available. */
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
++ )
++ {
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
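++            /* Chip-specific workaround: on GC4000 revision 0x5208 clear bit 1
++            ** of the clock control value before it is written (the exact
++            ** meaning of this bit is not documented here). */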
++ if (Hardware->identity.chipModel == gcv4000
++ && Hardware->identity.chipRevision == 0x5208)
++ {
++ clock &= ~2U;
++ }
++ }
++
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++        /* Wait for the specified amount of time to let the hardware settle
++        ** when coming back from the power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
++ Hardware->allowFastClear,
++ Hardware->allowCompression));
++
++ /* Force the command queue to reload the next context. */
++ command->currContext = gcvNULL;
++
++        /* The MMU needs to be configured after the command processor starts. */
++ configMmu = gcvTRUE;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ /* Start the command processor. */
++ gcmkONERROR(gckCOMMAND_Start(command));
++ commandStarted = gcvTRUE;
++
++ if (Hardware->startIsr)
++ {
++ /* Start the Isr. */
++ gcmkONERROR(Hardware->startIsr(Hardware->isrContext, Hardware->core));
++ isrStarted = gcvTRUE;
++ }
++
++        /* Program the new MMU (MMUv2). */
++ if (Hardware->mmuVersion != 0 && configMmu)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ Hardware,
++ gcvTRUE,
++ Hardware->kernel->mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Hardware->kernel->mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvTRUE
++ ));
++ }
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++
++ if (global)
++ {
++            /* Verify that the global semaphore has already been acquired
++            ** before we release it.
++            ** If it was acquired, gckOS_TryAcquireSemaphore returns
++            ** gcvSTATUS_TIMEOUT and we release it. Otherwise the global
++            ** semaphore is acquired now, but it is still released
++            ** immediately. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status != gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++#if gcdDVFS
++ if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
++ {
++ gckDVFS_Start(Hardware->kernel->dvfs);
++ }
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commandStarted)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Stop(command, gcvFALSE));
++ }
++
++ if (isrStarted)
++ {
++ gcmkVERIFY_OK(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ }
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (globalAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++    /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagement
++**
++** Enable or disable the GPU power management function.
++** Only used during the driver initialization stage.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power management state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetGpuProfiler
++**
++** Enable or disable the GPU profiler function.
++** Only used during the driver initialization stage.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctBOOL GpuProfiler
++** GPU profiler state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->gpuProfiler = GpuProfiler;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ )
++{
++ gceSTATUS status;
++ gctUINT32 clock;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue);
++
++ gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64);
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ Hardware->powerOnFscaleVal = FscaleValue;
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
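++        /* Compose the new clock control value: the expanded macros below keep
++        ** bits 0 and 1 cleared, program FscaleValue into bits 8:2 and set
++        ** bit 9 to latch the new scale factor (bit positions taken from the
++        ** expanded register macros). */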
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ /* Restore all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ )
++{
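++    /* Report the currently programmed frequency scale value. The minimum is
++    ** taken from gpu3DMinClock for the major (3D) core when it is within
++    ** range, otherwise 1; the maximum is fixed at 64. */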
++ *FscaleValue = Hardware->powerOnFscaleVal;
++ if ((gpu3DMinClock > 0) && (gpu3DMinClock <= 64) && (Hardware->core == gcvCORE_MAJOR))
++ *MinFscaleValue = gpu3DMinClock;
++ else
++ *MinFscaleValue = 1;
++ *MaxFscaleValue = 64;
++
++ return gcvSTATUS_OK;
++}
++
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle, address;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* Pipe must be idle. */
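++        /* All of bits 1..7 of the idle register must read 1 here; the front
++        ** end (assumed to be bit 0) is instead checked below by comparing the
++        ** current FE address against the last WAIT/LINK sequence. */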
++ if (((((((gctUINT32) (idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
++ if ((address >= Hardware->lastWaitLink)
++ && (address <= Hardware->lastWaitLink + 16)
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ *IsIdle = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ *IsIdle = gcvFALSE;
++ }
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** Handy macros that will help in reading those debug registers.
++*/
++
++#define gcmkREAD_DEBUG_REGISTER(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &profiler->data))
++
++#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &data))
++
++#define gcmkRESET_DEBUG_REGISTER(control, block) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 15))); \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 0)))
++
++/*******************************************************************************
++**
++** gckHARDWARE_ProfileEngine2D
++**
++** Reads the profile registers available in the 2D engine and stores them in
++** the profile structure. The function also resets the pixelsRendered counter
++** on every call.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++** Pointer to a gckHARDWARE object.
++** OPTIONAL gcs2D_PROFILE_PTR Profile
++** Pointer to a gcs2D_Profile structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ )
++{
++ gceSTATUS status;
++ gcs2D_PROFILE_PTR profiler = Profile;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Profile != gcvNULL)
++ {
++ /* Read the cycle count. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &Profile->cycleCount));
++
++ /* Read pixels rendered by 2D engine. */
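++        /* The expanded macros below select debug counter index 11 via
++        ** register 0x00470 and then read the counter value from 0x00454. */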
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &profiler->pixelsRendered));
++
++ /* Reset counter. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++
++ /* PE */
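++        /* Debug counter indices 0..3 select, in order, the pixels killed by
++        ** the color pipe, killed by the depth pipe, drawn by the color pipe
++        ** and drawn by the depth pipe (mapping taken from the destination
++        ** variables of the reads below). */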
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ if(Reset){
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));}
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));}
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));}
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));}
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));}
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));}
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));}
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
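++/* Accumulate each counter just sampled into the per-context history profiler. */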
++#define gcmkUPDATE_PROFILE_DATA(data) \
++ profilerHistroy->data += profiler->data
++
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = Hardware->kernel->command;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os, command->mutexContextSeq, gcvINFINITE
++ ));
++
++ /* Read the counters. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ profiler, &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ if (Reset)
++ {
++ /* Reset counters. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os, command->mutexContextSeq
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = &Context->latestProfiler;
++ gcsPROFILER_COUNTERS * profilerHistroy = &Context->histroyProfiler;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 temp;
++ gctBOOL needResetShader = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ chipModel = Hardware->identity.chipModel;
++ chipRevision = Hardware->identity.chipRevision;
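++ /* For these chips the SH counters below are handled as software deltas: each raw value is latched and the previously latched value is subtracted. */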
++ if (chipModel == gcv2000 || (chipModel == gcv2100 && chipRevision == 0x5118))
++ {
++ needResetShader = gcvTRUE;
++ }
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuTotalCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuIdleCyclesCounter);
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++ /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
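++ /* The debug counters are per pixel pipe; the pipe index is programmed into bits 23:20 of the clock-control register (0x00000). */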
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++ gcmkUPDATE_PROFILE_DATA(gpuTotalRead64BytesPerFrame);
++ gcmkUPDATE_PROFILE_DATA(gpuTotalWrite64BytesPerFrame);
++
++ /* PE */
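++ /* Each block follows the same pattern: program the counter index into the block's field of a debug-control register (here bits 23:16 of 0x00470 for PE), then read the value from the block's debug-data register (0x00454). */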
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_depth_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_depth_pipe);
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->ps_inst_counter;
++ profiler->ps_inst_counter -= Context->prevPSInstCount;
++ Context->prevPSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(ps_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_pixel_counter;
++ profiler->rendered_pixel_counter -= Context->prevPSPixelCount;
++ Context->prevPSPixelCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_pixel_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vs_inst_counter;
++ profiler->vs_inst_counter -= Context->prevVSInstCount;
++ Context->prevVSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vs_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_vertice_counter;
++ profiler->rendered_vertice_counter -= Context->prevVSVertexCount;
++ Context->prevVSVertexCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_vertice_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_branch_inst_counter;
++ profiler->vtx_branch_inst_counter -= Context->prevVSBranchInstCount;
++ Context->prevVSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_texld_inst_counter;
++ profiler->vtx_texld_inst_counter -= Context->prevVSTexInstCount;
++ Context->prevVSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_branch_inst_counter;
++ profiler->pxl_branch_inst_counter -= Context->prevPSBranchInstCount;
++ Context->prevPSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_texld_inst_counter;
++ profiler->pxl_texld_inst_counter -= Context->prevPSTexInstCount;
++ Context->prevPSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_vtx_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_output_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_depth_clipped_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_trivial_rejected_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_culled_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_triangle_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_lines_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_pixel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_quad_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_quad_count_after_early_z);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_primitive_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_pipe_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_prefetch_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_bilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_trilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_discarded_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_in_8B_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_hit_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_IP);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_write_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_read_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_data_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ for (;;)
++ {
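++ /* Repeat the whole reset sequence until the idle register and the clock-control status bits read back as expected. */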
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ Hardware->powerBaseAddress +
++ 0x00104,
++ 0x00000000));
++
++ control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable pulse-eater. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Wait for the clock to stabilize. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctUINT32 process, thread;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->identity.chipRevision < 0x4600)
++ {
++ /* Not supported - we need the isolation bit. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
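++ /* Try to take the power mutex with a zero timeout; if that times out and the holder is the current process/thread, the reset was requested from inside power management and we simply return. */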
++ status = gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread))
++ {
++ /* No way to recover from an error in power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ mutexAcquired = gcvTRUE;
++ }
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(
++ gckOS_AcquireSemaphore(Hardware->os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if ((Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_IDLE)
++ )
++ {
++ /* Stop the command processor. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvTRUE));
++ }
++
++ /* Stop the ISR; it will be started again when the GPU is powered back on. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++
++ /* Hardware reset. */
++ status = gckOS_ResetGPU(Hardware->os, Hardware->core);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Soft reset. */
++ gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core));
++ }
++
++ /* Force an OFF to ON power switch. */
++ Hardware->chipPowerState = gcvPOWER_OFF;
++
++ gcmkONERROR(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Test if we have a new Memory Controller. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))))
++ {
++ /* No base address required. */
++ *BaseAddress = 0;
++ }
++ else
++ {
++ /* Get the base address from the OS. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, BaseAddress));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ )
++{
++ gctBOOL need = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL);
++
++ /* Make sure this is a load state. */
++ if (((((gctUINT32) (State)) >> (0 ? 31:27) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))))
++ {
++#ifndef VIVANTE_NO_3D
++ /* Get the state address. */
++ switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) ))
++ {
++ case 0x0596:
++ case 0x0597:
++ case 0x0599:
++ case 0x059A:
++ case 0x05A9:
++ /* These states need a TRUE physical address. */
++ need = gcvTRUE;
++ break;
++ }
++#else
++ /* 2D addresses don't need a base address. */
++#endif
++ }
++
++ /* Return the flag. */
++ *NeedBase = need;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Hardware=0x%x, StartIsr=0x%x, StopIsr=0x%x, Context=0x%x",
++ Hardware, StartIsr, StopIsr, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (StartIsr == gcvNULL ||
++ StopIsr == gcvNULL ||
++ Context == gcvNULL)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ Hardware->startIsr = StartIsr;
++ Hardware->stopIsr = StopIsr;
++ Hardware->isrContext = Context;
++
++ /* Success. */
++ gcmkFOOTER();
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Compose
++**
++** Start a composition.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gceSTATUS status;
++ gctUINT32_PTR triggerState;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x Logical=0x%x"
++ " Offset=%d Size=%d EventID=%d",
++ Hardware, Physical, Logical, Offset, Size, EventID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(((Size + 8) & 63) == 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Program the trigger state. */
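++ /* The two-word trigger state is placed right after the composition commands (at Logical + Offset + Size); the second word carries the completion event ID in bits 20:16. */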
++ triggerState = (gctUINT32_PTR) ((gctUINT8_PTR) Logical + Offset + Size);
++ triggerState[0] = 0x0C03;
++ triggerState[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16))) | (((gctUINT32) ((gctUINT32) (EventID) & ((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16)))
++ ;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Hardware->os, ProcessID, gcvNULL,
++ Physical, Logical, Offset + Size
++ ));
++#endif
++
++ /* Start composition. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os, Hardware->core, 0x00554,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ /* Return the status. */
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_IsFeatureAvailable
++**
++** Verifies whether the specified feature is available in hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceFEATURE Feature
++** Feature to be verified.
++*/
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ )
++{
++ gctBOOL available;
++
++ gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Only features needed by common kernel logic are added here. */
++ switch (Feature)
++ {
++ case gcvFEATURE_END_EVENT:
++ /*available = gcmVERIFYFIELDVALUE(Hardware->identity.chipMinorFeatures2,
++ GC_MINOR_FEATURES2, END_EVENT, AVAILABLE
++ );*/
++ available = gcvFALSE;
++ break;
++ case gcvFEATURE_MC20:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))));
++ break;
++ case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING:
++ /* This feature doesn't apply to 2D cores. */
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 14:14) & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_2D:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_3D:
++#ifndef VIVANTE_NO_3D
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++#else
++ available = gcvFALSE;
++#endif
++ break;
++
++ case gcvFEATURE_HALTI2:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ default:
++ gcmkFATAL("Invalid feature has been requested.");
++ available = gcvFALSE;
++ }
++
++ /* Return result. */
++ gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_OK);
++ return available ? gcvSTATUS_TRUE : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpMMUException
++**
++** Dump the MMU debug info on an MMU exception.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ )
++{
++#if !gcdPOWER_SUSNPEND_WHEN_IDLE && !gcdPOWEROFF_TIMEOUT
++ gctUINT32 mmu, mmuStatus, address, i;
++ gctUINT32 mtlb, stlb, offset;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ Hardware->core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** MMU ERROR DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00188,
++ &mmuStatus));
++
++ gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus);
++
++ for (i = 0; i < 4; i += 1)
++ {
++ mmu = mmuStatus & 0xF;
++ mmuStatus >>= 4;
++
++ if (mmu == 0)
++ {
++ continue;
++ }
++
++ switch (mmu)
++ {
++ case 1:
++ gcmkPRINT(" MMU%d: slave not present\n", i);
++ break;
++
++ case 2:
++ gcmkPRINT(" MMU%d: page not present\n", i);
++ break;
++
++ case 3:
++ gcmkPRINT(" MMU%d: write violation\n", i);
++ break;
++
++ default:
++ gcmkPRINT(" MMU%d: unknown state\n", i);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00190 + i * 4,
++ &address));
++
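++ /* Split the faulting address into master-TLB (MTLB) index, slave-TLB (STLB) index and page offset. */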
++ mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++ offset = address & gcdMMU_OFFSET_4K_MASK;
++
++ gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address);
++
++ gcmkPRINT(" MTLB entry = %d\n", mtlb);
++
++ gcmkPRINT(" STLB entry = %d\n", stlb);
++
++ gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset);
++
++ gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address);
++
++ }
++
++ gcmkFOOTER_NO();
++#else
++ /* If the clock can be gated off automatically, the MMU debug registers
++ ** cannot be read here; build the driver with gcdPOWER_SUSPEND_WHEN_IDLE = 0
++ ** and gcdPOWEROFF_TIMEOUT = 0 to make reading them safe. */
++ gcmkPRINT("[galcore] %s(%d): MMU Exception!", __FUNCTION__, __LINE__);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpGPUState
++**
++** Dump the GPU debug registers.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ )
++{
++ static gctCONST_STRING _cmdState[] =
++ {
++ "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST",
++ "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST",
++ "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST",
++ "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST",
++ "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST",
++ "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"
++ };
++
++ static gctCONST_STRING _cmdDmaState[] =
++ {
++ "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST"
++ };
++
++ static gctCONST_STRING _cmdFetState[] =
++ {
++ "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST"
++ };
++
++ static gctCONST_STRING _reqDmaState[] =
++ {
++ "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST"
++ };
++
++ static gctCONST_STRING _calState[] =
++ {
++ "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST"
++ };
++
++ static gctCONST_STRING _veReqState[] =
++ {
++ "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST"
++ };
++
++ static gcsiDEBUG_REGISTERS _dbgRegs[] =
++ {
++ { "RA", 0x474, 16, 0x448, 16, 0x12344321 },
++ { "TX", 0x474, 24, 0x44C, 16, 0x12211221 },
++ { "FE", 0x470, 0, 0x450, 16, 0xBABEF00D },
++ { "PE", 0x470, 16, 0x454, 16, 0xBABEF00D },
++ { "DE", 0x470, 8, 0x458, 16, 0xBABEF00D },
++ { "SH", 0x470, 24, 0x45C, 16, 0xDEADBEEF },
++ { "PA", 0x474, 0, 0x460, 16, 0x0000AAAA },
++ { "SE", 0x474, 8, 0x464, 16, 0x5E5E5E5E },
++ { "MC", 0x478, 0, 0x468, 16, 0x12345678 },
++ { "HI", 0x478, 8, 0x46C, 16, 0xAAAAAAAA }
++ };
++
++ static gctUINT32 _otherRegs[] =
++ {
++ 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060,
++ 0x43c, 0x440, 0x444, 0x414,
++ };
++
++ gceSTATUS status;
++ gckKERNEL kernel;
++ gctUINT32 idle, axi;
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++ gctUINT32 dmaLow, dmaHigh;
++ gctUINT32 cmdState, cmdDmaState, cmdFetState;
++ gctUINT32 dmaReqState, calState, veReqState;
++ gctUINT i;
++ gctUINT pipe, pixelPipes;
++ gctUINT32 control, oldControl;
++ gckOS os = Hardware->os;
++ gceCORE core = Hardware->core;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ kernel = Hardware->kernel;
++
++ gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ pixelPipes = Hardware->identity.pixelPipes
++ ? Hardware->identity.pixelPipes
++ : 1;
++
++ /* Reset register values. */
++ idle = axi =
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 =
++ dmaLow = dmaHigh = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkONERROR(_VerifyDMA(
++ os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++ cmdState = dmaState2 & 0x1F;
++ cmdDmaState = (dmaState2 >> 8) & 0x03;
++ cmdFetState = (dmaState2 >> 10) & 0x03;
++ dmaReqState = (dmaState2 >> 12) & 0x03;
++ calState = (dmaState2 >> 14) & 0x03;
++ veReqState = (dmaState2 >> 16) & 0x03;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x004, &idle));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00C, &axi));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x668, &dmaLow));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x66C, &dmaHigh));
++
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT_N(4, " axi = 0x%08X\n", axi);
++
++ gcmkPRINT_N(4, " idle = 0x%08X\n", idle);
++ if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n");
++ if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n");
++ if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n");
++ if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n");
++ if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n");
++ if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n");
++ if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n");
++ if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n");
++ if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n");
++ if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n");
++ if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n");
++ if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n");
++ if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, " AXI low power mode\n");
++
++ if (
++ (dmaAddress1 == dmaAddress2)
++ && (dmaState1 == dmaState2)
++ )
++ {
++ gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ }
++ else
++ {
++ if (dmaAddress1 == dmaAddress2)
++ {
++ gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState2);
++ }
++ else
++ {
++ gcmkPRINT_N(0, " DMA is running; known addresses are:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2);
++ }
++ }
++ gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow);
++ gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh);
++ gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2);
++ gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]);
++ gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]);
++ gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]);
++ gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]);
++ gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]);
++ gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]);
++
++ /* Record control. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &oldControl);
++
++ for (pipe = 0; pipe < pixelPipes; pipe++)
++ {
++ gcmkPRINT_N(4, " Debug registers of pipe[%d]:\n", pipe);
++
++ /* Switch pipe. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &control);
++ control &= ~(0xF << 20);
++ control |= (pipe << 20);
++ gckOS_WriteRegisterEx(os, core, 0x0, control);
++
++ for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1)
++ {
++ gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i]));
++ }
++
++ gcmkPRINT_N(0, " Other Registers:\n");
++ for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1)
++ {
++ gctUINT32 read;
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read));
++ gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read);
++ }
++ }
++
++ if (kernel->hardware->identity.chipFeatures & (1 << 4))
++ {
++ gctUINT32 read0, read1, write;
++
++ read0 = read1 = write = 0;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x43C, &read0));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x440, &read1));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x444, &write));
++
++ gcmkPRINT_N(4, " read0 = 0x%08X\n", read0);
++ gcmkPRINT_N(4, " read1 = 0x%08X\n", read1);
++ gcmkPRINT_N(4, " write = 0x%08X\n", write);
++ }
++
++ /* Restore control. */
++ gckOS_WriteRegisterEx(os, core, 0x0, oldControl);
++
++ /* dump stack. */
++ gckOS_DumpCallStack(os);
++
++OnError:
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++#if gcdFRAME_DB
++static gceSTATUS
++gckHARDWARE_ReadPerformanceRegister(
++ IN gckHARDWARE Hardware,
++ IN gctUINT PerformanceAddress,
++ IN gctUINT IndexAddress,
++ IN gctUINT IndexShift,
++ IN gctUINT Index,
++ OUT gctUINT32_PTR Value
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x "
++ "IndexShift=%u Index=%u",
++ Hardware, PerformanceAddress, IndexAddress, IndexShift,
++ Index);
++
++ /* Write the index. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ IndexAddress,
++ Index << IndexShift));
++
++ /* Read the register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ PerformanceAddress,
++ Value));
++
++ /* Test for reset. */
++ if (Index == 15)
++ {
++ /* Index another register to get out of reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=0x%x", *Value);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ )
++{
++ gceSTATUS status;
++ gctUINT i, clock;
++ gcsHAL_FRAME_INFO info;
++#if gcdFRAME_DB_RESET
++ gctUINT reset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Get profile tick. */
++ gcmkONERROR(gckOS_GetProfileTick(&info.ticks));
++
++ /* Read SH counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 4,
++ &info.shaderCycles));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 9,
++ &info.vsInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 12,
++ &info.vsTextureCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 7,
++ &info.psInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 14,
++ &info.psTextureCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read PA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 3,
++ &info.vertexCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 4,
++ &info.primitiveCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 7,
++ &info.rejectedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 8,
++ &info.culledPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 6,
++ &info.clippedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 5,
++ &info.outPrimitives));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 15,
++ &reset));
++#endif
++
++ /* Read RA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 3,
++ &info.inPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 11,
++ &info.culledQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 1,
++ &info.totalQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 2,
++ &info.quadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 0,
++ &info.totalPixelCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Read TX counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 0,
++ &info.bilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 1,
++ &info.trilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 8,
++ &info.txHitCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 9,
++ &info.txMissCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 6,
++ &info.txBytes8));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* Read cycle registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &info.cycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &info.idleCycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &info.mcCycles[i]));
++
++ /* Read bandwidth registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0005C,
++ &info.readRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &info.readBytes8[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00050,
++ &info.writeRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &info.writeBytes8[i]));
++
++ /* Read PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 0,
++ &info.colorKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 2,
++ &info.colorDrawn[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 1,
++ &info.depthKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 3,
++ &info.depthDrawn[i]));
++ }
++
++    /* Zero out remaining reserved counters. */
++ for (; i < 8; ++i)
++ {
++ info.readBytes8[i] = 0;
++ info.writeBytes8[i] = 0;
++ info.cycles[i] = 0;
++ info.idleCycles[i] = 0;
++ info.mcCycles[i] = 0;
++ info.readRequests[i] = 0;
++ info.writeRequests[i] = 0;
++ info.colorKilled[i] = 0;
++ info.colorDrawn[i] = 0;
++ info.depthKilled[i] = 0;
++ info.depthDrawn[i] = 0;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset cycle and bandwidth counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 1));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ 0));
++
++#if gcdFRAME_DB_RESET
++ /* Reset PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Copy to user. */
++ gcmkONERROR(gckOS_CopyToUserData(Hardware->os,
++ &info,
++ FrameInfo,
++ gcmSIZEOF(info)));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if gcdDVFS
++#define READ_FROM_EATER1 0
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ )
++{
++ gctUINT32 debug1;
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Load != gcvNULL);
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00110,
++ Load));
++#if READ_FROM_EATER1
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00134,
++ Load));
++#endif
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00114,
++ &debug1));
++
++        /* Patch result of 0x110 with result of 0x114: when a byte of the
++        ** debug1 register reads 1, force the corresponding byte of the
++        ** load value to 1. */
++ if ((debug1 & 0xFF) == 1)
++ {
++ *Load &= ~0xFF;
++ *Load |= 1;
++ }
++
++ if (((debug1 & 0xFF00) >> 8) == 1)
++ {
++ *Load &= ~(0xFF << 8);
++ *Load |= 1 << 8;
++ }
++
++ if (((debug1 & 0xFF0000) >> 16) == 1)
++ {
++ *Load &= ~(0xFF << 16);
++ *Load |= 1 << 16;
++ }
++
++ if (((debug1 & 0xFF000000) >> 24) == 1)
++ {
++ *Load &= ~(0xFF << 24);
++ *Load |= 1 << 24;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 Frequency
++ )
++{
++ gceSTATUS status;
++ gctUINT32 period;
++ gctUINT32 eater;
++
++#if READ_FROM_EATER1
++ gctUINT32 period1;
++ gctUINT32 eater1;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ period = 0;
++
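++    /* Find the smallest period exponent for which (64 << period) is at
++    ** least gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000. */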
++ while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) )
++ {
++ period++;
++ }
++
++#if READ_FROM_EATER1
++ /*
++     * Period = F * 1000 * 1000 / (60 * 16 * 1024);
++ */
++ period1 = Frequency * 6250 / 6114;
++#endif
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++        /* Get the current configuration. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &eater));
++
++        /* Change period. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))));
++
++#if READ_FROM_EATER1
++ /* Config eater1. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ &eater1));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16)))));
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 data;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
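++    /* The generated expressions below update single-bit fields of the
++    ** 0x0010C configuration register: bits 16, 18, 20 and 23 are set,
++    ** while bits 19 and 22 are cleared. */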
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "DVFS Configure=0x%X",
++ data);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h 2015-05-01 14:57:59.519427001 -0500
+@@ -0,0 +1,136 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_h_
++#define __gc_hal_kernel_hardware_h_
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_hardware_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* gckHARDWARE object. */
++struct _gckHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++    /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++    /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Chip characteristics. */
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctBOOL allowFastClear;
++ gctBOOL allowCompression;
++ gctUINT32 powerBaseAddress;
++ gctBOOL extraEventStates;
++
++ /* Big endian */
++ gctBOOL bigEndian;
++
++ /* Chip status */
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gctUINT32 lastWaitLink;
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER globalSemaphore;
++
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++
++ gctUINT32 mmuVersion;
++
++ /* Type */
++ gceHARDWARE_TYPE type;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctPOINTER pageTableDirty;
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ /* FSCALE_VAL when gcvPOWER_ON. */
++ gctUINT32 powerOnFscaleVal;
++#endif
++
++#if gcdLINK_QUEUE_SIZE
++ struct _gckLINKQUEUE linkQueue;
++#endif
++
++ gctBOOL powerManagement;
++ gctBOOL gpuProfiler;
++};
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ );
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ );
++
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/config linux-3.14.40/drivers/mxc/gpu-viv/v4/config
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/config 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/config 2015-05-01 14:57:59.519427001 -0500
+@@ -0,0 +1,38 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++ARCH_TYPE ?= arm
++SDK_DIR ?= $(AQROOT)/build/sdk
++USE_3D_VG ?= 1
++FORCE_ALL_VIDEO_MEMORY_CACHED ?= 0
++NONPAGED_MEMORY_CACHEABLE ?= 0
++NONPAGED_MEMORY_BUFFERABLE ?= 1
++CACHE_FUNCTION_UNIMPLEMENTED ?= 0
++VIVANTE_ENABLE_VG ?= 1
++NO_USER_DIRECT_ACCESS_FROM_KERNEL ?= 1
++VIVANTE_NO_3D ?= 0
++ENABLE_OUTER_CACHE_PATCH ?= 1
++USE_BANK_ALIGNMENT ?= 1
++BANK_BIT_START ?= 13
++BANK_BIT_END ?= 15
++BANK_CHANNEL_BIT ?= 12
++ENABLE_GPU_CLOCK_BY_DRIVER = 1
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.c 2015-05-01 14:57:59.523427001 -0500
+@@ -0,0 +1,3967 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#define _gcmTXT2STR(t) #t
++#define gcmTXT2STR(t) _gcmTXT2STR(t)
++const char * _VERSION = "\n\0$VERSION$"
++ gcmTXT2STR(gcvVERSION_MAJOR) "."
++ gcmTXT2STR(gcvVERSION_MINOR) "."
++ gcmTXT2STR(gcvVERSION_PATCH) ":"
++ gcmTXT2STR(gcvVERSION_BUILD) "$\n";
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define gcmDEFINE2TEXT(d) #d
++gctCONST_STRING _DispatchText[] =
++{
++ gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_DATA),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_STALL),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS),
++#if VIVANTE_PROFILER_PERDRAW
++ gcmDEFINE2TEXT(gcvHAL_READ_PROFILER_REGISTER_SETTING),
++#endif
++ gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D),
++ gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS),
++ gcmDEFINE2TEXT(gcvHAL_SET_IDLE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_KERNEL_SETTINGS),
++ gcmDEFINE2TEXT(gcvHAL_RESET),
++ gcmDEFINE2TEXT(gcvHAL_MAP_PHYSICAL),
++ gcmDEFINE2TEXT(gcvHAL_DEBUG),
++ gcmDEFINE2TEXT(gcvHAL_CACHE),
++ gcmDEFINE2TEXT(gcvHAL_TIMESTAMP),
++ gcmDEFINE2TEXT(gcvHAL_DATABASE),
++ gcmDEFINE2TEXT(gcvHAL_VERSION),
++ gcmDEFINE2TEXT(gcvHAL_CHIP_INFO),
++ gcmDEFINE2TEXT(gcvHAL_ATTACH),
++ gcmDEFINE2TEXT(gcvHAL_DETACH)
++};
++#endif
++
++#if gcdENABLE_RECOVERY
++void
++_ResetFinishFunction(
++ gctPOINTER Data
++ )
++{
++ gckKERNEL kernel = (gckKERNEL)Data;
++
++ gckOS_AtomSet(kernel->os, kernel->resetAtom, 0);
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** IN gckDB SharedDB,
++** Pointer to a shared DB.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ )
++{
++ gckKERNEL kernel = gcvNULL;
++ gceSTATUS status;
++ gctSIZE_T i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ /* Allocate the gckKERNEL object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckKERNEL),
++ &pointer));
++
++ kernel = pointer;
++
++ /* Zero the object pointers. */
++ kernel->hardware = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->eventObj = gcvNULL;
++ kernel->mmu = gcvNULL;
++#if gcdDVFS
++ kernel->dvfs = gcvNULL;
++#endif
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->core = Core;
++
++
++ if (SharedDB == gcvNULL)
++ {
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckDB),
++ &pointer));
++
++ kernel->db = pointer;
++ kernel->dbCreated = gcvTRUE;
++ kernel->db->freeDatabase = gcvNULL;
++ kernel->db->freeRecord = gcvNULL;
++ kernel->db->dbMutex = gcvNULL;
++ kernel->db->lastDatabase = gcvNULL;
++ kernel->db->idleTime = 0;
++ kernel->db->lastIdle = 0;
++ kernel->db->lastSlowdown = 0;
++
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ kernel->db->db[i] = gcvNULL;
++ }
++
++ /* Construct a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex));
++
++        /* Construct an id-pointer database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->pointerDatabase));
++
++        /* Construct an id-pointer database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->pointerDatabaseMutex));
++ }
++ else
++ {
++ kernel->db = SharedDB;
++ kernel->dbCreated = gcvFALSE;
++ }
++
++ for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i)
++ {
++ kernel->timers[i].startTime = 0;
++ kernel->timers[i].stopTime = 0;
++ }
++
++ kernel->timeOut = gcdGPU_TIMEOUT;
++
++ /* Save context. */
++ kernel->context = Context;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ kernel->virtualBufferHead =
++ kernel->virtualBufferTail = gcvNULL;
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Os, (gctPOINTER)&kernel->virtualBufferLock));
++#endif
++
++ /* Construct atom holding number of clients. */
++ kernel->atomClients = gcvNULL;
++ gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));
++
++#if gcdENABLE_VG
++ kernel->vg = gcvNULL;
++
++ if (Core == gcvCORE_VG)
++ {
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckVGKERNEL_Construct(Os, Context, kernel, &kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Construct the gckHARDWARE object. */
++ gcmkONERROR(
++ gckHARDWARE_Construct(Os, kernel->core, &kernel->hardware));
++
++ /* Set pointer to gckKERNEL object in gckHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Initialize the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_InitializeHardware(kernel->hardware));
++
++ /* Construct the gckCOMMAND object. */
++ gcmkONERROR(
++ gckCOMMAND_Construct(kernel, &kernel->command));
++
++ /* Construct the gckEVENT object. */
++ gcmkONERROR(
++ gckEVENT_Construct(kernel, &kernel->eventObj));
++
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
++
++#if gcdENABLE_RECOVERY
++ gcmkONERROR(
++ gckOS_AtomConstruct(Os, &kernel->resetAtom));
++
++ gcmkVERIFY_OK(
++ gckOS_CreateTimer(Os,
++ (gctTIMERFUNCTION)_ResetFinishFunction,
++ (gctPOINTER)kernel,
++ &kernel->resetFlagClearTimer));
++ kernel->resetTimeStamp = 0;
++#endif
++
++#if gcdDVFS
++ if (gckHARDWARE_IsFeatureAvailable(kernel->hardware,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING))
++ {
++ gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs));
++ gcmkONERROR(gckDVFS_Start(kernel->dvfs));
++ }
++#endif
++ }
++
++ spin_lock_init(&kernel->irq_lock);
++
++#if VIVANTE_PROFILER
++ /* Initialize profile setting */
++ kernel->profileEnable = gcvFALSE;
++ kernel->profileCleanRegister = gcvTRUE;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkONERROR(gckOS_CreateSyncTimeline(Os, &kernel->timeline));
++#endif
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (kernel != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Core != gcvCORE_VG)
++#endif
++ {
++ if (kernel->eventObj != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_Destroy(kernel->eventObj));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(kernel->hardware->os,
++ kernel->hardware->core,
++ gcvFALSE,
++ gcvFALSE));
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware));
++ }
++ }
++
++ if (kernel->atomClients != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients));
++ }
++
++#if gcdENABLE_RECOVERY
++ if (kernel->resetAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->resetAtom));
++ }
++
++ if (kernel->resetFlagClearTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, kernel->resetFlagClearTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, kernel->resetFlagClearTimer));
++ }
++#endif
++
++ if (kernel->dbCreated && kernel->db != gcvNULL)
++ {
++ if (kernel->db->dbMutex != gcvNULL)
++ {
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->db->dbMutex));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel->db));
++ }
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ if (kernel->virtualBufferLock != gcvNULL)
++ {
++ /* Destroy the virtual command buffer mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->virtualBufferLock));
++ }
++#endif
++
++#if gcdDVFS
++ if (kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (kernel->timeline)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Os, kernel->timeline));
++ }
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel));
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++**  Destroy a gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ )
++{
++ gctSIZE_T i;
++ gcsDATABASE_PTR database, databaseNext;
++ gcsDATABASE_RECORD_PTR record, recordNext;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex));
++#endif
++
++ /* Destroy the database. */
++ if (Kernel->dbCreated)
++ {
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ if (Kernel->db->db[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID));
++ }
++ }
++
++ /* Free all databases. */
++ for (database = Kernel->db->freeDatabase;
++ database != gcvNULL;
++ database = databaseNext)
++ {
++ databaseNext = database->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database));
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase));
++ }
++
++ /* Free all database records. */
++ for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext)
++ {
++ recordNext = record->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex));
++
++
++ /* Destroy id-pointer database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase));
++
++ /* Destroy id-pointer database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ gcmkVERIFY_OK(gckVGKERNEL_Destroy(Kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Destroy the gckMMU object. */
++ gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
++
++        /* Destroy the gckCOMMAND object. */
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));
++
++ /* Destroy the gckEVENT object. */
++ gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj));
++
++ /* Destroy the gckHARDWARE object. */
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));
++
++#if gcdENABLE_RECOVERY
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->resetAtom));
++
++ if (Kernel->resetFlagClearTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->resetFlagClearTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->resetFlagClearTimer));
++ }
++#endif
++ }
++
++    /* Destroy the client atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->virtualBufferLock));
++#endif
++
++#if gcdDVFS
++ if (Kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline));
++#endif
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/oom.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++
++extern struct task_struct *lowmem_deathpending;
++static unsigned long lowmem_deathpending_timeout;
++
++static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
++{
++ struct task_struct *p;
++ struct task_struct *selected = NULL;
++ int tasksize;
++ int ret = -1;
++ int min_adj = 0;
++ int selected_tasksize = 0;
++ int selected_oom_adj;
++ /*
++ * If we already have a death outstanding, then
++ * bail out right away; indicating to vmscan
++ * that we have nothing further to offer on
++ * this pass.
++ *
++ */
++ if (lowmem_deathpending &&
++ time_before_eq(jiffies, lowmem_deathpending_timeout))
++ return 0;
++ selected_oom_adj = min_adj;
++
++ read_lock(&tasklist_lock);
++ for_each_process(p) {
++ struct mm_struct *mm;
++ struct signal_struct *sig;
++ gcuDATABASE_INFO info;
++ int oom_adj;
++
++ task_lock(p);
++ mm = p->mm;
++ sig = p->signal;
++ if (!mm || !sig) {
++ task_unlock(p);
++ continue;
++ }
++ oom_adj = sig->oom_adj;
++ if (oom_adj < min_adj) {
++ task_unlock(p);
++ continue;
++ }
++
++ tasksize = 0;
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++
++ task_unlock(p);
++
++ if (tasksize <= 0)
++ continue;
++
++ gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);
++
++ if (selected) {
++ if (oom_adj < selected_oom_adj)
++ continue;
++ if (oom_adj == selected_oom_adj &&
++ tasksize <= selected_tasksize)
++ continue;
++ }
++ selected = p;
++ selected_tasksize = tasksize;
++ selected_oom_adj = oom_adj;
++ }
++ if (selected) {
++ gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
++ selected->pid, selected->comm,
++ selected_oom_adj, selected_tasksize);
++ lowmem_deathpending = selected;
++ lowmem_deathpending_timeout = jiffies + HZ;
++ force_sig(SIGKILL, selected);
++ ret = 0;
++ }
++ read_unlock(&tasklist_lock);
++ return ret;
++}
++
++#endif
++
++/*******************************************************************************
++**
++** _AllocateMemory
++**
++** Private function to walk all required memory pools to allocate the requested
++** amount of video memory.
++**
++**  INPUT:
++**
++**      gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++**      gcePOOL * Pool
++**          Pointer to the pool to allocate from; updated to the pool that
++**          was actually used.
++**
++**      gctSIZE_T Bytes
++**          Number of bytes to allocate.
++**
++**      gctSIZE_T Alignment
++**          Required alignment of the allocation.
++**
++**      gceSURF_TYPE Type
++**          Type of surface the memory is allocated for.
++**
++**  OUTPUT:
++**
++**      gcePOOL * Pool
++**          Pool the memory was allocated from.
++**
++**      gcuVIDMEM_NODE_PTR * Node
++**          Pointer to a variable receiving the allocated video memory node.
++*/
++static gceSTATUS
++_AllocateMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++ gctINT loopCount;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL tileStatusInVirtual;
++ gctBOOL forceContiguous = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=%d Bytes=%lu Alignment=%lu Type=%d",
++ Kernel, *Pool, Bytes, Alignment, Type);
++
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes != 0);
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++_AllocateMemory_Retry:
++#endif
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT_FORCE_CONTIGUOUS:
++ forceContiguous = gcvTRUE;
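++        /* Intentional fall-through to the default pool selection. */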
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_CONTIGUOUS:
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_DEFAULT_FORCE_CONTIGUOUS_CACHEABLE:
++ pool = gcvPOOL_CONTIGUOUS;
++ loopCount = 1;
++ forceContiguous = gcvTRUE;
++ break;
++
++ default:
++ loopCount = 1;
++ break;
++ }
++
++ while (loopCount-- > 0)
++ {
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkONERROR(
++ gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, &node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdCONTIGUOUS_SIZE_LIMIT
++ if (Bytes > gcdCONTIGUOUS_SIZE_LIMIT && forceContiguous == gcvFALSE)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ /* Create a gcuVIDMEM_NODE from contiguous memory. */
++ status = gckVIDMEM_ConstructVirtual(Kernel, gcvTRUE, Bytes, &node);
++ }
++
++ if (gcmIS_SUCCESS(status) || forceContiguous == gcvTRUE)
++ {
++ /* Memory allocated. */
++ if(node && forceContiguous == gcvTRUE)
++ {
++ gctUINT32 physAddr=0;
++ gctUINT32 baseAddress = 0;
++
++ gcmkONERROR(
++ gckOS_LockPages(Kernel->os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ gcvFALSE,
++ &node->Virtual.logical,
++ &node->Virtual.pageCount));
++
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os,
++ node->Virtual.logical,
++ &physAddr));
++
++ gcmkONERROR(
++ gckOS_UnlockPages(Kernel->os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(physAddr >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ physAddr -= baseAddress;
++
++ if((physAddr & 0x80000000) || ((physAddr + Bytes) & 0x80000000))
++ {
++ gckOS_Print("gpu virtual memory 0x%x cannot be allocated in force contiguous request!\n", physAddr);
++
++ gcmkONERROR(gckVIDMEM_Free(node));
++
++ node = gcvNULL;
++ }
++ }
++
++ break;
++ }
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++#if gcdUSE_VIDMEM_PER_PID
++ gctUINT32 pid;
++ gckOS_GetProcessID(&pid);
++
++ status = gckKERNEL_GetVideoMemoryPoolPid(Kernel, pool, pid, &videoMemory);
++ if (status == gcvSTATUS_NOT_FOUND)
++ {
++ /* Create VidMem pool for this process. */
++ status = gckKERNEL_CreateVideoMemoryPoolPid(Kernel, pool, pid, &videoMemory);
++ }
++#else
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++#endif
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ &node);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Memory allocated. */
++ node->VidMem.pool = pool;
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++
++ else
++ if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++
++ else
++ if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to contiguous memory. */
++ pool = gcvPOOL_CONTIGUOUS;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++ tileStatusInVirtual =
++ gckHARDWARE_IsFeatureAvailable(Kernel->hardware,
++ gcvFEATURE_MC20);
++
++ if (Type == gcvSURF_TILE_STATUS && tileStatusInVirtual != gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++
++ else
++ {
++ /* Out of pools. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ if(forceContiguous == gcvTRUE)
++ {
++ if(force_contiguous_lowmem_shrink(Kernel) == 0)
++ {
++ /* Sleep 1 millisecond. */
++ gckOS_Delay(gcvNULL, 1);
++ goto _AllocateMemory_Retry;
++ }
++ }
++#endif
++ /* Nothing allocated. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return node and pool used for allocation. */
++ *Node = node;
++ *Pool = pool;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Pool=%d *Node=0x%x", *Pool, *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctBOOL FromUser
++**          Whether the call is from user space.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctSIZE_T bytes;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL locked = gcvFALSE;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctPOINTER info = gcvNULL;
++ gckCONTEXT context = gcvNULL;
++ gctUINT32 address;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctBOOL asynchronous;
++ gctPOINTER paddr = gcvNULL;
++#if !USE_NEW_LINUX_SIGNAL
++ gctSIGNAL signal;
++#endif
++ gceSURF_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
++ Kernel, FromUser, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Dispatching command %d (%s)",
++ Interface->command, _DispatchText[Interface->command]);
++#endif
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE);
++#endif
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++#endif
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Kernel->os,
++ &Interface->u.GetBaseAddress.baseAddress));
++ break;
++
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkONERROR(
++ gckHARDWARE_QueryChipIdentity(
++ Kernel->hardware,
++ &Interface->u.QueryChipIdentity));
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.MapMemory.physical);
++
++ /* Map memory. */
++ gcmkONERROR(
++ gckKERNEL_MapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes,
++ &logical));
++
++ Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ logical,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes));
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.UnmapMemory.physical);
++
++ /* Unmap memory. */
++ gcmkONERROR(
++ gckKERNEL_UnmapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.UnmapMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ break;
++
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes;
++
++ /* Allocate non-paged memory. */
++ gcmkONERROR(
++ gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateNonPagedMemory.bytes = bytes;
++ Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physical),
++ bytes));
++
++ break;
++
++ case gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER:
++#if gcdVIRTUAL_COMMAND_BUFFER
++ bytes = (gctSIZE_T) Interface->u.AllocateVirtualCommandBuffer.bytes;
++
++ gcmkONERROR(
++ gckKERNEL_AllocateVirtualCommandBuffer(
++ Kernel,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateVirtualCommandBuffer.bytes = bytes;
++ Interface->u.AllocateVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateVirtualCommandBuffer.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_COMMAND_BUFFER,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateVirtualCommandBuffer.physical),
++ bytes));
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Free non-paged memory. */
++ gcmkONERROR(
++ gckOS_FreeNonPagedMemory(Kernel->os,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical),
++ Interface->u.FreeNonPagedMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physical);
++
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateContiguousMemory.bytes;
++
++ /* Allocate contiguous memory. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateContiguousMemory.bytes = bytes;
++ Interface->u.AllocateContiguousMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateContiguousMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.AllocateContiguousMemory.logical),
++ &Interface->u.AllocateContiguousMemory.address));
++
++ gcmkVERIFY_OK(gckKERNEL_AddProcessDB(
++ Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateContiguousMemory.physical),
++ bytes));
++
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++
++ /* Free contiguous memory. */
++ gcmkONERROR(
++ gckOS_FreeContiguous(Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ Interface->u.FreeContiguousMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeContiguousMemory.physical);
++
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ type = Interface->u.AllocateLinearVideoMemory.type;
++
++ /* Allocate memory. */
++ gcmkONERROR(
++ _AllocateMemory(Kernel,
++ &Interface->u.AllocateLinearVideoMemory.pool,
++ Interface->u.AllocateLinearVideoMemory.bytes,
++ Interface->u.AllocateLinearVideoMemory.alignment,
++ Interface->u.AllocateLinearVideoMemory.type,
++ &node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ bytes = node->VidMem.bytes;
++ node->VidMem.type = type;
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_RESERVED,
++ node,
++ gcvNULL,
++ bytes));
++ }
++ else
++ {
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = type;
++
++ if(node->Virtual.contiguous)
++ {
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node,
++ gcvNULL,
++ bytes));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node,
++ gcvNULL,
++ bytes));
++ }
++
++ }
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node,
++ gcvNULL,
++ bytes));
++
++ /* Get the node. */
++ Interface->u.AllocateLinearVideoMemory.node = gcmPTR_TO_UINT64(node);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.FreeVideoMemory.node);
++#ifdef __QNXNTO__
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM
++ && node->VidMem.logical != gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ node->VidMem.logical,
++ processID,
++ node->VidMem.bytes));
++ node->VidMem.logical = gcvNULL;
++ }
++#endif
++ /* Free video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Free(node));
++
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_RESERVED,
++ node));
++ }
++ else if(node->Virtual.contiguous)
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node));
++ }
++
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node);
++
++ /* Lock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Lock(Kernel,
++ node,
++ Interface->u.LockVideoMemory.cacheable,
++ &Interface->u.LockVideoMemory.address));
++
++ locked = gcvTRUE;
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ processID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &logical));
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ Interface->u.LockVideoMemory.memory;
++#endif
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node,
++ gcvNULL,
++ 0));
++
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ node = gcmUINT64_TO_PTR(Interface->u.UnlockVideoMemory.node);
++
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Unlock(Kernel,
++ node,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++ if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
++ {
++            /* There isn't an event to unlock this node; remove the record now. */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node));
++ }
++ break;
++
++ case gcvHAL_EVENT_COMMIT:
++ /* Commit an event queue. */
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue)));
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID));
++ break;
++
++ case gcvHAL_STALL:
++ /* Stall the command queue. */
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkONERROR(
++ gckOS_MapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ Interface->u.MapUserMemory.physical,
++ (gctSIZE_T) Interface->u.MapUserMemory.size,
++ &info,
++ &Interface->u.MapUserMemory.address));
++
++ Interface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.MapUserMemory.info),
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ (gctSIZE_T) Interface->u.MapUserMemory.size));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ address = Interface->u.UnmapUserMemory.address;
++ info = gcmNAME_TO_PTR(Interface->u.UnmapUserMemory.info);
++
++ /* Unmap user memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size,
++ info,
++ address));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ Interface->u.UnmapUserMemory.size));
++#endif
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.UnmapUserMemory.info)));
++
++ gcmRELEASE_NAME(Interface->u.UnmapUserMemory.info);
++
++ break;
++
++#if !USE_NEW_LINUX_SIGNAL
++ case gcvHAL_USER_SIGNAL:
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkONERROR(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkONERROR(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++#if gcdGPU_TIMEOUT
++ if (Interface->u.UserSignal.wait == gcvINFINITE)
++ {
++ gckHARDWARE hardware;
++ gctUINT32 timer = 0;
++
++ for(;;)
++ {
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(
++ gckOS_SignalQueryHardware(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ &hardware));
++
++ if (hardware)
++ {
++                            /* This signal is bound to a hardware object,
++                            ** so the timeout is limited by Kernel->timeOut.
++ */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++
++ if (timer >= Kernel->timeOut)
++ {
++ gcmkONERROR(
++ gckOS_Broadcast(Kernel->os,
++ hardware,
++ gcvBROADCAST_GPU_STUCK));
++
++ timer = 0;
++
++                                /* If several processes try to reset the GPU, only one
++                                ** of them can do the real reset; the other processes
++                                ** still need to wait until this signal is triggered,
++                                ** which means the reset has finished.
++ */
++ continue;
++ }
++ }
++ else
++ {
++ /* Bail out on other error. */
++ gcmkONERROR(status);
++
++ /* Wait for signal successfully. */
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ }
++
++ break;
++
++ case gcvUSER_SIGNAL_MAP:
++ gcmkONERROR(
++ gckOS_MapSignal(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ (gctHANDLE)(gctUINTPTR_T)processID,
++ &signal));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_UNMAP:
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ break;
++#endif
++
++ case gcvHAL_SET_POWER_MANAGEMENT_STATE:
++ /* Set the power management state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(
++ Kernel->hardware,
++ Interface->u.SetPowerManagement.state));
++ break;
++
++ case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
++ /* Chip is not idle. */
++ Interface->u.QueryPowerManagement.isIdle = gcvFALSE;
++
++ /* Query the power management state. */
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &Interface->u.QueryPowerManagement.state));
++
++ /* Query the idle state. */
++ gcmkONERROR(
++ gckHARDWARE_QueryIdle(Kernel->hardware,
++ &Interface->u.QueryPowerManagement.isIdle));
++ break;
++
++ case gcvHAL_READ_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE);
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Read a register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Kernel->os,
++ Kernel->core,
++ Interface->u.ReadRegisterData.address,
++ &Interface->u.ReadRegisterData.data));
++ }
++ else
++ {
++                /* Chip is not in the power-on state. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++#else
++ /* No access from user land to read registers. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_WRITE_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE);
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Kernel->os,
++ Kernel->core,
++ Interface->u.WriteRegisterData.address,
++ Interface->u.WriteRegisterData.data));
++ }
++ else
++ {
++                /* Chip is not in the power-on state. */
++ Interface->u.WriteRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++#else
++ /* No access from user land to write registers. */
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_READ_ALL_PROFILE_REGISTERS:
++#if VIVANTE_PROFILER && VIVANTE_PROFILER_CONTEXT
++ /* Read profile data according to the context. */
++ gcmkONERROR(
++ gckHARDWARE_QueryContextProfile(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ gcmNAME_TO_PTR(Interface->u.RegisterProfileData.context),
++ &Interface->u.RegisterProfileData.counters));
++#elif VIVANTE_PROFILER
++ /* Read all 3D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_QueryProfileRegisters(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ &Interface->u.RegisterProfileData.counters));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_PROFILE_REGISTERS_2D:
++#if VIVANTE_PROFILER
++ /* Read all 2D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_ProfileEngine2D(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.RegisterProfileData2D.hwProfile2D)));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_GET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Get profile setting */
++ Interface->u.GetProfileSetting.enable = Kernel->profileEnable;
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++ case gcvHAL_SET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Set profile setting */
++ if(Kernel->hardware->gpuProfiler)
++ Kernel->profileEnable = Interface->u.SetProfileSetting.enable;
++ else
++ {
++ status = gcvSTATUS_NOT_SUPPORTED;
++ break;
++ }
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++#if VIVANTE_PROFILER_PERDRAW
++ case gcvHAL_READ_PROFILER_REGISTER_SETTING:
++ #if VIVANTE_PROFILER
++ Kernel->profileCleanRegister = Interface->u.SetProfilerRegisterClear.bclear;
++ #endif
++ status = gcvSTATUS_OK;
++ break;
++#endif
++
++ case gcvHAL_QUERY_KERNEL_SETTINGS:
++ /* Get kernel settings. */
++ gcmkONERROR(
++ gckKERNEL_QuerySettings(Kernel,
++ &Interface->u.QueryKernelSettings.settings));
++ break;
++
++ case gcvHAL_RESET:
++ /* Reset the hardware. */
++ gckKERNEL_Recovery(Kernel);
++ break;
++
++ case gcvHAL_DEBUG:
++ /* Set debug level and zones. */
++ if (Interface->u.Debug.set)
++ {
++ gckOS_SetDebugLevel(Interface->u.Debug.level);
++ gckOS_SetDebugZones(Interface->u.Debug.zones,
++ Interface->u.Debug.enable);
++ }
++
++ if (Interface->u.Debug.message[0] != '\0')
++ {
++ /* Print a message to the debugger. */
++ if (Interface->u.Debug.type == gcvMESSAGE_TEXT)
++ {
++ gckOS_CopyPrint(Interface->u.Debug.message);
++ }
++ else
++ {
++ gckOS_DumpBuffer(Kernel->os,
++ Interface->u.Debug.message,
++ Interface->u.Debug.messageSize,
++ gceDUMP_BUFFER_FROM_USER,
++ gcvTRUE);
++ }
++ }
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_DUMP_GPU_STATE:
++ /* Dump GPU state */
++ {
++ gceCHIPPOWERSTATE power;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ Interface->u.ReadRegisterData.data = 1;
++ gcmkVERIFY_OK(
++ gckHARDWARE_DumpGPUState(Kernel->hardware));
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkVERIFY_OK(
++ gckCOMMAND_DumpExecutingBuffer(Kernel->command));
++#endif
++ }
++ else
++ {
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ }
++ break;
++
++ case gcvHAL_DUMP_EVENT:
++ /* Dump GPU event */
++ gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj));
++
++ /* Dump Process DB. */
++ gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel));
++ break;
++
++ case gcvHAL_CACHE:
++ node = gcmUINT64_TO_PTR(Interface->u.Cache.node);
++ if (node == gcvNULL)
++ {
++            /* FIXME: The surface wraps memory that was not allocated by us,
++            ** so we don't have a physical address to handle the outer cache; ignore it. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++ else if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Video memory has no physical handles. */
++ physical = gcvNULL;
++ }
++ else
++ {
++ /* Grab physical handle. */
++ physical = node->Virtual.physical;
++ }
++
++ logical = gcmUINT64_TO_PTR(Interface->u.Cache.logical);
++ bytes = (gctSIZE_T) Interface->u.Cache.bytes;
++ switch(Interface->u.Cache.operation)
++ {
++ case gcvCACHE_FLUSH:
++ /* Clean and invalidate the cache. */
++ status = gckOS_CacheFlush(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_CLEAN:
++ /* Clean the cache. */
++ status = gckOS_CacheClean(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_INVALIDATE:
++ /* Invalidate the cache. */
++ status = gckOS_CacheInvalidate(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++
++ case gcvCACHE_MEMORY_BARRIER:
++ status = gckOS_MemoryBarrier(Kernel->os,
++ logical);
++ break;
++ default:
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ /* Check for invalid timer. */
++ if ((Interface->u.TimeStamp.timer >= gcmCOUNTOF(Kernel->timers))
++ || (Interface->u.TimeStamp.request != 2))
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Return timer results and reset timer. */
++ {
++ gcsTIMER_PTR timer = &(Kernel->timers[Interface->u.TimeStamp.timer]);
++ gctUINT64 timeDelta = 0;
++
++ if (timer->stopTime < timer->startTime )
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ timeDelta = timer->stopTime - timer->startTime;
++
++ /* Check truncation overflow. */
++ Interface->u.TimeStamp.timeDelta = (gctINT32) timeDelta;
++            /* Only bits 0 through 30 are available. */
++ if (timeDelta>>31)
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ break;
++
++ case gcvHAL_DATABASE:
++ /* Query video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_VIDEO_MEMORY,
++ &Interface->u.Database.vidMem));
++
++ /* Query non-paged memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_NON_PAGED,
++ &Interface->u.Database.nonPaged));
++
++ /* Query contiguous memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_CONTIGUOUS,
++ &Interface->u.Database.contiguous));
++
++ /* Query GPU idle time. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_IDLE,
++ &Interface->u.Database.gpuIdle));
++ break;
++
++ case gcvHAL_VIDMEM_DATABASE:
++ /* Query reserved video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_RESERVED,
++ &Interface->u.VidMemDatabase.vidMemResv));
++
++ /* Query contiguous video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ &Interface->u.VidMemDatabase.vidMemCont));
++
++ /* Query virtual video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_VIRTUAL,
++ &Interface->u.VidMemDatabase.vidMemVirt));
++
++ break;
++
++ case gcvHAL_VERSION:
++ Interface->u.Version.major = gcvVERSION_MAJOR;
++ Interface->u.Version.minor = gcvVERSION_MINOR;
++ Interface->u.Version.patch = gcvVERSION_PATCH;
++ Interface->u.Version.build = gcvVERSION_BUILD;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "KERNEL version %d.%d.%d build %u %s %s",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH,
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME);
++#endif
++ break;
++
++ case gcvHAL_CHIP_INFO:
++        /* Only one chip is reported; multi-core is not supported. */
++ Interface->u.ChipInfo.count = 1;
++ Interface->u.ChipInfo.types[0] = Kernel->hardware->type;
++ break;
++
++ case gcvHAL_ATTACH:
++ /* Attach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Attach(Kernel->command,
++ &context,
++ &bytes,
++ processID));
++
++ Interface->u.Attach.stateCount = bytes;
++ Interface->u.Attach.context = gcmPTR_TO_NAME(context);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Attach.context),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_DETACH:
++ /* Detach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Detach(Kernel->command,
++ gcmNAME_TO_PTR(Interface->u.Detach.context)));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Detach.context)));
++
++ gcmRELEASE_NAME(Interface->u.Detach.context);
++ break;
++
++ case gcvHAL_COMPOSE:
++ Interface->u.Compose.physical = gcmPTR_TO_UINT64(gcmNAME_TO_PTR(Interface->u.Compose.physical));
++ /* Start composition. */
++ gcmkONERROR(
++ gckEVENT_Compose(Kernel->eventObj,
++ &Interface->u.Compose));
++ break;
++
++ case gcvHAL_SET_TIMEOUT:
++        /* Set the timeOut value requested by user space. */
++ gckKERNEL_SetTimeOut(Kernel, Interface->u.SetTimeOut.timeOut);
++ break;
++
++#if gcdFRAME_DB
++ case gcvHAL_GET_FRAME_INFO:
++ gcmkONERROR(gckHARDWARE_GetFrameInfo(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.GetFrameInfo.frameInfo)));
++ break;
++#endif
++
++ case gcvHAL_GET_SHARED_INFO:
++ if (Interface->u.GetSharedInfo.data == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ else
++ {
++ gctUINT32 pid = Interface->u.GetSharedInfo.pid;
++ gctUINT32 dataId = Interface->u.GetSharedInfo.dataId;
++ gctSIZE_T bytes = Interface->u.GetSharedInfo.bytes;
++ gctPOINTER data = Interface->u.GetSharedInfo.data;
++ gcsDATABASE_RECORD record;
++
++ /* Find record. */
++ gcmkONERROR(
++ gckKERNEL_FindProcessDB(Kernel,
++ pid,
++ 0,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ &record));
++
++ /* Check memory size. */
++ if (bytes < record.bytes)
++ {
++ /* Insufficient memory to hold shared data. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Copy to user. */
++ status = gckOS_CopyToUserData(Kernel->os,
++ record.physical,
++ data,
++ record.bytes);
++
++ /*
++ * Remove from process db.
++             * Each time the shared info is taken, the record is erased on
++             * the kernel side.
++ */
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ pid,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId)));
++            /* Free the existing data. */
++ gcmkVERIFY_OK(
++ gckOS_FreeMemory(Kernel->os, record.physical));
++ }
++ break;
++
++ case gcvHAL_SET_SHARED_INFO:
++ {
++ gctUINT32 dataId = Interface->u.SetSharedInfo.dataId;
++ gctPOINTER data = Interface->u.SetSharedInfo.data;
++ gctUINT32 bytes = Interface->u.SetSharedInfo.bytes;
++ gctPOINTER memory = gcvNULL;
++ gcsDATABASE_RECORD record;
++
++ if (gcmIS_SUCCESS(gckKERNEL_FindProcessDB(Kernel,
++ processID,
++ 0,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ &record)))
++ {
++ /* Find a record with the same id. */
++ if (bytes != record.bytes)
++ {
++ /* Remove from process db. */
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId)));
++
++                /* Free the existing data. */
++ gcmkVERIFY_OK(
++ gckOS_FreeMemory(Kernel->os, record.physical));
++ }
++ else
++ {
++ /* Re-use allocated memory. */
++ memory = record.physical;
++ }
++ }
++
++ if ((data == gcvNULL) || (bytes == 0))
++ {
++ /* Nothing to record. */
++ break;
++ }
++
++ if (bytes > 1024)
++ {
++            /* Limit the data size. */
++ gcmkONERROR(gcvSTATUS_TOO_COMPLEX);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Allocate memory for holding shared data. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Kernel->os, bytes, &memory));
++
++ /* Add to process db. */
++ status = gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ memory,
++ bytes);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Failed to add process db. Free allocated memory. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Kernel->os, memory));
++ break;
++ }
++ }
++
++ /* Copy shared data to kernel memory. */
++ gcmkONERROR(
++ gckOS_CopyFromUserData(Kernel->os,
++ memory,
++ data,
++ bytes));
++ }
++ break;
++
++ case gcvHAL_SET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_SetFscaleValue(Kernel->hardware,
++ Interface->u.SetFscaleValue.value);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++ case gcvHAL_GET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_GetFscaleValue(Kernel->hardware,
++ &Interface->u.GetFscaleValue.value,
++ &Interface->u.GetFscaleValue.minValue,
++ &Interface->u.GetFscaleValue.maxValue);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_QUERY_RESET_TIME_STAMP:
++#if gcdENABLE_RECOVERY
++ Interface->u.QueryResetTimeStamp.timeStamp = Kernel->resetTimeStamp;
++#else
++ Interface->u.QueryResetTimeStamp.timeStamp = 0;
++#endif
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ switch (Interface->u.SyncPoint.command)
++ {
++ case gcvSYNC_POINT_CREATE:
++ gcmkONERROR(gckOS_CreateSyncPoint(Kernel->os, &syncPoint));
++
++ Interface->u.SyncPoint.syncPoint = gcmPTR_TO_UINT64(syncPoint);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSYNC_POINT_DESTROY:
++ syncPoint = gcmUINT64_TO_PTR(Interface->u.SyncPoint.syncPoint);
++
++ gcmkONERROR(gckOS_DestroySyncPoint(Kernel->os, syncPoint));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CREATE_NATIVE_FENCE:
++ {
++ gctINT fenceFD;
++ gctSYNC_POINT syncPoint =
++ gcmUINT64_TO_PTR(Interface->u.CreateNativeFence.syncPoint);
++
++ gcmkONERROR(
++ gckOS_CreateNativeFence(Kernel->os,
++ Kernel->timeline,
++ syncPoint,
++ &fenceFD));
++
++ Interface->u.CreateNativeFence.fenceFD = fenceFD;
++ }
++ break;
++#endif
++
++ default:
++ /* Invalid command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++OnError:
++ /* Save status. */
++ Interface->status = status;
++
++ if (gcmIS_ERROR(status))
++ {
++ if (locked)
++ {
++ /* Roll back the lock. */
++ gcmkVERIFY_OK(
++ gckVIDMEM_Unlock(Kernel,
++ gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node),
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous));
++
++ if (gcvTRUE == asynchronous)
++ {
++ /* Bottom Half */
++ gcmkVERIFY_OK(
++ gckVIDMEM_Unlock(Kernel,
++ gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++ }
++ }
++ }
++
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_ReleaseMutex(Kernel->os, Kernel->debugMutex);
++#endif
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
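++
++/* Illustrative sketch (not part of the original driver): one way a caller
++** could exercise the command dispatcher above, using the gcvHAL_VERSION case
++** to query the kernel driver version.  The gcsHAL_INTERFACE layout, its
++** 'command' member and the gckKERNEL_Dispatch prototype are assumed from the
++** usage in this file and may differ between driver releases.
++**
++**     gcsHAL_INTERFACE iface;
++**
++**     gckOS_ZeroMemory(&iface, gcmSIZEOF(iface));
++**     iface.command = gcvHAL_VERSION;
++**
++**     gcmkONERROR(gckKERNEL_Dispatch(kernel, gcvTRUE, &iface));
++**
++**     gcmkPRINT("galcore %d.%d.%d build %u",
++**               iface.u.Version.major, iface.u.Version.minor,
++**               iface.u.Version.patch, iface.u.Version.build);
++*/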
++
++/*******************************************************************************
++** gckKERNEL_AttachProcess
++**
++** Attach or detach a process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++**      gctBOOL Attach
++**          gcvTRUE if a new process gets attached or gcvFALSE when a process
++**          gets detached.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d", Kernel, Attach);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_AttachProcessEx(Kernel, Attach, processID));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
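++
++/* Illustrative sketch (not part of the original driver): attach and detach are
++** meant to be paired.  gckKERNEL_AttachProcess resolves the current PID itself;
++** gckKERNEL_AttachProcessEx below takes an explicit PID, which a hypothetical
++** client could use so that cleanup detaches with the same PID it attached with.
++**
++**     gctUINT32 pid;
++**
++**     gcmkONERROR(gckOS_GetProcessID(&pid));
++**     gcmkONERROR(gckKERNEL_AttachProcessEx(kernel, gcvTRUE, pid));
++**
++**     ... client work ...
++**
++**     gcmkONERROR(gckKERNEL_AttachProcessEx(kernel, gcvFALSE, pid));
++*/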
++
++/*******************************************************************************
++** gckKERNEL_AttachProcessEx
++**
++** Attach or detach a process with the given PID. Can be paired with gckKERNEL_AttachProcess
++** provided the programmer is aware of the consequences.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++**      gctBOOL Attach
++**          gcvTRUE if a new process gets attached or gcvFALSE when a process
++**          gets detached.
++**
++** gctUINT32 PID
++** PID of the process to attach or detach.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ )
++{
++ gceSTATUS status;
++ gctINT32 old;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d PID=%d", Kernel, Attach, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (Attach)
++ {
++ /* Increment the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomIncrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 0)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_FIRST_PROCESS));
++ }
++ }
++
++ if (Kernel->dbCreated)
++ {
++ /* Create the process database. */
++ gcmkONERROR(gckKERNEL_CreateProcessDB(Kernel, PID));
++ }
++ }
++ else
++ {
++ if (Kernel->dbCreated)
++ {
++ /* Clean up the process database. */
++ gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, PID));
++
++            /* Save the last known process ID. */
++ Kernel->db->lastProcessID = PID;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE);
++
++ if (status == gcvSTATUS_INTERRUPTED && Kernel->eventObj->submitTimer)
++ {
++ gcmkONERROR(gckOS_StartTimer(Kernel->os,
++ Kernel->eventObj->submitTimer,
++ 1));
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++ }
++
++ /* Decrement the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomDecrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 1)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ /* Last client detached, switch to SUSPEND power state. */
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_LAST_PROCESS));
++ }
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++ static gctBOOL baseAddressValid = gcvFALSE;
++ static gctUINT32 baseAddress;
++ gctBOOL needBase;
++ gcskLOGICAL_CACHE_PTR slot;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x *Data=0x%x",
++ Kernel, Cache, gcmOPT_POINTER(Data));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (!baseAddressValid)
++ {
++ /* Get base address. */
++ gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress));
++
++ baseAddressValid = gcvTRUE;
++ }
++
++ /* Does this state load need a base address? */
++ gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware,
++ ((gctUINT32_PTR) Data)[-1],
++ &needBase));
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ {
++ gcskLOGICAL_CACHE_PTR next;
++ gctINT i;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next, next = gcvNULL;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next
++ )
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ /* Use the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ {
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR next = gcvNULL;
++ gcskLOGICAL_CACHE_PTR oldestSlot = gcvNULL;
++ slot = gcvNULL;
++
++ if (Cache->cacheIndex != gcvNULL)
++ {
++ /* Walk the cache forwards. */
++ for (i = 1, slot = Cache->cacheIndex;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++
++ if (next == gcvNULL)
++ {
++ /* Walk the cache backwards. */
++ for (slot = Cache->cacheIndex->prev;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->prev)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ if (Cache->cacheFree != 0)
++ {
++ slot = &Cache->cache[Cache->cacheFree];
++ gcmkASSERT(slot->logical == gcvNULL);
++
++ ++ Cache->cacheFree;
++ if (Cache->cacheFree >= gcmCOUNTOF(Cache->cache))
++ {
++ Cache->cacheFree = 0;
++ }
++ }
++ else
++ {
++ /* Use the oldest cache slot. */
++ gcmkASSERT(oldestSlot != gcvNULL);
++ slot = oldestSlot;
++
++ /* Unlink from the chain. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append to the end. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Save time stamp. */
++ slot->stamp = ++ Cache->cacheStamp;
++
++ /* Save current slot for next lookup. */
++ Cache->cacheIndex = slot;
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ {
++ gctINT i;
++ gctUINT32 data = gcmPTR2INT(*Data);
++ gctUINT32 key, index;
++ gcskLOGICAL_CACHE_PTR hash;
++
++ /* Generate a hash key. */
++ key = (data >> 24) + (data >> 16) + (data >> 8) + data;
++ index = key % gcmCOUNTOF(Cache->hash);
++
++ /* Get the hash entry. */
++ hash = &Cache->hash[index];
++
++ for (slot = hash->nextHash, i = 0;
++ (slot != gcvNULL) && (i < gcdSECURE_CACHE_SLOTS);
++ slot = slot->nextHash, ++i
++ )
++ {
++ if (slot->logical == (*Data))
++ {
++ break;
++ }
++ }
++
++ if (slot == gcvNULL)
++ {
++ /* Grab from the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Unlink slot from any hash table it is part of. */
++ if (slot->prevHash != gcvNULL)
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++
++ if (hash->nextHash != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Hash Collision: logical=0x%x key=0x%08x",
++ *Data, key);
++ }
++
++ /* Insert the slot at the head of the hash list. */
++ slot->nextHash = hash->nextHash;
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot;
++ }
++ slot->prevHash = hash;
++ hash->nextHash = slot;
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ {
++ gctUINT32 index = (gcmPTR2INT(*Data) % gcdSECURE_CACHE_SLOTS) + 1;
++
++ /* Get cache slot. */
++ slot = &Cache->cache[index];
++
++ /* Check for cache miss. */
++ if (slot->logical != *Data)
++ {
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++ }
++#endif
++
++ /* Return DMA address. */
++ *Data = gcmINT2PTR(slot->dma + (needBase ? baseAddress : 0));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR slot;
++ gctUINT8_PTR ptr;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x Logical=0x%x Bytes=%lu",
++ Kernel, Cache, Logical, Bytes);
++
++ /* Do we need to flush the entire cache? */
++ if (Logical == gcvNULL)
++ {
++ /* Clear all cache slots. */
++ for (i = 1; i <= gcdSECURE_CACHE_SLOTS; ++i)
++ {
++ Cache->cache[i].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ Cache->cache[i].nextHash = gcvNULL;
++ Cache->cache[i].prevHash = gcvNULL;
++#endif
++        }
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero the hash table. */
++ for (i = 0; i < gcmCOUNTOF(Cache->hash); ++i)
++ {
++ Cache->hash[i].nextHash = gcvNULL;
++ }
++#endif
++
++ /* Reset the cache functionality. */
++ Cache->cacheIndex = gcvNULL;
++ Cache->cacheFree = 1;
++ Cache->cacheStamp = 0;
++ }
++
++ else
++ {
++ gctUINT8_PTR low = (gctUINT8_PTR) Logical;
++ gctUINT8_PTR high = low + Bytes;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ gcskLOGICAL_CACHE_PTR next;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next
++ )
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ gcskLOGICAL_CACHE_PTR next;
++
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Test if this slot is the current slot. */
++ if (slot == Cache->cacheIndex)
++ {
++ /* Move to next or previous slot. */
++ Cache->cacheIndex = (slot->next->logical != gcvNULL)
++ ? slot->next
++ : (slot->prev->logical != gcvNULL)
++ ? slot->prev
++ : gcvNULL;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Insert slot to head of cache. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->stamp = 0;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ gctINT j;
++ gcskLOGICAL_CACHE_PTR hash, next;
++
++ /* Walk all hash tables. */
++ for (i = 0, hash = Cache->hash;
++ i < gcmCOUNTOF(Cache->hash);
++ ++i, ++hash)
++ {
++ /* Walk all slots in the hash. */
++ for (j = 0, slot = hash->nextHash;
++ (j < gcdSECURE_CACHE_SLOTS) && (slot != gcvNULL);
++ ++j, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot from hash table. */
++ if (slot->prevHash == hash)
++ {
++ hash->nextHash = slot->nextHash;
++ }
++ else
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->prevHash = gcvNULL;
++ slot->nextHash = gcvNULL;
++ }
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ gctUINT32 index;
++
++ /* Loop while inside the range. */
++ for (i = 1; (low < high) && (i <= gcdSECURE_CACHE_SLOTS); ++i)
++ {
++ /* Get index into cache for this range. */
++ index = (gcmPTR2INT(low) % gcdSECURE_CACHE_SLOTS) + 1;
++ slot = &Cache->cache[index];
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Remove entry from cache. */
++ slot->logical = gcvNULL;
++ }
++
++ /* Next block. */
++ low += gcdSECURE_CACHE_SLOTS;
++ }
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ )
++{
++#if gcdENABLE_RECOVERY
++#define gcdEVENT_MASK 0x3FFFFFFF
++ gceSTATUS status;
++ gckEVENT eventObj;
++ gckHARDWARE hardware;
++#if gcdSECURE_USER
++ gctUINT32 processID;
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctUINT32 oldValue;
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++    /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Grab gckEVENT object. */
++ eventObj = Kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++#if gcdSECURE_USER
++ /* Flush the secure mapping cache. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++ gcmkONERROR(gckKERNEL_FlushTranslationCache(Kernel, cache, gcvNULL, 0));
++#endif
++
++ gcmkONERROR(
++ gckOS_AtomicExchange(Kernel->os, Kernel->resetAtom, 1, &oldValue));
++
++ if (oldValue)
++ {
++        /* Someone else will recover the GPU. */
++ return gcvSTATUS_OK;
++ }
++
++ gcmkPRINT("[galcore]: GPU[%d] hang, automatic recovery.", Kernel->core);
++
++    /* Start a timer to clear the reset flag; before the timer expires,
++    ** other recovery requests are ignored. */
++ gcmkVERIFY_OK(
++ gckOS_StartTimer(Kernel->os,
++ Kernel->resetFlagClearTimer,
++ gcdGPU_TIMEOUT - 500));
++
++
++ /* Try issuing a soft reset for the GPU. */
++ status = gckHARDWARE_Reset(hardware);
++ if (status == gcvSTATUS_NOT_SUPPORTED)
++ {
++ /* Switch to OFF power. The next submit should return the GPU to ON
++ ** state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(hardware,
++ gcvPOWER_OFF_RECOVERY));
++ }
++ else
++ {
++ /* Bail out on reset error. */
++ gcmkONERROR(status);
++ }
++
++ /* Handle all outstanding events now. */
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, gcdEVENT_MASK));
++#else
++ eventObj->pending = gcdEVENT_MASK;
++#endif
++ gcmkONERROR(gckEVENT_Notify(eventObj, 1));
++
++ /* Again in case more events got submitted. */
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, gcdEVENT_MASK));
++#else
++ eventObj->pending = gcdEVENT_MASK;
++#endif
++ gcmkONERROR(gckEVENT_Notify(eventObj, 2));
++
++ Kernel->resetTimeStamp++;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
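++
++/* Summary of the recovery entry points visible in this file: the gcvHAL_RESET
++** case in the dispatcher above calls gckKERNEL_Recovery directly, and the
++** gcvUSER_SIGNAL_WAIT case raises a gcvBROADCAST_GPU_STUCK broadcast once a
++** hardware-bound signal has waited longer than Kernel->timeOut; that broadcast
++** is assumed to end up in this routine as well.  Illustrative user-side
++** trigger, assuming the same dispatcher prototype as in the sketch above:
++**
++**     gcsHAL_INTERFACE iface;
++**
++**     gckOS_ZeroMemory(&iface, gcmSIZEOF(iface));
++**     iface.command = gcvHAL_RESET;
++**     gcmkONERROR(gckKERNEL_Dispatch(kernel, gcvTRUE, &iface));
++*/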
++
++/*******************************************************************************
++**
++** gckKERNEL_OpenUserData
++**
++** Get access to the user data.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctPOINTER StaticStorage
++** Pointer to the kernel storage where the data is to be copied if
++** NeedCopy is gcvTRUE.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to the kernel pointer that will be pointing to the data.
++*/
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d StaticStorage=0x%08X "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, StaticStorage, UserPointer, Size, KernelPointer
++ );
++
++    /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(!NeedCopy || (StaticStorage != gcvNULL));
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ if (NeedCopy)
++ {
++ /* Copy the user data to the static storage. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Kernel->os, StaticStorage, UserPointer, Size
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = StaticStorage;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map the user pointer. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ Kernel->os, UserPointer, Size, &pointer
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = pointer;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_CloseUserData
++**
++** Release resources associated with the user data connection opened by
++** gckKERNEL_OpenUserData.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctBOOL FlushData
++** If gcvTRUE, the data is written back to the user.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Kernel pointer to the data.
++*/
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d FlushData=%d "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, FlushData, UserPointer, Size, KernelPointer
++ );
++
++    /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Get a shortcut to the kernel pointer. */
++ pointer = * KernelPointer;
++
++ if (pointer != gcvNULL)
++ {
++ if (NeedCopy)
++ {
++ if (FlushData)
++ {
++ gcmkONERROR(gckOS_CopyToUserData(
++ Kernel->os, * KernelPointer, UserPointer, Size
++ ));
++ }
++ }
++ else
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Kernel->os,
++ UserPointer,
++ Size,
++ * KernelPointer
++ ));
++ }
++
++ /* Reset the kernel pointer. */
++ * KernelPointer = gcvNULL;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
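++
++/* Illustrative sketch (not part of the original driver): the intended pairing
++** of gckKERNEL_OpenUserData and gckKERNEL_CloseUserData, assuming a
++** hypothetical record type and a stack variable used as the static storage
++** when a copy is required.
++**
++**     gcsSOME_RECORD storage;
++**     gctPOINTER     record = gcvNULL;
++**
++**     gcmkONERROR(gckKERNEL_OpenUserData(
++**         kernel, needCopy, &storage, userPointer, gcmSIZEOF(storage),
++**         &record));
++**
++**     ... use 'record' as the kernel view of the user data ...
++**
++**     gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++**         kernel, needCopy, gcvTRUE, userPointer, gcmSIZEOF(storage),
++**         &record));
++*/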
++
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ )
++{
++ gcmkHEADER_ARG("Kernel=0x%x timeOut=%d", Kernel, timeOut);
++#if gcdGPU_TIMEOUT
++ Kernel->timeOut = timeOut;
++#endif
++ gcmkFOOTER_NO();
++}
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckOS os = Kernel->os;
++ gceSTATUS status;
++ gctPOINTER logical;
++ gctSIZE_T pageCount;
++ gctSIZE_T bytes = *Bytes;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(os,
++ sizeof(gckVIRTUAL_COMMAND_BUFFER),
++ (gctPOINTER)&buffer));
++
++ gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_COMMAND_BUFFER)));
++
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(os,
++ gcvFALSE,
++ bytes,
++ &buffer->physical));
++
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_LockPages(os,
++ buffer->physical,
++ bytes,
++ gcvFALSE,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->userLogical = logical;
++ }
++ else
++ {
++ gcmkONERROR(
++ gckOS_CreateKernelVirtualMapping(buffer->physical,
++ &pageCount,
++ &logical));
++ *Logical =
++ buffer->kernelLogical = logical;
++ }
++
++ buffer->pageCount = pageCount;
++ buffer->kernel = Kernel;
++
++ gcmkONERROR(gckOS_GetProcessID(&buffer->pid));
++
++ gcmkONERROR(gckMMU_AllocatePages(Kernel->mmu,
++ pageCount,
++ &buffer->pageTable,
++ &buffer->gpuAddress));
++
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ buffer->physical,
++ pageCount,
++ buffer->pageTable));
++
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu));
++
++ *Physical = buffer;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "gpuAddress = %x pageCount = %d kernelLogical = %x userLogical=%x",
++ buffer->gpuAddress, buffer->pageCount,
++ buffer->kernelLogical, buffer->userLogical);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ if (Kernel->virtualBufferHead == gcvNULL)
++ {
++ Kernel->virtualBufferHead =
++ Kernel->virtualBufferTail = buffer;
++ }
++ else
++ {
++ buffer->prev = Kernel->virtualBufferTail;
++ Kernel->virtualBufferTail->next = buffer;
++ Kernel->virtualBufferTail = buffer;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (buffer->gpuAddress)
++ {
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu, buffer->pageTable, buffer->pageCount));
++ }
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os, buffer->physical, bytes, buffer->userLogical));
++ }
++
++ if (buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(buffer->kernelLogical));
++ }
++
++ if (buffer->physical)
++ {
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, bytes));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ gckOS os;
++ gckKERNEL kernel;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)Physical;
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(buffer != gcvNULL);
++
++ kernel = buffer->kernel;
++ os = kernel->os;
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(gckOS_UnlockPages(os, buffer->physical, Bytes, Logical));
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(Logical));
++ }
++
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(kernel->mmu, buffer->pageTable, buffer->pageCount));
++
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, Bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, kernel->virtualBufferLock, gcvINFINITE));
++
++ if (buffer == kernel->virtualBufferHead)
++ {
++ if ((kernel->virtualBufferHead = buffer->next) == gcvNULL)
++ {
++ kernel->virtualBufferTail = gcvNULL;
++ }
++ }
++ else
++ {
++ buffer->prev->next = buffer->next;
++
++ if (buffer == kernel->virtualBufferTail)
++ {
++ kernel->virtualBufferTail = buffer->prev;
++ }
++ else
++ {
++ buffer->next->prev = buffer->prev;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, kernel->virtualBufferLock));
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
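++
++/* Illustrative sketch (not part of the original driver): allocating and
++** releasing a virtual command buffer.  Note that the Physical handle returned
++** by gckKERNEL_AllocateVirtualCommandBuffer is really the
++** gckVIRTUAL_COMMAND_BUFFER_PTR bookkeeping structure, which is what
++** gckKERNEL_DestroyVirtualCommandBuffer expects back; the GPU address of a
++** location inside the buffer can be resolved with gckKERNEL_GetGPUAddress
++** below.
++**
++**     gctSIZE_T    bytes    = 4096;
++**     gctPHYS_ADDR physical = gcvNULL;
++**     gctPOINTER   logical  = gcvNULL;
++**
++**     gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++**         kernel, gcvTRUE, &bytes, &physical, &logical));
++**
++**     ... fill the command buffer through 'logical' ...
++**
++**     gcmkVERIFY_OK(gckKERNEL_DestroyVirtualCommandBuffer(
++**         kernel, bytes, physical, logical));
++*/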
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctPOINTER start;
++ gctINT pid;
++
++ gcmkHEADER_ARG("Logical = %x", Logical);
++
++ gckOS_GetProcessID(&pid);
++
++ status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++    /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ if (buffer->userLogical)
++ {
++ start = buffer->userLogical;
++ }
++ else
++ {
++ start = buffer->kernelLogical;
++ }
++
++ if (Logical >= start
++ && (Logical < (start + buffer->pageCount * 4096))
++ && pid == buffer->pid
++ )
++ {
++ * Address = buffer->gpuAddress + (Logical - start);
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ )
++{
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 start;
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ start = (gctUINT32)buffer->gpuAddress;
++
++ if (GpuAddress >= start && GpuAddress < (start + buffer->pageCount * 4096))
++ {
++ /* Find a range matched. */
++ *Buffer = buffer;
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ return status;
++}
++#endif
++
++#if gcdLINK_QUEUE_SIZE
++static void
++gckLINKQUEUE_Dequeue(
++ IN gckLINKQUEUE LinkQueue
++ )
++{
++ gcmkASSERT(LinkQueue->count == gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count--;
++ LinkQueue->front = (LinkQueue->front + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ )
++{
++ if (LinkQueue->count == gcdLINK_QUEUE_SIZE)
++ {
++ gckLINKQUEUE_Dequeue(LinkQueue);
++ }
++
++ gcmkASSERT(LinkQueue->count < gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count++;
++
++ LinkQueue->data[LinkQueue->rear].start = start;
++ LinkQueue->data[LinkQueue->rear].end = end;
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessID(&LinkQueue->data[LinkQueue->rear].pid));
++
++ LinkQueue->rear = (LinkQueue->rear + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ )
++{
++ gcmkASSERT(Index >= 0 && Index < gcdLINK_QUEUE_SIZE);
++
++ *Data = &LinkQueue->data[(Index + LinkQueue->front) % gcdLINK_QUEUE_SIZE];
++}
++#endif
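++
++/* Illustrative sketch (not part of the original driver): the link queue is a
++** fixed-size ring of gcdLINK_QUEUE_SIZE entries; enqueueing into a full queue
++** silently drops the oldest record.  Assuming 'queue' is an existing
++** gckLINKQUEUE handle:
++**
++**     gckLINKDATA data;
++**     gctUINT32   i;
++**
++**     gckLINKQUEUE_Enqueue(queue, startAddress, endAddress);
++**
++**     for (i = 0; i < queue->count; i++)
++**     {
++**         gckLINKQUEUE_GetData(queue, i, &data);
++**         gcmkPRINT("link %d: 0x%08X - 0x%08X (pid %d)",
++**                   i, data->start, data->end, data->pid);
++**     }
++*/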
++
++/******************************************************************************\
++*************************** Pointer - ID translation ***************************
++\******************************************************************************/
++#define gcdID_TABLE_LENGTH 1024
++typedef struct _gcsINTEGERDB * gckINTEGERDB;
++typedef struct _gcsINTEGERDB
++{
++ gckOS os;
++ gctPOINTER* table;
++ gctPOINTER mutex;
++ gctUINT32 tableLen;
++ gctUINT32 currentID;
++ gctUINT32 unused;
++}
++gcsINTEGERDB;
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Allocate a database. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gcsINTEGERDB), (gctPOINTER *)&database));
++
++ gckOS_ZeroMemory(database, gcmSIZEOF(gcsINTEGERDB));
++
++ /* Allocate a pointer table. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH, (gctPOINTER *)&database->table));
++
++ gckOS_ZeroMemory(database->table, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH);
++
++ /* Allocate a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->mutex));
++
++ /* Initialize. */
++ database->currentID = 0;
++ database->unused = gcdID_TABLE_LENGTH;
++ database->os = Kernel->os;
++ database->tableLen = gcdID_TABLE_LENGTH;
++
++ *Database = database;
++
++ gcmkFOOTER_ARG("*Database=0x%08X", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Rollback. */
++ if (database)
++ {
++ if (database->table)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ )
++{
++ gckINTEGERDB database = Database;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Destroy pointer table. */
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++
++ /* Destroy database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->mutex));
++
++ /* Destroy database. */
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctUINT32 i, unused, currentID, tableLen;
++ gctPOINTER * table;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Pointer=0x%08X", Database, Pointer);
++
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (database->unused < 1)
++ {
++ /* Extend table. */
++ gcmkONERROR(
++ gckOS_Allocate(os,
++ gcmSIZEOF(gctPOINTER) * (database->tableLen + gcdID_TABLE_LENGTH),
++ (gctPOINTER *)&table));
++
++ gckOS_ZeroMemory(table + database->tableLen,
++ gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH);
++
++ /* Copy data from old table. */
++ gckOS_MemCopy(table,
++ database->table,
++ database->tableLen * gcmSIZEOF(gctPOINTER));
++
++ gcmkOS_SAFE_FREE(os, database->table);
++
++ /* Update database with newly allocated table. */
++ database->table = table;
++ database->currentID = database->tableLen;
++ database->tableLen += gcdID_TABLE_LENGTH;
++ database->unused += gcdID_TABLE_LENGTH;
++ }
++
++ table = database->table;
++ currentID = database->currentID;
++ tableLen = database->tableLen;
++ unused = database->unused;
++
++ /* Connect id with pointer. */
++ table[currentID] = Pointer;
++
++ *Id = currentID + 1;
++
++ /* Update the currentID. */
++ if (--unused > 0)
++ {
++ for (i = 0; i < tableLen; i++)
++ {
++ if (++currentID >= tableLen)
++ {
++ /* Wrap to the beginning. */
++ currentID = 0;
++ }
++
++ if (table[currentID] == gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ database->table = table;
++ database->currentID = currentID;
++ database->tableLen = tableLen;
++ database->unused = unused;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*Id=%d", *Id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ database->table[Id] = gcvNULL;
++
++ if (database->unused++ == 0)
++ {
++ database->currentID = Id;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctPOINTER pointer;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ pointer = database->table[Id];
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ if (pointer)
++ {
++ *Pointer = pointer;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ gcmkFOOTER_ARG("*Pointer=0x%08X", *Pointer);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gctUINT32 name;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Pointer=0x%X", Kernel, Pointer);
++
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(database, Pointer, &name));
++
++ gcmkFOOTER_ARG("name=%d", name);
++ return name;
++
++OnError:
++ gcmkFOOTER();
++ return 0;
++}
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ /* Lookup in database to get pointer. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, &pointer));
++
++ gcmkFOOTER_ARG("pointer=0x%X", pointer);
++ return pointer;
++
++OnError:
++ gcmkFOOTER();
++ return gcvNULL;
++}
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=0x%X", Kernel, Name);
++
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Name));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command.c 2015-05-01 14:57:59.523427001 -0500
+@@ -0,0 +1,3042 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_context.h"
++
++#ifdef __QNXNTO__
++#include <sys/slog.h>
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _NewQueue
++**
++** Allocate a new command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** OUTPUT:
++**
++** gckCOMMAND Command
++** gckCOMMAND object has been updated with a new command queue.
++*/
++static gceSTATUS
++_NewQueue(
++ IN OUT gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctINT currentIndex, newIndex;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Switch to the next command buffer. */
++ currentIndex = Command->index;
++ newIndex = (currentIndex + 1) % gcdCOMMAND_QUEUES;
++
++ /* Wait for availability. */
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.waitsignal]");
++#endif
++
++ gcmkONERROR(gckOS_WaitSignal(
++ Command->os,
++ Command->queues[newIndex].signal,
++ gcvINFINITE
++ ));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ if (newIndex < currentIndex)
++ {
++ Command->wrapCount += 1;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 2 * 4,
++ "%s(%d): queue array wrapped around.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): total queue wrap arounds %d.\n",
++ __FUNCTION__, __LINE__, Command->wrapCount
++ );
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): switched to queue %d.\n",
++ __FUNCTION__, __LINE__, newIndex
++ );
++#endif
++
++ /* Update gckCOMMAND object with new command queue. */
++ Command->index = newIndex;
++ Command->newQueue = gcvTRUE;
++ Command->logical = Command->queues[newIndex].logical;
++ Command->offset = 0;
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(
++ Command->os,
++ Command->logical,
++ (gctUINT32 *) &Command->physical
++ ));
++
++ if (currentIndex != -1)
++ {
++ /* Mark the command queue as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ Command->kernel->eventObj,
++ Command->queues[currentIndex].signal,
++ gcvKERNEL_COMMAND
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("Command->index=%d", Command->index);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IncrementCommitAtom(
++ IN gckCOMMAND Command,
++ IN gctBOOL Increment
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctINT32 atomValue;
++ gctBOOL powerAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, hardware->powerMutex, gcvINFINITE
++ ));
++ powerAcquired = gcvTRUE;
++
++ /* Increment the commit atom. */
++ if (Increment)
++ {
++ gcmkONERROR(gckOS_AtomIncrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ powerAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerAcquired)
++ {
++ /* Release the power mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++_ProcessHints(
++ IN gckCOMMAND Command,
++ IN gctUINT32 ProcessID,
++ IN gcoCMDBUF CommandBuffer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel;
++ gctBOOL needCopy = gcvFALSE;
++ gcskSECURE_CACHE_PTR cache;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR hintedData;
++ gctUINT32_PTR hintArray;
++ gctUINT i, hintCount;
++
++ gcmkHEADER_ARG(
++ "Command=0x%08X ProcessID=%d CommandBuffer=0x%08X",
++ Command, ProcessID, CommandBuffer
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Reset state array pointer. */
++ hintArray = gcvNULL;
++
++ /* Get the kernel object. */
++ kernel = Command->kernel;
++
++ /* Get the cache from the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++
++ /* Determine the start of the command buffer. */
++ commandBufferLogical
++ = (gctUINT8_PTR) CommandBuffer->logical
++ + CommandBuffer->startOffset;
++
++ /* Determine the number of records in the state array. */
++ hintCount = CommandBuffer->hintArrayTail - CommandBuffer->hintArray;
++
++ /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++ /* Get access to the state array. */
++ if (needCopy)
++ {
++ gctUINT copySize;
++
++ if (Command->hintArrayAllocated &&
++ (Command->hintArraySize < CommandBuffer->hintArraySize))
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArrayAllocated = gcvFALSE;
++ }
++
++ if (!Command->hintArrayAllocated)
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_Allocate(
++ Command->os,
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ Command->hintArray = gcmPTR_TO_UINT64(pointer);
++ Command->hintArrayAllocated = gcvTRUE;
++ Command->hintArraySize = CommandBuffer->hintArraySize;
++ }
++
++ hintArray = gcmUINT64_TO_PTR(Command->hintArray);
++ copySize = hintCount * gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ hintArray,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ copySize
++ ));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ hintArray = pointer;
++ }
++
++ /* Scan through the buffer. */
++ for (i = 0; i < hintCount; i += 1)
++ {
++ /* Determine the location of the hinted data. */
++ hintedData = commandBufferLogical + hintArray[i];
++
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) hintedData
++ ));
++ }
++
++OnError:
++ /* Get access to the state array. */
++ if (!needCopy && (hintArray != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ hintArray
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_FlushMMU(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckHARDWARE hardware = Command->kernel->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* Page table is updated, flush MMU before commit. */
++ gcmkONERROR(gckHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++static void
++_DumpBuffer(
++ IN gctPOINTER Buffer,
++ IN gctUINT32 GpuAddress,
++ IN gctSIZE_T Size
++ )
++{
++ gctINT i, line, left;
++ gctUINT32_PTR data = Buffer;
++
++ line = Size / 32;
++ left = Size % 32;
++
++
++ for (i = 0; i < line; i++)
++ {
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
++ data += 8;
++ GpuAddress += 8 * 4;
++ }
++
++ switch(left)
++ {
++ case 28:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
++ break;
++ case 24:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5]);
++ break;
++ case 20:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4]);
++ break;
++ case 16:
++ gcmkPRINT("%X : %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3]);
++ break;
++ case 12:
++ gcmkPRINT("%X : %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2]);
++ break;
++ case 8:
++ gcmkPRINT("%X : %08X %08X ",
++ GpuAddress, data[0], data[1]);
++ break;
++ case 4:
++ gcmkPRINT("%X : %08X ",
++ GpuAddress, data[0]);
++ break;
++ default:
++ break;
++ }
++}
++
++static void
++_DumpKernelCommandBuffer(
++ IN gckCOMMAND Command
++)
++{
++ gctINT i;
++ gctUINT32 physical;
++ gctPOINTER entry;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ entry = Command->queues[i].logical;
++
++ gckOS_GetPhysicalAddress(Command->os, entry, &physical);
++
++ gcmkPRINT("Kernel command buffer %d\n", i);
++
++ _DumpBuffer(entry, physical, Command->pageSize);
++ }
++}
++#endif
++
++/******************************************************************************\
++****************************** gckCOMMAND API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckCOMMAND_Construct
++**
++** Construct a new gckCOMMAND object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckCOMMAND * Command
++** Pointer to a variable that will hold the pointer to the gckCOMMAND
++** object.
++*/
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ )
++{
++ gckOS os;
++ gckCOMMAND command = gcvNULL;
++ gceSTATUS status;
++ gctINT i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ /* Extract the gckOS object. */
++ os = Kernel->os;
++
++ /* Allocate the gckCOMMAND structure. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), &pointer));
++ command = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(command, gcmSIZEOF(struct _gckCOMMAND)));
++
++ /* Initialize the gckCOMMAND object.*/
++ command->object.type = gcvOBJ_COMMAND;
++ command->kernel = Kernel;
++ command->os = os;
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Kernel->hardware,
++ &command->alignment,
++ &command->reservedHead,
++ &command->reservedTail
++ ));
++
++ /* Create the command queue mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexQueue));
++
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContextSeq));
++#endif
++
++ /* Create the power management semaphore. */
++ gcmkONERROR(gckOS_CreateSemaphore(os, &command->powerSemaphore));
++
++ /* Create the commit atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &command->atomCommit));
++
++ /* Get the page size from the OS. */
++ gcmkONERROR(gckOS_GetPageSize(os, &command->pageSize));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&command->kernelProcessID));
++
++ /* Set hardware to pipe 0. */
++ command->pipeSelect = gcvPIPE_INVALID;
++
++ /* Pre-allocate the command queues. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &command->pageSize,
++ &command->queues[i].physical,
++ &command->queues[i].logical
++ ));
++
++ gcmkONERROR(gckOS_CreateSignal(
++ os, gcvFALSE, &command->queues[i].signal
++ ));
++
++ gcmkONERROR(gckOS_Signal(
++ os, command->queues[i].signal, gcvTRUE
++ ));
++ }
++
++ /* No command queue in use yet. */
++ command->index = -1;
++ command->logical = gcvNULL;
++ command->newQueue = gcvFALSE;
++
++ /* Command is not yet running. */
++ command->running = gcvFALSE;
++
++ /* Command queue is idle. */
++ command->idle = gcvTRUE;
++
++ /* Commit stamp is zero. */
++ command->commitStamp = 0;
++
++ /* END event signal not created. */
++ command->endEventSignal = gcvNULL;
++
++ /* Return pointer to the gckCOMMAND object. */
++ *Command = command;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Command=0x%x", *Command);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ if (command->atomCommit != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, command->atomCommit));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(os, command->powerSemaphore));
++ }
++
++ if (command->mutexContext != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContext));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (command->mutexContextSeq != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContextSeq));
++ }
++#endif
++
++ if (command->mutexQueue != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexQueue));
++ }
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ if (command->queues[i].signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ os, command->queues[i].signal
++ ));
++ }
++
++ if (command->queues[i].logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ os,
++ command->pageSize,
++ command->queues[i].physical,
++ command->queues[i].logical
++ ));
++ }
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, command));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Destroy
++**
++** Destroy an gckCOMMAND object.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Stop the command queue. */
++ gcmkVERIFY_OK(gckCOMMAND_Stop(Command, gcvFALSE));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkASSERT(Command->queues[i].signal != gcvNULL);
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->queues[i].signal
++ ));
++
++ gcmkASSERT(Command->queues[i].logical != gcvNULL);
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Command->os,
++ Command->pageSize,
++ Command->queues[i].physical,
++ Command->queues[i].logical
++ ));
++ }
++
++ /* END event signal. */
++ if (Command->endEventSignal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->endEventSignal
++ ));
++ }
++
++ /* Delete the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (Command->mutexContextSeq != gcvNULL)
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContextSeq));
++#endif
++
++ /* Delete the command queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));
++
++ /* Destroy the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Command->os, Command->powerSemaphore));
++
++ /* Destroy the commit atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Command->os, Command->atomCommit));
++
++#if gcdSECURE_USER
++ /* Free state array. */
++ if (Command->hintArrayAllocated)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArrayAllocated = gcvFALSE;
++ }
++#endif
++
++ /* Mark object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCOMMAND object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, Command));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_EnterCommit
++**
++** Acquire command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++ ** Pointer to an gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctBOOL atomIncremented = gcvFALSE;
++ gctBOOL semaAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (!FromPower)
++ {
++ /* Increment COMMIT atom to let power management know that a commit is
++ ** in progress. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvTRUE));
++ atomIncremented = gcvTRUE;
++
++ /* Notify the system the GPU has a commit. */
++ gcmkONERROR(gckOS_Broadcast(Command->os,
++ hardware,
++ gcvBROADCAST_GPU_COMMIT));
++
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(Command->os,
++ Command->powerSemaphore));
++ semaAcquired = gcvTRUE;
++ }
++
++ /* Grab the command queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Command->os,
++ Command->mutexQueue,
++ gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (semaAcquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++ }
++
++ if (atomIncremented)
++ {
++ /* Decrement the commit atom. */
++ gcmkVERIFY_OK(_IncrementCommitAtom(
++ Command, gcvFALSE
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_ExitCommit
++**
++** Release command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++ ** Pointer to an gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Release the command queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
++
++ if (!FromPower)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(Command->os,
++ Command->powerSemaphore));
++
++ /* Decrement the commit atom. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvFALSE));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Start
++**
++** Start up the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to start.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitLinkBytes;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->running)
++ {
++ /* Command queue already running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (Command->logical == gcvNULL)
++ {
++ /* Start at beginning of a new queue. */
++ gcmkONERROR(_NewQueue(Command));
++ }
++
++ /* Start at beginning of page. */
++ Command->offset = 0;
++
++ /* Set available number of bytes for WAIT/LINK command sequence. */
++ waitLinkBytes = Command->pageSize;
++
++ /* Append WAIT/LINK. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ Command->logical,
++ 0,
++ &waitLinkBytes,
++ &waitOffset,
++ &Command->waitSize
++ ));
++
++ Command->waitLogical = (gctUINT8_PTR) Command->logical + waitOffset;
++ Command->waitPhysical = (gctUINT8_PTR) Command->physical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->physical,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Adjust offset. */
++ Command->offset = waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Enable command processor. */
++#ifdef __QNXNTO__
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->logical,
++ Command->physical,
++ gcvTRUE,
++ waitLinkBytes
++ ));
++#else
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Command queue is running. */
++ Command->running = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stop
++**
++** Stop the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to stop.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ )
++{
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (!Command->running)
++ {
++ /* Command queue is not running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (gckHARDWARE_IsFeatureAvailable(hardware,
++ gcvFEATURE_END_EVENT) == gcvSTATUS_TRUE)
++ {
++ /* Allocate the signal. */
++ if (Command->endEventSignal == gcvNULL)
++ {
++ gcmkONERROR(gckOS_CreateSignal(Command->os,
++ gcvTRUE,
++ &Command->endEventSignal));
++ }
++
++ /* Append the END EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Stop(Command->kernel->eventObj,
++ Command->kernelProcessID,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->endEventSignal,
++ &Command->waitSize));
++ }
++ else
++ {
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ hardware, Command->waitLogical, &Command->waitSize
++ ));
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(Command->kernel->hardware,
++ Command->logical,
++ Command->offset));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ /* Wait for idle. */
++ gcmkONERROR(gckHARDWARE_GetIdle(hardware, !FromRecovery, &idle));
++ }
++
++ /* Command queue is no longer running. */
++ Command->running = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Commit
++**
++** Commit a command buffer to the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** gcoCMDBUF CommandBuffer
++** Pointer to a gcoCMDBUF object.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL contextAcquired = gcvFALSE;
++ gckHARDWARE hardware;
++ gctBOOL needCopy = gcvFALSE;
++ gcsQUEUE_PTR eventRecord = gcvNULL;
++ gcsQUEUE _eventRecord;
++ gcsQUEUE_PTR nextEventRecord;
++ gctBOOL commandBufferMapped = gcvFALSE;
++ gcoCMDBUF commandBufferObject = gcvNULL;
++
++#if !gcdNULL_DRIVER
++ gcsCONTEXT_PTR contextBuffer;
++ struct _gcoCMDBUF _commandBufferObject;
++ gctPHYS_ADDR commandBufferPhysical;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR commandBufferLink;
++ gctUINT commandBufferSize;
++ gctSIZE_T nopBytes;
++ gctSIZE_T pipeBytes;
++ gctSIZE_T linkBytes;
++ gctSIZE_T bytes;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR entryPhysical;
++#endif
++ gctPOINTER entryLogical;
++ gctSIZE_T entryBytes;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR exitPhysical;
++#endif
++ gctPOINTER exitLogical;
++ gctSIZE_T exitBytes;
++ gctPHYS_ADDR waitLinkPhysical;
++ gctPOINTER waitLinkLogical;
++ gctSIZE_T waitLinkBytes;
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitSize;
++
++#if gcdDUMP_COMMAND
++ gctPOINTER contextDumpLogical = gcvNULL;
++ gctSIZE_T contextDumpBytes = 0;
++ gctPOINTER bufferDumpLogical = gcvNULL;
++ gctSIZE_T bufferDumpBytes = 0;
++# endif
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gctBOOL sequenceAcquired = gcvFALSE;
++#endif
++
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG(
++ "Command=0x%x CommandBuffer=0x%x ProcessID=%d",
++ Command, CommandBuffer, ProcessID
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->kernel->core == gcvCORE_2D)
++ {
++ /* There is no context for 2D. */
++ Context = gcvNULL;
++ }
++
++ gcmkONERROR(_FlushMMU(Command));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if((Command->kernel->hardware->gpuProfiler) && (Command->kernel->profileEnable))
++ {
++ /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContextSeq, gcvINFINITE
++ ));
++ sequenceAcquired = gcvTRUE;
++ }
++#endif
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ contextAcquired = gcvTRUE;
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++
++ /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++#if gcdNULL_DRIVER
++ /* Context switch required? */
++ if ((Context != gcvNULL) && (Command->currContext != Context))
++ {
++ /* Yes, merge in the deltas. */
++ gckCONTEXT_Update(Context, ProcessID, StateDelta);
++
++ /* Update the current context. */
++ Command->currContext = Context;
++ }
++#else
++ if (needCopy)
++ {
++ commandBufferObject = &_commandBufferObject;
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ commandBufferObject,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF)
++ ));
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ }
++ else
++ {
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ &pointer
++ ));
++
++ commandBufferObject = pointer;
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ commandBufferMapped = gcvTRUE;
++ }
++
++ /* Query the size of NOP command. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware, gcvNULL, &nopBytes
++ ));
++
++ /* Query the size of pipe select command sequence. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ hardware, gcvNULL, gcvPIPE_3D, &pipeBytes
++ ));
++
++ /* Query the size of LINK command. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware, gcvNULL, gcvNULL, 0, &linkBytes
++ ));
++
++ /* Compute the command buffer entry and the size. */
++ commandBufferLogical
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->startOffset;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Command->os,
++ commandBufferLogical,
++ (gctUINT32_PTR)&commandBufferPhysical
++ ));
++
++ commandBufferSize
++ = commandBufferObject->offset
++ + Command->reservedTail
++ - commandBufferObject->startOffset;
++
++ /* Get the current offset. */
++ offset = Command->offset;
++
++ /* Compute number of bytes left in current kernel command queue. */
++ bytes = Command->pageSize - offset;
++
++ /* Query the size of WAIT/LINK command sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ offset,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < waitLinkBytes)
++ {
++ /* No, create a new one. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Get the new current offset. */
++ offset = Command->offset;
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - offset;
++ gcmkASSERT(bytes >= waitLinkBytes);
++ }
++
++ /* Compute the location of the WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + offset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + offset;
++
++ /* Context switch required? */
++ if (Context == gcvNULL)
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ else if (Command->currContext != Context)
++ {
++ /* Temporarily disable context length optimization. */
++ Context->dirty = gcvTRUE;
++
++ /* Get the current context buffer. */
++ contextBuffer = Context->buffer;
++
++ /* Yes, merge in the deltas. */
++ gcmkONERROR(gckCONTEXT_Update(Context, ProcessID, StateDelta));
++
++ /* Determine context entry and exit points. */
++ if (0)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D and 3D are used.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++
++ /* Mark context as not dirty. */
++ Context->dirty = gcvFALSE;
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D only command buffer.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++ /* Not using 2D. */
++ else
++ {
++ /* Mark 2D as dirty. */
++ Context->dirty2D = gcvTRUE;
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 3D only command buffer.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: "XD" command buffer - neither 2D nor 3D.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_3D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom3D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom3D;
++
++ entryBytes
++ = Context->bufferSize
++ - Context->entryOffsetXDFrom3D;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom2D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom2D;
++
++ entryBytes
++ = Context->totalSize
++ - Context->entryOffsetXDFrom2D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the context buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ entryPhysical,
++ entryLogical,
++ entryBytes
++ ));
++#endif
++
++ /* Update the current context. */
++ Command->currContext = Context;
++
++#if gcdDUMP_COMMAND
++ contextDumpLogical = entryLogical;
++ contextDumpBytes = entryBytes;
++#endif
++ }
++
++ /* Same context. */
++ else
++ {
++ /* Determine context entry and exit points. */
++ if (commandBufferObject->using2D && Context->dirty2D)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++ else
++ {
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ }
++ }
++
++#if gcdDUMP_COMMAND
++ bufferDumpLogical = commandBufferLogical + offset;
++ bufferDumpBytes = commandBufferSize - offset;
++#endif
++
++#if gcdSECURE_USER
++ /* Process user hints. */
++ gcmkONERROR(_ProcessHints(Command, ProcessID, commandBufferObject));
++#endif
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = Command->physical;
++#endif
++ exitLogical = Command->logical;
++ exitBytes = Command->offset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump to the new
++ WAIT/LINK command sequence. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = waitLinkPhysical;
++#endif
++ exitLogical = waitLinkLogical;
++ exitBytes = waitLinkBytes;
++ }
++
++ /* Add a new WAIT/LINK command sequence. When the command buffer which is
++ currently being scheduled is fully executed by the GPU, the FE will
++ jump to this WAIT/LINK sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ waitLinkLogical,
++ offset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitSize
++ ));
++
++ /* Compute the location of the WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = (gctUINT8_PTR) waitLinkLogical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command queue cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ exitPhysical,
++ exitLogical,
++ exitBytes
++ ));
++#endif
++
++ /* Determine the location of the LINK command in the command buffer. */
++ commandBufferLink
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->offset;
++
++ /* Generate a LINK from the end of the command buffer being scheduled
++ back to the kernel command queue. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ commandBufferLink,
++ exitLogical,
++ exitBytes,
++ &linkBytes
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ ProcessID,
++ gcvNULL,
++ commandBufferPhysical,
++ commandBufferLogical,
++ commandBufferSize
++ ));
++#endif
++
++ /* Generate a LINK from the previous WAIT/LINK command sequence to the
++ entry determined above (either the context or the command buffer).
++ This LINK replaces the WAIT instruction from the previous WAIT/LINK
++ pair, therefore we use WAIT metrics for generation of this LINK.
++ This action will execute the entire sequence. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ Command->waitLogical,
++ entryLogical,
++ entryBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ contextDumpLogical,
++ contextDumpBytes,
++ gceDUMP_BUFFER_CONTEXT,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ bufferDumpLogical,
++ bufferDumpBytes,
++ gceDUMP_BUFFER_USER,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ waitLinkLogical,
++ waitLinkBytes,
++ gceDUMP_BUFFER_WAITLINK,
++ gcvFALSE
++ );
++
++ /* Update the current pipe. */
++ Command->pipeSelect = commandBufferObject->exitPipe;
++
++ /* Update command queue offset. */
++ Command->offset += waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update address of last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitSize;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.commit]");
++#endif
++#endif /* gcdNULL_DRIVER */
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ contextAcquired = gcvFALSE;
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++#if VIVANTE_PROFILER_CONTEXT
++ if(sequenceAcquired)
++ {
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE));
++ if (Command->currContext)
++ {
++ gcmkONERROR(gckHARDWARE_UpdateContextProfile(
++ hardware,
++ Command->currContext));
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ sequenceAcquired = gcvFALSE;
++ }
++#endif
++
++ /* Loop while there are records in the queue. */
++ while (EventQueue != gcvNULL)
++ {
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ eventRecord = &_eventRecord;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os, eventRecord, EventQueue, gcmSIZEOF(gcsQUEUE)
++ ));
++ }
++ else
++ {
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ eventRecord = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(gckEVENT_AddList(
++ Command->kernel->eventObj, &eventRecord->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE
++ ));
++
++ /* Next record in the queue. */
++ nextEventRecord = gcmUINT64_TO_PTR(eventRecord->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os, EventQueue, gcmSIZEOF(gcsQUEUE), (gctPOINTER *) eventRecord
++ ));
++
++ eventRecord = gcvNULL;
++ }
++
++ EventQueue = nextEventRecord;
++ }
++
++ if (Command->kernel->eventObj->queueHead == gcvNULL
++ && Command->kernel->hardware->powerManagement == gcvTRUE
++ )
++ {
++ /* Submit a commit-done event so the worker thread knows all jobs are done. */
++ gcmkVERIFY_OK(
++ gckEVENT_CommitDone(Command->kernel->eventObj, gcvKERNEL_PIXEL));
++ }
++
++ /* Submit events. */
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE);
++
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkTRACE(
++ gcvLEVEL_INFO,
++ "%s(%d): Intterupted in gckEVENT_Submit",
++ __FUNCTION__, __LINE__
++ );
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++
++ commandBufferMapped = gcvFALSE;
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((eventRecord != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) eventRecord
++ ));
++ }
++
++ if (contextAcquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (sequenceAcquired)
++ {
++ /* Release the context sequence mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Reserve
++**
++** Reserve space in the command queue. Also acquire the command queue mutex.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes to reserve.
++**
++** OUTPUT:
++**
++** gctPOINTER * Buffer
++** Pointer to a variable that will receive the address of the reserved
++** space.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable that will receive the number of bytes
++** available in the command queue.
++*/
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctSIZE_T * BufferSize
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctSIZE_T requiredBytes;
++ gctUINT32 requestedAligned;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute the aligned number of requested bytes. */
++ requestedAligned = gcmALIGN(RequestedBytes, Command->alignment);
++
++ /* Another WAIT/LINK command sequence will have to be appended after
++ the requested area being reserved. Compute the number of bytes
++ required for WAIT/LINK at the location after the reserved area. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ gcvNULL,
++ Command->offset + requestedAligned,
++ &requiredBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Compute total number of bytes required. */
++ requiredBytes += requestedAligned;
++
++ /* Compute number of bytes available in command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < requiredBytes)
++ {
++ /* Create a new command queue. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Still not enough space? */
++ if (bytes < requiredBytes)
++ {
++ /* Rare case, not enough room in command queue. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++ }
++
++ /* Return a pointer to the empty slot in the command queue. */
++ *Buffer = (gctUINT8 *) Command->logical + Command->offset;
++
++ /* Return number of bytes left in command queue. */
++ *BufferSize = bytes;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Execute
++**
++** Execute a previously reserved command queue by appending a WAIT/LINK command
++** sequence after it and modifying the last WAIT into a LINK command. The
++** command FIFO mutex will be released whether this function succeeds or not.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes
++ )
++{
++ gceSTATUS status;
++
++ gctPHYS_ADDR waitLinkPhysical;
++ gctUINT8_PTR waitLinkLogical;
++ gctUINT32 waitLinkOffset;
++ gctSIZE_T waitLinkBytes;
++
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitBytes;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR execPhysical;
++#endif
++ gctPOINTER execLogical;
++ gctSIZE_T execBytes;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute offset for WAIT/LINK. */
++ waitLinkOffset = Command->offset + RequestedBytes;
++
++ /* Compute number of bytes left in command queue. */
++ waitLinkBytes = Command->pageSize - waitLinkOffset;
++
++ /* Compute the location of the WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + waitLinkOffset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + waitLinkOffset;
++
++ /* Append WAIT/LINK in command queue. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ waitLinkLogical,
++ waitLinkOffset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitBytes
++ ));
++
++ /* Compute the location of the WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = waitLinkLogical + waitOffset;
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = Command->physical;
++#endif
++ execLogical = Command->logical;
++ execBytes = waitLinkOffset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump directly to the
++ reserved area. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = (gctUINT8 *) Command->physical + Command->offset;
++#endif
++ execLogical = (gctUINT8 *) Command->logical + Command->offset;
++ execBytes = RequestedBytes + waitLinkBytes;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ execPhysical,
++ execLogical,
++ execBytes
++ ));
++#endif
++
++ /* Convert the last WAIT into a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Command->kernel->hardware,
++ Command->waitLogical,
++ execLogical,
++ execBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ execLogical,
++ execBytes,
++ gceDUMP_BUFFER_KERNEL,
++ gcvFALSE
++ );
++
++ /* Update the pointer to the last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitBytes;
++
++ /* Update the command queue. */
++ Command->offset += RequestedBytes + waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Command->kernel->hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.execute]");
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
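++
++/* Editorial sketch (not part of the original driver): how a kernel-side caller
++** might pair gckCOMMAND_Reserve() with gckCOMMAND_Execute(), based only on the
++** two signatures above. The variable names and the _FormatCommands() helper are
++** hypothetical placeholders for whatever fills the reserved slot.
++**
++** gctPOINTER buffer;
++** gctSIZE_T available;
++** gctSIZE_T bytes = 64;
++**
++** // Reserve space in the kernel command queue (a new queue page is started
++** // automatically when the current one cannot fit the request).
++** gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &buffer, &available));
++**
++** // Fill the reserved slot with hardware commands.
++** _FormatCommands(buffer, bytes);
++**
++** // Append a WAIT/LINK after the reserved area and convert the previous
++** // WAIT into a LINK so the FE starts fetching the new commands.
++** gcmkONERROR(gckCOMMAND_Execute(command, bytes));
++*/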
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stall
++**
++** The calling thread will be suspended until the command queue has been
++** completed.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++#if gcdNULL_DRIVER
++ /* Do nothing with infinite hardware. */
++ return gcvSTATUS_OK;
++#else
++ gckOS os;
++ gckHARDWARE hardware;
++ gckEVENT eventObject;
++ gceSTATUS status;
++ gctSIGNAL signal = gcvNULL;
++ gctUINT timer = 0;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Extract the gckOS object pointer. */
++ os = Command->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Extract the gckEVENT object pointer. */
++ eventObject = Command->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObject, gcvOBJ_EVENT);
++
++ /* Allocate the signal. */
++ gcmkONERROR(gckOS_CreateSignal(os, gcvTRUE, &signal));
++
++ /* Append the EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Signal(eventObject, signal, gcvKERNEL_PIXEL));
++
++ /* Submit the event queue. */
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.stall]");
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ /* Error. */
++ goto OnError;
++ }
++
++ do
++ {
++ /* Wait for the signal. */
++ status = gckOS_WaitSignal(os, signal, gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT32 idle;
++
++ /* Read idle register. */
++ gcmkVERIFY_OK(gckHARDWARE_GetIdle(
++ hardware, gcvFALSE, &idle
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): idle=%08x",
++ __FUNCTION__, __LINE__, idle
++ );
++
++ gcmkONERROR(gckOS_MemoryBarrier(os, gcvNULL));
++
++#ifdef __QNXNTO__
++ gctUINT32 reg_cmdbuf_fetch;
++ gctUINT32 reg_intr;
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Command->kernel->hardware->os, Command->kernel->core, 0x0664, &reg_cmdbuf_fetch
++ ));
++
++ if (idle == 0x7FFFFFFE)
++ {
++ /*
++ * GPU is idle so there should not be pending interrupts.
++ * Just double check.
++ *
++ * Note that reading interrupt register clears it.
++ * That's why we don't read it in all cases.
++ */
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Command->kernel->hardware->os, Command->kernel->core, 0x10, &reg_intr
++ ));
++
++ slogf(
++ _SLOG_SETCODE(1, 0),
++ _SLOG_CRITICAL,
++ "GALcore: Stall timeout (idle = 0x%X, command buffer fetch = 0x%X, interrupt = 0x%X)",
++ idle, reg_cmdbuf_fetch, reg_intr
++ );
++ }
++ else
++ {
++ slogf(
++ _SLOG_SETCODE(1, 0),
++ _SLOG_CRITICAL,
++ "GALcore: Stall timeout (idle = 0x%X, command buffer fetch = 0x%X)",
++ idle, reg_cmdbuf_fetch
++ );
++ }
++#endif
++#endif
++ /* Advance timer. */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++ else if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ }
++ while (gcmIS_ERROR(status)
++#if gcdGPU_TIMEOUT
++ && (timer < Command->kernel->timeOut)
++#endif
++ );
++
++ /* Bail out on timeout. */
++ if (gcmIS_ERROR(status))
++ {
++ /* Broadcast the stuck GPU. */
++ gcmkONERROR(gckOS_Broadcast(
++ os, hardware, gcvBROADCAST_GPU_STUCK
++ ));
++ }
++
++ /* Delete the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ /* Free the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#endif
++}
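++
++/* Editorial sketch (not part of the original driver): a typical wait-for-idle
++** call, for example before a power state change; 'command' is a hypothetical
++** gckCOMMAND handle. Internally the function above creates a signal, schedules
++** an EVENT for it, submits the event queue and then polls in gcdGPU_ADVANCETIMER
++** slices until the signal fires or the accumulated time exceeds
++** Command->kernel->timeOut, at which point the stuck GPU is broadcast.
++**
++** // FromPower = gcvTRUE when called from inside power management.
++** gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++*/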
++
++/*******************************************************************************
++**
++** gckCOMMAND_Attach
++**
++** Attach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive a pointer to a new
++** gckCONTEXT object.
++**
++** gctSIZE_T * StateCount
++** Pointer to a variable that will receive the number of states
++** in the context buffer.
++*/
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Construct(
++ Command->os,
++ Command->kernel->hardware,
++ ProcessID,
++ Context
++ ));
++
++ /* Return the number of states in the context. */
++ * StateCount = (* Context)->stateCount;
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%x", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Detach
++**
++** Detach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x", Command, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Destroy the gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Destroy(Context));
++
++ if (Command->currContext == Context)
++ {
++ /* Detach from the gckCOMMAND object if the destroyed context is the current context. */
++ Command->currContext = gcvNULL;
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
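++
++/* Editorial sketch (not part of the original driver): the per-process context
++** lifecycle as seen by a caller of gckCOMMAND_Attach/gckCOMMAND_Detach above.
++** The variable names are hypothetical.
++**
++** gckCONTEXT context;
++** gctSIZE_T stateCount;
++**
++** // On attach: build a gckCONTEXT for this process.
++** gcmkONERROR(gckCOMMAND_Attach(command, &context, &stateCount, processID));
++**
++** // ... commits issued on behalf of this process reference 'context' ...
++**
++** // On detach: destroy the context; if it was the current one, gckCOMMAND
++** // clears its currContext pointer so no stale reference remains.
++** gcmkONERROR(gckCOMMAND_Detach(command, context));
++*/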
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++/*******************************************************************************
++**
++** gckCOMMAND_DumpExecutingBuffer
++**
++** Dump the command buffer which GPU is executing.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 gpuAddress;
++ gctSIZE_T pageCount;
++ gctPOINTER entry;
++ gckOS os = Command->os;
++ gckKERNEL kernel = Command->kernel;
++#if gcdLINK_QUEUE_SIZE
++ gctINT pid;
++ gctINT i, rear;
++ gctUINT32 start, end;
++ gctUINT32 dumpFront, dumpRear;
++ gckLINKQUEUE queue = &kernel->hardware->linkQueue;
++ gckLINKQUEUE queueMirror;
++ gctUINT32 bytes;
++ gckLINKDATA linkData;
++#endif
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("**** COMMAND BUF DUMP ****\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));
++
++ gcmkPRINT("DMA Address 0x%08X", gpuAddress);
++
++#if gcdLINK_QUEUE_SIZE
++ /* Duplicate queue because it will be changed.*/
++ gcmkONERROR(gckOS_AllocateMemory(os,
++ sizeof(struct _gckLINKQUEUE),
++ (gctPOINTER *)&queueMirror));
++
++ gcmkONERROR(gckOS_MemCopy(queueMirror,
++ queue,
++ sizeof(struct _gckLINKQUEUE)));
++
++ /* If a kernel command buffer links to a context buffer and then to a user command
++ ** buffer, the second link is recorded in the queue first, so the order must be fixed here.
++ ** In Queue: C1 U1 U2 C2 U3 U4 U5 C3
++ ** Real: C1 X1 U1 C2 U2 U3 U4 C3 U5
++ ** Command buffer X1, which follows C1, has already dropped out of the queue, so C1 is meaningless.
++ */
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Can't find it in virtual command buffer list, ignore it. */
++ continue;
++ }
++
++ if (buffer->kernelLogical)
++ {
++ /* It is a context buffer. */
++ if (i == 0)
++ {
++ /* The real command buffer is out, so clear this slot. */
++ linkData->start = 0;
++ linkData->end = 0;
++ linkData->pid = 0;
++ }
++ else
++ {
++ /* switch context buffer and command buffer. */
++ struct _gckLINKDATA tmp = *linkData;
++ gckLINKDATA linkDataPrevious;
++
++ gckLINKQUEUE_GetData(queueMirror, i - 1, &linkDataPrevious);
++ *linkData = *linkDataPrevious;
++ *linkDataPrevious = tmp;
++ }
++ }
++ }
++
++ /* Clear search result. */
++ dumpFront = dumpRear = gcvINFINITE;
++
++ gcmkPRINT("Link Stack:");
++
++ /* Search stuck address in link queue from rear. */
++ rear = gcdLINK_QUEUE_SIZE - 1;
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ start = linkData->start;
++ end = linkData->end;
++ pid = linkData->pid;
++
++ if (gpuAddress >= start && gpuAddress < end)
++ {
++ /* Find latest matched command buffer. */
++ gcmkPRINT(" %d, [%08X - %08X]", pid, start, end);
++
++ /* Initialize dump information. */
++ dumpFront = dumpRear = rear;
++ }
++
++ /* Advance to previous one. */
++ rear--;
++
++ if (dumpFront != gcvINFINITE)
++ {
++ break;
++ }
++ }
++
++ if (dumpFront == gcvINFINITE)
++ {
++ /* Can't find matched record in link queue, dump kernel command buffer. */
++ _DumpKernelCommandBuffer(Command);
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ }
++
++ /* Search the last context buffer linked. */
++ while (rear >= 0)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ gcmkPRINT(" %d, [%08X - %08X]",
++ linkData->pid,
++ linkData->start,
++ linkData->end);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_SUCCESS(status) && buffer->kernelLogical)
++ {
++ /* Find a context buffer. */
++ dumpFront = rear;
++ break;
++ }
++
++ rear--;
++ }
++
++ /* Dump from the last context buffer to the last command buffer where the hang happened. */
++ for (i = dumpFront; i <= dumpRear; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ /* Get gpu address of this command buffer. */
++ gpuAddress = linkData->start;
++ bytes = linkData->end - gpuAddress;
++
++ /* Get the whole buffer. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("Buffer [%08X - %08X] is lost",
++ linkData->start,
++ linkData->end);
++ continue;
++ }
++
++ /* Get kernel logical for dump. */
++ if (buffer->kernelLogical)
++ {
++ /* Get kernel logical directly if it is a context buffer. */
++ entry = buffer->kernelLogical;
++ gcmkPRINT("Context Buffer:");
++ }
++ else
++ {
++ /* Make it accessible by the kernel if it is a user command buffer. */
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(buffer->physical,
++ &pageCount,
++ &entry));
++ gcmkPRINT("User Command Buffer:");
++ }
++
++ /* Dump from the entry. */
++ _DumpBuffer(entry + (gpuAddress - buffer->gpuAddress), gpuAddress, bytes);
++
++ /* Release the kernel logical address if necessary. */
++ if (!buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(entry));
++ }
++ }
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++#else
++ /* Without link queue information, we don't know the entry of the last command
++ ** buffer, so just dump the page where the GPU is stuck. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(buffer->physical, &pageCount, &entry));
++
++ if (entry)
++ {
++ gctUINT32 offset = gpuAddress - buffer->gpuAddress;
++ gctPOINTER entryDump = entry;
++
++ /* Dump one page. */
++ gctUINT32 bytes = 4096;
++
++ /* Align to page. */
++ offset &= 0xfffff000;
++
++ /* Kernel address of the page where the stall point is. */
++ entryDump += offset;
++
++ /* Align to page. */
++ gpuAddress &= 0xfffff000;
++
++ gcmkPRINT("User Command Buffer:\n");
++ _DumpBuffer(entryDump, gpuAddress, bytes);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(entry));
++ }
++ else
++ {
++ _DumpKernelCommandBuffer(Command);
++ }
++
++ return gcvSTATUS_OK;
++#endif
++}
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_command_vg.c 2015-05-01 14:57:59.527427001 -0500
+@@ -0,0 +1,3677 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++*********************************** Debugging **********************************
++\******************************************************************************/
++
++#define gcvDISABLE_TIMEOUT 1
++#define gcvDUMP_COMMAND_BUFFER 0
++#define gcvDUMP_COMMAND_LINES 0
++
++
++#if gcvDEBUG || defined(EMULATOR) || gcvDISABLE_TIMEOUT
++# define gcvQUEUE_TIMEOUT ~0
++#else
++# define gcvQUEUE_TIMEOUT 10
++#endif
++
++
++/******************************************************************************\
++********************************** Definitions *********************************
++\******************************************************************************/
++
++/* Minimum buffer size. */
++#define gcvMINUMUM_BUFFER \
++ gcmSIZEOF(gcsKERNEL_QUEUE_HEADER) + \
++ gcmSIZEOF(gcsKERNEL_CMDQUEUE) * 2
++
++#define gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ static gceSTATUS \
++ _EventHandler_##Block##_##Number( \
++ IN gckVGKERNEL Kernel \
++ )
++
++#define gcmDEFINE_INTERRUPT_HANDLER(Block, Number) \
++ gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ { \
++ return _EventHandler_Block( \
++ Kernel, \
++ &Kernel->command->taskTable[gcvBLOCK_##Block], \
++ gcvFALSE \
++ ); \
++ }
++
++#define gcmDEFINE_INTERRUPT_HANDLER_ENTRY(Block, Number) \
++ { gcvBLOCK_##Block, _EventHandler_##Block##_##Number }
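++
++/* Editorial note (not part of the original driver): for illustration, assuming a
++** block named COMMAND exists in gceBLOCK, gcmDEFINE_INTERRUPT_HANDLER(COMMAND, 0)
++** would expand to roughly:
++**
++** static gceSTATUS
++** _EventHandler_COMMAND_0(
++** IN gckVGKERNEL Kernel
++** )
++** {
++** return _EventHandler_Block(
++** Kernel,
++** &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++** gcvFALSE
++** );
++** }
++**
++** Note that _EventHandler_Block is called literally (no token pasting), so every
++** generated handler funnels into one shared helper with a per-block task table
++** entry.
++*/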
++
++/* Block interrupt handling table entry. */
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER * gcsBLOCK_INTERRUPT_HANDLER_PTR;
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER
++{
++ gceBLOCK block;
++ gctINTERRUPT_HANDLER handler;
++}
++gcsBLOCK_INTERRUPT_HANDLER;
++
++/* Queue control functions. */
++typedef struct _gcsQUEUE_UPDATE_CONTROL * gcsQUEUE_UPDATE_CONTROL_PTR;
++typedef struct _gcsQUEUE_UPDATE_CONTROL
++{
++ gctOBJECT_HANDLER execute;
++ gctOBJECT_HANDLER update;
++ gctOBJECT_HANDLER lastExecute;
++ gctOBJECT_HANDLER lastUpdate;
++}
++gcsQUEUE_UPDATE_CONTROL;
++
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_FlushMMU(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckVGHARDWARE hardware = Command->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* The page table was updated; flush the MMU before commit. */
++ gcmkONERROR(gckVGHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_WaitForIdle(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 idle;
++ gctUINT timeout = 0;
++
++ /* Loop while not idle. */
++ while (Queue->pending)
++ {
++ /* Did we reach the timeout limit? */
++ if (timeout == gcvQUEUE_TIMEOUT)
++ {
++ /* Hardware is probably dead... */
++ return gcvSTATUS_TIMEOUT;
++ }
++
++ /* Sleep for 100ms. */
++ gcmkERR_BREAK(gckOS_Delay(Command->os, 100));
++
++ /* Not the first loop? */
++ if (timeout > 0)
++ {
++ /* Read IDLE register. */
++ gcmkVERIFY_OK(gckVGHARDWARE_GetIdle(Command->hardware, &idle));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_COMMAND,
++ "%s: timeout, IDLE=%08X\n",
++ __FUNCTION__, idle
++ );
++ }
++
++ /* Increment the timeout counter. */
++ timeout += 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gctINT32
++_GetNextInterrupt(
++ IN gckVGCOMMAND Command,
++ IN gceBLOCK Block
++ )
++{
++ gctUINT index;
++ gcsBLOCK_TASK_ENTRY_PTR entry;
++ gctINT32 interrupt;
++
++ /* Get the block entry. */
++ entry = &Command->taskTable[Block];
++
++ /* Make sure we have initialized interrupts. */
++ gcmkASSERT(entry->interruptCount > 0);
++
++ /* Decrement the interrupt usage semaphore. */
++ gcmkVERIFY_OK(gckOS_DecrementSemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++
++ /* Get the value index. */
++ index = entry->interruptIndex;
++
++ /* Get the interrupt value. */
++ interrupt = entry->interruptArray[index];
++
++ /* Must be a valid value. */
++ gcmkASSERT((interrupt >= 0) && (interrupt <= 31));
++
++ /* Advance the index to the next value. */
++ index += 1;
++
++ /* Set the new index. */
++ entry->interruptIndex = (index == entry->interruptCount)
++ ? 0
++ : index;
++
++ /* Return interrupt value. */
++ return interrupt;
++}
++
++
++/******************************************************************************\
++***************************** Task Storage Management **************************
++\******************************************************************************/
++
++/* Minimum task buffer size. */
++#define gcvMIN_TASK_BUFFER \
++( \
++ gcmSIZEOF(gcsTASK_CONTAINER) + 128 \
++)
++
++/* Free list terminator. */
++#define gcvFREE_TASK_TERMINATOR \
++( \
++ (gcsTASK_CONTAINER_PTR) gcmINT2PTR(~0) \
++)
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------- Allocated Task Buffer List Management ------------------*/
++
++static void
++_InsertTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR AddAfter,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR addBefore;
++
++ /* Cannot add before the first buffer. */
++ gcmkASSERT(AddAfter != gcvNULL);
++
++ /* Create a shortcut to the next buffer. */
++ addBefore = AddAfter->allocNext;
++
++ /* Initialize the links. */
++ Buffer->allocPrev = AddAfter;
++ Buffer->allocNext = addBefore;
++
++ /* Link to the previous buffer. */
++ AddAfter->allocNext = Buffer;
++
++ /* Link to the next buffer. */
++ if (addBefore != gcvNULL)
++ {
++ addBefore->allocPrev = Buffer;
++ }
++}
++
++static void
++_RemoveTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++
++ /* Cannot remove the first buffer. */
++ gcmkASSERT(Buffer->allocPrev != gcvNULL);
++
++ /* Create shortcuts to the previous and next buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Tail buffer? */
++ if (next == gcvNULL)
++ {
++ /* Remove from the list. */
++ prev->allocNext = gcvNULL;
++ }
++
++ /* Buffer from the middle. */
++ else
++ {
++ prev->allocNext = next;
++ next->allocPrev = prev;
++ }
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*--------------------- Free Task Buffer List Management ---------------------*/
++
++static void
++_AppendToFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Cannot be a part of the free list already. */
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* First buffer to add? */
++ if (Command->taskFreeHead == gcvNULL)
++ {
++ /* Terminate the links. */
++ Buffer->freePrev = gcvFREE_TASK_TERMINATOR;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Initialize the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = Buffer;
++ }
++
++ /* Not the first, add after the tail. */
++ else
++ {
++ /* Initialize the new tail buffer. */
++ Buffer->freePrev = Command->taskFreeTail;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Add after the tail. */
++ Command->taskFreeTail->freeNext = Buffer;
++ Command->taskFreeTail = Buffer;
++ }
++}
++
++static void
++_RemoveFromFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Has to be a part of the free list. */
++ gcmkASSERT(Buffer->freePrev != gcvNULL);
++ gcmkASSERT(Buffer->freeNext != gcvNULL);
++
++ /* Head buffer? */
++ if (Buffer->freePrev == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Tail buffer as well? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Reset the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = gcvNULL;
++ }
++
++ /* No, just the head. */
++ else
++ {
++ /* Update the head. */
++ Command->taskFreeHead = Buffer->freeNext;
++
++ /* Terminate the next buffer. */
++ Command->taskFreeHead->freePrev = gcvFREE_TASK_TERMINATOR;
++ }
++ }
++
++ /* Not the head. */
++ else
++ {
++ /* Tail buffer? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Update the tail. */
++ Command->taskFreeTail = Buffer->freePrev;
++
++ /* Terminate the previous buffer. */
++ Command->taskFreeTail->freeNext = gcvFREE_TASK_TERMINATOR;
++ }
++
++ /* A buffer in the middle. */
++ else
++ {
++ /* Remove the buffer from the list. */
++ Buffer->freePrev->freeNext = Buffer->freeNext;
++ Buffer->freeNext->freePrev = Buffer->freePrev;
++ }
++ }
++
++ /* Reset free list pointers. */
++ Buffer->freePrev = gcvNULL;
++ Buffer->freeNext = gcvNULL;
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- Task Buffer Allocation --------------------------*/
++
++static void
++_SplitTaskBuffer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer,
++ IN gctUINT Size
++ )
++{
++ /* Determine the size of the new buffer. */
++ gctINT splitBufferSize = Buffer->size - Size;
++ gcmkASSERT(splitBufferSize >= 0);
++
++ /* Is the split buffer big enough to become a separate buffer? */
++ if (splitBufferSize >= gcvMIN_TASK_BUFFER)
++ {
++ /* Place the new path data. */
++ gcsTASK_CONTAINER_PTR splitBuffer = (gcsTASK_CONTAINER_PTR)
++ (
++ (gctUINT8_PTR) Buffer + Size
++ );
++
++ /* Set the trimmed buffer size. */
++ Buffer->size = Size;
++
++ /* Initialize the split buffer. */
++ splitBuffer->referenceCount = 0;
++ splitBuffer->size = splitBufferSize;
++ splitBuffer->freePrev = gcvNULL;
++ splitBuffer->freeNext = gcvNULL;
++
++ /* Link in. */
++ _InsertTaskBuffer(Buffer, splitBuffer);
++ _AppendToFreeList(Command, splitBuffer);
++ }
++}
++
++static gceSTATUS
++_AllocateTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ OUT gcsTASK_CONTAINER_PTR * Buffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x, Buffer ==0x%x", Command, Size, Buffer);
++
++ /* Verify arguments. */
++ gcmkVERIFY_ARGUMENT(Buffer != gcvNULL);
++
++ do
++ {
++ gcsTASK_STORAGE_PTR storage;
++ gcsTASK_CONTAINER_PTR buffer;
++
++ /* Adjust the size. */
++ Size += gcmSIZEOF(gcsTASK_CONTAINER);
++
++ /* Adjust the allocation size if not big enough. */
++ if (Size > Command->taskStorageUsable)
++ {
++ Command->taskStorageGranularity
++ = gcmALIGN(Size + gcmSIZEOF(gcsTASK_STORAGE), 1024);
++
++ Command->taskStorageUsable
++ = Command->taskStorageGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++ }
++
++ /* Is there a free buffer available? */
++ else if (Command->taskFreeHead != gcvNULL)
++ {
++ /* Set the initial free buffer. */
++ gcsTASK_CONTAINER_PTR buffer = Command->taskFreeHead;
++
++ do
++ {
++ /* Is the buffer big enough? */
++ if (buffer->size >= Size)
++ {
++ /* Remove the buffer from the free list. */
++ _RemoveFromFreeList(Command, buffer);
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++
++ /* Get the next free buffer. */
++ buffer = buffer->freeNext;
++ }
++ while (buffer != gcvFREE_TASK_TERMINATOR);
++ }
++
++ /* Allocate a container. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Command->os,
++ Command->taskStorageGranularity,
++ (gctPOINTER *) &storage
++ ));
++
++ /* Link in the storage buffer. */
++ storage->next = Command->taskStorage;
++ Command->taskStorage = storage;
++
++ /* Place the task buffer. */
++ buffer = (gcsTASK_CONTAINER_PTR) (storage + 1);
++
++ /* Determine the size of the buffer. */
++ buffer->size
++ = Command->taskStorageGranularity
++ - gcmSIZEOF(gcsTASK_STORAGE);
++
++ /* Initialize the task buffer. */
++ buffer->referenceCount = 0;
++ buffer->allocPrev = gcvNULL;
++ buffer->allocNext = gcvNULL;
++ buffer->freePrev = gcvNULL;
++ buffer->freeNext = gcvNULL;
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++static void
++_FreeTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++ gcsTASK_CONTAINER_PTR merged;
++
++ gctSIZE_T mergedSize;
++
++ /* Verify arguments. */
++ gcmkASSERT(Buffer != gcvNULL);
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* Get shortcuts to the previous and next path data buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Is the previous path data buffer already free? */
++ if (prev && prev->freeNext)
++ {
++ /* The previous path data buffer is the one that remains. */
++ merged = prev;
++
++ /* Is the next path data buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge all three path data buffers into the previous. */
++ mergedSize = prev->size + Buffer->size + next->size;
++
++ /* Remove the next path data buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++ }
++ else
++ {
++ /* Merge the current path data buffer into the previous. */
++ mergedSize = prev->size + Buffer->size;
++ }
++
++ /* Delete the current path data buffer. */
++ _RemoveTaskBuffer(Buffer);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++ else
++ {
++ /* The current path data buffer is the one that remains. */
++ merged = Buffer;
++
++ /* Is the next buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge the next into the current. */
++ mergedSize = Buffer->size + next->size;
++
++ /* Remove the next buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++
++ /* Add the current buffer into the free list. */
++ _AppendToFreeList(Command, merged);
++ }
++}
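++
++/* Editorial note (not part of the original driver): the task storage above is a
++** simple first-fit allocator. Containers are linked twice: an "alloc" list in
++** address order and a "free" list of vacant containers. _AllocateTaskContainer
++** splits a free container when the remainder is at least gcvMIN_TASK_BUFFER, and
++** _FreeTaskContainer merges a released container with any free neighbors taken
++** from the alloc list, so adjacent vacant space coalesces:
++**
++** before free: [prev: free][Buffer: busy][next: free]
++** after free: [ one merged free container ]
++*/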
++
++gceSTATUS
++_RemoveRecordFromProcesDB(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_HEADER_PTR Task
++ )
++{
++ gcsTASK_PTR task = (gcsTASK_PTR)((gctUINT8_PTR)Task - sizeof(gcsTASK));
++ gcsTASK_FREE_VIDEO_MEMORY_PTR freeVideoMemory;
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR unlockVideoMemory;
++ gctINT pid;
++ gctUINT32 size;
++
++ /* Get the total size of all tasks. */
++ size = task->size;
++
++ gcmkVERIFY_OK(gckOS_GetProcessID((gctUINT32_PTR)&pid));
++
++ do
++ {
++ switch (Task->id)
++ {
++ case gcvTASK_FREE_VIDEO_MEMORY:
++ freeVideoMemory = (gcsTASK_FREE_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY,
++ gcmUINT64_TO_PTR(freeVideoMemory->node)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_FREE_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(freeVideoMemory + 1);
++
++ break;
++ case gcvTASK_UNLOCK_VIDEO_MEMORY:
++ unlockVideoMemory = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(unlockVideoMemory->node)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_UNLOCK_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(unlockVideoMemory + 1);
++
++ break;
++ default:
++ /* Skip the whole task. */
++ size = 0;
++ break;
++ }
++ }
++ while(size);
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++********************************* Task Scheduling ******************************
++\******************************************************************************/
++
++static gceSTATUS
++_ScheduleTasks(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable,
++ IN gctUINT8_PTR PreviousEnd
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gctINT block;
++ gcsTASK_CONTAINER_PTR container;
++ gcsTASK_MASTER_ENTRY_PTR userTaskEntry;
++ gcsBLOCK_TASK_ENTRY_PTR kernelTaskEntry;
++ gcsTASK_PTR userTask;
++ gctUINT8_PTR kernelTask;
++ gctINT32 interrupt;
++ gctUINT8_PTR eventCommand;
++
++ /* Nothing to schedule? */
++ if (TaskTable->size == 0)
++ {
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->taskMutex,
++ gcvINFINITE
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ do
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " number of tasks scheduled = %d\n"
++ " size of event data in bytes = %d\n",
++ TaskTable->count,
++ TaskTable->size
++ );
++
++ /* Allocate task buffer. */
++ gcmkERR_BREAK(_AllocateTaskContainer(
++ Command,
++ TaskTable->size,
++ &container
++ ));
++
++ /* Determine the task data pointer. */
++ kernelTask = (gctUINT8_PTR) (container + 1);
++
++ /* Initialize the reference count. */
++ container->referenceCount = TaskTable->count;
++
++ /* Process tasks. */
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " processing tasks for block %d\n",
++ block
++ );
++
++ /* Get the current kernel table entry. */
++ kernelTaskEntry = &Command->taskTable[block];
++
++ /* Are there tasks for the current block scheduled? */
++ if (kernelTaskEntry->container == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " first task container for the block added\n",
++ block
++ );
++
++ /* Nothing yet, set the container buffer pointer. */
++ kernelTaskEntry->container = container;
++ kernelTaskEntry->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Yes, append to the end. */
++ else
++ {
++ kernelTaskEntry->link->cotainer = container;
++ kernelTaskEntry->link->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Set initial task. */
++ userTask = userTaskEntry->head;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " copying user tasks over to the kernel\n"
++ );
++
++ /* Copy tasks. */
++ do
++ {
++ gcsTASK_HEADER_PTR taskHeader = (gcsTASK_HEADER_PTR) (userTask + 1);
++
++ gcmkVERIFY_OK(_RemoveRecordFromProcesDB(Command, taskHeader));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " task ID = %d, size = %d\n",
++ ((gcsTASK_HEADER_PTR) (userTask + 1))->id,
++ userTask->size
++ );
++
++#ifdef __QNXNTO__
++ if (taskHeader->id == gcvTASK_SIGNAL)
++ {
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->coid = TaskTable->coid;
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->rcvid = TaskTable->rcvid;
++ }
++#endif /* __QNXNTO__ */
++ /* Copy the task data. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ kernelTask, taskHeader, userTask->size
++ ));
++
++ /* Advance to the next task. */
++ kernelTask += userTask->size;
++ userTask = userTask->next;
++ }
++ while (userTask != gcvNULL);
++
++ /* Update link pointer in the header. */
++ kernelTaskEntry->link = (gcsTASK_LINK_PTR) kernelTask;
++
++ /* Initialize link task. */
++ kernelTaskEntry->link->id = gcvTASK_LINK;
++ kernelTaskEntry->link->cotainer = gcvNULL;
++ kernelTaskEntry->link->task = gcvNULL;
++
++ /* Advance the task data pointer. */
++ kernelTask += gcmSIZEOF(gcsTASK_LINK);
++ }
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->taskMutex
++ ));
++
++ /* Assign interrupts to the blocks. */
++ eventCommand = PreviousEnd;
++
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ /* Get the interrupt number. */
++ interrupt = _GetNextInterrupt(Command, block);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): block = %d interrupt = %d\n",
++ __FUNCTION__, __LINE__,
++ block, interrupt
++ );
++
++ /* Determine the command position. */
++ eventCommand -= Command->info.eventCommandSize;
++
++ /* Append an EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, eventCommand, block, interrupt, gcvNULL
++ ));
++ }
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
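++
++/* Editorial note (not part of the original driver): after _ScheduleTasks has run,
++** each allocated container holds the copied task stream for the blocks that had
++** work scheduled, roughly laid out as:
++**
++** [gcsTASK_CONTAINER header]
++** [tasks copied for block N][gcsTASK_LINK]
++** [tasks copied for block M][gcsTASK_LINK]
++** ...
++**
++** The container's referenceCount is initialized to TaskTable->count and is
++** decremented by the _Task* routines below as individual tasks are processed; a
++** trailing gcsTASK_LINK either terminates a block's chain or points at the next
++** container, and one EVENT command per scheduled block is placed in the command
++** area ending at PreviousEnd so the hardware raises the matching interrupt.
++*/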
++
++
++/******************************************************************************\
++******************************** Memory Management *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_HardwareToKernel(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM memory;
++ gctUINT32 offset;
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++ gctUINT32 nodePhysical;
++#endif
++ status = gcvSTATUS_OK;
++ /* Assume a non-virtual node and get the pool manager object. */
++ memory = Node->VidMem.memory;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++ nodePhysical = memory->baseAddress
++ + Node->VidMem.offset
++ + Node->VidMem.alignment;
++
++ if (Node->VidMem.kernelVirtual == gcvNULL)
++ {
++ status = gckOS_MapPhysical(Os,
++ nodePhysical,
++ Node->VidMem.bytes,
++ (gctPOINTER *)&Node->VidMem.kernelVirtual);
++
++ if (gcmkIS_ERROR(status))
++ {
++ return status;
++ }
++ }
++
++ offset = Address - nodePhysical;
++ *KernelPointer = (gctPOINTER)((gctUINT8_PTR)Node->VidMem.kernelVirtual + offset);
++#else
++ /* Determine the header offset within the pool it is allocated in. */
++ offset = Address - memory->baseAddress;
++
++ /* Translate the offset into the kernel side pointer. */
++ status = gckOS_GetKernelLogicalEx(
++ Os,
++ gcvCORE_VG,
++ offset,
++ KernelPointer
++ );
++#endif
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_ConvertUserCommandBufferPointer(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR UserCommandBuffer,
++ OUT gcsCMDBUFFER_PTR * KernelCommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcsCMDBUFFER_PTR mappedUserCommandBuffer = gcvNULL;
++
++ do
++ {
++ gctUINT32 headerAddress;
++
++ /* Map the command buffer structure into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ (gctPOINTER *) &mappedUserCommandBuffer
++ ));
++
++ /* Determine the address of the header. */
++ headerAddress
++ = mappedUserCommandBuffer->address
++ - mappedUserCommandBuffer->bufferOffset;
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ gcmUINT64_TO_PTR(mappedUserCommandBuffer->node),
++ headerAddress,
++ (gctPOINTER *) KernelCommandBuffer
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Unmap the user command buffer. */
++ if (mappedUserCommandBuffer != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_UnmapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ mappedUserCommandBuffer
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_AllocateLinear(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gcuVIDMEM_NODE_PTR * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctUINT32 address = (gctUINT32)~0;
++
++ do
++ {
++ gcePOOL pool;
++ gctPOINTER logical;
++
++ /* Allocate from the system pool. */
++ pool = gcvPOOL_SYSTEM;
++
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Command->kernel->kernel, &pool,
++ Size, Alignment,
++ gcvSURF_TYPE_UNKNOWN,
++ &node
++ ));
++
++ /* Do not accept virtual pools for now because we don't handle the
++ kernel pointer translation at the moment. */
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ /* Lock the command buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Lock(
++ Command->kernel->kernel,
++ node,
++ gcvFALSE,
++ &address
++ ));
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ node,
++ address,
++ &logical
++ ));
++
++ /* Set return values. */
++ * Node = node;
++ * Address = address;
++ * Logical = logical;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Unlock the command buffer. */
++ if (address != ~0)
++ {
++ gcmkCHECK_STATUS(gckVIDMEM_Unlock(
++ Command->kernel->kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL
++ ));
++ }
++
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(gckVIDMEM_Free(
++ node
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeLinear(
++ IN gckVGKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Unlock the linear buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(Kernel->kernel, Node, gcvSURF_TYPE_UNKNOWN, gcvNULL));
++
++ /* Free the linear buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Free(Node));
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++_AllocateCommandBuffer(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++
++ do
++ {
++ gctUINT alignedHeaderSize;
++ gctUINT requestedSize;
++ gctUINT allocationSize;
++ gctUINT32 address = 0;
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT8_PTR endCommand;
++
++ /* Determine the aligned header size. */
++ alignedHeaderSize
++ = gcmALIGN(gcmSIZEOF(gcsCMDBUFFER), Command->info.addressAlignment);
++
++ /* Align the requested size. */
++ requestedSize
++ = gcmALIGN(Size, Command->info.commandAlignment);
++
++ /* Determine the size of the buffer to allocate. */
++ allocationSize
++ = alignedHeaderSize
++ + requestedSize
++ + Command->info.staticTailSize;
++
++ /* Allocate the command buffer. */
++ gcmkERR_BREAK(_AllocateLinear(
++ Command,
++ allocationSize,
++ Command->info.addressAlignment,
++ &node,
++ &address,
++ (gctPOINTER *) &commandBuffer
++ ));
++
++ /* Initialize the structure. */
++ commandBuffer->completion = gcvVACANT_BUFFER;
++ commandBuffer->node = gcmPTR_TO_UINT64(node);
++ commandBuffer->address = address + alignedHeaderSize;
++ commandBuffer->bufferOffset = alignedHeaderSize;
++ commandBuffer->size = requestedSize;
++ commandBuffer->offset = requestedSize;
++ commandBuffer->nextAllocated = gcvNULL;
++ commandBuffer->nextSubBuffer = gcvNULL;
++
++ /* Determine the data count. */
++ commandBuffer->dataCount
++ = (requestedSize + Command->info.staticTailSize)
++ / Command->info.commandAlignment;
++
++ /* Determine the location of the END command. */
++ endCommand
++ = (gctUINT8_PTR) commandBuffer
++ + alignedHeaderSize
++ + requestedSize;
++
++ /* Append an END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ endCommand,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++
++ /* Set the return pointer. */
++ * CommandBuffer = commandBuffer;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(_FreeLinear(Command->kernel, node));
++ }
++
++ /* Return status. */
++ return status;
++}
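++
++/* Editorial note (not part of the original driver): layout of a command buffer
++** produced by _AllocateCommandBuffer above, using the sizes computed there:
++**
++** +----------------------------+ <- node start
++** | gcsCMDBUFFER header | alignedHeaderSize
++** +----------------------------+ <- commandBuffer->address
++** | requestedSize of commands |
++** +----------------------------+ <- endCommand
++** | static tail (END command) | Command->info.staticTailSize
++** +----------------------------+
++*/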
++
++static gceSTATUS
++_FreeCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ /* Free the buffer. */
++ status = _FreeLinear(Kernel, gcmUINT64_TO_PTR(CommandBuffer->node));
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++****************************** TS Overflow Handler *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_TSOverflow(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** TS OVERFLOW ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++
++/******************************************************************************\
++****************************** Bus Error Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_BusError(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** BUS ERROR ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++****************************** Power Stall Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_PowerStall(
++ IN gckVGKERNEL Kernel
++ )
++{
++ /* Signal. */
++ return gckOS_Signal(
++ Kernel->os,
++ Kernel->command->powerStallSignal,
++ gcvTRUE);
++}
++
++/******************************************************************************\
++******************************** Task Routines *********************************
++\******************************************************************************/
++
++typedef gceSTATUS (* gctTASKROUTINE) (
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gctTASKROUTINE _taskRoutine[] =
++{
++ _TaskLink, /* gcvTASK_LINK */
++ _TaskCluster, /* gcvTASK_CLUSTER */
++ _TaskIncrement, /* gcvTASK_INCREMENT */
++ _TaskDecrement, /* gcvTASK_DECREMENT */
++ _TaskSignal, /* gcvTASK_SIGNAL */
++ _TaskLockdown, /* gcvTASK_LOCKDOWN */
++ _TaskUnlockVideoMemory, /* gcvTASK_UNLOCK_VIDEO_MEMORY */
++ _TaskFreeVideoMemory, /* gcvTASK_FREE_VIDEO_MEMORY */
++ _TaskFreeContiguousMemory, /* gcvTASK_FREE_CONTIGUOUS_MEMORY */
++ _TaskUnmapUserMemory, /* gcvTASK_UNMAP_USER_MEMORY */
++};
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ /* Cast the task pointer. */
++ gcsTASK_LINK_PTR task = (gcsTASK_LINK_PTR) TaskHeader->task;
++
++ /* Save the pointer to the container. */
++ gcsTASK_CONTAINER_PTR container = TaskHeader->container;
++
++ /* No more tasks in the list? */
++ if (task->task == gcvNULL)
++ {
++ /* Reset the entry. */
++ TaskHeader->container = gcvNULL;
++ TaskHeader->task = gcvNULL;
++ TaskHeader->link = gcvNULL;
++ }
++ else
++ {
++ /* Update the entry. */
++ TaskHeader->container = task->cotainer;
++ TaskHeader->task = task->task;
++ }
++
++ /* Decrement the task buffer reference. */
++ gcmkASSERT(container->referenceCount >= 0);
++ if (container->referenceCount == 0)
++ {
++ /* Free the container. */
++ _FreeTaskContainer(Command, container);
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ /* Cast the task pointer. */
++ gcsTASK_CLUSTER_PTR cluster = (gcsTASK_CLUSTER_PTR) TaskHeader->task;
++
++ /* Get the number of tasks. */
++ gctUINT taskCount = cluster->taskCount;
++
++ /* Advance to the next task. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (cluster + 1);
++
++ /* Perform all tasks in the cluster. */
++ while (taskCount)
++ {
++ /* Perform the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ Command,
++ TaskHeader
++ ));
++
++ /* Update the task count. */
++ taskCount -= 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_INCREMENT_PTR task = (gcsTASK_INCREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Increment data. */
++ (* logical) += 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_DECREMENT_PTR task = (gcsTASK_DECREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Decrement data. */
++ (* logical) -= 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_SIGNAL_PTR task = (gcsTASK_SIGNAL_PTR) TaskHeader->task;
++
++
++ /* Map the signal into kernel space. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->rcvid, task->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->process
++ ));
++#endif /* __QNXNTO__ */
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR userCounter = gcvNULL;
++ gctUINT32_PTR kernelCounter = gcvNULL;
++ gctSIGNAL signal = gcvNULL;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_LOCKDOWN_PTR task = (gcsTASK_LOCKDOWN_PTR) TaskHeader->task;
++
++ /* Convert physical addresses into logical. */
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->userCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &userCounter
++ ));
++
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->kernelCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &kernelCounter
++ ));
++
++ /* Update the kernel counter. */
++ (* kernelCounter) += 1;
++
++ /* Are the counters equal? */
++ if ((* userCounter) == (* kernelCounter))
++ {
++ /* Map the signal into kernel space. */
++ gcmkERR_BREAK(gckOS_MapSignal(
++ Command->os, task->signal, task->process, &signal
++ ));
++
++ if (signal == gcvNULL)
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, task->signal, gcvTRUE
++ ));
++ }
++ else
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, signal, gcvTRUE
++ ));
++ }
++ }
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Destroy the mapped signal. */
++ if (signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, signal
++ ));
++ }
++
++ /* Unmap the physical memory. */
++ if (kernelCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ kernelCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ if (userCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ userCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR task
++ = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(
++ Command->kernel->kernel,
++ gcmUINT64_TO_PTR(task->node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_VIDEO_MEMORY_PTR task
++ = (gcsTASK_FREE_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Free(gcmUINT64_TO_PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR task
++ = (gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR) TaskHeader->task;
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Command->os, task->physical, task->logical, task->bytes
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNMAP_USER_MEMORY_PTR task
++ = (gcsTASK_UNMAP_USER_MEMORY_PTR) TaskHeader->task;
++
++ /* Unmap the user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Command->os, gcvCORE_VG, task->memory, task->size, task->info, task->address
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++************ Hardware Block Interrupt Handlers For Scheduled Events ************
++\******************************************************************************/
++
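++/* Generic handler for scheduled block events: increments the block's
++   interrupt usage semaphore and, while holding the task mutex, runs the
++   tasks queued for the block. Processing stops once a LINK task has been
++   handled; when ProcessAll is gcvFALSE at most one task (plus a trailing
++   LINK, if one follows) is processed. */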
++static gceSTATUS
++_EventHandler_Block(
++ IN gckVGKERNEL Kernel,
++ IN gcsBLOCK_TASK_ENTRY_PTR TaskHeader,
++ IN gctBOOL ProcessAll
++ )
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskHeader=0x%x ProcessAll=0x%x", Kernel, TaskHeader, ProcessAll);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ gckVGCOMMAND command;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Increment the interrupt usage semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, TaskHeader->interruptSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->taskMutex,
++ gcvINFINITE
++ ));
++
++ /* Verify inputs. */
++ gcmkASSERT(TaskHeader != gcvNULL);
++ gcmkASSERT(TaskHeader->container != gcvNULL);
++ gcmkASSERT(TaskHeader->task != gcvNULL);
++ gcmkASSERT(TaskHeader->link != gcvNULL);
++
++ /* Process tasks. */
++ do
++ {
++ /* Process the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++            /* Is the next task a LINK? */
++ if (TaskHeader->task->id == gcvTASK_LINK)
++ {
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Done. */
++ break;
++ }
++ }
++ while (ProcessAll);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->taskMutex
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
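++/* Command (FE) interrupt handler: invokes the post-execution handler of each
++   pending queue entry, merges fully processed queue buffers, and then either
++   restarts the command processor on the next scheduled queue or broadcasts
++   the idle power state when nothing is pending. */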
++gcmDECLARE_INTERRUPT_HANDLER(COMMAND, 0)
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x ", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++
++ do
++ {
++ gckVGCOMMAND command;
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT entryCount;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Get the current queue. */
++ queueTail = command->queueTail;
++
++ /* Get the current queue entry. */
++ entry = queueTail->currentEntry;
++
++ /* Get the number of entries in the queue. */
++ entryCount = queueTail->pending;
++
++ /* Process all entries. */
++ while (gcvTRUE)
++ {
++ /* Call post-execution function. */
++ status = entry->handler(Kernel, entry);
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: post action failed.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Executed the next buffer? */
++ if (status == gcvSTATUS_EXECUTED)
++ {
++ /* Update the queue. */
++ queueTail->pending = entryCount;
++ queueTail->currentEntry = entry;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++
++ /* Break out of the loop. */
++ break;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ entryCount -= 1;
++
++ /* Last entry? */
++ if (entryCount == 0)
++ {
++ /* Reset the queue to idle. */
++ queueTail->pending = 0;
++
++ /* Get a shortcut to the queue to merge with. */
++ mergeQueue = command->mergeQueue;
++
++ /* Merge the queues if necessary. */
++ if (mergeQueue != queueTail)
++ {
++ gcmkASSERT(mergeQueue < queueTail);
++ gcmkASSERT(mergeQueue->next == queueTail);
++
++ mergeQueue->size
++ += gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + queueTail->size;
++
++ mergeQueue->next = queueTail->next;
++ }
++
++ /* Advance to the next queue. */
++ queueTail = queueTail->next;
++
++ /* Did it wrap around? */
++ if (command->queue == queueTail)
++ {
++ /* Reset merge queue. */
++ command->mergeQueue = queueTail;
++ }
++
++ /* Set new queue. */
++ command->queueTail = queueTail;
++
++ /* Is the next queue scheduled? */
++ if (queueTail->pending > 0)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ status = gckVGHARDWARE_Execute(
++ command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: failed to start the next queue.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++ else
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(
++ Kernel->command->hardware, gcvPOWER_IDLE_BROADCAST
++ );
++ }
++
++ /* Break out of the loop. */
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->queueMutex
++ ));
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/* Define standard block interrupt handlers. */
++gcmDEFINE_INTERRUPT_HANDLER(TESSELLATOR, 0)
++gcmDEFINE_INTERRUPT_HANDLER(VG, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 1)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 2)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 3)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 4)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 5)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 6)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 7)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 8)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 9)
++
++/* The entries in the array are arranged by event priority. */
++static gcsBLOCK_INTERRUPT_HANDLER _blockHandlers[] =
++{
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(TESSELLATOR, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(VG, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 1),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 2),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 3),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 4),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 5),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 6),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 7),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 8),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 9),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(COMMAND, 0),
++};
++
++
++/******************************************************************************\
++************************* Static Command Buffer Handlers ***********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Cast the command buffer header. */
++ commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++************************* Dynamic Command Buffer Handlers **********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++********************************* Other Handlers *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_FreeKernelCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ /* Free the command buffer. */
++ status = _FreeCommandBuffer(Kernel, Entry->commandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************* Queue Management *******************************
++\******************************************************************************/
++
++#if gcvDUMP_COMMAND_BUFFER
++static void
++_DumpCommandQueue(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR QueueHeader,
++ IN gctUINT EntryCount
++ )
++{
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT queueIndex;
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ static gctUINT arrayCount = 0;
++#endif
++
++    /* Is dumping enabled? */
++    if (!Command->enableDumping)
++ {
++ return;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "COMMAND QUEUE DUMP: %d entries\n", EntryCount
++ );
++#endif
++
++ /* Get the pointer to the first entry. */
++ entry = QueueHeader->currentEntry;
++
++ /* Iterate through the queue. */
++ for (queueIndex = 0; queueIndex < EntryCount; queueIndex += 1)
++ {
++ gcsCMDBUFFER_PTR buffer;
++ gctUINT bufferCount;
++ gctUINT bufferIndex;
++ gctUINT i, count;
++ gctUINT size;
++ gctUINT32_PTR data;
++
++#if gcvDUMP_COMMAND_LINES
++ gctUINT lineNumber;
++#endif
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "ENTRY %d\n", queueIndex
++ );
++#endif
++
++ /* Reset the count. */
++ bufferCount = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Update the count. */
++ bufferCount += 1;
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ if (bufferCount > 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER SET: %d buffers.\n",
++ bufferCount
++ );
++ }
++#endif
++
++ /* Reset the buffer index. */
++ bufferIndex = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Determine the size of the buffer. */
++ size = buffer->dataCount * Command->info.commandAlignment;
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ /* A single buffer? */
++ if (bufferCount == 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER: count=%d (0x%X), size=%d bytes @ %08X.\n",
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER %d: count=%d (0x%X), size=%d bytes @ %08X\n",
++ bufferIndex,
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++#endif
++
++ /* Determine the number of double words to print. */
++ count = size / 4;
++
++ /* Determine the buffer location. */
++ data = (gctUINT32_PTR)
++ (
++ (gctUINT8_PTR) buffer + buffer->bufferOffset
++ );
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "unsigned int _" gcvCOMMAND_BUFFER_NAME "_%d[] =\n",
++ arrayCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "{\n"
++ );
++
++ arrayCount += 1;
++#endif
++
++#if gcvDUMP_COMMAND_LINES
++ /* Reset the line number. */
++ lineNumber = 0;
++#endif
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ count -= 2;
++#endif
++
++ for (i = 0; i < count; i += 1)
++ {
++ if ((i % 8) == 0)
++ {
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\t");
++#else
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " ");
++#endif
++ }
++
++#if gcvDUMP_COMMAND_LINES
++ if (lineNumber == gcvDUMP_COMMAND_LINES)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " . . . . . . . . .\n");
++ break;
++ }
++#endif
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "0x%08X", data[i]);
++
++ if (i + 1 == count)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ if (((i + 1) % 8) == 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ",\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ", ");
++ }
++ }
++ }
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "};\n\n"
++ );
++#endif
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ bufferIndex += 1;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ }
++}
++#endif
++
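++/* Reserve the head queue buffer for new entries: waits for the head to go
++   idle if it is still pending, acquires the queue mutex (left held for the
++   caller), and returns the first entry slot along with the number of entries
++   that fit in the buffer. */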
++static gceSTATUS
++_LockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ OUT gcsKERNEL_CMDQUEUE_PTR * Entries,
++ OUT gctUINT_PTR EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* Get a shortcut to the head of the queue. */
++ queueHead = Command->queueHead;
++
++ /* Is the head buffer still being worked on? */
++ if (queueHead->pending)
++ {
++ /* Increment overflow count. */
++ Command->queueOverflow += 1;
++
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, queueHead));
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Determine the first queue entry. */
++ queueHead->currentEntry = (gcsKERNEL_CMDQUEUE_PTR)
++ (
++ (gctUINT8_PTR) queueHead + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ );
++
++ /* Set the pointer to the first entry. */
++ * Entries = queueHead->currentEntry;
++
++ /* Determine the number of available entries. */
++ * EntryCount = queueHead->size / gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
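++/* Submit EntryCount entries from the head queue buffer: splits any unused
++   tail space off as a new queue buffer, marks the head as pending, starts
++   the command processor if the queue was previously empty, and releases the
++   queue mutex acquired by _LockCurrentQueue. */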
++static gceSTATUS
++_UnlockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ IN gctUINT EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++#if !gcdENABLE_INFINITE_SPEED_HW
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++ gcsKERNEL_QUEUE_HEADER_PTR queueNext;
++ gctUINT queueSize;
++ gctUINT newSize;
++ gctUINT unusedSize;
++
++ /* Get shortcut to the head and to the tail of the queue. */
++ queueTail = Command->queueTail;
++ queueHead = Command->queueHead;
++
++ /* Dump the command buffer. */
++#if gcvDUMP_COMMAND_BUFFER
++ _DumpCommandQueue(Command, queueHead, EntryCount);
++#endif
++
++ /* Get a shortcut to the current queue size. */
++ queueSize = queueHead->size;
++
++ /* Determine the new queue size. */
++ newSize = EntryCount * gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++ gcmkASSERT(newSize <= queueSize);
++
++ /* Determine the size of the unused area. */
++ unusedSize = queueSize - newSize;
++
++ /* Is the unused area big enough to become a buffer? */
++ if (unusedSize >= gcvMINUMUM_BUFFER)
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR nextHead;
++
++ /* Place the new header. */
++ nextHead = (gcsKERNEL_QUEUE_HEADER_PTR)
++ (
++ (gctUINT8_PTR) queueHead
++ + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + newSize
++ );
++
++ /* Initialize the buffer. */
++ nextHead->size = unusedSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ nextHead->pending = 0;
++
++ /* Link the buffer in. */
++ nextHead->next = queueHead->next;
++ queueHead->next = nextHead;
++ queueNext = nextHead;
++
++ /* Update the size of the current buffer. */
++ queueHead->size = newSize;
++ }
++
++ /* Not big enough. */
++ else
++ {
++ /* Determine the next queue. */
++ queueNext = queueHead->next;
++ }
++
++ /* Mark the buffer as busy. */
++ queueHead->pending = EntryCount;
++
++ /* Advance to the next buffer. */
++ Command->queueHead = queueNext;
++
++ /* Start the command processor if the queue was empty. */
++ if (queueTail == queueHead)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++ }
++
++ /* The queue was not empty. */
++ else
++ {
++ /* Advance the merge buffer if needed. */
++ if (queueHead == Command->mergeQueue)
++ {
++ Command->mergeQueue = queueNext;
++ }
++ }
++#endif
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API Code *****************************
++\******************************************************************************/
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ )
++{
++ gceSTATUS status, last;
++ gckVGCOMMAND command = gcvNULL;
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++ gctUINT i, j;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskGranularity=0x%x QueueSize=0x%x Command=0x%x",
++ Kernel, TaskGranularity, QueueSize, Command);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(QueueSize >= gcvMINUMUM_BUFFER);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ do
++ {
++ /***********************************************************************
++ ** Generic object initialization.
++ */
++
++ /* Allocate the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGCOMMAND),
++ (gctPOINTER *) &command
++ ));
++
++ /* Initialize the object. */
++ command->object.type = gcvOBJ_COMMAND;
++
++ /* Set the object pointers. */
++ command->kernel = Kernel;
++ command->os = Kernel->os;
++ command->hardware = Kernel->hardware;
++
++ /* Reset pointers. */
++ command->queue = gcvNULL;
++ command->queueMutex = gcvNULL;
++ command->taskMutex = gcvNULL;
++ command->commitMutex = gcvNULL;
++
++ command->powerStallBuffer = gcvNULL;
++ command->powerStallSignal = gcvNULL;
++ command->powerSemaphore = gcvNULL;
++
++ /* Reset context states. */
++ command->contextCounter = 0;
++ command->currentContext = 0;
++
++ /* Enable command buffer dumping. */
++ command->enableDumping = gcvTRUE;
++
++ /* Set features. */
++ command->fe20 = Kernel->hardware->fe20;
++ command->vg20 = Kernel->hardware->vg20;
++ command->vg21 = Kernel->hardware->vg21;
++
++        /* Reset the task table. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ command->taskTable, gcmSIZEOF(command->taskTable)
++ ));
++
++ /* Query command buffer attributes. */
++ gcmkERR_BREAK(gckVGCOMMAND_InitializeInfo(command));
++
++ /* Create the control mutexes. */
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->queueMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->taskMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->commitMutex));
++
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphore(Kernel->os,
++ &command->powerSemaphore));
++
++ gcmkERR_BREAK(gckOS_CreateSignal(Kernel->os,
++ gcvFALSE, &command->powerStallSignal));
++
++ /***********************************************************************
++ ** Command queue initialization.
++ */
++
++ /* Allocate the command queue. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ QueueSize,
++ (gctPOINTER *) &command->queue
++ ));
++
++ /* Initialize the command queue. */
++ queue = command->queue;
++
++ queue->size = QueueSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ queue->pending = 0;
++ queue->next = queue;
++
++ command->queueHead =
++ command->queueTail =
++ command->mergeQueue = command->queue;
++
++ command->queueOverflow = 0;
++
++
++ /***********************************************************************
++ ** Enable TS overflow interrupt.
++ */
++
++ command->info.tsOverflowInt = 0;
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->info.tsOverflowInt,
++ _EventHandler_TSOverflow
++ ));
++
++ /* Mask out the interrupt. */
++ Kernel->hardware->eventMask &= ~(1 << command->info.tsOverflowInt);
++
++
++ /***********************************************************************
++ ** Enable Bus Error interrupt.
++ */
++
++ /* Hardwired to bit 31. */
++ command->busErrorInt = 31;
++
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->busErrorInt,
++ _EventHandler_BusError
++ ));
++
++
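++        /***********************************************************************
++        ** Enable Power Stall interrupt.
++        */
++
++        /* Hardwired to bit 30. */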
++ command->powerStallInt = 30;
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->powerStallInt,
++ _EventHandler_PowerStall
++ ));
++
++ /***********************************************************************
++ ** Task management initialization.
++ */
++
++ command->taskStorage = gcvNULL;
++ command->taskStorageGranularity = TaskGranularity;
++ command->taskStorageUsable = TaskGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++
++ command->taskFreeHead = gcvNULL;
++ command->taskFreeTail = gcvNULL;
++
++ /* Enable block handlers. */
++ for (i = 0; i < gcmCOUNTOF(_blockHandlers); i += 1)
++ {
++ /* Get the target hardware block. */
++ gceBLOCK block = _blockHandlers[i].block;
++
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[block];
++
++ /* Determine the interrupt value index. */
++ gctUINT index = entry->interruptCount;
++
++ /* Create the block semaphore. */
++ if (entry->interruptSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ command->os, &entry->interruptSemaphore
++ ));
++ }
++
++ /* Enable auto-detection. */
++ entry->interruptArray[index] = -1;
++
++ /* Enable interrupt for the block. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &entry->interruptArray[index],
++ _blockHandlers[i].handler
++ ));
++
++ /* Update the number of registered interrupts. */
++ entry->interruptCount += 1;
++
++            /* Increment the semaphore to allow the usage of the registered
++ interrupt. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Get the FE interrupt. */
++ command->info.feBufferInt
++ = command->taskTable[gcvBLOCK_COMMAND].interruptArray[0];
++
++ /* Return gckVGCOMMAND object pointer. */
++ *Command = command;
++
++ gcmkFOOTER_ARG("*Command=0x%x",*Command);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the task table entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[i];
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ for (j = 0; j < entry->interruptCount; j += 1)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[j] >= 0);
++ gcmkASSERT(entry->interruptArray[j] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ entry->interruptArray[j]
++ ));
++ }
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (command->info.tsOverflowInt != -1)
++ {
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->info.tsOverflowInt
++ ));
++ }
++
++ /* Delete the commit mutex. */
++ if (command->commitMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->commitMutex
++ ));
++ }
++
++        /* Delete the task mutex. */
++ if (command->taskMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->taskMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->queueMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->queueMutex
++ ));
++ }
++
++ /* Delete the command queue. */
++ if (command->queue != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command->queue
++ ));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(
++ Kernel->os, command->powerSemaphore));
++ }
++
++ if (command->powerStallSignal != gcvNULL)
++ {
++            /* Destroy the power stall signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Kernel->os,
++ command->powerStallSignal));
++ }
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command
++ ));
++ }
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ OUT gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ gctUINT i;
++ gcsTASK_STORAGE_PTR nextStorage;
++
++ if (Command->queueHead != gcvNULL)
++ {
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, Command->queueHead));
++ }
++
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &Command->taskTable[i];
++
++ /* Determine the index of the last interrupt in the array. */
++ gctINT index = entry->interruptCount - 1;
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ while (index >= 0)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[index] >= 0);
++ gcmkASSERT(entry->interruptArray[index] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ entry->interruptArray[index]
++ ));
++
++ /* Update to the next interrupt. */
++ index -= 1;
++ entry->interruptCount -= 1;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (Command->info.tsOverflowInt != -1)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->info.tsOverflowInt
++ ));
++
++ Command->info.tsOverflowInt = -1;
++ }
++
++ /* Delete the commit mutex. */
++ if (Command->commitMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->commitMutex
++ ));
++
++ Command->commitMutex = gcvNULL;
++ }
++
++        /* Delete the task mutex. */
++ if (Command->taskMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->taskMutex
++ ));
++
++ Command->taskMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->queueMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->queueMutex
++ ));
++
++ Command->queueMutex = gcvNULL;
++ }
++
++ if (Command->powerSemaphore != gcvNULL)
++ {
++ /* Destroy the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++
++ if (Command->powerStallSignal != gcvNULL)
++ {
++            /* Destroy the power stall signal. */
++ gcmkERR_BREAK(gckOS_DestroySignal(
++ Command->os,
++ Command->powerStallSignal));
++ }
++
++ if (Command->queue != gcvNULL)
++ {
++ /* Delete the command queue. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->queue
++ ));
++ }
++
++ /* Destroy all allocated buffers. */
++ while (Command->taskStorage)
++ {
++ /* Copy the buffer pointer. */
++ nextStorage = Command->taskStorage->next;
++
++ /* Free the current container. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->taskStorage
++ ));
++
++ /* Advance to the next one. */
++ Command->taskStorage = nextStorage;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Mark the object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Free(Command->os, Command));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Restore the object type if failed. */
++ Command->object.type = gcvOBJ_COMMAND;
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Information=0x%x", Command, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Information != gcvNULL);
++
++ /* Copy the information. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ Information, &Command->info, sizeof(gcsCOMMAND_BUFFER_INFO)
++ ));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x CommandBuffer=0x%x Data=0x%x",
++ Command, Size, CommandBuffer, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ do
++ {
++ /* Allocate the buffer. */
++ gcmkERR_BREAK(_AllocateCommandBuffer(Command, Size, CommandBuffer));
++
++ /* Determine the data pointer. */
++ * Data = (gctUINT8_PTR) (*CommandBuffer) + (* CommandBuffer)->bufferOffset;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ /* Free command buffer. */
++ status = _FreeCommandBuffer(Command->kernel, CommandBuffer);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ do
++ {
++ gctUINT queueLength;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Set the buffer. */
++ kernelEntry->commandBuffer = CommandBuffer;
++ kernelEntry->handler = _FreeKernelCommandBuffer;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, 1
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ )
++{
++ /*
++ The first buffer is executed through a direct gckVGHARDWARE_Execute call,
++ therefore only an update is needed after the execution is over. All
++        subsequent buffers need to be executed upon the first update call from
++ the FE interrupt handler.
++ */
++
++ static gcsQUEUE_UPDATE_CONTROL _dynamicBuffer[] =
++ {
++ {
++ _UpdateDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ },
++ {
++ _ExecuteDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _ExecuteLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ }
++ };
++
++ static gcsQUEUE_UPDATE_CONTROL _staticBuffer[] =
++ {
++ {
++ _UpdateStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ },
++ {
++ _ExecuteStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _ExecuteLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ }
++ };
++
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x Queue=0x%x EntryCount=0x%x TaskTable=0x%x",
++ Command, Context, Queue, EntryCount, TaskTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++ gcmkVERIFY_ARGUMENT(EntryCount > 1);
++
++#ifdef __QNXNTO__
++ TaskTable->coid = Context->coid;
++ TaskTable->rcvid = Context->rcvid;
++#endif /* __QNXNTO__ */
++
++ do
++ {
++ gctBOOL haveFETasks;
++ gctUINT queueSize;
++ gcsVGCMDQUEUE_PTR mappedQueue;
++ gcsVGCMDQUEUE_PTR userEntry;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++ gcsQUEUE_UPDATE_CONTROL_PTR queueControl;
++ gctUINT currentLength;
++ gctUINT queueLength;
++ gctUINT entriesQueued;
++ gctUINT8_PTR previousEnd;
++ gctBOOL previousDynamic;
++ gctBOOL previousExecuted;
++ gctUINT controlIndex;
++
++ gcmkERR_BREAK(gckVGHARDWARE_SetPowerManagementState(
++ Command->hardware, gcvPOWER_ON_AUTO
++ ));
++
++ /* Acquire the power semaphore. */
++ gcmkERR_BREAK(gckOS_AcquireSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ status = gckOS_AcquireMutex(
++ Command->os,
++ Command->commitMutex,
++ gcvINFINITE
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ break;
++ }
++
++ do
++ {
++ gcmkERR_BREAK(_FlushMMU(Command));
++
++ /* Assign a context ID if not yet assigned. */
++ if (Context->id == 0)
++ {
++ /* Assign the next context number. */
++ Context->id = ++ Command->contextCounter;
++
++ /* See if we overflowed. */
++ if (Command->contextCounter == 0)
++ {
++ /* We actually did overflow, wow... */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++ }
++
++ /* The first entry in the queue is always the context buffer.
++ Verify whether the user context is the same as the current
++ context and if that's the case, skip the first entry. */
++ if (Context->id == Command->currentContext)
++ {
++ /* Same context as before, skip the first entry. */
++ EntryCount -= 1;
++ Queue += 1;
++
++ /* Set the signal to avoid user waiting. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->rcvid, Context->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->process
++ ));
++
++#endif /* __QNXNTO__ */
++
++ }
++ else
++ {
++ /* Different user context - keep the first entry.
++ Set the user context as the current one. */
++ Command->currentContext = Context->id;
++ }
++
++ /* Reset pointers. */
++ queueControl = gcvNULL;
++ previousEnd = gcvNULL;
++
++ /* Determine whether there are FE tasks to be performed. */
++ haveFETasks = (TaskTable->table[gcvBLOCK_COMMAND].head != gcvNULL);
++
++ /* Determine the size of the queue. */
++ queueSize = EntryCount * gcmSIZEOF(gcsVGCMDQUEUE);
++
++ /* Map the command queue into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ (gctPOINTER *) &mappedQueue
++ ));
++
++ /* Set the first entry. */
++ userEntry = mappedQueue;
++
++ /* Process the command queue. */
++ while (EntryCount)
++ {
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Determine the number of entries to process. */
++ currentLength = (queueLength < EntryCount)
++ ? queueLength
++ : EntryCount;
++
++ /* Update the number of the entries left to process. */
++ EntryCount -= currentLength;
++
++ /* Reset previous flags. */
++ previousDynamic = gcvFALSE;
++ previousExecuted = gcvFALSE;
++
++ /* Set the initial control index. */
++ controlIndex = 0;
++
++ /* Process entries. */
++ for (entriesQueued = 0; entriesQueued < currentLength; entriesQueued += 1)
++ {
++ /* Get the kernel pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++ gcmkERR_BREAK(_ConvertUserCommandBufferPointer(
++ Command,
++ userEntry->commandBuffer,
++ &commandBuffer
++ ));
++
++ /* Is it a dynamic command buffer? */
++ if (userEntry->dynamic)
++ {
++ /* Select dynamic buffer control functions. */
++ queueControl = &_dynamicBuffer[controlIndex];
++ }
++
++ /* No, a static command buffer. */
++ else
++ {
++ /* Select static buffer control functions. */
++ queueControl = &_staticBuffer[controlIndex];
++ }
++
++ /* Set the command buffer pointer to the entry. */
++ kernelEntry->commandBuffer = commandBuffer;
++
++ /* If the previous entry was a dynamic command buffer,
++ link it to the current. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command,
++ previousEnd,
++ commandBuffer->address,
++ commandBuffer->dataCount,
++ gcvNULL
++ ));
++
++                        /* The buffer will be auto-executed; it only needs to
++                           be updated after it has been executed. */
++ kernelEntry->handler = queueControl->update;
++
++ /* The buffer is only being updated. */
++ previousExecuted = gcvFALSE;
++ }
++ else
++ {
++ /* Set the buffer up for execution. */
++ kernelEntry->handler = queueControl->execute;
++
++                        /* The buffer is being executed. */
++ previousExecuted = gcvTRUE;
++ }
++
++ /* The current buffer's END command becomes the last END. */
++ previousEnd
++ = ((gctUINT8_PTR) commandBuffer)
++ + commandBuffer->bufferOffset
++ + commandBuffer->dataCount * Command->info.commandAlignment
++ - Command->info.staticTailSize;
++
++ /* Update the last entry info. */
++ previousDynamic = userEntry->dynamic;
++
++ /* Advance entries. */
++ userEntry += 1;
++ kernelEntry += 1;
++
++ /* Update the control index. */
++ controlIndex = 1;
++ }
++
++ /* If the previous entry was a dynamic command buffer,
++ terminate it with an END. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ previousEnd,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++ }
++
++ /* Last buffer? */
++ if (EntryCount == 0)
++ {
++                    /* Modify the last command buffer's routines to handle
++                       tasks, if any. */
++ if (haveFETasks)
++ {
++ if (previousExecuted)
++ {
++ kernelEntry[-1].handler = queueControl->lastExecute;
++ }
++ else
++ {
++ kernelEntry[-1].handler = queueControl->lastUpdate;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++ /* Schedule tasks. */
++ gcmkERR_BREAK(_ScheduleTasks(Command, TaskTable, previousEnd));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++ }
++
++                /* Unlock and schedule the current queue for execution. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, currentLength
++ ));
++ }
++
++
++ /* Unmap the user command buffer. */
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ mappedQueue
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ Command->os,
++ Command->commitMutex
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_db.c 2015-05-01 14:57:59.527427001 -0500
+@@ -0,0 +1,1604 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_DATABASE
++
++/*******************************************************************************
++***** Private functions *******************************************************/
++
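++/* Map a record pointer to a hash bucket: the low-order address bits are
++   dropped and the result is taken modulo the number of buckets in the
++   database record list. */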
++#define _GetSlot(database, x) \
++ (gctUINT32)(((gcmPTR_TO_UINT64(x) >> 7) % gcmCOUNTOF(database->list)))
++
++/*******************************************************************************
++** gckKERNEL_NewDatabase
++**
++** Create a new database structure and insert it to the head of the hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_NewDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++ gctSIZE_T slot;
++ gcsDATABASE_PTR existingDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Walk the hash list. */
++ for (existingDatabase = Kernel->db->db[slot];
++ existingDatabase != gcvNULL;
++ existingDatabase = existingDatabase->next)
++ {
++ if (existingDatabase->processID == ProcessID)
++ {
++ /* One process can't be added twice. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++ }
++
++ if (Kernel->db->freeDatabase != gcvNULL)
++ {
++ /* Allocate a database from the free list. */
++ database = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = database->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate a new database from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE),
++ &pointer));
++
++ database = pointer;
++ }
++
++ /* Insert the database into the hash. */
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++
++ /* Save the hash slot. */
++ database->slot = slot;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindDatabase
++**
++** Find a database identified by a process ID and move it to the head of the
++** hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database, previous;
++ gctSIZE_T slot;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d",
++ Kernel, ProcessID, LastProcessID);
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check whether we are getting the last known database. */
++ if (LastProcessID)
++ {
++ /* Use last database. */
++ database = Kernel->db->lastDatabase;
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ else
++ {
++ /* Walk the hash list. */
++ for (previous = gcvNULL, database = Kernel->db->db[slot];
++ database != gcvNULL;
++ database = database->next)
++ {
++ if (database->processID == ProcessID)
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = database;
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (previous != gcvNULL)
++ {
++ /* Move database to the head of the hash list. */
++ previous->next = database->next;
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteDatabase
++**
++** Remove a database from the hash list and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to the database structure to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++static gceSTATUS
++gckKERNEL_DeleteDatabase(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check slot value. */
++ gcmkVERIFY_ARGUMENT(Database->slot < gcmCOUNTOF(Kernel->db->db));
++
++ if (Database->slot < gcmCOUNTOF(Kernel->db->db))
++ {
++        /* Check if the database is the head of the hash list. */
++ if (Kernel->db->db[Database->slot] == Database)
++ {
++ /* Remove the database from the hash list. */
++ Kernel->db->db[Database->slot] = Database->next;
++ }
++ else
++ {
++            /* Walk the hash list to find the database. */
++ for (database = Kernel->db->db[Database->slot];
++ database != gcvNULL;
++ database = database->next
++ )
++ {
++ /* Check if the next list entry is this database. */
++ if (database->next == Database)
++ {
++ /* Remove the database from the hash list. */
++ database->next = Database->next;
++ break;
++ }
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Ouch! Something got corrupted. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ /* Insert database to the free list. */
++ Kernel->db->lastDatabase->next = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = Kernel->db->lastDatabase;
++ }
++
++ /* Keep database as the last database. */
++ Kernel->db->lastDatabase = Database;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_NewRecord
++**
++** Create a new database record structure and insert it to the head of the
++** database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR * Record
++** Pointer to a variable receiving the database record structure
++** pointer on success.
++*/
++static gceSTATUS
++gckKERNEL_NewRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gctUINT32 Slot,
++ OUT gcsDATABASE_RECORD_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (Kernel->db->freeRecord != gcvNULL)
++ {
++ /* Allocate the record from the free list. */
++ record = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate the record from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE_RECORD),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Insert the record in the database. */
++ record->next = Database->list[Slot];
++ Database->list[Slot] = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the record. */
++ *Record = record;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", *Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteRecord
++**
++** Remove a database record from the database and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_DeleteRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T_PTR Bytes OPTIONAL
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record, previous;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot], previous = gcvNULL;
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = record;
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return size of record. */
++ *Bytes = record->bytes;
++ }
++
++ /* Remove record from database. */
++ if (previous == gcvNULL)
++ {
++ Database->list[slot] = record->next;
++ }
++ else
++ {
++ previous->next = record->next;
++ }
++
++ /* Insert record in free list. */
++ record->next = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindRecord
++**
++** Find a database record from the database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++**      gceDATABASE_TYPE Type
++**          Type of the record to find.
++**
++**      gctPOINTER Data
++**          Data of the record to find.
++**
++**  OUTPUT:
++**
++**      gcsDATABASE_RECORD_PTR Record
++**          Pointer to a variable that receives a copy of the record found.
++**          Can be gcvNULL if the record contents are not required.
++*/
++static gceSTATUS
++gckKERNEL_FindRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot];
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Record != gcvNULL)
++ {
++ /* Return information of record. */
++ gcmkONERROR(
++ gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD)));
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("Record=0x%x", Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++/*******************************************************************************
++***** Public API **************************************************************/
++
++/*******************************************************************************
++** gckKERNEL_CreateProcessDB
++**
++** Create a new process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database = gcvNULL;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Create a new database. */
++ gcmkONERROR(gckKERNEL_NewDatabase(Kernel, ProcessID, &database));
++
++ /* Initialize the database. */
++ database->processID = ProcessID;
++ database->vidMem.bytes = 0;
++ database->vidMem.maxBytes = 0;
++ database->vidMem.totalBytes = 0;
++ database->nonPaged.bytes = 0;
++ database->nonPaged.maxBytes = 0;
++ database->nonPaged.totalBytes = 0;
++ database->contiguous.bytes = 0;
++ database->contiguous.maxBytes = 0;
++ database->contiguous.totalBytes = 0;
++ database->mapMemory.bytes = 0;
++ database->mapMemory.maxBytes = 0;
++ database->mapMemory.totalBytes = 0;
++ database->mapUserMemory.bytes = 0;
++ database->mapUserMemory.maxBytes = 0;
++ database->mapUserMemory.totalBytes = 0;
++ database->vidMemResv.bytes = 0;
++ database->vidMemResv.maxBytes = 0;
++ database->vidMemResv.totalBytes = 0;
++ database->vidMemCont.bytes = 0;
++ database->vidMemCont.maxBytes = 0;
++ database->vidMemCont.totalBytes = 0;
++ database->vidMemVirt.bytes = 0;
++ database->vidMemVirt.maxBytes = 0;
++ database->vidMemVirt.totalBytes = 0;
++
++ for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++ database->list[i] = gcvNULL;
++ }
++
++#if gcdSECURE_USER
++ {
++ gctINT slot;
++ gcskSECURE_CACHE * cache = &database->cache;
++
++ /* Setup the linked list of cache nodes. */
++ for (slot = 1; slot <= gcdSECURE_CACHE_SLOTS; ++slot)
++ {
++ cache->cache[slot].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ cache->cache[slot].prev = &cache->cache[slot - 1];
++ cache->cache[slot].next = &cache->cache[slot + 1];
++# endif
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ cache->cache[slot].nextHash = gcvNULL;
++ cache->cache[slot].prevHash = gcvNULL;
++# endif
++ }
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Setup the head and tail of the cache. */
++ cache->cache[0].next = &cache->cache[1];
++ cache->cache[0].prev = &cache->cache[gcdSECURE_CACHE_SLOTS];
++ cache->cache[0].logical = gcvNULL;
++
++ /* Fix up the head and tail pointers. */
++ cache->cache[0].next->prev = &cache->cache[0];
++ cache->cache[0].prev->next = &cache->cache[0];
++# endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero out the hash table. */
++ for (slot = 0; slot < gcmCOUNTOF(cache->hash); ++slot)
++ {
++ cache->hash[slot].logical = gcvNULL;
++ cache->hash[slot].nextHash = gcvNULL;
++ }
++# endif
++
++ /* Initialize cache index. */
++ cache->cacheIndex = gcvNULL;
++ cache->cacheFree = 1;
++ cache->cacheStamp = 0;
++ }
++#endif
++
++ /* Reset idle timer. */
++ Kernel->db->lastIdle = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
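++
++/*
++    Illustrative sketch (not part of the original driver): a typical lifetime
++    of a process database, assuming a valid gckKERNEL object in "kernel", the
++    calling process ID in "processID", and placeholder values for "logical",
++    "physical" and "bytes". Error handling is reduced to the usual
++    gcmkONERROR pattern.
++
++        gcmkONERROR(gckKERNEL_CreateProcessDB(kernel, processID));
++
++        /* Record every allocation made on behalf of the process ... */
++        gcmkONERROR(gckKERNEL_AddProcessDB(
++            kernel, processID, gcvDB_NON_PAGED, logical, physical, bytes));
++
++        /* ... and remove the record again when the allocation is freed. */
++        gcmkONERROR(gckKERNEL_RemoveProcessDB(
++            kernel, processID, gcvDB_NON_PAGED, logical));
++
++        /* When the process detaches (or dies), destroy the database; any
++           records still present are cleaned up on the process' behalf. */
++        gcmkONERROR(gckKERNEL_DestroyProcessDB(kernel, processID));
++*/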
++
++/*******************************************************************************
++** gckKERNEL_AddProcessDB
++**
++** Add a record to a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++**      gceDATABASE_TYPE Type
++** Type of the record to add.
++**
++** gctPOINTER Pointer
++** Data of the record to add.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the record to add.
++**
++** gctSIZE_T Size
++** Size of the record to add.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++ gcsDATABASE_COUNTERS * count;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x "
++ "Physical=0x%x Size=%lu",
++ Kernel, ProcessID, Type, Pointer, Physical, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Special case the idle record. */
++ if (Type == gcvDB_IDLE)
++ {
++ gctUINT64 time;
++
++ /* Get the current profile time. */
++ gcmkONERROR(gckOS_GetProfileTick(&time));
++
++ if ((ProcessID == 0) && (Kernel->db->lastIdle != 0))
++ {
++ /* Out of idle, adjust time it was idle. */
++ Kernel->db->idleTime += time - Kernel->db->lastIdle;
++ Kernel->db->lastIdle = 0;
++ }
++ else if (ProcessID == 1)
++ {
++ /* Save current idle time. */
++ Kernel->db->lastIdle = time;
++ }
++
++#if gcdDYNAMIC_SPEED
++ {
++ /* Test for first call. */
++ if (Kernel->db->lastSlowdown == 0)
++ {
++ /* Save milliseconds. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ else
++ {
++            /* Compute elapsed time in milliseconds. */
++ gctUINT delta = gckOS_ProfileToMS(time - Kernel->db->lastSlowdown);
++
++ /* Test for end of period. */
++ if (delta >= gcdDYNAMIC_SPEED)
++ {
++ /* Compute number of idle milliseconds. */
++ gctUINT idle = gckOS_ProfileToMS(
++ Kernel->db->idleTime - Kernel->db->lastSlowdownIdle);
++
++ /* Broadcast to slow down the GPU. */
++ gcmkONERROR(gckOS_BroadcastCalibrateSpeed(Kernel->os,
++ Kernel->hardware,
++ idle,
++ delta));
++
++ /* Save current time. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ }
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Create a new record in the database. */
++ gcmkONERROR(gckKERNEL_NewRecord(Kernel, database, _GetSlot(database, Pointer), &record));
++
++ /* Initialize the record. */
++ record->kernel = Kernel;
++ record->type = Type;
++ record->data = Pointer;
++ record->physical = Physical;
++ record->bytes = Size;
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ count = &database->vidMem;
++ break;
++
++ case gcvDB_NON_PAGED:
++ count = &database->nonPaged;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ count = &database->contiguous;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ count = &database->mapMemory;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ count = &database->mapUserMemory;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ count = &database->vidMemResv;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ count = &database->vidMemCont;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ count = &database->vidMemVirt;
++ break;
++
++ default:
++ count = gcvNULL;
++ break;
++ }
++
++ if (count != gcvNULL)
++ {
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
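++
++/*
++    Illustrative note (not part of the original driver): each per-type counter
++    tracks current, peak and cumulative usage. A hypothetical sequence of
++    calls for gcvDB_VIDEO_MEMORY would evolve as follows:
++
++        add    4096 bytes -> bytes =  4096, maxBytes =  4096, totalBytes =  4096
++        add    8192 bytes -> bytes = 12288, maxBytes = 12288, totalBytes = 12288
++        remove 4096 bytes -> bytes =  8192, maxBytes = 12288, totalBytes = 12288
++
++    gckKERNEL_RemoveProcessDB only decrements "bytes"; "maxBytes" and
++    "totalBytes" are left untouched so peak and lifetime usage stay visible.
++*/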
++
++/*******************************************************************************
++** gckKERNEL_RemoveProcessDB
++**
++** Remove a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++**      gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Delete the record. */
++ gcmkONERROR(
++ gckKERNEL_DeleteRecord(Kernel, database, Type, Pointer, &bytes));
++
++ /* Update counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ database->vidMem.bytes -= bytes;
++ break;
++
++ case gcvDB_NON_PAGED:
++ database->nonPaged.bytes -= bytes;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ database->contiguous.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ database->mapMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ database->mapUserMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ database->vidMemResv.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ database->vidMemCont.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ database->vidMemVirt.bytes -= bytes;
++ break;
++
++ default:
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindProcessDB
++**
++** Find a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++**      gceDATABASE_TYPE Type
++**          Type of the record to find.
++**
++**      gctPOINTER Pointer
++**          Data of the record to find.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR Record
++** Copy of record.
++*/
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++    gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d ThreadID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, ThreadID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Find the record. */
++ gcmkONERROR(
++ gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DestroyProcessDB
++**
++** Destroy a process database. If the database contains any records, the data
++** inside those records will be deleted as well. This aids in the cleanup if
++** a process has died unexpectedly or has memory leaks.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record, next;
++ gctBOOL asynchronous;
++ gctPHYS_ADDR physical;
++ gcuVIDMEM_NODE_PTR node;
++ gckKERNEL kernel = Kernel;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): VidMem: total=%lu max=%lu",
++ ProcessID, database->vidMem.totalBytes,
++ database->vidMem.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): NonPaged: total=%lu max=%lu",
++ ProcessID, database->nonPaged.totalBytes,
++ database->nonPaged.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Contiguous: total=%lu max=%lu",
++ ProcessID, database->contiguous.totalBytes,
++ database->contiguous.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Idle time=%llu",
++ ProcessID, Kernel->db->idleTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapMemory.totalBytes,
++ database->mapMemory.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++                   "DB(%d): MapUserMemory: total=%lu max=%lu",
++ ProcessID, database->mapUserMemory.totalBytes,
++ database->mapUserMemory.maxBytes);
++
++ if (database->list != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "Process %d has entries in its database:",
++ ProcessID);
++ }
++
++    for (i = 0; i < gcmCOUNTOF(database->list); i++)
++    {
++ /* Walk all records. */
++ for (record = database->list[i]; record != gcvNULL; record = next)
++ {
++            /* Get the next record. */
++ next = record->next;
++
++ /* Dispatch on record type. */
++ switch (record->type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ /* Free the video memory. */
++ status = gckVIDMEM_Free(gcmUINT64_TO_PTR(record->data));
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_NON_PAGED:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the non paged memory. */
++ status = gckOS_FreeNonPagedMemory(Kernel->os,
++ record->bytes,
++ physical,
++ record->data);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: NON_PAGED 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ case gcvDB_COMMAND_BUFFER:
++ /* Free the command buffer. */
++ status = gckEVENT_DestroyVirtualCommandBuffer(record->kernel->eventObj,
++ record->bytes,
++ gcmNAME_TO_PTR(record->physical),
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: COMMAND_BUFFER 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++#endif
++
++ case gcvDB_CONTIGUOUS:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the contiguous memory. */
++ status = gckEVENT_FreeContiguousMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTIGUOUS 0x%x bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_SIGNAL:
++#if USE_NEW_LINUX_SIGNAL
++ status = gcvSTATUS_NOT_SUPPORTED;
++#else
++ /* Free the user signal. */
++ status = gckOS_DestroyUserSignal(Kernel->os,
++ gcmPTR2INT(record->data));
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SIGNAL %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++
++ case gcvDB_VIDEO_MEMORY_LOCKED:
++ node = gcmUINT64_TO_PTR(record->data);
++ /* Unlock what we still locked */
++ status = gckVIDMEM_Unlock(record->kernel,
++ node,
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous);
++
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++                    /* TODO: we may need to schedule an event here. */
++ status = gckVIDMEM_Unlock(record->kernel,
++ node,
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL);
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY_LOCKED 0x%x (status=%d)",
++ node, status);
++ break;
++
++ case gcvDB_CONTEXT:
++ /* TODO: Free the context */
++ status = gckCOMMAND_Detach(Kernel->command, gcmNAME_TO_PTR(record->data));
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTEXT 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ /* Unmap memory. */
++ status = gckKERNEL_UnmapMemory(Kernel,
++ record->physical,
++ record->bytes,
++ record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP MEMORY %d (status=%d)",
++ gcmPTR2INT(record->data), status);
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ /* TODO: Unmap user memory. */
++ status = gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ record->physical,
++ record->bytes,
++ gcmNAME_TO_PTR(record->data),
++ 0);
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP USER MEMORY %d (status=%d)",
++ gcmPTR2INT(record->data), status);
++ break;
++
++ case gcvDB_SHARED_INFO:
++ status = gckOS_FreeMemory(Kernel->os, record->physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvDB_SYNC_POINT:
++ /* Free the user signal. */
++ status = gckOS_DestroySyncPoint(Kernel->os,
++ (gctSYNC_POINT) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SYNC POINT %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++#endif
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++                break; /* Nothing to do. */
++
++ default:
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DATABASE,
++                           "DB: Corrupted record=0x%08x type=%d",
++ record, record->type);
++ break;
++ }
++
++ /* Delete the record. */
++ gcmkONERROR(gckKERNEL_DeleteRecord(Kernel,
++ database,
++ record->type,
++ record->data,
++ gcvNULL));
++ }
++
++ }
++
++ /* Delete the database. */
++ gcmkONERROR(gckKERNEL_DeleteDatabase(Kernel, database));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_QueryProcessDB
++**
++** Query a process database for the current usage of a particular record type.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to query.
++**
++** OUTPUT:
++**
++** gcuDATABASE_INFO * Info
++** Pointer to a variable that receives the requested information.
++*/
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Info=0x%x",
++ Kernel, ProcessID, Type, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, LastProcessID, &database));
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMem,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_NON_PAGED:
++ gckOS_MemCopy(&Info->counters,
++ &database->nonPaged,
++                      gcmSIZEOF(database->nonPaged));
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->contiguous,
++                      gcmSIZEOF(database->contiguous));
++ break;
++
++ case gcvDB_IDLE:
++ Info->time = Kernel->db->idleTime;
++ Kernel->db->idleTime = 0;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapMemory,
++ gcmSIZEOF(database->mapMemory));
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapUserMemory,
++ gcmSIZEOF(database->mapUserMemory));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemResv,
++ gcmSIZEOF(database->vidMemResv));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemCont,
++ gcmSIZEOF(database->vidMemCont));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemVirt,
++ gcmSIZEOF(database->vidMemVirt));
++ break;
++
++ default:
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
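++
++/*
++    Illustrative sketch (not part of the original driver): querying the
++    current video memory usage of a process, assuming "kernel" and
++    "processID" are valid.
++
++        gcuDATABASE_INFO info;
++
++        gcmkONERROR(gckKERNEL_QueryProcessDB(
++            kernel, processID, gcvFALSE, gcvDB_VIDEO_MEMORY, &info));
++
++        /* info.counters.bytes now holds the current allocation size,
++           info.counters.maxBytes the peak and info.counters.totalBytes the
++           cumulative total recorded for this process. */
++*/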
++
++#if gcdSECURE_USER
++/*******************************************************************************
++** gckKERNEL_GetProcessDBCache
++**
++**  Get the secure cache from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** gcskSECURE_CACHE_PTR * Cache
++** Pointer to a variable that receives the secure cache pointer.
++*/
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Cache != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Return the pointer to the cache. */
++ *Cache = &database->cache;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Cache=0x%x", *Cache);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** PROCESS DB DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME");
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ gcmkPRINT_N(8, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_debug.c 2015-05-01 14:57:59.527427001 -0500
+@@ -0,0 +1,2559 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include <gc_hal_kernel_debug.h>
++
++/******************************************************************************\
++******************************** Debug Variables *******************************
++\******************************************************************************/
++
++static gceSTATUS _lastError = gcvSTATUS_OK;
++static gctUINT32 _debugLevel = gcvLEVEL_ERROR;
++/*
++    _debugZones configuration value.
++    Please refer to the zone definitions in gc_hal_base.h.
++*/
++static gctUINT32 _debugZones = gcvZONE_NONE;
++
++/******************************************************************************\
++********************************* Debug Switches *******************************
++\******************************************************************************/
++
++/*
++ gcdBUFFERED_OUTPUT
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console. gcdBUFFERED_SIZE determines the size of the buffer.
++*/
++#define gcdBUFFERED_OUTPUT 0
++
++/*
++ gcdBUFFERED_SIZE
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console.
++*/
++#define gcdBUFFERED_SIZE (1024 * 1024 * 2)
++
++/*
++ gcdDMA_BUFFER_COUNT
++
++    If greater than zero, the debugger will attempt to find the command buffer
++ where DMA is currently executing and then print this buffer and
++ (gcdDMA_BUFFER_COUNT - 1) buffers before the current one. If set to zero
++ or the current buffer is not found, all buffers are printed.
++*/
++#define gcdDMA_BUFFER_COUNT 0
++
++/*
++ gcdTHREAD_BUFFERS
++
++    When greater than one, messages from the specified number of threads
++    will be accumulated in separate output buffers.
++*/
++#define gcdTHREAD_BUFFERS 1
++
++/*
++ gcdENABLE_OVERFLOW
++
++    When set to non-zero and the output buffer gets full, instead of being
++    printed, the buffer is allowed to overflow, discarding the oldest messages.
++*/
++#define gcdENABLE_OVERFLOW 1
++
++/*
++ gcdSHOW_LINE_NUMBER
++
++    When enabled, each print statement will be preceded by the current
++    line number.
++*/
++#define gcdSHOW_LINE_NUMBER 0
++
++/*
++ gcdSHOW_PROCESS_ID
++
++    When enabled, each print statement will be preceded by the current
++    process ID.
++*/
++#define gcdSHOW_PROCESS_ID 0
++
++/*
++ gcdSHOW_THREAD_ID
++
++    When enabled, each print statement will be preceded by the current
++    thread ID.
++*/
++#define gcdSHOW_THREAD_ID 0
++
++/*
++ gcdSHOW_TIME
++
++    When enabled, each print statement will be preceded by the current
++    high-resolution time.
++*/
++#define gcdSHOW_TIME 0
++
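++/*
++    Illustrative note (not part of the original driver): a hypothetical debug
++    configuration that buffers roughly 2 MB of messages per output buffer and
++    stamps every line with a time and thread ID would look like:
++
++        #define gcdBUFFERED_OUTPUT 1
++        #define gcdBUFFERED_SIZE   (1024 * 1024 * 2)
++        #define gcdTHREAD_BUFFERS  4
++        #define gcdSHOW_TIME       1
++        #define gcdSHOW_THREAD_ID  1
++
++    With gcdBUFFERED_OUTPUT enabled, messages are only flushed to the console
++    once a buffer fills, or, with gcdENABLE_OVERFLOW, the oldest entries are
++    dropped instead, which keeps console output off the timing-critical path.
++*/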
++
++/******************************************************************************\
++****************************** Miscellaneous Macros ****************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmDBGASSERT(Expression, Format, Value) \
++ if (!(Expression)) \
++ { \
++ _DirectPrint( \
++ "*** gcmDBGASSERT ***************************\n" \
++ " function : %s\n" \
++ " line : %d\n" \
++ " expression : " #Expression "\n" \
++ " actual value : " Format "\n", \
++ __FUNCTION__, __LINE__, Value \
++ ); \
++ }
++#else
++# define gcmDBGASSERT(Expression, Format, Value)
++#endif
++
++#define gcmPTRALIGNMENT(Pointer, Alignment) \
++( \
++    gcmALIGN(gcmPTR2INT(Pointer), Alignment) - gcmPTR2INT(Pointer) \
++)
++
++#if gcdALIGNBYSIZE
++# define gcmISALIGNED(Offset, Alignment) \
++ (((Offset) & ((Alignment) - 1)) == 0)
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment) \
++ Pointer = (Type) gcmINT2PTR(gcmALIGN(gcmPTR2INT(Pointer), Alignment))
++#else
++# define gcmISALIGNED(Offset, Alignment) \
++ gcvTRUE
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment)
++#endif
++
++#define gcmALIGNSIZE(Offset, Size) \
++ ((Size - Offset) + Size)
++
++#define gcdHAVEPREFIX \
++( \
++ gcdSHOW_TIME \
++ || gcdSHOW_LINE_NUMBER \
++ || gcdSHOW_PROCESS_ID \
++ || gcdSHOW_THREAD_ID \
++)
++
++#if gcdHAVEPREFIX
++
++# define gcdOFFSET 0
++
++#if gcdSHOW_TIME
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdTIMESIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdTIMESIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdTIMEFORMAT "0x%016llX"
++# else
++# define gcdTIMEFORMAT ", 0x%016llX"
++# endif
++# else
++# define gcdTIMESIZE 0
++# define gcdTIMEFORMAT
++# endif
++
++#if gcdSHOW_LINE_NUMBER
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdNUMSIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdNUMSIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdNUMFORMAT "%8llu"
++# else
++# define gcdNUMFORMAT ", %8llu"
++# endif
++# else
++# define gcdNUMSIZE 0
++# define gcdNUMFORMAT
++# endif
++
++#if gcdSHOW_PROCESS_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdPIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPIDFORMAT "pid=%5d"
++# else
++# define gcdPIDFORMAT ", pid=%5d"
++# endif
++# else
++# define gcdPIDSIZE 0
++# define gcdPIDFORMAT
++# endif
++
++#if gcdSHOW_THREAD_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdTIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdTIDFORMAT "tid=%5d"
++# else
++# define gcdTIDFORMAT ", tid=%5d"
++# endif
++# else
++# define gcdTIDSIZE 0
++# define gcdTIDFORMAT
++# endif
++
++# define gcdPREFIX_SIZE \
++ ( \
++ gcdTIMESIZE \
++ + gcdNUMSIZE \
++ + gcdPIDSIZE \
++ + gcdTIDSIZE \
++ )
++
++ static const char * _prefixFormat =
++ "["
++ gcdTIMEFORMAT
++ gcdNUMFORMAT
++ gcdPIDFORMAT
++ gcdTIDFORMAT
++ "] ";
++
++#else
++
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPREFIX_SIZE 0
++
++#endif
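++
++/*
++    Illustrative note (not part of the original driver): with, for example,
++    only gcdSHOW_PROCESS_ID and gcdSHOW_THREAD_ID enabled, _prefixFormat
++    expands to "[pid=%5d, tid=%5d] " and a logged line would start roughly as
++
++        [pid= 1234, tid= 5678] <message text>
++
++    The gcdOFFSET / gcd*SIZE bookkeeping above is there to keep the variable
++    prefix arguments naturally aligned inside the buffered output records.
++*/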
++
++/* Assumed largest variable argument leader size. */
++#define gcdVARARG_LEADER gcmSIZEOF(gctUINT64)
++
++/* Alignments. */
++#if gcdALIGNBYSIZE
++# define gcdPREFIX_ALIGNMENT gcdPREFIX_LEADER
++# define gcdVARARG_ALIGNMENT gcdVARARG_LEADER
++#else
++# define gcdPREFIX_ALIGNMENT 0
++# define gcdVARARG_ALIGNMENT 0
++#endif
++
++#if gcdBUFFERED_OUTPUT
++# define gcdOUTPUTPREFIX _AppendPrefix
++# define gcdOUTPUTSTRING _AppendString
++# define gcdOUTPUTCOPY _AppendCopy
++# define gcdOUTPUTBUFFER _AppendBuffer
++#else
++# define gcdOUTPUTPREFIX _PrintPrefix
++# define gcdOUTPUTSTRING _PrintString
++# define gcdOUTPUTCOPY _PrintString
++# define gcdOUTPUTBUFFER _PrintBuffer
++#endif
++
++/******************************************************************************\
++****************************** Private Structures ******************************
++\******************************************************************************/
++
++typedef enum _gceBUFITEM
++{
++ gceBUFITEM_NONE,
++ gcvBUFITEM_PREFIX,
++ gcvBUFITEM_STRING,
++ gcvBUFITEM_COPY,
++ gcvBUFITEM_BUFFER
++}
++gceBUFITEM;
++
++/* Common item head/buffer terminator. */
++typedef struct _gcsBUFITEM_HEAD * gcsBUFITEM_HEAD_PTR;
++typedef struct _gcsBUFITEM_HEAD
++{
++ gceBUFITEM type;
++}
++gcsBUFITEM_HEAD;
++
++/* String prefix (e.g. [ 1,tid=0x019A]). */
++typedef struct _gcsBUFITEM_PREFIX * gcsBUFITEM_PREFIX_PTR;
++typedef struct _gcsBUFITEM_PREFIX
++{
++ gceBUFITEM type;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_PREFIX;
++
++/* Buffered string. */
++typedef struct _gcsBUFITEM_STRING * gcsBUFITEM_STRING_PTR;
++typedef struct _gcsBUFITEM_STRING
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctCONST_STRING message;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_STRING;
++
++/* Buffered string (copy of the string is included with the record). */
++typedef struct _gcsBUFITEM_COPY * gcsBUFITEM_COPY_PTR;
++typedef struct _gcsBUFITEM_COPY
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_COPY;
++
++/* Memory buffer. */
++typedef struct _gcsBUFITEM_BUFFER * gcsBUFITEM_BUFFER_PTR;
++typedef struct _gcsBUFITEM_BUFFER
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gceDUMP_BUFFER bufferType;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ gctUINT32 dmaAddress;
++#endif
++
++ gctUINT dataSize;
++ gctUINT32 address;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_BUFFER;
++
++typedef struct _gcsBUFFERED_OUTPUT * gcsBUFFERED_OUTPUT_PTR;
++typedef struct _gcsBUFFERED_OUTPUT
++{
++#if gcdTHREAD_BUFFERS > 1
++ gctUINT32 threadID;
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ gctUINT64 lineNumber;
++#endif
++
++ gctINT indent;
++
++#if gcdBUFFERED_OUTPUT
++ gctINT start;
++ gctINT index;
++ gctINT count;
++ gctUINT8 buffer[gcdBUFFERED_SIZE];
++#endif
++
++ gcsBUFFERED_OUTPUT_PTR prev;
++ gcsBUFFERED_OUTPUT_PTR next;
++}
++gcsBUFFERED_OUTPUT;
++
++typedef gctUINT (* gcfPRINTSTRING) (
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++typedef gctINT (* gcfGETITEMSIZE) (
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++/******************************************************************************\
++******************************* Private Variables ******************************
++\******************************************************************************/
++
++static gcsBUFFERED_OUTPUT _outputBuffer[gcdTHREAD_BUFFERS];
++static gcsBUFFERED_OUTPUT_PTR _outputBufferHead = gcvNULL;
++static gcsBUFFERED_OUTPUT_PTR _outputBufferTail = gcvNULL;
++
++/******************************************************************************\
++****************************** Item Size Functions *****************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++static gctINT
++_GetTerminatorItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctINT
++_GetPrefixItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item = (gcsBUFITEM_PREFIX_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctINT
++_GetStringItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item = (gcsBUFITEM_STRING_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetCopyItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item = (gcsBUFITEM_COPY_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetBufferItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfGETITEMSIZE _itemSize[] =
++{
++ _GetTerminatorItemSize,
++ _GetPrefixItemSize,
++ _GetStringItemSize,
++ _GetCopyItemSize,
++ _GetBufferItemSize
++};
++#endif
++
++/******************************************************************************\
++******************************* Printing Functions *****************************
++\******************************************************************************/
++
++#if gcdDEBUG || gcdBUFFERED_OUTPUT
++static void
++_DirectPrint(
++ gctCONST_STRING Message,
++ ...
++ )
++{
++ gctINT len;
++ char buffer[768];
++ gctARGUMENTS arguments;
++
++ gcmkARGUMENTS_START(arguments, Message);
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), Message, arguments);
++ gcmkARGUMENTS_END(arguments);
++
++ buffer[len] = '\0';
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static int
++_AppendIndent(
++ IN gctINT Indent,
++ IN char * Buffer,
++ IN int BufferSize
++ )
++{
++ gctINT i;
++
++ gctINT len = 0;
++ gctINT indent = Indent % 40;
++
++ for (i = 0; i < indent; i += 1)
++ {
++ Buffer[len++] = ' ';
++ }
++
++ if (indent != Indent)
++ {
++ len += gcmkSPRINTF(
++ Buffer + len, BufferSize - len, " <%d> ", Indent
++ );
++
++ Buffer[len] = '\0';
++ }
++
++ return len;
++}
++
++#if gcdHAVEPREFIX
++static void
++_PrintPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Format the string. */
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, Data);
++ buffer[len] = '\0';
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static void
++_PrintString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Append the indent string. */
++ len = _AppendIndent(Indent, buffer, gcmSIZEOF(buffer));
++
++ /* Format the string. */
++ len += gcmkVSPRINTF(buffer + len, gcmSIZEOF(buffer) - len, Message, Data);
++ buffer[len] = '\0';
++
++ /* Add end-of-line if missing. */
++ if (buffer[len - 1] != '\n')
++ {
++ buffer[len++] = '\n';
++ buffer[len] = '\0';
++ }
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++
++static void
++_PrintBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++ static gctCONST_STRING _titleString[] =
++ {
++ "CONTEXT BUFFER",
++ "USER COMMAND BUFFER",
++ "KERNEL COMMAND BUFFER",
++ "LINK BUFFER",
++ "WAIT LINK BUFFER",
++ ""
++ };
++
++ static const gctINT COLUMN_COUNT = 8;
++
++ gctUINT i, count, column, address;
++ gctUINT32_PTR data;
++ gctCHAR buffer[768];
++ gctUINT indent, len;
++ gctBOOL command;
++
++ /* Append space for the prefix. */
++#if gcdHAVEPREFIX
++ indent = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, PrefixData);
++ buffer[indent] = '\0';
++#else
++ indent = 0;
++#endif
++
++ /* Append the indent string. */
++ indent += _AppendIndent(
++ Indent, buffer + indent, gcmSIZEOF(buffer) - indent
++ );
++
++ switch (Type)
++ {
++ case gceDUMP_BUFFER_CONTEXT:
++ case gceDUMP_BUFFER_USER:
++ case gceDUMP_BUFFER_KERNEL:
++ case gceDUMP_BUFFER_LINK:
++ case gceDUMP_BUFFER_WAITLINK:
++ /* Form and print the title string. */
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "%s%s\n", _titleString[Type],
++ ((DmaAddress >= Address) && (DmaAddress < Address + DataSize))
++ ? " (CURRENT)" : ""
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++
++ /* This is a command buffer. */
++ command = gcvTRUE;
++ break;
++
++ case gceDUMP_BUFFER_FROM_USER:
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++
++ /* No title. */
++ break;
++
++ default:
++ gcmDBGASSERT(gcvFALSE, "%s", "invalid buffer type");
++
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++ }
++
++ /* Overwrite the prefix with spaces. */
++ for (i = 0; i < indent; i += 1)
++ {
++ buffer[i] = ' ';
++ }
++
++ /* Form and print the opening string. */
++ if (command)
++ {
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "@[kernel.command %08X %08X\n", Address, DataSize
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++ }
++
++ /* Get initial address. */
++ address = Address;
++
++ /* Cast the data pointer. */
++ data = (gctUINT32_PTR) Data;
++
++ /* Compute the number of double words. */
++ count = DataSize / gcmSIZEOF(gctUINT32);
++
++ /* Print the buffer. */
++ for (i = 0, len = indent, column = 0; i < count; i += 1)
++ {
++ /* Append the address. */
++ if (column == 0)
++ {
++ len += gcmkSPRINTF(
++ buffer + len, gcmSIZEOF(buffer) - len, "0x%08X:", address
++ );
++ }
++
++ /* Append the data value. */
++ len += gcmkSPRINTF2(
++ buffer + len, gcmSIZEOF(buffer) - len, "%c%08X",
++ (address == DmaAddress)? '>' : ' ', data[i]
++ );
++
++ buffer[len] = '\0';
++
++ /* Update the address. */
++ address += gcmSIZEOF(gctUINT32);
++
++ /* Advance column count. */
++ column += 1;
++
++ /* End of line? */
++ if ((column % COLUMN_COUNT) == 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Reset. */
++ len = indent;
++ column = 0;
++ }
++ }
++
++ /* Print the last partial string. */
++ if (column != 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++ }
++
++    /* Form and print the closing string. */
++ if (command)
++ {
++ buffer[indent] = '\0';
++ gcmkSTRCAT(buffer, gcmSIZEOF(buffer), "] -- command\n");
++ gcmkOUTPUT_STRING(buffer);
++ }
++}
++
++#if gcdBUFFERED_OUTPUT
++static gctUINT
++_PrintNone(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctUINT
++_PrintPrefixWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_PREFIX_PTR) Item;
++
++ /* Print the message. */
++ _PrintPrefix(OutputBuffer, item->prefixData);
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctUINT
++_PrintStringWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_STRING_PTR) Item;
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, item->message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintCopyWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item;
++ gctCONST_STRING message;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_COPY_PTR) Item;
++
++ /* Determine the string pointer. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintBufferWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctPOINTER data;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Compute the data address. */
++ data = ((gctUINT8_PTR) item->prefixData) + gcdPREFIX_SIZE;
++
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, item->prefixData,
++ data, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, gcvNULL,
++ item + 1, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfPRINTSTRING _printArray[] =
++{
++ _PrintNone,
++ _PrintPrefixWrapper,
++ _PrintStringWrapper,
++ _PrintCopyWrapper,
++ _PrintBufferWrapper
++};
++#endif
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++static gcsBUFITEM_BUFFER_PTR
++_FindCurrentDMABuffer(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR dmaCurrent;
++
++ /* Reset the current buffer. */
++ dmaCurrent = gcvNULL;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ if ((DmaAddress >= buffer->address) &&
++ (DmaAddress < buffer->address + buffer->dataSize))
++ {
++ dmaCurrent = buffer;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Return result. */
++ return dmaCurrent;
++}
++
++static void
++_EnableAllDMABuffers(
++ void
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Enable the buffer. */
++ buffer->dmaAddress = ~0U;
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++}
++
++static void
++_EnableDMABuffers(
++ gctUINT32 DmaAddress,
++ gcsBUFITEM_BUFFER_PTR CurrentDMABuffer
++ )
++{
++ gctINT i, skip, index;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR buffers[gcdDMA_BUFFER_COUNT];
++
++ /* Reset buffer pointers. */
++ gckOS_ZeroMemory(buffers, gcmSIZEOF(buffers));
++
++ /* Set the current buffer index. */
++ index = -1;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items until the current DMA buffer is found. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ /* Advance the index. */
++ index = (index + 1) % gcdDMA_BUFFER_COUNT;
++
++ /* Add to the buffer array. */
++ buffers[index] = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Stop if this is the current DMA buffer. */
++ if ((gcsBUFITEM_BUFFER_PTR) item == CurrentDMABuffer)
++ {
++ break;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Enable the found buffers. */
++ gcmDBGASSERT(index != -1, "%d", index);
++
++ for (i = 0; i < gcdDMA_BUFFER_COUNT; i += 1)
++ {
++ if (buffers[index] == gcvNULL)
++ {
++ break;
++ }
++
++ buffers[index]->dmaAddress = DmaAddress;
++
++ index -= 1;
++
++ if (index == -1)
++ {
++ index = gcdDMA_BUFFER_COUNT - 1;
++ }
++ }
++}
++#endif
++
++static void
++_Flush(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = _outputBufferHead;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ if ((outputBuffer != gcvNULL) && (outputBuffer->count != 0))
++ {
++ /* Find the current DMA buffer. */
++ gcsBUFITEM_BUFFER_PTR dmaCurrent = _FindCurrentDMABuffer(DmaAddress);
++
++ /* Was the current buffer found? */
++ if (dmaCurrent == gcvNULL)
++ {
++ /* No, print all buffers. */
++ _EnableAllDMABuffers();
++ }
++ else
++ {
++ /* Yes, enable only specified number of buffers. */
++ _EnableDMABuffers(DmaAddress, dmaCurrent);
++ }
++ }
++#endif
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->count != 0)
++ {
++ _DirectPrint("********************************************************************************\n");
++ _DirectPrint("FLUSHING DEBUG OUTPUT BUFFER (%d elements).\n", outputBuffer->count);
++ _DirectPrint("********************************************************************************\n");
++
++ item = (gcsBUFITEM_HEAD_PTR) &outputBuffer->buffer[outputBuffer->start];
++
++ for (i = 0; i < outputBuffer->count; i += 1)
++ {
++ skip = (* _printArray[item->type]) (outputBuffer, item);
++
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) outputBuffer->buffer;
++ }
++ }
++
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++}
++
++static gcsBUFITEM_HEAD_PTR
++_AllocateItem(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Size
++ )
++{
++ gctINT skip;
++ gcsBUFITEM_HEAD_PTR item, next;
++
++#if gcdENABLE_OVERFLOW
++ if (
++ (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ ||
++ (
++ (OutputBuffer->index < OutputBuffer->start) &&
++ (OutputBuffer->index + Size >= OutputBuffer->start)
++ )
++ )
++ {
++ if (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ if (OutputBuffer->index < OutputBuffer->start)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (item->type != gceBUFITEM_NONE)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++ }
++
++ OutputBuffer->start = 0;
++ }
++
++ OutputBuffer->index = 0;
++ }
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (OutputBuffer->start - OutputBuffer->index <= Size)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ OutputBuffer->start = 0;
++ break;
++ }
++ }
++ }
++#else
++ if (OutputBuffer->index + Size > gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ _DirectPrint("\nMessage buffer full; forcing message flush.\n\n");
++ _Flush(~0U);
++ }
++#endif
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->index];
++
++ OutputBuffer->index += Size;
++ OutputBuffer->count += 1;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + Size);
++ next->type = gceBUFITEM_NONE;
++
++ return item;
++}
++
++#if gcdALIGNBYSIZE
++static void
++_FreeExtraSpace(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Item,
++ IN gctINT ItemSize,
++ IN gctINT FreeSize
++ )
++{
++ gcsBUFITEM_HEAD_PTR next;
++
++ OutputBuffer->index -= FreeSize;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) Item + ItemSize);
++ next->type = gceBUFITEM_NONE;
++}
++#endif
++
++#if gcdHAVEPREFIX
++static void
++_AppendPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_PREFIX_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_PREFIX)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_PREFIX_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_PREFIX;
++ item->prefixData = prefixData;
++
++ /* Copy argument value. */
++ memcpy(prefixData, Data, gcdPREFIX_SIZE);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_PREFIX) + gcdPREFIX_SIZE + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++#endif
++
++static void
++_AppendString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_STRING_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_STRING)
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_STRING_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_STRING;
++ item->indent = Indent;
++ item->message = Message;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_STRING) + ArgumentSize + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendCopy(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_COPY_PTR item;
++ gctINT allocSize;
++ gctINT messageLength;
++ gctCONST_STRING message;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Get the length of the string. */
++ messageLength = strlen(Message) + 1;
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_COPY_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Determine the message placement. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) message + messageLength;
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ if (ArgumentSize == 0)
++ {
++ alignment = 0;
++ }
++ else
++ {
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++ }
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_COPY;
++ item->indent = Indent;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy the message. */
++ memcpy((gctPOINTER) message, Message, messageLength);
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT allocSize;
++ gctPOINTER data;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT
++ + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++#if gcdALIGNBYSIZE
++ /* Align the data pointer as necessary. */
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->bufferType = Type;
++ item->dataSize = DataSize;
++ item->address = Address;
++ item->prefixData = prefixData;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ item->dmaAddress = DmaAddress;
++#endif
++
++ /* Copy prefix data. */
++ memcpy(prefixData, PrefixData, gcdPREFIX_SIZE);
++
++ /* Compute the data pointer. */
++ data = prefixData + gcdPREFIX_SIZE;
++
++ /* Copy argument value. */
++ memcpy(data, Data, DataSize);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + alignment
++ + DataSize;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++#else
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT size;
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ size = gcmSIZEOF(gcsBUFITEM_BUFFER) + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, size);
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->dataSize = DataSize;
++ item->address = Address;
++
++ /* Copy argument value. */
++ memcpy(item + 1, Data, DataSize);
++#endif
++}
++#endif
++
++static gcmINLINE void
++_InitBuffers(
++ void
++ )
++{
++ int i;
++
++ if (_outputBufferHead == gcvNULL)
++ {
++ for (i = 0; i < gcdTHREAD_BUFFERS; i += 1)
++ {
++ if (_outputBufferTail == gcvNULL)
++ {
++ _outputBufferHead = &_outputBuffer[i];
++ }
++ else
++ {
++ _outputBufferTail->next = &_outputBuffer[i];
++ }
++
++#if gcdTHREAD_BUFFERS > 1
++ _outputBuffer[i].threadID = ~0U;
++#endif
++
++ _outputBuffer[i].prev = _outputBufferTail;
++ _outputBuffer[i].next = gcvNULL;
++
++ _outputBufferTail = &_outputBuffer[i];
++ }
++ }
++}
++
++static gcmINLINE gcsBUFFERED_OUTPUT_PTR
++_GetOutputBuffer(
++ void
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++
++#if gcdTHREAD_BUFFERS > 1
++ /* Get the current thread ID. */
++ gctUINT32 ThreadID = gcmkGETTHREADID();
++
++ /* Locate the output buffer for the thread. */
++ outputBuffer = _outputBufferHead;
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->threadID == ThreadID)
++ {
++ break;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++
++ /* No matching buffer found? */
++ if (outputBuffer == gcvNULL)
++ {
++ /* Get the tail for the buffer. */
++ outputBuffer = _outputBufferTail;
++
++ /* Move it to the head. */
++ _outputBufferTail = _outputBufferTail->prev;
++ _outputBufferTail->next = gcvNULL;
++
++ outputBuffer->prev = gcvNULL;
++ outputBuffer->next = _outputBufferHead;
++
++ _outputBufferHead->prev = outputBuffer;
++ _outputBufferHead = outputBuffer;
++
++ /* Reset the buffer. */
++ outputBuffer->threadID = ThreadID;
++#if gcdBUFFERED_OUTPUT
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++#endif
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber = 0;
++#endif
++ }
++#else
++ outputBuffer = _outputBufferHead;
++#endif
++
++ return outputBuffer;
++}
++
++static gcmINLINE int _GetArgumentSize(
++ IN gctCONST_STRING Message
++ )
++{
++ int i, count;
++
++ gcmDBGASSERT(Message != gcvNULL, "%p", Message);
++
++ for (i = 0, count = 0; Message[i]; i += 1)
++ {
++ if (Message[i] == '%')
++ {
++ count += 1;
++ }
++ }
++
++ return count * gcmSIZEOF(gctUINT32);
++}
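++
++/* Illustrative example: for the format string "id=%d flags=0x%08X" the loop
++** above counts two '%' characters, so _GetArgumentSize returns
++** 2 * gcmSIZEOF(gctUINT32) = 8 bytes. Every argument is assumed to occupy a
++** single gctUINT32; a literal "%%" would be over-counted.
++*/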
++
++#if gcdHAVEPREFIX
++static void
++_InitPrefixData(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR data = (gctUINT8_PTR) Data;
++
++#if gcdSHOW_TIME
++ {
++ gctUINT64 time;
++ gckOS_GetProfileTick(&time);
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = time;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = OutputBuffer->lineNumber;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_PROCESS_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETPROCESSID();
++ data += gcmSIZEOF(gctUINT32);
++ }
++#endif
++
++#if gcdSHOW_THREAD_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETTHREADID();
++ }
++#endif
++}
++#endif
++
++static void
++_Print(
++ IN gctUINT ArgumentSize,
++ IN gctBOOL CopyMessage,
++ IN gctCONST_STRING Message,
++ IN gctARGUMENTS Arguments
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ gcmkDECLARE_LOCK(lockHandle);
++
++ gcmkLOCKSECTION(lockHandle);
++
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Print prefix. */
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print the prefix. */
++ gcdOUTPUTPREFIX(outputBuffer, alignedPrefixData);
++ }
++#endif
++
++ /* Check decreasing indent. */
++ if (strncmp(Message, "--", 2) == 0)
++ {
++ outputBuffer->indent -= 2;
++ }
++
++ /* Print the message. */
++ if (CopyMessage)
++ {
++ gcdOUTPUTCOPY(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, * (gctPOINTER *) &Arguments
++ );
++ }
++ else
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, * (gctPOINTER *) &Arguments
++ );
++ }
++
++ /* Check increasing indent. */
++ if (strncmp(Message, "++", 2) == 0)
++ {
++ outputBuffer->indent += 2;
++ }
++
++ gcmkUNLOCKSECTION(lockHandle);
++}
++
++
++/******************************************************************************\
++********************************* Debug Macros *********************************
++\******************************************************************************/
++
++#ifdef __QNXNTO__
++
++extern volatile unsigned g_nQnxInIsrs;
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ if (atomic_add_value(&g_nQnxInIsrs, 1) == 0) \
++ { \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, __arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++ } \
++ atomic_sub(&g_nQnxInIsrs, 1); \
++}
++
++#else
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, __arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++}
++
++#endif
++
++/******************************************************************************\
++********************************** Debug Code **********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Print
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
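++
++/* Illustrative usage sketch (the variable name is an assumption):
++**
++**     gckOS_Print("core clock = %d kHz\n", clockRate);
++**
++** The argument size is derived by counting '%' conversions, so every
++** argument is assumed to fit in a gctUINT32.
++*/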
++
++/*******************************************************************************
++**
++** gckOS_PrintN
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyPrint
++**
++** Send a message to the debugger. If in buffered output mode, the entire
++** message will be copied into the buffer instead of using the pointer to
++** the string.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvTRUE, Message);
++}
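++
++/* Illustrative usage sketch: prefer gckOS_CopyPrint when the format string
++** itself is built on the stack and may go out of scope before a buffered
++** flush, since buffered gckOS_Print only stores the string pointer
++** (the 'message' variable below is an assumption):
++**
++**     gctCHAR message[64];
++**     gckOS_CopyPrint(message);
++*/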
++
++/*******************************************************************************
++**
++** gckOS_DumpBuffer
++**
++** Print the contents of the specified buffer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Buffer
++** Pointer to the buffer to print.
++**
++** gctUINT Size
++** Size of the buffer.
++**
++** gceDUMP_BUFFER Type
++** Buffer type.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ )
++{
++ gctUINT32 address;
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ static gctBOOL userLocked;
++ gctCHAR *buffer = (gctCHAR*)Buffer;
++
++ gcmkDECLARE_LOCK(lockHandle);
++
++ /* Request the lock when not coming from user, or when coming from user
++    and the message starts with "@[" (the beginning of a user dump). */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ if ((Size > 2)
++ && (buffer[0] == '@')
++ && (buffer[1] == '['))
++ {
++ /* Beginning of a user dump. */
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvTRUE;
++ }
++ /* Else, let it pass through. */
++ }
++ else
++ {
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++
++ if (Buffer != gcvNULL)
++ {
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Get the physical address of the buffer. */
++ if (Type != gceDUMP_BUFFER_FROM_USER)
++ {
++ gcmkVERIFY_OK(gckOS_GetPhysicalAddress(Os, Buffer, &address));
++ }
++ else
++ {
++ address = 0;
++ }
++
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print/schedule the buffer. */
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ alignedPrefixData, Buffer, address, Size, Type, 0
++ );
++ }
++#else
++ /* Print/schedule the buffer. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Buffer, 0, gcvNULL
++ );
++ }
++ else
++ {
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ gcvNULL, Buffer, address, Size, Type, 0
++ );
++ }
++#endif
++ }
++
++ /* Unlock when not coming from user, or when coming from user and the
++    end-of-dump marker ("] --") has been reached. */
++ if (userLocked)
++ {
++ if ((Size > 4)
++ && (buffer[0] == ']')
++ && (buffer[1] == ' ')
++ && (buffer[2] == '-')
++ && (buffer[3] == '-'))
++ {
++ /* End of a user dump. */
++ gcmkUNLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++ /* Else, let it pass through, don't unlock. */
++ }
++ else
++ {
++ gcmkUNLOCKSECTION(lockHandle);
++ }
++}
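++
++/* Illustrative usage sketch (the local variable names are assumptions):
++**
++**     gckOS_DumpBuffer(Os, commandLogical, commandBytes,
++**                      gceDUMP_BUFFER_KERNEL, gcvFALSE);
++**
++** For gceDUMP_BUFFER_FROM_USER the section lock is only taken when the text
++** starts with "@[" and released again when it starts with "] --".
++*/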
++
++/*******************************************************************************
++**
++** gckOS_DebugTrace
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
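++
++/* Illustrative usage sketch (the 'elapsed' variable is an assumption); the
++** message is dropped unless Level is at or below the current _debugLevel:
++**
++**     gckOS_DebugTrace(gcvLEVEL_INFO, "stall detected after %d ms\n", elapsed);
++*/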
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceN
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZone
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
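++
++/* Illustrative usage sketch (the 'id' variable is an assumption); the message
++** is emitted only when both the level and the zone bit pass the filters:
++**
++**     gckOS_DebugTraceZone(gcvLEVEL_WARNING, gcvZONE_EVENT,
++**                          "event %d still pending\n", id);
++*/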
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZoneN
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugBreak
++**
++** Break into the debugger.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugBreak(
++ void
++ )
++{
++ gckOS_DebugTrace(gcvLEVEL_ERROR, "%s(%d)", __FUNCTION__, __LINE__);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFatal
++**
++** Send a message to the debugger and break into the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmkPRINT_VERSION();
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++
++ /* Break into the debugger. */
++ gckOS_DebugBreak();
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevel
++**
++** Set the debug level.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ )
++{
++ _debugLevel = Level;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZone
++**
++** Set the debug zone.
++**
++** INPUT:
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ )
++{
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevelZone
++**
++** Set the debug level and zone.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ )
++{
++ _debugLevel = Level;
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZones
++**
++** Enable or disable debug zones.
++**
++** INPUT:
++**
++** gctUINT32 Zones
++** Debug zones to enable or disable.
++**
++** gctBOOL Enable
++** Set to gcvTRUE to enable the specified Zones (OR them into the current
++** zone mask) or gcvFALSE to disable the specified Zones.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ )
++{
++ if (Enable)
++ {
++ /* Enable the zones. */
++ _debugZones |= Zones;
++ }
++ else
++ {
++ /* Disable the zones. */
++ _debugZones &= ~Zones;
++ }
++}
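++
++/* Illustrative usage sketch; zones form a bitmask, so several can be toggled
++** at once (gcvZONE_EVENT is the only zone referenced in this file; any other
++** zone names would be assumptions):
++**
++**     gckOS_SetDebugZones(gcvZONE_EVENT, gcvTRUE);   enable
++**     gckOS_SetDebugZones(gcvZONE_EVENT, gcvFALSE);  disable
++*/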
++
++/*******************************************************************************
++**
++** gckOS_Verify
++**
++** Called to verify the result of a function call.
++**
++** INPUT:
++**
++** gceSTATUS Status
++** Function call result.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ )
++{
++ _lastError = status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFlush
++**
++** Force messages to be flushed out.
++**
++** INPUT:
++**
++** gctCONST_STRING CallerName
++** Name of the caller function.
++**
++** gctUINT LineNumber
++** Line number of the caller.
++**
++** gctUINT32 DmaAddress
++** The current DMA address or ~0U to ignore.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ )
++{
++#if gcdBUFFERED_OUTPUT
++ _DirectPrint("\nFlush requested by %s(%d).\n\n", CallerName, LineNumber);
++ _Flush(DmaAddress);
++#endif
++}
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ )
++{
++ switch (status)
++ {
++ case gcvSTATUS_OK:
++ return "gcvSTATUS_OK";
++ case gcvSTATUS_TRUE:
++ return "gcvSTATUS_TRUE";
++ case gcvSTATUS_NO_MORE_DATA:
++ return "gcvSTATUS_NO_MORE_DATA";
++ case gcvSTATUS_CACHED:
++ return "gcvSTATUS_CACHED";
++ case gcvSTATUS_MIPMAP_TOO_LARGE:
++ return "gcvSTATUS_MIPMAP_TOO_LARGE";
++ case gcvSTATUS_NAME_NOT_FOUND:
++ return "gcvSTATUS_NAME_NOT_FOUND";
++ case gcvSTATUS_NOT_OUR_INTERRUPT:
++ return "gcvSTATUS_NOT_OUR_INTERRUPT";
++ case gcvSTATUS_MISMATCH:
++ return "gcvSTATUS_MISMATCH";
++ case gcvSTATUS_MIPMAP_TOO_SMALL:
++ return "gcvSTATUS_MIPMAP_TOO_SMALL";
++ case gcvSTATUS_LARGER:
++ return "gcvSTATUS_LARGER";
++ case gcvSTATUS_SMALLER:
++ return "gcvSTATUS_SMALLER";
++ case gcvSTATUS_CHIP_NOT_READY:
++ return "gcvSTATUS_CHIP_NOT_READY";
++ case gcvSTATUS_NEED_CONVERSION:
++ return "gcvSTATUS_NEED_CONVERSION";
++ case gcvSTATUS_SKIP:
++ return "gcvSTATUS_SKIP";
++ case gcvSTATUS_DATA_TOO_LARGE:
++ return "gcvSTATUS_DATA_TOO_LARGE";
++ case gcvSTATUS_INVALID_CONFIG:
++ return "gcvSTATUS_INVALID_CONFIG";
++ case gcvSTATUS_CHANGED:
++ return "gcvSTATUS_CHANGED";
++ case gcvSTATUS_NOT_SUPPORT_DITHER:
++ return "gcvSTATUS_NOT_SUPPORT_DITHER";
++
++ case gcvSTATUS_INVALID_ARGUMENT:
++ return "gcvSTATUS_INVALID_ARGUMENT";
++ case gcvSTATUS_INVALID_OBJECT:
++ return "gcvSTATUS_INVALID_OBJECT";
++ case gcvSTATUS_OUT_OF_MEMORY:
++ return "gcvSTATUS_OUT_OF_MEMORY";
++ case gcvSTATUS_MEMORY_LOCKED:
++ return "gcvSTATUS_MEMORY_LOCKED";
++ case gcvSTATUS_MEMORY_UNLOCKED:
++ return "gcvSTATUS_MEMORY_UNLOCKED";
++ case gcvSTATUS_HEAP_CORRUPTED:
++ return "gcvSTATUS_HEAP_CORRUPTED";
++ case gcvSTATUS_GENERIC_IO:
++ return "gcvSTATUS_GENERIC_IO";
++ case gcvSTATUS_INVALID_ADDRESS:
++ return "gcvSTATUS_INVALID_ADDRESS";
++ case gcvSTATUS_CONTEXT_LOSSED:
++ return "gcvSTATUS_CONTEXT_LOSSED";
++ case gcvSTATUS_TOO_COMPLEX:
++ return "gcvSTATUS_TOO_COMPLEX";
++ case gcvSTATUS_BUFFER_TOO_SMALL:
++ return "gcvSTATUS_BUFFER_TOO_SMALL";
++ case gcvSTATUS_INTERFACE_ERROR:
++ return "gcvSTATUS_INTERFACE_ERROR";
++ case gcvSTATUS_NOT_SUPPORTED:
++ return "gcvSTATUS_NOT_SUPPORTED";
++ case gcvSTATUS_MORE_DATA:
++ return "gcvSTATUS_MORE_DATA";
++ case gcvSTATUS_TIMEOUT:
++ return "gcvSTATUS_TIMEOUT";
++ case gcvSTATUS_OUT_OF_RESOURCES:
++ return "gcvSTATUS_OUT_OF_RESOURCES";
++ case gcvSTATUS_INVALID_DATA:
++ return "gcvSTATUS_INVALID_DATA";
++ case gcvSTATUS_INVALID_MIPMAP:
++ return "gcvSTATUS_INVALID_MIPMAP";
++ case gcvSTATUS_NOT_FOUND:
++ return "gcvSTATUS_NOT_FOUND";
++ case gcvSTATUS_NOT_ALIGNED:
++ return "gcvSTATUS_NOT_ALIGNED";
++ case gcvSTATUS_INVALID_REQUEST:
++ return "gcvSTATUS_INVALID_REQUEST";
++ case gcvSTATUS_GPU_NOT_RESPONDING:
++ return "gcvSTATUS_GPU_NOT_RESPONDING";
++ case gcvSTATUS_TIMER_OVERFLOW:
++ return "gcvSTATUS_TIMER_OVERFLOW";
++ case gcvSTATUS_VERSION_MISMATCH:
++ return "gcvSTATUS_VERSION_MISMATCH";
++ case gcvSTATUS_LOCKED:
++ return "gcvSTATUS_LOCKED";
++
++ /* Linker errors. */
++ case gcvSTATUS_GLOBAL_TYPE_MISMATCH:
++ return "gcvSTATUS_GLOBAL_TYPE_MISMATCH";
++ case gcvSTATUS_TOO_MANY_ATTRIBUTES:
++ return "gcvSTATUS_TOO_MANY_ATTRIBUTES";
++ case gcvSTATUS_TOO_MANY_UNIFORMS:
++ return "gcvSTATUS_TOO_MANY_UNIFORMS";
++ case gcvSTATUS_TOO_MANY_VARYINGS:
++ return "gcvSTATUS_TOO_MANY_VARYINGS";
++ case gcvSTATUS_UNDECLARED_VARYING:
++ return "gcvSTATUS_UNDECLARED_VARYING";
++ case gcvSTATUS_VARYING_TYPE_MISMATCH:
++ return "gcvSTATUS_VARYING_TYPE_MISMATCH";
++ case gcvSTATUS_MISSING_MAIN:
++ return "gcvSTATUS_MISSING_MAIN";
++ case gcvSTATUS_NAME_MISMATCH:
++ return "gcvSTATUS_NAME_MISMATCH";
++ case gcvSTATUS_INVALID_INDEX:
++ return "gcvSTATUS_INVALID_INDEX";
++ default:
++ return "nil";
++ }
++}
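++
++/* Illustrative usage sketch: convenient when reporting a failure path, e.g.
++**
++**     gckOS_Print("operation failed: %s\n", gckOS_DebugStatus2Name(status));
++*/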
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_event.c 2015-05-01 14:57:59.527427001 -0500
+@@ -0,0 +1,2898 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __QNXNTO__
++#include <atomic.h>
++#include "gc_hal_kernel_qnx.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_EVENT
++
++#define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE))
++#define gcdEVENT_MIN_THRESHOLD 4
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++static gceSTATUS
++gckEVENT_AllocateQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR * Queue
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Do we have free queues? */
++ if (Event->freeList == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Move one free queue from the free list. */
++ * Queue = Event->freeList;
++ Event->freeList = Event->freeList->next;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Move one free queue from the free list. */
++ Queue->next = Event->freeList;
++ Event->freeList = Queue;
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeRecord(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->freeEventMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Push the record on the free list. */
++ Record->next = Event->freeEventList;
++ Event->freeEventList = Record;
++ Event->freeEventCount += 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++gckEVENT_IsEmpty(
++ IN gckEVENT Event,
++ OUT gctBOOL_PTR IsEmpty
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL);
++
++ /* Assume the event queue is empty. */
++ *IsEmpty = gcvTRUE;
++
++ /* Try acquiring the mutex. */
++ status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Timeout - queue is no longer empty. */
++ *IsEmpty = gcvFALSE;
++ }
++ else
++ {
++ /* Bail out on error. */
++ gcmkONERROR(status);
++
++ /* Walk the event queue. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ /* Check whether this event is in use. */
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* The event is in use, hence the queue is not empty. */
++ *IsEmpty = gcvFALSE;
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_TryToIdleGPU(
++ IN gckEVENT Event
++)
++{
++ gceSTATUS status;
++ gctBOOL empty = gcvFALSE, idle = gcvFALSE;
++ gctBOOL powerLocked = gcvFALSE;
++ gckHARDWARE hardware;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Event->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Check whether the event queue is empty. */
++ gcmkONERROR(gckEVENT_IsEmpty(Event, &empty));
++
++ if (empty)
++ {
++ status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ powerLocked = gcvTRUE;
++
++ /* Query whether the hardware is idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle));
++
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++
++ if (idle)
++ {
++ /* Inform the system of idle GPU. */
++ gcmkONERROR(gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_IDLE));
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerLocked)
++ {
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++__RemoveRecordFromProcessDB(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ if (Record->info.command == gcvHAL_SIGNAL)
++ {
++ /* TODO: Find a better place to bind signal to hardware.*/
++ gcmkVERIFY_OK(gckOS_SignalSetHardware(Event->os,
++ gcmUINT64_TO_PTR(Record->info.u.Signal.signal),
++ Event->kernel->hardware));
++ }
++
++ if (Record->fromKernel)
++ {
++ /* No need to check db if event is from kernel. */
++ Record = Record->next;
++ continue;
++ }
++
++ switch (Record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVideoMemory.node)));
++
++ {
++ gcuVIDMEM_NODE_PTR node = (gcuVIDMEM_NODE_PTR)(gcmUINT64_TO_PTR(Record->info.u.FreeVideoMemory.node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_RESERVED,
++ node));
++ }
++ else if(node->Virtual.contiguous)
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node));
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node));
++ }
++ }
++
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node)));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Record->info.u.UnmapUserMemory.info)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical)));
++ break;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_SubmitTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckEVENT event = (gckEVENT)Data;
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE));
++}
++
++/******************************************************************************\
++******************************* gckEVENT API Code *******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckEVENT_Construct
++**
++** Construct a new gckEVENT object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckEVENT * Event
++** Pointer to a variable that receives the gckEVENT object pointer.
++*/
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gckEVENT eventObj = gcvNULL;
++ int i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Event != gcvNULL);
++
++ /* Extract the pointer to the gckOS object. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate the gckEVENT object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer));
++
++ eventObj = pointer;
++
++ /* Reset the object. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT)));
++
++ /* Initialize the gckEVENT object. */
++ eventObj->object.type = gcvOBJ_EVENT;
++ eventObj->kernel = Kernel;
++ eventObj->os = os;
++
++ /* Create the mutexes. */
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex));
++
++ /* Create a bunch of event records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = eventObj->freeEventList;
++ eventObj->freeEventList = record;
++ eventObj->freeEventCount += 1;
++ }
++
++ /* Initialize the free list of event queues. */
++ for (i = 0; i < gcdREPO_LIST_COUNT; i += 1)
++ {
++ eventObj->repoList[i].next = eventObj->freeList;
++ eventObj->freeList = &eventObj->repoList[i];
++ }
++
++ /* Construct the atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->freeAtom));
++ gcmkONERROR(gckOS_AtomSet(os,
++ eventObj->freeAtom,
++ gcmCOUNTOF(eventObj->queues)));
++
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending));
++#endif
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(os,
++ _SubmitTimerFunction,
++ (gctPOINTER)eventObj,
++ &eventObj->submitTimer));
++
++ /* Return pointer to the gckEVENT object. */
++ *Event = eventObj;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Event=0x%x", *Event);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (eventObj != gcvNULL)
++ {
++ if (eventObj->eventQueueMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex));
++ }
++
++ if (eventObj->freeEventMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex));
++ }
++
++ if (eventObj->eventListMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex));
++ }
++
++ while (eventObj->freeEventList != gcvNULL)
++ {
++ record = eventObj->freeEventList;
++ eventObj->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record));
++ }
++
++ if (eventObj->freeAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->freeAtom));
++ }
++
++#if gcdSMP
++ if (eventObj->pending != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending));
++ }
++#endif
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
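++
++/* Minimal construction/teardown sketch, assuming a valid gckKERNEL object
++** named 'kernel' (the variable name is an assumption):
++**
++**     gckEVENT eventObj = gcvNULL;
++**     gcmkONERROR(gckEVENT_Construct(kernel, &eventObj));
++**     ...
++**     gcmkVERIFY_OK(gckEVENT_Destroy(eventObj));
++*/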
++
++/*******************************************************************************
++**
++** gckEVENT_Destroy
++**
++** Destroy a gckEVENT object.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to a gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_PTR record;
++ gcsEVENT_QUEUE_PTR queue;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Event->submitTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer));
++ }
++
++ /* Delete the queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex));
++
++ /* Free all free events. */
++ while (Event->freeEventList != gcvNULL)
++ {
++ record = Event->freeEventList;
++ Event->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Delete the free mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex));
++
++ /* Free all pending queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Free all pending events. */
++ while (queue->head != gcvNULL)
++ {
++ record = queue->head;
++ queue->head = record->next;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(record) + gcmSIZEOF(queue->source),
++ "Event record 0x%x is still pending for %d.",
++ record, queue->source
++ );
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead =
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue));
++ }
++
++ /* Delete the list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex));
++
++ /* Delete the atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->freeAtom));
++
++#if gcdSMP
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending));
++#endif
++
++ /* Mark the gckEVENT object as unknown. */
++ Event->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckEVENT object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_GetEvent
++**
++** Reserve the next available hardware event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to a gckEVENT object.
++**
++** gctBOOL Wait
++** Set to gcvTRUE to force the function to wait if no events are
++** immediately available.
++**
++** gceKERNEL_WHERE Source
++** Source of the event.
++**
++** OUTPUT:
++**
++** gctUINT8 * EventID
++** Reserved event ID.
++*/
++static gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gcsEVENT_PTR Head,
++ IN gceKERNEL_WHERE Source
++ )
++{
++ gctINT i, id;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 free;
++
++#if gcdGPU_TIMEOUT
++ gctUINT32 timer = 0;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Head=%p Source=%d", Event, Head, Source);
++
++ while (gcvTRUE)
++ {
++ /* Grab the queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Walk through all events. */
++ id = Event->lastID;
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ gctINT nextID = gckMATH_ModuloInt((id + 1),
++ gcmCOUNTOF(Event->queues));
++
++ if (Event->queues[id].head == gcvNULL)
++ {
++ *EventID = (gctUINT8) id;
++
++ Event->lastID = (gctUINT8) nextID;
++
++ /* Save time stamp of event. */
++ Event->queues[id].stamp = ++(Event->stamp);
++ Event->queues[id].head = Head;
++ Event->queues[id].source = Source;
++
++ gcmkONERROR(gckOS_AtomDecrement(Event->os,
++ Event->freeAtom,
++ &free));
++#if gcdDYNAMIC_SPEED
++ if (free <= gcdDYNAMIC_EVENT_THRESHOLD)
++ {
++ gcmkONERROR(gckOS_BroadcastHurry(
++ Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD - free));
++ }
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os,
++ Event->eventQueueMutex));
++
++ /* Success. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(id),
++ "Using id=%d",
++ id
++ );
++
++ gcmkFOOTER_ARG("*EventID=%u", *EventID);
++ return gcvSTATUS_OK;
++ }
++
++ id = nextID;
++ }
++
++#if gcdDYNAMIC_SPEED
++ /* No free events, speed up the GPU right now! */
++ gcmkONERROR(gckOS_BroadcastHurry(Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD));
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Fail if wait is not requested. */
++ if (!Wait)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Delay a while. */
++ gcmkONERROR(gckOS_Delay(Event->os, 1));
++
++#if gcdGPU_TIMEOUT
++ /* Increment the wait timer. */
++ timer += 1;
++
++ if (timer == Event->kernel->timeOut)
++ {
++ /* Try to call any outstanding events. */
++ gcmkONERROR(gckHARDWARE_Interrupt(Event->kernel->hardware,
++ gcvTRUE));
++ }
++ else if (timer > Event->kernel->timeOut)
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(gctCONST_STRING) + gcmSIZEOF(gctINT),
++ "%s(%d): no available events\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Bail out. */
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++#endif
++ }
++
++OnError:
++ if (acquired)
++ {
++ /* Release the queue mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AllocateRecord
++**
++** Allocate a record for the new event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL AllocateAllowed
++**          Whether new event records may be allocated when the free list
++**          drops below the minimum threshold.
++**
++** OUTPUT:
++**
++** gcsEVENT_PTR * Record
++** Allocated event record.
++*/
++gceSTATUS
++gckEVENT_AllocateRecord(
++ IN gckEVENT Event,
++ IN gctBOOL AllocateAllowed,
++ OUT gcsEVENT_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Test if we are below the allocation threshold. */
++ if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
++ (Event->freeEventCount == 0) )
++ {
++ /* Allocate a bunch of records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(Event->os,
++ gcmSIZEOF(gcsEVENT),
++ &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = Event->freeEventList;
++ Event->freeEventList = record;
++ Event->freeEventCount += 1;
++ }
++ }
++
++ *Record = Event->freeEventList;
++ Event->freeEventList = Event->freeEventList->next;
++ Event->freeEventCount -= 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AddList
++**
++** Add a new event to the list of events.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_INTERFACE_PTR Interface
++** Pointer to the interface for the event to be added.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gctBOOL AllocateAllowed
++**          Whether new event records may be allocated when the free list
++**          drops below the minimum threshold.
++**
++**      gctBOOL FromKernel
++**          gcvTRUE if the event is scheduled by the kernel itself, gcvFALSE
++**          if it originates from a user-mode commit.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record = gcvNULL;
++ gcsEVENT_QUEUE_PTR queue;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Interface=0x%x",
++ Event, Interface);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE,
++ "FromWhere=%d AllocateAllowed=%d",
++ FromWhere, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ /* Verify the event command. */
++ gcmkASSERT
++ ( (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY)
++ || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY)
++ || (Interface->command == gcvHAL_FREE_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_WRITE_DATA)
++ || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_SIGNAL)
++ || (Interface->command == gcvHAL_UNMAP_USER_MEMORY)
++ || (Interface->command == gcvHAL_TIMESTAMP)
++ || (Interface->command == gcvHAL_COMMIT_DONE)
++ || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER)
++ || (Interface->command == gcvHAL_SYNC_POINT)
++ );
++
++ /* Validate the source. */
++ if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL))
++ {
++ /* Invalid argument. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Allocate a free record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record));
++
++    /* Terminate the record. */
++ record->next = gcvNULL;
++
++ /* Record the committer. */
++ record->fromKernel = FromKernel;
++
++ /* Copy the event interface into the record. */
++ gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&record->processID));
++
++#ifdef __QNXNTO__
++ record->kernel = Event->kernel;
++#endif
++
++ gcmkONERROR(__RemoveRecordFromProcessDB(Event, record));
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Do we need to allocate a new queue? */
++ if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere))
++ {
++ /* Allocate a new queue. */
++ gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue));
++
++ /* Initialize the queue. */
++ queue->source = FromWhere;
++ queue->head = gcvNULL;
++ queue->next = gcvNULL;
++
++ /* Attach it to the list of allocated queues. */
++ if (Event->queueTail == gcvNULL)
++ {
++ Event->queueHead =
++ Event->queueTail = queue;
++ }
++ else
++ {
++ Event->queueTail->next = queue;
++ Event->queueTail = queue;
++ }
++ }
++ else
++ {
++ queue = Event->queueTail;
++ }
++
++ /* Attach the record to the queue. */
++ if (queue->head == gcvNULL)
++ {
++ queue->head = record;
++ queue->tail = record;
++ }
++ else
++ {
++ queue->tail->next = record;
++ queue->tail = record;
++ }
++
++    /* Unmap the user space logical address.
++     * Since 3.5 the Linux kernel no longer supports unmapping memory that
++     * belongs to another process, so unmap the current process's memory
++     * before submitting the event to the GPU.
++     */
++ switch(Interface->command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++ break;
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++ break;
++ default:
++ break;
++ }
++
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Unlock
++**
++** Schedule an event to unlock virtual memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory
++** to unlock.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
++ Event, FromWhere, Node, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Mark the event as an unlock. */
++ iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
++ iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
++ iface.u.UnlockVideoMemory.type = Type;
++ iface.u.UnlockVideoMemory.asynchroneous = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeVideoMemory
++**
++** Schedule an event to free video memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcuVIDMEM_NODE_PTR VideoMemory
++** Pointer to a gcuVIDMEM_NODE object to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x VideoMemory=0x%x FromWhere=%d",
++ Event, VideoMemory, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(VideoMemory != gcvNULL);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIDEO_MEMORY;
++ iface.u.FreeVideoMemory.node = gcmPTR_TO_UINT64(VideoMemory);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeNonPagedMemory
++**
++** Schedule an event to free non-paged memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of non-paged memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of non-paged memory to free.
++**
++** gctPOINTER Logical
++** Logical address of non-paged memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
++ iface.u.FreeNonPagedMemory.bytes = Bytes;
++ iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeNonPagedMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
++ iface.u.FreeVirtualCommandBuffer.bytes = Bytes;
++ iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckEVENT_FreeContiguousMemory
++**
++** Schedule an event to free contiguous memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of contiguous memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of contiguous memory to free.
++**
++** gctPOINTER Logical
++** Logical address of contiguous memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
++ iface.u.FreeContiguousMemory.bytes = Bytes;
++ iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeContiguousMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Signal
++**
++** Schedule an event to trigger a signal.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
++ Event, Signal, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Mark the event as a signal. */
++ iface.command = gcvHAL_SIGNAL;
++ iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ iface.u.Signal.coid = 0;
++ iface.u.Signal.rcvid = 0;
++#endif
++ iface.u.Signal.auxSignal = 0;
++ iface.u.Signal.process = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
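++/*
++** Illustrative sketch, not part of the original driver: how a kernel-side
++** caller might pair gckEVENT_Signal with gckEVENT_Submit so that the signal
++** is raised once the GPU reaches this point in the pipe. The helper name
++** _ExampleScheduleSignal is hypothetical; the gckEVENT_Signal and
++** gckEVENT_Submit calls follow the signatures defined in this file.
++*/
++static gceSTATUS
++_ExampleScheduleSignal(
++    IN gckEVENT Event,
++    IN gctSIGNAL Signal
++    )
++{
++    gceSTATUS status;
++
++    /* Queue a signal event at the pixel stage of the pipe. */
++    gcmkONERROR(gckEVENT_Signal(Event, Signal, gcvKERNEL_PIXEL));
++
++    /* Flush the queued events to the GPU, waiting for a free event ID. */
++    gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++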
++/*******************************************************************************
++**
++** gckEVENT_CommitDone
++**
++** Schedule an event to wake up work thread when commit is done by GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_COMMIT_DONE;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++/*******************************************************************************
++**
++** gckEVENT_Submit
++**
++** Submit the current event queue to the GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++**          Submit requires one vacant event; if Wait is nonzero and no
++**          events are vacant at this time, the function waits until an
++**          event becomes vacant so that the queue can be submitted
++**          successfully.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gctUINT8 id = 0xFF;
++ gcsEVENT_QUEUE_PTR queue;
++ gctBOOL acquired = gcvFALSE;
++ gckCOMMAND command = gcvNULL;
++ gctBOOL commitEntered = gcvFALSE;
++#if !gcdNULL_DRIVER
++ gctSIZE_T bytes;
++ gctPOINTER buffer;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait);
++
++ /* Get gckCOMMAND object. */
++ command = Event->kernel->command;
++
++ /* Are there event queues? */
++ if (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ /* Process all queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the list mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventListMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Allocate an event ID. */
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->head, queue->source));
++
++ /* Copy event list to event ID queue. */
++ Event->queues[id].head = queue->head;
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead = gcvNULL;
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkONERROR(gckEVENT_FreeQueue(Event, queue));
++
++ /* Release the list mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ acquired = gcvFALSE;
++
++#if gcdNULL_DRIVER
++ /* Notify immediately on infinite hardware. */
++ gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id));
++
++ gcmkONERROR(gckEVENT_Notify(Event, 0));
++#else
++ /* Get the size of the hardware event. */
++ gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware,
++ gcvNULL,
++ id,
++ Event->queues[id].source,
++ &bytes));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ bytes,
++ &buffer,
++ &bytes));
++
++ /* Set the hardware event in the command queue. */
++ gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware,
++ buffer,
++ id,
++ Event->queues[id].source,
++ &bytes));
++
++ /* Execute the hardware event. */
++ gcmkONERROR(gckCOMMAND_Execute(command, bytes));
++#endif
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++ commitEntered = gcvFALSE;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower));
++ }
++
++ if (acquired)
++ {
++ /* Need to unroll the mutex acquire. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (id != 0xFF)
++ {
++ /* Need to unroll the event allocation. */
++ Event->queues[id].head = gcvNULL;
++ }
++
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Broadcast GPU stuck. */
++ status = gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_STUCK);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Commit
++**
++** Commit an event queue from the user.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsQUEUE_PTR Queue
++** User event queue.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ )
++{
++ gceSTATUS status;
++ gcsQUEUE_PTR record = gcvNULL, next;
++ gctUINT32 processID;
++ gctBOOL needCopy = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Query if we need to copy the client data. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy));
++
++ /* Loop while there are records in the queue. */
++ while (Queue != gcvNULL)
++ {
++ gcsQUEUE queue;
++
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ record = &queue;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(Event->os,
++ record,
++ Queue,
++ gcmSIZEOF(gcsQUEUE)));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(
++ gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE));
++
++ /* Next record in the queue. */
++ next = gcmUINT64_TO_PTR(record->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ record = gcvNULL;
++ }
++
++ Queue = next;
++ }
++
++ /* Submit the event list. */
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++ /* Success */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((record != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Compose
++**
++** Schedule a composition event and start a composition.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_COMPOSE_PTR Info
++** Pointer to the composition structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ )
++{
++ gceSTATUS status;
++ gcsEVENT_PTR headRecord;
++ gcsEVENT_PTR tailRecord;
++ gcsEVENT_PTR tempRecord;
++ gctUINT8 id = 0xFF;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Event=0x%x Info=0x%x", Event, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ headRecord = tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->process;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->signal;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++
++ /* Allocate another record for user signal #1. */
++ if (gcmUINT64_TO_PTR(Info->userSignal1) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal1;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate another record for user signal #2. */
++ if (gcmUINT64_TO_PTR(Info->userSignal2) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal2;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate an event ID. */
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, headRecord, gcvKERNEL_PIXEL));
++
++ /* Start composition. */
++ gcmkONERROR(gckHARDWARE_Compose(
++ Event->kernel->hardware, processID,
++ gcmUINT64_TO_PTR(Info->physical), gcmUINT64_TO_PTR(Info->logical), Info->offset, Info->size, id
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Interrupt
++**
++** Called by the interrupt service routine to store the triggered interrupt
++** mask to be later processed by gckEVENT_Notify.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 Data
++** Mask for the 32 interrupts.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++ IN gctUINT32 Data
++ )
++{
++ unsigned long flags;
++ gcmkHEADER_ARG("Event=0x%x Data=0x%x", Event, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Combine current interrupt status with pending flags. */
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ gckOS_AtomSetMask(Event->pending, Data);
++#elif defined(__QNXNTO__)
++ atomic_set(&Event->pending, Data);
++#else
++ Event->pending |= Data;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
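++/*
++** Illustrative sketch, not part of the original driver: the intended split
++** between interrupt context and thread context. The interrupt handler only
++** latches the triggered mask with gckEVENT_Interrupt(); the event records are
++** processed later, outside interrupt context, by gckEVENT_Notify(). The
++** function names _ExampleIsr and _ExampleThread are hypothetical.
++*/
++static void
++_ExampleIsr(
++    IN gckEVENT Event,
++    IN gctUINT32 TriggeredMask
++    )
++{
++    /* Interrupt context: just record the pending interrupt bits. */
++    gcmkVERIFY_OK(gckEVENT_Interrupt(Event, TriggeredMask));
++}
++
++static void
++_ExampleThread(
++    IN gckEVENT Event
++    )
++{
++    /* Thread context: walk the pending bits and execute the event records. */
++    gcmkVERIFY_OK(gckEVENT_Notify(Event, 0));
++}
++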
++/*******************************************************************************
++**
++** gckEVENT_Notify
++**
++** Process all triggered interrupts.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctINT i;
++ gcsEVENT_QUEUE * queue;
++ gctUINT mask = 0;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER info;
++ gctSIGNAL signal;
++ gctUINT pending;
++ gckKERNEL kernel = Event->kernel;
++#if !gcdSMP
++ gctBOOL suspended = gcvFALSE;
++#endif
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctINT eventNumber = 0;
++#endif
++ gctINT32 free;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ unsigned long flags;
++
++ gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ gcmDEBUG_ONLY(
++ if (IDs != 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ for (;;)
++ {
++ gcsEVENT_PTR record;
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Get current interrupts. */
++ gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending);
++#else
++ /* Get current interrupts. */
++ pending = Event->pending;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ if (pending & 0x80000000)
++ {
++ //gckOS_Print("!!!!!!!!!!!!! AXI BUS ERROR !!!!!!!!!!!!!\n");
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_EVENT, "AXI BUS ERROR");
++ pending &= 0x7FFFFFFF;
++ }
++
++ if (pending & 0x40000000)
++ {
++ gckHARDWARE_DumpMMUException(Event->kernel->hardware);
++
++ pending &= 0x3FFFFFFF;
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Pending interrupts 0x%x",
++ pending
++ );
++
++ if (pending == 0)
++ {
++ /* No more pending interrupts - done. */
++ break;
++ }
++
++ queue = gcvNULL;
++
++        /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmDEBUG_ONLY(
++ if (IDs == 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ /* Find the oldest pending interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (pending & (1 << i))
++ )
++ {
++ if ((queue == gcvNULL)
++ || (Event->queues[i].stamp < queue->stamp)
++ )
++ {
++ queue = &Event->queues[i];
++ mask = 1 << i;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ eventNumber = i;
++#endif
++ }
++ }
++ }
++
++ if (queue == gcvNULL)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Interrupts 0x%x are not pending.",
++ pending
++ );
++
++            /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Mark pending interrupts as handled. */
++ gckOS_AtomClearMask(Event->pending, pending);
++#elif defined(__QNXNTO__)
++ /* Mark pending interrupts as handled. */
++ atomic_clr((gctUINT32_PTR)&Event->pending, pending);
++#else
++ /* Mark pending interrupts as handled. */
++ Event->pending &= ~pending;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++ break;
++ }
++
++ /* Check whether there is a missed interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (Event->queues[i].stamp < queue->stamp)
++ && (Event->queues[i].source <= queue->source)
++ )
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp),
++ "Event %d lost (stamp %llu)",
++ i, Event->queues[i].stamp
++ );
++
++ /* Use this event instead. */
++ queue = &Event->queues[i];
++ mask = 0;
++ }
++ }
++
++ if (mask != 0)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(eventNumber),
++ "Processing interrupt %d",
++ eventNumber
++ );
++#endif
++ }
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Mark pending interrupt as handled. */
++ gckOS_AtomClearMask(Event->pending, mask);
++#elif defined(__QNXNTO__)
++ /* Mark pending interrupt as handled. */
++ atomic_clr(&Event->pending, mask);
++#else
++ /* Mark pending interrupt as handled. */
++ Event->pending &= ~mask;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ /* We are in the notify loop. */
++ Event->inNotify = gcvTRUE;
++
++ /* Grab the event head. */
++ record = queue->head;
++
++ /* Now quickly clear its event list. */
++ queue->head = gcvNULL;
++
++        /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Increase the number of free events. */
++ gcmkONERROR(gckOS_AtomIncrement(Event->os, Event->freeAtom, &free));
++
++ /* Walk all events for this interrupt. */
++ while (record != gcvNULL)
++ {
++ gcsEVENT_PTR recordNext;
++#ifndef __QNXNTO__
++ gctPOINTER logical;
++#endif
++#if gcdSECURE_USER
++ gctSIZE_T bytes;
++#endif
++
++ /* Grab next record. */
++ recordNext = record->next;
++
++#ifdef __QNXNTO__
++ /* Assign record->processID as the pid for this galcore thread.
++ * Used in OS calls like gckOS_UnlockMemory() which do not take a pid.
++ */
++ drv_thread_specific_key_assign(record->processID, 0, Event->kernel->core);
++#endif
++
++#if gcdSECURE_USER
++ /* Get the cache that belongs to this process. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel,
++ record->processID,
++ &cache));
++#endif
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Processing event type: %d",
++ record->info.command
++ );
++
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical));
++
++ /* Free non-paged memory. */
++ status = gckOS_FreeNonPagedMemory(
++ Event->os,
++ (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical));
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeNonPagedMemory.logical),
++ (gctSIZE_T) record->record.u.FreeNonPagedMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical));
++
++ /* Unmap the user memory. */
++ status = gckOS_FreeContiguous(
++ Event->os,
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes);
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->record.u.FreeContiguousMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(record->info.u.FreeVideoMemory.node);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_VIDEO_MEMORY: 0x%x",
++ node);
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ /* Check if the VidMem object still exists. */
++ if (gckKERNEL_GetVideoMemoryPoolPid(record->kernel,
++ gcvPOOL_SYSTEM,
++ record->processID,
++ gcvNULL) == gcvSTATUS_NOT_FOUND)
++ {
++ /*printf("Vidmem not found for process:%d\n", queue->processID);*/
++ status = gcvSTATUS_OK;
++ break;
++ }
++#else
++ if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ && (node->VidMem.logical != gcvNULL)
++ )
++ {
++ gcmkERR_BREAK(
++ gckKERNEL_UnmapVideoMemory(record->kernel,
++ node->VidMem.logical,
++ record->processID,
++ node->VidMem.bytes));
++ node->VidMem.logical = gcvNULL;
++ }
++#endif
++#endif
++
++ /* Free video memory. */
++ status =
++ gckVIDMEM_Free(node);
++
++ break;
++
++ case gcvHAL_WRITE_DATA:
++#ifndef __QNXNTO__
++ /* Convert physical into logical address. */
++ gcmkERR_BREAK(
++ gckOS_MapPhysical(Event->os,
++ record->info.u.WriteData.address,
++ gcmSIZEOF(gctUINT32),
++ &logical));
++
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ logical,
++ record->info.u.WriteData.data));
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(
++ gckOS_UnmapPhysical(Event->os,
++ logical,
++ gcmSIZEOF(gctUINT32)));
++#else
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ (gctPOINTER)
++ record->info.u.WriteData.address,
++ record->info.u.WriteData.data));
++#endif
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x",
++ node);
++
++ /* Save node information before it disappears. */
++#if gcdSECURE_USER
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock. */
++ status = gckVIDMEM_Unlock(
++ Event->kernel,
++ node,
++ record->info.u.UnlockVideoMemory.type,
++ gcvNULL);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status) && (logical != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++ break;
++
++ case gcvHAL_SIGNAL:
++ signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_SIGNAL: 0x%x",
++ signal);
++
++#ifdef __QNXNTO__
++ if ((record->info.u.Signal.coid == 0)
++ && (record->info.u.Signal.rcvid == 0)
++ )
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ record->info.u.Signal.rcvid,
++ record->info.u.Signal.coid));
++ }
++#else
++ /* Set signal. */
++ if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL)
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ gcmUINT64_TO_PTR(record->info.u.Signal.process)));
++ }
++
++ gcmkASSERT(record->info.u.Signal.auxSignal == 0);
++#endif
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ info = gcmNAME_TO_PTR(record->info.u.UnmapUserMemory.info);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNMAP_USER_MEMORY: 0x%x",
++ info);
++
++ /* Unmap the user memory. */
++ status = gckOS_UnmapUserMemory(
++ Event->os,
++ Event->kernel->core,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size,
++ info,
++ record->info.u.UnmapUserMemory.address);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size));
++ }
++#endif
++ gcmRELEASE_NAME(record->info.u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_TIMESTAMP: %d %d",
++ record->info.u.TimeStamp.timer,
++ record->info.u.TimeStamp.request);
++
++ /* Process the timestamp. */
++ switch (record->info.u.TimeStamp.request)
++ {
++ case 0:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ stopTime);
++ break;
++
++ case 1:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ startTime);
++ break;
++
++ default:
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.u.TimeStamp.request),
++ "Invalid timestamp request: %d",
++ record->info.u.TimeStamp.request
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel,
++ (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical)
++ ));
++ gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical);
++ break;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ syncPoint = gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint);
++ status = gckOS_SignalSyncPoint(Event->os, syncPoint);
++ }
++ break;
++#endif
++
++ case gcvHAL_COMMIT_DONE:
++ break;
++
++ default:
++ /* Invalid argument. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Unknown event type: %d",
++ record->info.command
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Make sure there are no errors generated. */
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(status),
++ "Event produced status: %d(%s)",
++ status, gckOS_DebugStatus2Name(status));
++ }
++
++ /* Free the event. */
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++
++ /* Advance to next record. */
++ record = recordNext;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Handled interrupt 0x%x", mask);
++ }
++
++ if (IDs == 0)
++ {
++ gcmkONERROR(_TryToIdleGPU(Event));
++ }
++
++    /* We are out of the notify loop. */
++ Event->inNotify = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++#if !gcdSMP
++ if (suspended)
++ {
++ /* Resume interrupts. */
++ gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ }
++#endif
++
++    /* We are out of the notify loop. */
++ Event->inNotify = gcvFALSE;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_FreeProcess
++**
++** Free all events owned by a particular process ID.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID of the process to be freed up.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ )
++{
++ gctSIZE_T i;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record, next;
++ gceSTATUS status;
++ gcsEVENT_PTR deleteHead, deleteTail;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Walk through all queues. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++            /* Grab the queue head and detach the record list. */
++ record = Event->queues[i].head;
++ Event->queues[i].head = gcvNULL;
++ Event->queues[i].tail = gcvNULL;
++ deleteHead = gcvNULL;
++ deleteTail = gcvNULL;
++
++ while (record != gcvNULL)
++ {
++ next = record->next;
++ if (record->processID == ProcessID)
++ {
++ if (deleteHead == gcvNULL)
++ {
++ deleteHead = record;
++ }
++ else
++ {
++ deleteTail->next = record;
++ }
++
++ deleteTail = record;
++ }
++ else
++ {
++ if (Event->queues[i].head == gcvNULL)
++ {
++ Event->queues[i].head = record;
++ }
++ else
++ {
++ Event->queues[i].tail->next = record;
++ }
++
++ Event->queues[i].tail = record;
++ }
++
++ record->next = gcvNULL;
++ record = next;
++ }
++
++            /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Loop through the entire list of events. */
++ for (record = deleteHead; record != gcvNULL; record = next)
++ {
++ /* Get the next event record. */
++ next = record->next;
++
++ /* Free the event record. */
++ gcmkONERROR(gckEVENT_FreeRecord(Event, record));
++ }
++ }
++ }
++
++ gcmkONERROR(_TryToIdleGPU(Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release the event queue mutex. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_Stop
++**
++** Stop the hardware using the End event mechanism.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++**          Process ID that Logical belongs to.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctSIZE_T * waitSize
++ )
++{
++ gceSTATUS status;
++ /* gctSIZE_T waitSize;*/
++ gcsEVENT_PTR record;
++ gctUINT8 id = 0xFF;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x "
++ "Signal=0x%x",
++ Event, ProcessID, Handle, Logical, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Submit the current event queue. */
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record));
++
++ /* Initialize the record. */
++ record->next = gcvNULL;
++ record->processID = ProcessID;
++ record->info.command = gcvHAL_SIGNAL;
++ record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ record->info.u.Signal.coid = 0;
++ record->info.u.Signal.rcvid = 0;
++#endif
++ record->info.u.Signal.auxSignal = 0;
++ record->info.u.Signal.process = 0;
++
++
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, record, gcvKERNEL_PIXEL));
++
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ Event->kernel->hardware, Logical, waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Event->os,
++ ProcessID,
++ gcvNULL,
++ Handle,
++ Logical,
++ *waitSize
++ ));
++#endif
++
++ /* Wait for the signal. */
++ gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_PrintRecord(
++ gcsEVENT_PTR record
++ )
++{
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY");
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY");
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_WRITE_DATA:
++ gcmkPRINT(" gcvHAL_WRITE_DATA");
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_SIGNAL:
++ gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x",
++ record->info.u.Signal.process,
++ record->info.u.Signal.signal);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY");
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkPRINT(" gcvHAL_TIMESTAMP");
++ break;
++
++ case gcvHAL_COMMIT_DONE:
++ gcmkPRINT(" gcvHAL_COMMIT_DONE");
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x",
++ record->info.u.FreeVirtualCommandBuffer.logical);
++ break;
++
++ default:
++ gcmkPRINT(" Illegal Event %d", record->info.command);
++ break;
++ }
++}
++
++/*******************************************************************************
++** gckEVENT_Dump
++**
++**  Dump the records in the event queue when the GPU gets stuck.
++**  The event queue is not protected while dumping.
++**/
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_QUEUE_PTR queueHead = Event->queueHead;
++ gcsEVENT_QUEUE_PTR queue;
++ gcsEVENT_PTR record = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** EVENT STATE DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++
++    gcmkPRINT("  Unsubmitted Event:");
++ while(queueHead)
++ {
++ queue = queueHead;
++ record = queueHead->head;
++
++ gcmkPRINT(" [%x]:", queue);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++
++ if (queueHead == Event->queueTail)
++ {
++ queueHead = gcvNULL;
++ }
++ else
++ {
++ queueHead = queueHead->next;
++ }
++ }
++
++ gcmkPRINT(" Untriggered Event:");
++ for (i = 0; i < 30; i++)
++ {
++ queue = &Event->queues[i];
++ record = queue->head;
++
++ gcmkPRINT(" [%d]:", i);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/* Busy-wait until the event module is no longer inside the notify loop and
++** has no pending events. */
++gceSTATUS gckEVENT_WaitEmpty(gckEVENT Event)
++{
++ gctBOOL isEmpty;
++
++ while (Event->inNotify || (gcmIS_SUCCESS(gckEVENT_IsEmpty(Event, &isEmpty)) && !isEmpty)) ;
++
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel.h 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,1011 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_h_
++#define __gc_hal_kernel_h_
++
++#include <linux/spinlock.h>
++
++#include "gc_hal.h"
++#include "gc_hal_kernel_hardware.h"
++#include "gc_hal_driver.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/*******************************************************************************
++***** New MMU Definitions ******************************************************/
++#define gcdMMU_MTLB_SHIFT 22
++#define gcdMMU_STLB_4K_SHIFT 12
++#define gcdMMU_STLB_64K_SHIFT 16
++
++#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT)
++#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT
++#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS)
++#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT
++#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS)
++
++#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS)
++#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2)
++#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS)
++#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT)
++#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS)
++#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT)
++
++#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1))
++#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1)
++#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1)
++
++/* Page offset definitions. */
++#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS)
++#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1)
++#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS)
++#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1)
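++
++/*
++** Worked example (added for clarity, not part of the original header): with
++** gcdMMU_MTLB_SHIFT = 22 the master TLB index uses 32 - 22 = 10 bits, so the
++** MTLB holds 1 << 10 = 1024 entries of 4 bytes each, i.e. 4 KB. For 4 KB
++** pages, gcdMMU_STLB_4K_BITS = 32 - 10 - 12 = 10, so each slave TLB also
++** holds 1024 entries (4 KB) and maps 1024 * 4 KB = 4 MB, which is exactly
++** the range covered by one MTLB entry (1 << gcdMMU_MTLB_SHIFT bytes).
++*/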
++
++/*******************************************************************************
++***** Process Secure Cache ****************************************************/
++
++#define gcdSECURE_CACHE_LRU 1
++#define gcdSECURE_CACHE_LINEAR 2
++#define gcdSECURE_CACHE_HASH 3
++#define gcdSECURE_CACHE_TABLE 4
++
++typedef struct _gcskLOGICAL_CACHE * gcskLOGICAL_CACHE_PTR;
++typedef struct _gcskLOGICAL_CACHE gcskLOGICAL_CACHE;
++struct _gcskLOGICAL_CACHE
++{
++ /* Logical address. */
++ gctPOINTER logical;
++
++ /* DMAable address. */
++ gctUINT32 dma;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Pointer to the previous and next hash tables. */
++ gcskLOGICAL_CACHE_PTR nextHash;
++ gcskLOGICAL_CACHE_PTR prevHash;
++#endif
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Pointer to the previous and next slot. */
++ gcskLOGICAL_CACHE_PTR next;
++ gcskLOGICAL_CACHE_PTR prev;
++#endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ /* Time stamp. */
++ gctUINT64 stamp;
++#endif
++};
++
++typedef struct _gcskSECURE_CACHE * gcskSECURE_CACHE_PTR;
++typedef struct _gcskSECURE_CACHE
++{
++ /* Cache memory. */
++ gcskLOGICAL_CACHE cache[1 + gcdSECURE_CACHE_SLOTS];
++
++ /* Last known index for LINEAR mode. */
++ gcskLOGICAL_CACHE_PTR cacheIndex;
++
++ /* Current free slot for LINEAR mode. */
++ gctUINT32 cacheFree;
++
++ /* Time stamp for LINEAR mode. */
++ gctUINT64 cacheStamp;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Hash table for HASH mode. */
++ gcskLOGICAL_CACHE hash[256];
++#endif
++}
++gcskSECURE_CACHE;
++
++/*******************************************************************************
++***** Process Database Management *********************************************/
++
++typedef enum _gceDATABASE_TYPE
++{
++ gcvDB_VIDEO_MEMORY = 1, /* Video memory created. */
++ gcvDB_COMMAND_BUFFER, /* Command Buffer. */
++ gcvDB_NON_PAGED, /* Non paged memory. */
++ gcvDB_CONTIGUOUS, /* Contiguous memory. */
++ gcvDB_SIGNAL, /* Signal. */
++ gcvDB_VIDEO_MEMORY_LOCKED, /* Video memory locked. */
++ gcvDB_CONTEXT, /* Context */
++ gcvDB_IDLE, /* GPU idle. */
++ gcvDB_MAP_MEMORY, /* Map memory */
++ gcvDB_SHARED_INFO, /* Private data */
++ gcvDB_MAP_USER_MEMORY, /* Map user memory */
++ gcvDB_SYNC_POINT, /* Sync point. */
++ gcvDB_VIDEO_MEMORY_RESERVED, /* Reserved video memory */
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS, /* Contiguous video memory */
++ gcvDB_VIDEO_MEMORY_VIRTUAL, /* Virtual video memory */
++}
++gceDATABASE_TYPE;
++
++typedef struct _gcsDATABASE_RECORD * gcsDATABASE_RECORD_PTR;
++typedef struct _gcsDATABASE_RECORD
++{
++ /* Pointer to kernel. */
++ gckKERNEL kernel;
++
++ /* Pointer to next database record. */
++ gcsDATABASE_RECORD_PTR next;
++
++ /* Type of record. */
++ gceDATABASE_TYPE type;
++
++ /* Data for record. */
++ gctPOINTER data;
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++}
++gcsDATABASE_RECORD;
++
++typedef struct _gcsDATABASE * gcsDATABASE_PTR;
++typedef struct _gcsDATABASE
++{
++ /* Pointer to next entry in hash list. */
++ gcsDATABASE_PTR next;
++ gctSIZE_T slot;
++
++ /* Process ID. */
++ gctUINT32 processID;
++
++ /* Sizes to query. */
++ gcsDATABASE_COUNTERS vidMem;
++ gcsDATABASE_COUNTERS nonPaged;
++ gcsDATABASE_COUNTERS contiguous;
++ gcsDATABASE_COUNTERS mapUserMemory;
++ gcsDATABASE_COUNTERS mapMemory;
++ gcsDATABASE_COUNTERS vidMemResv;
++ gcsDATABASE_COUNTERS vidMemCont;
++ gcsDATABASE_COUNTERS vidMemVirt;
++
++ /* Idle time management. */
++ gctUINT64 lastIdle;
++ gctUINT64 idle;
++
++ /* Pointer to database. */
++ gcsDATABASE_RECORD_PTR list[48];
++
++#if gcdSECURE_USER
++ /* Secure cache. */
++ gcskSECURE_CACHE cache;
++#endif
++
++ gctPOINTER handleDatabase;
++ gctPOINTER handleDatabaseMutex;
++}
++gcsDATABASE;
++
++/* Create a process database that will contain all its allocations. */
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Add a record to the process database. */
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ );
++
++/* Remove a record from the process database. */
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ );
++
++/* Destroy the process database. */
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Find a record in the process database. */
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ );
++
++/* Query the process database. */
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ );
++
++/* Dump the process database. */
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ );
++
++/* ID database */
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ );
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ );
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ );
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ );
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ );
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ );
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++#if gcdSECURE_USER
++/* Get secure cache from the process database. */
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ );
++#endif
++
++/*******************************************************************************
++********* Timer Management ****************************************************/
++typedef struct _gcsTIMER * gcsTIMER_PTR;
++typedef struct _gcsTIMER
++{
++ /* Start and Stop time holders. */
++ gctUINT64 startTime;
++ gctUINT64 stopTime;
++}
++gcsTIMER;
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckDB object. */
++struct _gckDB
++{
++ /* Database management. */
++ gcsDATABASE_PTR db[16];
++ gctPOINTER dbMutex;
++ gcsDATABASE_PTR freeDatabase;
++ gcsDATABASE_RECORD_PTR freeRecord;
++ gcsDATABASE_PTR lastDatabase;
++ gctUINT32 lastProcessID;
++ gctUINT64 lastIdle;
++ gctUINT64 idleTime;
++ gctUINT64 lastSlowdown;
++ gctUINT64 lastSlowdownIdle;
++ /* ID - Pointer database. */
++ gctPOINTER pointerDatabase;
++ gctPOINTER pointerDatabaseMutex;
++};
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++typedef struct _gckVIRTUAL_COMMAND_BUFFER * gckVIRTUAL_COMMAND_BUFFER_PTR;
++typedef struct _gckVIRTUAL_COMMAND_BUFFER
++{
++ gctPHYS_ADDR physical;
++ gctPOINTER userLogical;
++ gctPOINTER kernelLogical;
++ gctSIZE_T pageCount;
++ gctPOINTER pageTable;
++ gctUINT32 gpuAddress;
++ gctUINT pid;
++ gckVIRTUAL_COMMAND_BUFFER_PTR next;
++ gckVIRTUAL_COMMAND_BUFFER_PTR prev;
++ gckKERNEL kernel;
++}
++gckVIRTUAL_COMMAND_BUFFER;
++#endif
++
++/* gckKERNEL object. */
++struct _gckKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Pointer to gckCOMMAND object. */
++ gckCOMMAND command;
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckMMU mmu;
++
++ /* Atom holding number of clients. */
++ gctPOINTER atomClients;
++
++#if VIVANTE_PROFILER
++ /* Enable profiling */
++ gctBOOL profileEnable;
++
++ /* Clear profile register or not. */
++ gctBOOL profileCleanRegister;
++
++#endif
++
++#ifdef QNX_SINGLE_THREADED_DEBUGGING
++ gctPOINTER debugMutex;
++#endif
++
++ /* Database management. */
++ gckDB db;
++ gctBOOL dbCreated;
++
++#if gcdENABLE_RECOVERY
++ gctPOINTER resetFlagClearTimer;
++ gctPOINTER resetAtom;
++ gctUINT64 resetTimeStamp;
++#endif
++
++ /* Timers and time-out value. */
++ gcsTIMER timers[8];
++ gctUINT32 timeOut;
++
++#if gcdENABLE_VG
++ gckVGKERNEL vg;
++#endif
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferHead;
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferTail;
++ gctPOINTER virtualBufferLock;
++#endif
++
++#if gcdDVFS
++ gckDVFS dvfs;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gctHANDLE timeline;
++#endif
++
++ spinlock_t irq_lock;
++};
++
++struct _FrequencyHistory
++{
++ gctUINT32 frequency;
++ gctUINT32 count;
++};
++
++/* gckDVFS object. */
++struct _gckDVFS
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gctPOINTER timer;
++ gctUINT32 pollingTime;
++ gctBOOL stop;
++ gctUINT32 totalConfig;
++ gctUINT32 loads[8];
++ gctUINT8 currentScale;
++ struct _FrequencyHistory frequencyHistory[16];
++};
++
++/* gckCOMMAND object. */
++struct _gckCOMMAND
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to required object. */
++ gckKERNEL kernel;
++ gckOS os;
++
++ /* Number of bytes per page. */
++ gctSIZE_T pageSize;
++
++ /* Current pipe select. */
++ gcePIPE_SELECT pipeSelect;
++
++ /* Command queue running flag. */
++ gctBOOL running;
++
++ /* Idle flag and commit stamp. */
++ gctBOOL idle;
++ gctUINT64 commitStamp;
++
++ /* Command queue mutex. */
++ gctPOINTER mutexQueue;
++
++ /* Context switching mutex. */
++ gctPOINTER mutexContext;
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context sequence mutex. */
++ gctPOINTER mutexContextSeq;
++#endif
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++
++ /* Current command queue. */
++ struct _gcskCOMMAND_QUEUE
++ {
++ gctSIGNAL signal;
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ }
++ queues[gcdCOMMAND_QUEUES];
++
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 offset;
++ gctINT index;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctUINT wrapCount;
++#endif
++
++ /* The command queue is new. */
++ gctBOOL newQueue;
++
++ /* Context management. */
++ gckCONTEXT currContext;
++
++ /* Pointer to last WAIT command. */
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctSIZE_T waitSize;
++
++ /* Command buffer alignment. */
++ gctSIZE_T alignment;
++ gctSIZE_T reservedHead;
++ gctSIZE_T reservedTail;
++
++ /* Commit counter. */
++ gctPOINTER atomCommit;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* End Event signal. */
++ gctSIGNAL endEventSignal;
++
++#if gcdSECURE_USER
++ /* Hint array copy buffer. */
++ gctBOOL hintArrayAllocated;
++ gctUINT hintArraySize;
++ gctUINT32_PTR hintArray;
++#endif
++};
++
++typedef struct _gcsEVENT * gcsEVENT_PTR;
++
++/* Structure holding one event to be processed. */
++typedef struct _gcsEVENT
++{
++ /* Pointer to next event in queue. */
++ gcsEVENT_PTR next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE info;
++
++ /* Process ID owning the event. */
++ gctUINT32 processID;
++
++#ifdef __QNXNTO__
++ /* Kernel. */
++ gckKERNEL kernel;
++#endif
++
++ gctBOOL fromKernel;
++}
++gcsEVENT;
++
++/* Structure holding a list of events to be processed by an interrupt. */
++typedef struct _gcsEVENT_QUEUE * gcsEVENT_QUEUE_PTR;
++typedef struct _gcsEVENT_QUEUE
++{
++ /* Time stamp. */
++ gctUINT64 stamp;
++
++ /* Source of the event. */
++ gceKERNEL_WHERE source;
++
++ /* Pointer to head of event queue. */
++ gcsEVENT_PTR head;
++
++ /* Pointer to tail of event queue. */
++ gcsEVENT_PTR tail;
++
++ /* Next list of events. */
++ gcsEVENT_QUEUE_PTR next;
++}
++gcsEVENT_QUEUE;
++
++/*
++ gcdREPO_LIST_COUNT defines the maximum number of event queues with different
++ hardware module sources that may coexist at the same time. Only two sources
++ are supported - gcvKERNEL_COMMAND and gcvKERNEL_PIXEL. gcvKERNEL_COMMAND
++ source is used only for managing the kernel command queue and is only issued
++ when the current command queue gets full. Since we commit event queues every
++ time we commit command buffers, in the worst case we can have up to three
++ pending event queues:
++ - gcvKERNEL_PIXEL
++ - gcvKERNEL_COMMAND (queue overflow)
++ - gcvKERNEL_PIXEL
++*/
++#define gcdREPO_LIST_COUNT 3
++
++/* gckEVENT object. */
++struct _gckEVENT
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to required objects. */
++ gckOS os;
++ gckKERNEL kernel;
++
++ /* Time stamp. */
++ gctUINT64 stamp;
++ gctUINT64 lastCommitStamp;
++
++ /* Queue mutex. */
++ gctPOINTER eventQueueMutex;
++
++ /* Array of event queues. */
++ gcsEVENT_QUEUE queues[30];
++ gctUINT8 lastID;
++ gctPOINTER freeAtom;
++
++ /* Pending events. */
++#if gcdSMP
++ gctPOINTER pending;
++#else
++ volatile gctUINT pending;
++#endif
++
++ /* List of free event structures and its mutex. */
++ gcsEVENT_PTR freeEventList;
++ gctSIZE_T freeEventCount;
++ gctPOINTER freeEventMutex;
++
++ /* Event queues. */
++ gcsEVENT_QUEUE_PTR queueHead;
++ gcsEVENT_QUEUE_PTR queueTail;
++ gcsEVENT_QUEUE_PTR freeList;
++ gcsEVENT_QUEUE repoList[gcdREPO_LIST_COUNT];
++ gctPOINTER eventListMutex;
++
++ gctPOINTER submitTimer;
++
++ volatile gctBOOL inNotify;
++};
++
++/* Free all events belonging to a process. */
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctSIZE_T * waitSize
++ );
++
++gceSTATUS
++gckEVENT_WaitEmpty(
++ IN gckEVENT Event
++ );
++
++/* gcuVIDMEM_NODE structure. */
++typedef union _gcuVIDMEM_NODE
++{
++ /* Allocated from gckVIDMEM. */
++ struct _gcsVIDMEM_NODE_VIDMEM
++ {
++ /* Owner of this node. */
++ gckVIDMEM memory;
++
++ /* Dual-linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++ gcuVIDMEM_NODE_PTR prev;
++
++ /* Dual linked list of free nodes. */
++ gcuVIDMEM_NODE_PTR nextFree;
++ gcuVIDMEM_NODE_PTR prevFree;
++
++ /* Information for this node. */
++ gctUINT32 offset;
++ gctSIZE_T bytes;
++ gctUINT32 alignment;
++
++#ifdef __QNXNTO__
++ /* Client/server vaddr (mapped using mmap_join). */
++ gctPOINTER logical;
++#endif
++
++ /* Locked counter. */
++ gctINT32 locked;
++
++ /* Memory pool. */
++ gcePOOL pool;
++ gctUINT32 physical;
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Prevent compositor from freeing until client unlocks. */
++ gctBOOL freePending;
++
++ /* */
++ gcsVIDMEM_NODE_SHARED_INFO sharedInfo;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ gctPOINTER kernelVirtual;
++#endif
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ VidMem;
++
++ /* Allocated from gckOS. */
++ struct _gcsVIDMEM_NODE_VIRTUAL
++ {
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Information for this node. */
++ /* Contiguously allocated? */
++ gctBOOL contiguous;
++ /* mdl record pointer... a kmalloc address. Process agnostic. */
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++ /* do_mmap_pgoff address... mapped per-process. */
++ gctPOINTER logical;
++
++ /* Page table information. */
++ /* Used only when node is not contiguous */
++ gctSIZE_T pageCount;
++
++ /* Used only when node is not contiguous */
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ /* Pointer to the gckKERNEL object that locked this node. */
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ /* Actual physical address */
++ gctUINT32 addresses[gcdMAX_GPU_COUNT];
++
++ /* Mutex. */
++ gctPOINTER mutex;
++
++ /* Locked counter. */
++ gctINT32 lockeds[gcdMAX_GPU_COUNT];
++
++#ifdef __QNXNTO__
++ /* Single linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++
++ /* Unlock pending flag. */
++ gctBOOL unlockPendings[gcdMAX_GPU_COUNT];
++
++ /* Free pending flag. */
++ gctBOOL freePending;
++#endif
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Owner process sets freed to true
++ * when it tries to free a locked
++ * node */
++ gctBOOL freed;
++
++ /* */
++ gcsVIDMEM_NODE_SHARED_INFO sharedInfo;
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ Virtual;
++}
++gcuVIDMEM_NODE;
++
++/* gckVIDMEM object. */
++struct _gckVIDMEM
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Information for this video memory heap. */
++ gctUINT32 baseAddress;
++ gctSIZE_T bytes;
++ gctSIZE_T freeBytes;
++
++ /* Mapping for each type of surface. */
++ gctINT mapping[gcvSURF_NUM_TYPES];
++
++ /* Sentinel nodes for up to 8 banks. */
++ gcuVIDMEM_NODE sentinel[8];
++
++ /* Allocation threshold. */
++ gctSIZE_T threshold;
++
++ /* The heap mutex. */
++ gctPOINTER mutex;
++
++#if gcdUSE_VIDMEM_PER_PID
++ /* The Pid this VidMem belongs to. */
++ gctUINT32 pid;
++
++ struct _gckVIDMEM* next;
++#endif
++};
++
++/* gckMMU object. */
++struct _gckMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER pageTableMutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctUINT32_PTR pageTableLogical;
++ gctUINT32 pageTableEntries;
++
++ /* Master TLB information. */
++ gctSIZE_T mtlbSize;
++ gctPHYS_ADDR mtlbPhysical;
++ gctUINT32_PTR mtlbLogical;
++ gctUINT32 mtlbEntries;
++
++ /* Free entries. */
++ gctUINT32 heapList;
++ gctBOOL freeNodes;
++
++ gctPOINTER staticSTLB;
++ gctBOOL enabled;
++
++ gctUINT32 dynamicMappingStart;
++
++#ifdef __QNXNTO__
++ /* Single linked list of all allocated nodes. */
++ gctPOINTER nodeMutex;
++ gcuVIDMEM_NODE_PTR nodeList;
++#endif
++};
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gctPHYS_ADDR Physical,
++ OUT gctSIZE_T * PageCount,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ );
++#endif
++
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ );
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ );
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++
++/******************************************************************************\
++******************************* gckCONTEXT Object *******************************
++\******************************************************************************/
++
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ );
++
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ );
++
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ );
++
++#if gcdLINK_QUEUE_SIZE
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ );
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ );
++#endif
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_heap.c 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,859 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/**
++** @file
++** gckHEAP object for kernel HAL layer. The heap implemented here is an arena-
++** based memory allocator. An arena-based memory heap allocates data quickly
++** from specified arenas and reduces memory fragmentation.
++**
++*/
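++
++/*
++** Minimal usage sketch (illustrative only; the arena size passed to
++** gckHEAP_Construct below is an arbitrary example value):
++**
++** gckHEAP heap = gcvNULL;
++** gctPOINTER data = gcvNULL;
++**
++** gcmkONERROR(gckHEAP_Construct(Os, 64 << 10, &heap));
++** gcmkONERROR(gckHEAP_Allocate(heap, 128, &data));
++** ...
++** gcmkVERIFY_OK(gckHEAP_Free(heap, data));
++** gcmkVERIFY_OK(gckHEAP_Destroy(heap));
++*/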
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_HEAP
++
++/*******************************************************************************
++***** Structures ***************************************************************
++*******************************************************************************/
++
++#define gcdIN_USE ((gcskNODE_PTR) ~0)
++
++typedef struct _gcskNODE * gcskNODE_PTR;
++typedef struct _gcskNODE
++{
++ /* Number of bytes in node. */
++ gctSIZE_T bytes;
++
++ /* Pointer to next free node, or gcvNULL to mark the node as freed, or
++ ** gcdIN_USE to mark the node as used. */
++ gcskNODE_PTR next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Time stamp of allocation. */
++ gctUINT64 timeStamp;
++#endif
++}
++gcskNODE;
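++
++/* Allocation layout (sketch, matching gckHEAP_Allocate/gckHEAP_Free below):
++** a request for N bytes consumes gcmALIGN(N + gcmSIZEOF(gcskNODE), 8) bytes
++** of arena space; the gcskNODE header precedes the block and the caller
++** receives the address of (node + 1). gckHEAP_Free recovers the header with
++** (gcskNODE_PTR) Memory - 1 and marks it free by setting next to gcvNULL. */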
++
++typedef struct _gcskHEAP * gcskHEAP_PTR;
++typedef struct _gcskHEAP
++{
++ /* Linked list. */
++ gcskHEAP_PTR next;
++ gcskHEAP_PTR prev;
++
++ /* Heap size. */
++ gctSIZE_T size;
++
++ /* Free list. */
++ gcskNODE_PTR freeList;
++}
++gcskHEAP;
++
++struct _gckHEAP
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to a gckOS object. */
++ gckOS os;
++
++ /* Locking mutex. */
++ gctPOINTER mutex;
++
++ /* Allocation parameters. */
++ gctSIZE_T allocationSize;
++
++ /* Heap list. */
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT64 timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Profile information. */
++ gctUINT32 allocCount;
++ gctUINT64 allocBytes;
++ gctUINT64 allocBytesMax;
++ gctUINT64 allocBytesTotal;
++ gctUINT32 heapCount;
++ gctUINT32 heapCountMax;
++ gctUINT64 heapMemory;
++ gctUINT64 heapMemoryMax;
++#endif
++};
++
++/*******************************************************************************
++***** Static Support Functions *************************************************
++*******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
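++/* Debug helper: walk one arena, report every node still marked gcdIN_USE
++** as a leak, and return the total number of leaked bytes. */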
++static gctSIZE_T
++_DumpHeap(
++ IN gcskHEAP_PTR Heap
++ )
++{
++ gctPOINTER p;
++ gctSIZE_T leaked = 0;
++
++ /* Start at first node. */
++ for (p = Heap + 1;;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ /* Check if this is a used node. */
++ if (node->next == gcdIN_USE)
++ {
++ /* Print the leaking node. */
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP,
++ "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu "
++ "(%08X %c%c%c%c)",
++ node, node->bytes, node->timeStamp,
++ ((gctUINT32_PTR) (node + 1))[0],
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3]));
++
++ /* Add leaking byte count. */
++ leaked += node->bytes;
++ }
++
++ /* Test for end of heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ else
++ {
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++ }
++
++ /* Return the number of leaked bytes. */
++ return leaked;
++}
++#endif
++
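++/* Compact the kernel heap: walk every arena, rebuild its free list by
++** merging adjacent free nodes, and release arenas that have become
++** completely free back to gckOS. */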
++static gceSTATUS
++_CompactKernelHeap(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap, next;
++ gctPOINTER p;
++ gcskHEAP_PTR freeList = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = next)
++ {
++ gcskNODE_PTR lastFree = gcvNULL;
++
++ /* Zero out the free list. */
++ heap->freeList = gcvNULL;
++
++ /* Start at the first node. */
++ for (p = (gctUINT8_PTR) (heap + 1);;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));
++
++ /* Test if this node is not used. */
++ if (node->next != gcdIN_USE)
++ {
++ /* Test if this is the end of the heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ /* Test if this is the first free node. */
++ else if (lastFree == gcvNULL)
++ {
++ /* Initialize the free list. */
++ heap->freeList = node;
++ lastFree = node;
++ }
++
++ else
++ {
++ /* Test if this free node is contiguous with the previous
++ ** free node. */
++ if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
++ {
++ /* Just increase the size of the previous free node. */
++ lastFree->bytes += node->bytes;
++ }
++ else
++ {
++ /* Add to linked list. */
++ lastFree->next = node;
++ lastFree = node;
++ }
++ }
++ }
++
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++
++ /* Mark the end of the chain. */
++ if (lastFree != gcvNULL)
++ {
++ lastFree->next = gcvNULL;
++ }
++
++ /* Get next heap. */
++ next = heap->next;
++
++ /* Check if the entire heap is free. */
++ if ((heap->freeList != gcvNULL)
++ && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
++ )
++ {
++ /* Remove the heap from the linked list. */
++ if (heap->prev == gcvNULL)
++ {
++ Heap->heap = next;
++ }
++ else
++ {
++ heap->prev->next = next;
++ }
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount -= 1;
++ Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
++#endif
++
++ /* Add this heap to the list of heaps that need to be freed. */
++ heap->next = freeList;
++ freeList = heap;
++ }
++ }
++
++ if (freeList != gcvNULL)
++ {
++ /* Release the mutex to remove any chance of a deadlock. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Free all heaps in the free list. */
++ for (heap = freeList; heap != gcvNULL; heap = next)
++ {
++ /* Get pointer to the next heap. */
++ next = heap->next;
++
++ /* Free the heap. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Freeing heap 0x%x (%lu bytes)",
++ heap, heap->size + gcmSIZEOF(gcskHEAP));
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Acquire the mutex again. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++***** gckHEAP API Code *********************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHEAP_Construct
++**
++** Construct a new gckHEAP object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIZE_T AllocationSize
++** Minimum size per arena.
++**
++** OUTPUT:
++**
++** gckHEAP * Heap
++** Pointer to a variable that will hold the pointer to the gckHEAP
++** object.
++*/
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ )
++{
++ gceSTATUS status;
++ gckHEAP heap = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Heap != gcvNULL);
++
++ /* Allocate the gckHEAP object. */
++ gcmkONERROR(gckOS_AllocateMemory(Os,
++ gcmSIZEOF(struct _gckHEAP),
++ &pointer));
++
++ heap = pointer;
++
++ /* Initialize the gckHEAP object. */
++ heap->object.type = gcvOBJ_HEAP;
++ heap->os = Os;
++ heap->allocationSize = AllocationSize;
++ heap->heap = gcvNULL;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ heap->timeStamp = 0;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Zero the counters. */
++ heap->allocCount = 0;
++ heap->allocBytes = 0;
++ heap->allocBytesMax = 0;
++ heap->allocBytesTotal = 0;
++ heap->heapCount = 0;
++ heap->heapCountMax = 0;
++ heap->heapMemory = 0;
++ heap->heapMemoryMax = 0;
++#endif
++
++ /* Create the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex));
++
++ /* Return the pointer to the gckHEAP object. */
++ *Heap = heap;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Heap=0x%x", *Heap);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (heap != gcvNULL)
++ {
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Destroy
++**
++** Destroy a gckHEAP object.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctSIZE_T leaked = 0;
++#endif
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap)
++ {
++ /* Unlink heap from linked list. */
++ Heap->heap = heap->next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Check for leaked memory. */
++ leaked += _DumpHeap(heap);
++#endif
++
++ /* Free the heap. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex));
++
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap));
++
++ /* Success. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkFOOTER_ARG("leaked=%lu", leaked);
++#else
++ gcmkFOOTER_NO();
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Allocate
++**
++** Allocate data from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the address of the allocated
++** memory.
++*/
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctBOOL acquired = gcvFALSE;
++ gcskHEAP_PTR heap;
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gcskNODE_PTR node, used, prevFree = gcvNULL;
++ gctPOINTER memory = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Determine number of bytes required for a node. */
++ bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Check if this allocation is bigger than the default allocation size. */
++ if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
++ {
++ /* Adjust allocation size. */
++ Heap->allocationSize = bytes * 2;
++ }
++
++ else if (Heap->heap != gcvNULL)
++ {
++ gctINT i;
++
++ /* 2 retries, since we might need to compact. */
++ for (i = 0; i < 2; ++i)
++ {
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
++ {
++ /* Check if this heap has enough bytes to hold the request. */
++ if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
++ {
++ prevFree = gcvNULL;
++
++ /* Walk the chain of free nodes. */
++ for (node = heap->freeList;
++ node != gcvNULL;
++ node = node->next
++ )
++ {
++ gcmkASSERT(node->next != gcdIN_USE);
++
++ /* Check if this free node has enough bytes. */
++ if (node->bytes >= bytes)
++ {
++ /* Use the node. */
++ goto UseNode;
++ }
++
++ /* Save current free node for linked list management. */
++ prevFree = node;
++ }
++ }
++ }
++
++ if (i == 0)
++ {
++ /* Compact the heap. */
++ gcmkVERIFY_OK(_CompactKernelHeap(Heap));
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "===== KERNEL HEAP =====");
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of allocations : %12u",
++ Heap->allocCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of bytes allocated : %12llu",
++ Heap->allocBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum allocation size : %12llu",
++ Heap->allocBytesMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Total number of bytes allocated : %12llu",
++ Heap->allocBytesTotal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of heaps : %12u",
++ Heap->heapCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Heap memory in bytes : %12llu",
++ Heap->heapMemory);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum number of heaps : %12u",
++ Heap->heapCountMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum heap memory in bytes : %12llu",
++ Heap->heapMemoryMax);
++#endif
++ }
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ acquired = gcvFALSE;
++
++ /* Allocate a new heap. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Heap->os,
++ Heap->allocationSize,
++ &memory));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Allocated heap 0x%x (%lu bytes)",
++ memory, Heap->allocationSize);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Use the allocated memory as the heap. */
++ heap = (gcskHEAP_PTR) memory;
++
++ /* Insert this heap to the head of the chain. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap;
++ }
++ Heap->heap = heap;
++
++ /* Mark the end of the heap. */
++ node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
++ + Heap->allocationSize
++ - gcmSIZEOF(gcskNODE)
++ );
++ node->bytes = 0;
++ node->next = gcvNULL;
++
++ /* Create a free list. */
++ node = (gcskNODE_PTR) (heap + 1);
++ heap->freeList = node;
++
++ /* Initialize the free list. */
++ node->bytes = heap->size - gcmSIZEOF(gcskNODE);
++ node->next = gcvNULL;
++
++ /* No previous free. */
++ prevFree = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount += 1;
++ Heap->heapMemory += Heap->allocationSize;
++
++ if (Heap->heapCount > Heap->heapCountMax)
++ {
++ Heap->heapCountMax = Heap->heapCount;
++ }
++ if (Heap->heapMemory > Heap->heapMemoryMax)
++ {
++ Heap->heapMemoryMax = Heap->heapMemory;
++ }
++#endif
++
++UseNode:
++ /* Verify some stuff. */
++ gcmkASSERT(heap != gcvNULL);
++ gcmkASSERT(node != gcvNULL);
++ gcmkASSERT(node->bytes >= bytes);
++
++ if (heap->prev != gcvNULL)
++ {
++ /* Unlink the heap from the linked list. */
++ heap->prev->next = heap->next;
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++ /* Move the heap to the front of the list. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ Heap->heap = heap;
++ heap->next->prev = heap;
++ }
++
++ /* Check if there is enough free space left after usage for another free
++ ** node. */
++ if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
++ {
++ /* Allocate the used space from the back of the free node. */
++ used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);
++
++ /* Adjust the number of free bytes. */
++ node->bytes -= bytes;
++ gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
++ }
++ else
++ {
++ /* Remove this free list from the chain. */
++ if (prevFree == gcvNULL)
++ {
++ heap->freeList = node->next;
++ }
++ else
++ {
++ prevFree->next = node->next;
++ }
++
++ /* Consume the entire free node. */
++ used = (gcskNODE_PTR) node;
++ bytes = node->bytes;
++ }
++
++ /* Mark node as used. */
++ used->bytes = bytes;
++ used->next = gcdIN_USE;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ used->timeStamp = ++Heap->timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocCount += 1;
++ Heap->allocBytes += bytes;
++ Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
++ Heap->allocBytesTotal += bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Return pointer to memory. */
++ *Memory = used + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++ }
++
++ if (memory != gcvNULL)
++ {
++ /* Free the heap memory. */
++ gckOS_FreeMemory(Heap->os, memory);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Free
++**
++** Free allocated memory from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctPOINTER Memory
++** Pointer to memory to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Memory
++ )
++{
++ gcskNODE_PTR node;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ /* Pointer to structure. */
++ node = (gcskNODE_PTR) Memory - 1;
++
++ /* Mark the node as freed. */
++ node->next = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocBytes -= node->bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++
++ /* Zero the counters. */
++ Heap->allocCount = 0;
++ Heap->allocBytes = 0;
++ Heap->allocBytesMax = 0;
++ Heap->allocBytesTotal = 0;
++ Heap->heapCount = 0;
++ Heap->heapCountMax = 0;
++ Heap->heapMemory = 0;
++ Heap->heapMemoryMax = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x Title=0x%x", Heap, Title);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Title != gcvNULL);
++
++ gcmkPRINT("");
++ gcmkPRINT("=====[ HEAP - %s ]=====", Title);
++ gcmkPRINT("Number of allocations : %12u", Heap->allocCount);
++ gcmkPRINT("Number of bytes allocated : %12llu", Heap->allocBytes);
++ gcmkPRINT("Maximum allocation size : %12llu", Heap->allocBytesMax);
++ gcmkPRINT("Total number of bytes allocated : %12llu", Heap->allocBytesTotal);
++ gcmkPRINT("Number of heaps : %12u", Heap->heapCount);
++ gcmkPRINT("Heap memory in bytes : %12llu", Heap->heapMemory);
++ gcmkPRINT("Maximum number of heaps : %12u", Heap->heapCountMax);
++ gcmkPRINT("Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax);
++ gcmkPRINT("==============================================");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif /* VIVANTE_PROFILER */
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_interrupt_vg.c 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,877 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++/* Interrupt statistics will be accumulated if not zero. */
++#define gcmENABLE_INTERRUPT_STATISTICS 0
++
++#define _GC_OBJ_ZONE gcvZONE_INTERRUPT
++
++/* Object structure. */
++struct _gckVGINTERRUPT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* gckVGKERNEL pointer. */
++ gckVGKERNEL kernel;
++
++ /* gckOS pointer. */
++ gckOS os;
++
++ /* Interrupt handlers. */
++ gctINTERRUPT_HANDLER handlers[32];
++
++ /* Main interrupt handler thread. */
++ gctTHREAD handler;
++ gctBOOL terminate;
++
++ /* Interrupt FIFO. */
++ gctSEMAPHORE fifoValid;
++ gctUINT32 fifo[256];
++ gctUINT fifoItems;
++ gctUINT8 head;
++ gctUINT8 tail;
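++
++ /* Note: head and tail are 8-bit indices into the 256-entry FIFO above;
++ ** natural gctUINT8 wrap-around implements the circular buffer. Both
++ ** start at (gctUINT8)~0 and are incremented before use. */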
++
++ /* Interrupt statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT maxFifoItems;
++ gctUINT fifoOverflow;
++ gctUINT maxSimultaneous;
++ gctUINT multipleCount;
++#endif
++};
++
++
++/*******************************************************************************
++**
++** _ProcessInterrupt
++**
++** The interrupt processor.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt,
++ gctUINT_PTR TriggeredCount
++ )
++#else
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++ gctUINT i;
++
++ /* Advance to the next entry. */
++ Interrupt->tail += 1;
++ Interrupt->fifoItems -= 1;
++
++ /* Get the interrupt value. */
++ triggered = Interrupt->fifo[Interrupt->tail];
++ gcmkASSERT(triggered != 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: triggered=0x%08X\n",
++ __FUNCTION__,
++ triggered
++ );
++
++ /* Walk through all possible interrupts. */
++ for (i = 0; i < gcmSIZEOF(Interrupt->handlers); i += 1)
++ {
++ /* Test if interrupt happened. */
++ if ((triggered & 1) == 1)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (TriggeredCount != gcvNULL)
++ {
++ (* TriggeredCount) += 1;
++ }
++#endif
++
++ /* Make sure we have valid handler. */
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Interrupt %d isn't registered.\n",
++ __FUNCTION__, i
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: interrupt=%d\n",
++ __FUNCTION__,
++ i
++ );
++
++ /* Call the handler. */
++ status = Interrupt->handlers[i] (Interrupt->kernel);
++
++ if (gcmkIS_ERROR(status))
++ {
++ /* Failed to signal the semaphore. */
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Error %d incrementing the semaphore #%d.\n",
++ __FUNCTION__, status, i
++ );
++ }
++ }
++ }
++
++ /* Next interrupt. */
++ triggered >>= 1;
++
++ /* No more interrupts to handle? */
++ if (triggered == 0)
++ {
++ break;
++ }
++ }
++}
++
++
++/*******************************************************************************
++**
++** _MainInterruptHandler
++**
++** The main interrupt thread serves the interrupt FIFO and calls registered
++** handlers for the interrupts that occurred. The handlers are called in the
++** sequence in which the interrupts occurred, except when multiple interrupts
++** occurred at the same time. In that case the handler calls are "sorted" by
++** the interrupt number, therefore giving the interrupts with lower numbers
++** higher priority.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gctTHREADFUNCRESULT gctTHREADFUNCTYPE
++_MainInterruptHandler(
++ gctTHREADFUNCPARAMETER ThreadParameter
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT count;
++#endif
++
++ /* Cast the object. */
++ interrupt = (gckVGINTERRUPT) ThreadParameter;
++
++ /* Enter the loop. */
++ while (gcvTRUE)
++ {
++ /* Wait for an interrupt. */
++ status = gckOS_DecrementSemaphore(interrupt->os, interrupt->fifoValid);
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* System termination request? */
++ if (status == gcvSTATUS_TERMINATE)
++ {
++ break;
++ }
++
++ /* Driver is shutting down? */
++ if (interrupt->terminate)
++ {
++ break;
++ }
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Reset triggered count. */
++ count = 0;
++
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt, &count);
++
++ /* Update counters. */
++ if (count > interrupt->maxSimultaneous)
++ {
++ interrupt->maxSimultaneous = count;
++ }
++
++ if (count > 1)
++ {
++ interrupt->multipleCount += 1;
++ }
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt);
++#endif
++ }
++
++ return 0;
++}
++
++
++/*******************************************************************************
++**
++** _StartInterruptHandler / _StopInterruptHandler
++**
++** Main interrupt handler routine control.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gceSTATUS
++_StartInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status, last;
++
++ do
++ {
++ /* Objects must not be already created. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++ gcmkASSERT(Interrupt->handler == gcvNULL);
++
++ /* Reset the termination request. */
++ Interrupt->terminate = gcvFALSE;
++
++#if !gcdENABLE_INFINITE_SPEED_HW
++ /* Construct the fifo semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ Interrupt->os, &Interrupt->fifoValid
++ ));
++
++ /* Start the interrupt handler thread. */
++ gcmkERR_BREAK(gckOS_StartThread(
++ Interrupt->os,
++ _MainInterruptHandler,
++ Interrupt,
++ &Interrupt->handler
++ ));
++#endif
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (Interrupt->fifoValid != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ Interrupt->fifoValid = gcvNULL;
++ }
++
++ /* Return the status. */
++ return status;
++}
++
++static gceSTATUS
++_StopInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Does the thread exist? */
++ if (Interrupt->handler == gcvNULL)
++ {
++ /* The semaphore must be NULL as well. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* The semaphore must exist as well. */
++ gcmkASSERT(Interrupt->fifoValid != gcvNULL);
++
++ /* Set the termination request. */
++ Interrupt->terminate = gcvTRUE;
++
++ /* Unlock the thread. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Wait until the thread quits. */
++ gcmkERR_BREAK(gckOS_StopThread(
++ Interrupt->os,
++ Interrupt->handler
++ ));
++
++ /* Destroy the semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Reset handles. */
++ Interrupt->handler = gcvNULL;
++ Interrupt->fifoValid = gcvNULL;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++
++/******************************************************************************\
++***************************** Interrupt Object API *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Construct
++**
++** Construct an interrupt object.
++**
++** INPUT:
++**
++** Kernel
++** Pointer to the gckVGKERNEL object.
++**
++** OUTPUT:
++**
++** Interrupt
++** Pointer to the new gckVGINTERRUPT object.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interrupt=0x%x", Kernel, Interrupt);
++
++ /* Verify arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interrupt != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGINTERRUPT),
++ (gctPOINTER *) &interrupt
++ ));
++
++ /* Reset the object data. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ interrupt, gcmSIZEOF(struct _gckVGINTERRUPT)
++ ));
++
++ /* Initialize the object. */
++ interrupt->object.type = gcvOBJ_INTERRUPT;
++
++ /* Initialize the object pointers. */
++ interrupt->kernel = Kernel;
++ interrupt->os = Kernel->os;
++
++ /* Initialize the current FIFO position. */
++ interrupt->head = (gctUINT8)~0;
++ interrupt->tail = (gctUINT8)~0;
++
++ /* Start the thread. */
++ gcmkERR_BREAK(_StartInterruptHandler(interrupt));
++
++ /* Return interrupt object. */
++ *Interrupt = interrupt;
++
++ gcmkFOOTER_ARG("*Interrupt=0x%x", *Interrupt);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (interrupt != gcvNULL)
++ {
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkVERIFY_OK(gckOS_Free(interrupt->os, interrupt));
++ }
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Destroy
++**
++** Destroy an interrupt object.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to the gckVGINTERRUPT object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ do
++ {
++ /* Stop the interrupt thread. */
++ gcmkERR_BREAK(_StopInterruptHandler(Interrupt));
++
++ /* Mark the object as unknown. */
++ Interrupt->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Free(Interrupt->os, Interrupt));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_DumpState
++**
++** Print the current state of the interrupt manager.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcvDEBUG
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ /* Print the header. */
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: INTERRUPT OBJECT STATUS\n",
++ __FUNCTION__
++ );
++
++ /* Print statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of FIFO items accumulated at a single time: %d\n",
++ Interrupt->maxFifoItems
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Interrupt FIFO overflow happened times: %d\n",
++ Interrupt->fifoOverflow
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of interrupts simultaneously generated: %d\n",
++ Interrupt->maxSimultaneous
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Number of times when there were multiple interrupts generated: %d\n",
++ Interrupt->multipleCount
++ );
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " The current number of entries in the FIFO: %d\n",
++ Interrupt->fifoItems
++ );
++
++ /* Print the FIFO contents. */
++ if (Interrupt->fifoItems != 0)
++ {
++ gctUINT8 index;
++ gctUINT8 last;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " FIFO current contents:\n"
++ );
++
++ /* Get the current pointers. */
++ index = Interrupt->tail;
++ last = Interrupt->head;
++
++ while (index != last)
++ {
++ /* Advance to the next entry. */
++ index += 1;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " %d: 0x%08X\n",
++ index, Interrupt->fifo[index]
++ );
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enable
++**
++** Enable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Pointer to the variable that holds the interrupt number to be
++** registered in range 0..31.
++** If the value is less than 0, gckVGINTERRUPT_Enable will attempt
++** to find an unused interrupt. If such an interrupt is found, the number
++** will be assigned to the variable if the function call succeeds.
++**
++** Handler
++** Pointer to the handler to register for the interrupt.
++**
++** OUTPUT:
++**
++** Nothing.
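++**
++** EXAMPLE (illustrative sketch; _MyHandler names a hypothetical
++** gctINTERRUPT_HANDLER callback, not a function defined in this file):
++**
++** gceSTATUS status;
++** gctINT32 id = -1;
++**
++** status = gckVGINTERRUPT_Enable(interrupt, &id, _MyHandler);
++**
++** On success with a negative input, *Id holds the auto-assigned number.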
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ )
++{
++ gceSTATUS status;
++ gctINT32 i;
++
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x Handler=0x%x", Interrupt, Id, Handler);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Handler != gcvNULL);
++
++ do
++ {
++ /* See if we need to allocate an ID. */
++ if (*Id < 0)
++ {
++ /* Find the first unused interrupt handler. */
++ for (i = 0; i < gcmCOUNTOF(Interrupt->handlers); ++i)
++ {
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ break;
++ }
++ }
++
++ /* No unused interrupts? */
++ if (i == gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++
++ /* Update the interrupt ID. */
++ *Id = i;
++ }
++
++ /* Make sure the ID is in range. */
++ else if (*Id >= gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Set interrupt handler. */
++ Interrupt->handlers[*Id] = Handler;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Disable
++**
++** Disable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Interrupt number to be disabled in range 0..31.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x", Interrupt, Id);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT((Id >= 0) && (Id < gcmCOUNTOF(Interrupt->handlers)));
++
++ /* Reset interrupt handler. */
++ Interrupt->handlers[Id] = gcvNULL;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enque
++**
++** Read the interrupt status register and put the value in the interrupt FIFO.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#ifndef __QNXNTO__
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ )
++#else
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++#ifdef __QNXNTO__
++ *Os = gcvNULL;
++ *Semaphore = gcvNULL;
++#endif
++
++ do
++ {
++ /* Read interrupt status register. */
++ gcmkERR_BREAK(gckVGHARDWARE_ReadInterrupt(
++ Interrupt->kernel->hardware, &triggered
++ ));
++
++ /* Mask out TS overflow interrupt */
++ triggered &= 0xfffffffe;
++
++ /* No interrupts to process? */
++ if (triggered == 0)
++ {
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ break;
++ }
++
++ /* FIFO overflow? */
++ if (Interrupt->fifoItems == gcmCOUNTOF(Interrupt->fifo))
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ Interrupt->fifoOverflow += 1;
++#endif
++
++ /* OR the interrupt with the last value in the FIFO. */
++ Interrupt->fifo[Interrupt->head] |= triggered;
++
++ /* Success (kind of). */
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ /* Advance to the next entry. */
++ Interrupt->head += 1;
++ Interrupt->fifoItems += 1;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (Interrupt->fifoItems > Interrupt->maxFifoItems)
++ {
++ Interrupt->maxFifoItems = Interrupt->fifoItems;
++ }
++#endif
++
++ /* Set the new value. */
++ Interrupt->fifo[Interrupt->head] = triggered;
++
++#ifndef __QNXNTO__
++ /* Increment the FIFO semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++#else
++ *Os = Interrupt->os;
++ *Semaphore = Interrupt->fifoValid;
++#endif
++
++ /* Windows kills our threads prematurely when the application
++ exits. Verify here that the thread is still alive. */
++ status = gckOS_VerifyThread(Interrupt->os, Interrupt->handler);
++
++ /* Has the thread been prematurely terminated? */
++ if (status != gcvSTATUS_OK)
++ {
++ /* Process all accumulated interrupts. */
++ while (Interrupt->head != Interrupt->tail)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt, gcvNULL);
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt);
++#endif
++ }
++
++ /* Set success. */
++ status = gcvSTATUS_OK;
++ }
++ }
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu.c 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,1982 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++typedef enum _gceMMU_TYPE
++{
++ gcvMMU_USED = (0 << 4),
++ gcvMMU_SINGLE = (1 << 4),
++ gcvMMU_FREE = (2 << 4),
++}
++gceMMU_TYPE;
++
++#define gcmENTRY_TYPE(x) (x & 0xF0)
++
++#define gcdMMU_TABLE_DUMP 0
++
++#define gcdUSE_MMU_EXCEPTION 0
++
++/*
++ gcdMMU_CLEAR_VALUE
++
++ The clear value for the entry of the old MMU.
++*/
++#ifndef gcdMMU_CLEAR_VALUE
++# define gcdMMU_CLEAR_VALUE 0x00000ABC
++#endif
++
++/* VIV: Start GPU address for gcvSURF_VERTEX. */
++#define gcdVERTEX_START (128 << 10)
++
++typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR;
++
++typedef struct _gcsMMU_STLB
++{
++ gctPHYS_ADDR physical;
++ gctUINT32_PTR logical;
++ gctSIZE_T size;
++ gctUINT32 physBase;
++ gctSIZE_T pageCount;
++ gctUINT32 mtlbIndex;
++ gctUINT32 mtlbEntryNum;
++ gcsMMU_STLB_PTR next;
++} gcsMMU_STLB;
++
++#if gcdSHARED_PAGETABLE
++typedef struct _gcsSharedPageTable * gcsSharedPageTable_PTR;
++typedef struct _gcsSharedPageTable
++{
++ /* Shared gckMMU object. */
++ gckMMU mmu;
++
++ /* Hardware objects that use this shared page table. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores that use this shared page table. */
++ gctUINT32 reference;
++}
++gcsSharedPageTable;
++
++static gcsSharedPageTable_PTR sharedPageTable = gcvNULL;
++#endif
++
++#if gcdMIRROR_PAGETABLE
++typedef struct _gcsMirrorPageTable * gcsMirrorPageTable_PTR;
++typedef struct _gcsMirrorPageTable
++{
++ /* gckMMU objects. */
++ gckMMU mmus[gcdMAX_GPU_COUNT];
++
++ /* Hardware objects that use this shared page table. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores that use this shared page table. */
++ gctUINT32 reference;
++}
++gcsMirrorPageTable;
++
++static gcsMirrorPageTable_PTR mirrorPageTable = gcvNULL;
++static gctPOINTER mirrorPageTableMutex = gcvNULL;
++#endif
++
++typedef struct _gcsDynamicSpaceNode * gcsDynamicSpaceNode_PTR;
++typedef struct _gcsDynamicSpaceNode
++{
++ gctUINT32 start;
++ gctINT32 entries;
++}
++gcsDynamicSpaceNode;
++
++static void
++_WritePageEntry(
++ IN gctUINT32_PTR PageEntry,
++ IN gctUINT32 EntryValue
++ )
++{
++ static gctUINT16 data = 0xff00;
++
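++ /* The first byte of the 16-bit value 0xff00 reads back as 0xff only on a
++ big-endian CPU; in that case the entry is byte-swapped, since the hardware
++ presumably expects little-endian page table entries. */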
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ *PageEntry = gcmSWAB32(EntryValue);
++ }
++ else
++ {
++ *PageEntry = EntryValue;
++ }
++}
++
++static gctUINT32
++_ReadPageEntry(
++ IN gctUINT32_PTR PageEntry
++ )
++{
++ static gctUINT16 data = 0xff00;
++ gctUINT32 entryValue;
++
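++ /* Same endianness probe as in _WritePageEntry: swap the entry on big-endian hosts. */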
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ entryValue = *PageEntry;
++ return gcmSWAB32(entryValue);
++ }
++ else
++ {
++ return *PageEntry;
++ }
++}
++
++static gceSTATUS
++_FillPageTable(
++ IN gctUINT32_PTR PageTable,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 EntryValue
++)
++{
++ gctUINT i;
++
++ for (i = 0; i < PageCount; i++)
++ {
++ _WritePageEntry(PageTable + i, EntryValue);
++ }
++
++ return gcvSTATUS_OK;
++}
++
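++/* Free entries in the page table double as a linked list of free nodes: the
++** low 4 bits hold the gceMMU_TYPE tag, a gcvMMU_SINGLE entry keeps the index
++** of the next free node in bits 8..31, and a gcvMMU_FREE entry keeps its page
++** count in bits 8..31 with the next-node index stored in the entry after it. */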
++static gceSTATUS
++_Link(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Next
++ )
++{
++ if (Index >= Mmu->pageTableEntries)
++ {
++ /* Just move heap pointer. */
++ Mmu->heapList = Next;
++ }
++ else
++ {
++ /* Address page table. */
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++
++ /* Dispatch on node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[Index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Set single index. */
++ _WritePageEntry(&pageTable[Index], (Next << 8) | gcvMMU_SINGLE);
++ break;
++
++ case gcvMMU_FREE:
++ /* Set index. */
++ _WritePageEntry(&pageTable[Index + 1], Next);
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", Index);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_AddFree(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Node,
++ IN gctUINT32 Count
++ )
++{
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++
++ if (Count == 1)
++ {
++ /* Initialize a single page node. */
++ _WritePageEntry(pageTable + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Initialize the node. */
++ _WritePageEntry(pageTable + Node + 0, (Count << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + Node + 1, ~0U);
++ }
++
++ /* Append the node. */
++ return _Link(Mmu, Index, Node);
++}
++
++static gceSTATUS
++_Collect(
++ IN gckMMU Mmu
++ )
++{
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++ gceSTATUS status;
++ gctUINT32 i, previous, start = 0, count = 0;
++
++ previous = Mmu->heapList = ~0U;
++ Mmu->freeNodes = gcvFALSE;
++
++ /* Walk the entire page table. */
++ for (i = 0; i < Mmu->pageTableEntries; ++i)
++ {
++ /* Dispatch based on type of page. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[i])))
++ {
++ case gcvMMU_USED:
++ /* Used page, so close any open node. */
++ if (count > 0)
++ {
++ /* Add the node. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++
++ /* Reset the node. */
++ previous = start;
++ count = 0;
++ }
++ break;
++
++ case gcvMMU_SINGLE:
++ /* Single free node. */
++ if (count++ == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* A free node. */
++ if (count == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++
++ /* Advance the count. */
++ count += _ReadPageEntry(&pageTable[i]) >> 8;
++
++ /* Advance the index into the page table. */
++ i += (_ReadPageEntry(&pageTable[i]) >> 8) - 1;
++ break;
++
++ default:
++ gcmkFATAL("MMU page table correcupted at index %u!", i);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* See if we have an open node left. */
++ if (count > 0)
++ {
++ /* Add the node to the list. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_MMU,
++ "Performed a garbage collection of the MMU heap.");
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ return status;
++}
++
++static gctUINT32
++_SetPage(gctUINT32 PageAddress)
++{
++ return PageAddress
++ /* writable */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++}
++
++static gceSTATUS
++_FillFlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBase,
++ OUT gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcsMMU_STLB_PTR head = gcvNULL, pre = gcvNULL;
++ gctUINT32 start = PhysBase & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 end = (PhysBase + Size - 1) & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 mStart = start >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
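++ /* Walk every MTLB slot that covers [PhysBase, PhysBase + Size) and back each
++ ** empty slot with a 64KB-page STLB that maps the range one-to-one, so the
++ ** GPU sees these physical addresses at identical virtual addresses. */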
++ while (mStart <= mEnd)
++ {
++ gcmkASSERT(mStart < gcdMMU_MTLB_ENTRY_NUM);
++ if (*(Mmu->mtlbLogical + mStart) == 0)
++ {
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 last = (mStart == mEnd) ? sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1);
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os, sizeof(struct _gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->mtlbEntryNum = 0;
++ stlb->next = gcvNULL;
++ stlb->physical = gcvNULL;
++ stlb->logical = gcvNULL;
++ stlb->size = gcdMMU_STLB_64K_SIZE;
++ stlb->pageCount = 0;
++
++ if (pre == gcvNULL)
++ {
++ pre = head = stlb;
++ }
++ else
++ {
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = stlb;
++ pre = stlb;
++ }
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical));
++
++ gcmkONERROR(gckOS_ZeroMemory(stlb->logical, stlb->size));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Mmu->os,
++ stlb->logical,
++ &stlb->physBase));
++
++ if (stlb->physBase & (gcdMMU_STLB_64K_SIZE - 1))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + mStart,
++ stlb->physBase
++ /* 64KB page size */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0)
++ );
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ mStart,
++ _ReadPageEntry(Mmu->mtlbLogical + mStart));
++#endif
++
++ stlb->mtlbIndex = mStart;
++ stlb->mtlbEntryNum = 1;
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): STLB: logical:%08x -> physical:%08x\n",
++ __FUNCTION__, __LINE__,
++ stlb->logical,
++ stlb->physBase);
++#endif
++
++ while (sStart <= last)
++ {
++ gcmkASSERT(!(start & gcdMMU_PAGE_64K_MASK));
++ _WritePageEntry(stlb->logical + sStart, _SetPage(start));
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert STLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ sStart,
++ _ReadPageEntry(stlb->logical + sStart));
++#endif
++ /* next page. */
++ start += gcdMMU_PAGE_64K_SIZE;
++ sStart++;
++ stlb->pageCount++;
++ }
++
++ sStart = 0;
++ ++mStart;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++
++ /* Insert the stlb into staticSTLB. */
++ if (Mmu->staticSTLB == gcvNULL)
++ {
++ Mmu->staticSTLB = head;
++ }
++ else
++ {
++ gcmkASSERT(pre != gcvNULL);
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = Mmu->staticSTLB;
++ Mmu->staticSTLB = head;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ while (head != gcvNULL)
++ {
++ pre = head;
++ head = head->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_FindDynamicSpace(
++ IN gckMMU Mmu,
++ OUT gcsDynamicSpaceNode_PTR *Array,
++ OUT gctINT * Size
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer = gcvNULL;
++ gcsDynamicSpaceNode_PTR array = gcvNULL;
++ gctINT size = 0;
++ gctINT i = 0, nodeStart = -1, nodeEntries = 0;
++
++ /* Allocate memory for the array. */
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ gcmSIZEOF(*array) * (gcdMMU_MTLB_ENTRY_NUM / 2),
++ &pointer));
++
++ array = (gcsDynamicSpaceNode_PTR)pointer;
++
++ /* Loop all the entries. */
++ while (i < gcdMMU_MTLB_ENTRY_NUM)
++ {
++ if (!Mmu->mtlbLogical[i])
++ {
++ if (nodeStart < 0)
++ {
++ /* This is the first entry of the dynamic space. */
++ nodeStart = i;
++ nodeEntries = 1;
++ }
++ else
++ {
++ /* Other entries of the dynamic space. */
++ nodeEntries++;
++ }
++ }
++ else if (nodeStart >= 0)
++ {
++ /* Save the previous node. */
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++
++ /* Reset the start. */
++ nodeStart = -1;
++ nodeEntries = 0;
++ }
++
++ i++;
++ }
++
++ /* Save the previous node. */
++ if (nodeStart >= 0)
++ {
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++ }
++
++#if gcdMMU_TABLE_DUMP
++ for (i = 0; i < size; i++)
++ {
++ gckOS_Print("%s(%d): [%d]: start=%d, entries=%d.\n",
++ __FUNCTION__, __LINE__,
++ i,
++ array[i].start,
++ array[i].entries);
++ }
++#endif
++
++ *Array = array;
++ *Size = size;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (pointer != gcvNULL)
++ {
++ gckOS_Free(Mmu->os, pointer);
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_SetupDynamicSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDynamicSpaceNode_PTR nodeArray = gcvNULL;
++ gctINT i, nodeArraySize = 0;
++ gctUINT32 physical;
++ gctINT numEntries = 0;
++ gctUINT32_PTR pageTable;
++ gctBOOL acquired = gcvFALSE;
++
++ /* Find all the dynamic address space. */
++ gcmkONERROR(_FindDynamicSpace(Mmu, &nodeArray, &nodeArraySize));
++
++ /* TODO: We only use the largest one for now. */
++ for (i = 0; i < nodeArraySize; i++)
++ {
++ if (nodeArray[i].entries > numEntries)
++ {
++ Mmu->dynamicMappingStart = nodeArray[i].start;
++ numEntries = nodeArray[i].entries;
++ }
++ }
++
++ gckOS_Free(Mmu->os, (gctPOINTER)nodeArray);
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
++
++ /* Construct Slave TLB. */
++ gcmkONERROR(gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &Mmu->pageTableSize,
++ &Mmu->pageTablePhysical,
++ (gctPOINTER)&Mmu->pageTableLogical));
++
++#if gcdUSE_MMU_EXCEPTION
++ gcmkONERROR(_FillPageTable(Mmu->pageTableLogical,
++ Mmu->pageTableEntries,
++ /* Enable exception */
++ 1 << 1));
++#else
++ /* Invalidate all entries. */
++ gcmkONERROR(gckOS_ZeroMemory(Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++#endif
++
++ /* Initialization. */
++ pageTable = Mmu->pageTableLogical;
++ _WritePageEntry(pageTable, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Mmu->os,
++ Mmu->pageTableLogical,
++ &physical));
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Map to Master TLB. */
++ for (i = (gctINT)Mmu->dynamicMappingStart;
++ i < (gctINT)Mmu->dynamicMappingStart + numEntries;
++ i++)
++ {
++ _WritePageEntry(Mmu->mtlbLogical + i,
++ physical
++ /* 4KB page size */
++ | (0 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0)
++ );
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ i,
++ _ReadPageEntry(Mmu->mtlbLogical + i));
++#endif
++ physical += gcdMMU_STLB_4K_SIZE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (Mmu->pageTableLogical)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Construct
++**
++** Construct a new gckMMU object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckMMU * Mmu
++** Pointer to a variable that receives the gckMMU object pointer.
++*/
++gceSTATUS
++_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gckMMU mmu = gcvNULL;
++ gctUINT32_PTR pageTable;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckMMU object. */
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct _gckMMU), &pointer));
++
++ mmu = pointer;
++
++ /* Initialize the gckMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++ mmu->pageTableMutex = gcvNULL;
++ mmu->pageTableLogical = gcvNULL;
++ mmu->mtlbLogical = gcvNULL;
++ mmu->staticSTLB = gcvNULL;
++ mmu->enabled = gcvFALSE;
++#ifdef __QNXNTO__
++ mmu->nodeList = gcvNULL;
++ mmu->nodeMutex = gcvNULL;
++#endif
++
++ /* Create the page table mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex));
++
++#ifdef __QNXNTO__
++ /* Create the node list mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->nodeMutex));
++#endif
++
++ if (hardware->mmuVersion == 0)
++ {
++ mmu->pageTableSize = MmuSize;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &pointer));
++
++ mmu->pageTableLogical = pointer;
++
++ /* Compute number of entries in page table. */
++ mmu->pageTableEntries = mmu->pageTableSize / sizeof(gctUINT32);
++
++ /* Mark all pages as free. */
++ pageTable = mmu->pageTableLogical;
++
++#if gcdMMU_CLEAR_VALUE
++ _FillPageTable(pageTable, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
++#endif
++
++ _WritePageEntry(pageTable, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + 1, ~0U);
++ mmu->heapList = 0;
++ mmu->freeNodes = gcvFALSE;
++
++ /* Set page table address. */
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(hardware, (gctPOINTER) mmu->pageTableLogical));
++ }
++ else
++ {
++ /* Allocate the 4K mode MTLB table. */
++ mmu->mtlbSize = gcdMMU_MTLB_SIZE + 64;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->mtlbSize,
++ &mmu->mtlbPhysical,
++ &pointer));
++
++ mmu->mtlbLogical = pointer;
++
++ /* Invalidate all the entries. */
++ gcmkONERROR(
++ gckOS_ZeroMemory(pointer, mmu->mtlbSize));
++ }
++
++ /* Return the gckMMU object pointer. */
++ *Mmu = mmu;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Mmu=0x%x", *Mmu);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (mmu != gcvNULL)
++ {
++ if (mmu->pageTableLogical != gcvNULL)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->pageTablePhysical,
++ (gctPOINTER) mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ }
++
++ if (mmu->mtlbLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->mtlbPhysical,
++ (gctPOINTER) mmu->mtlbLogical,
++ mmu->mtlbSize));
++ }
++
++ if (mmu->pageTableMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->pageTableMutex));
++ }
++
++#ifdef __QNXNTO__
++ if (mmu->nodeMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->nodeMutex));
++ }
++#endif
++
++ /* Mark the gckMMU object as unknown. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the allocated memory. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, mmu));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Destroy
++**
++** Destroy a gckMMU object.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#ifdef __QNXNTO__
++ gcuVIDMEM_NODE_PTR node, next;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++#ifdef __QNXNTO__
++ /* Free all associated virtual memory. */
++ for (node = Mmu->nodeList; node != gcvNULL; node = next)
++ {
++ next = node->Virtual.next;
++ gcmkVERIFY_OK(gckVIDMEM_Free(node));
++ }
++#endif
++
++ while (Mmu->staticSTLB != gcvNULL)
++ {
++ gcsMMU_STLB_PTR pre = Mmu->staticSTLB;
++ Mmu->staticSTLB = pre->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): clean MTLB[%d]\n",
++ __FUNCTION__, __LINE__,
++ pre->mtlbIndex);
++#endif
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (Mmu->hardware->mmuVersion != 0)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->mtlbPhysical,
++ (gctPOINTER) Mmu->mtlbLogical,
++ Mmu->mtlbSize));
++ }
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++#ifdef __QNXNTO__
++ /* Delete the node list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->nodeMutex));
++#endif
++
++ /* Delete the page table mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Mark the gckMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckMMU object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, Mmu));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** _AdjustIndex
++**
++** Adjust the index from which we search for a usable node to make sure
++** the index allocated is not below Start.
++*/
++gceSTATUS
++_AdjustIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 Start,
++ OUT gctUINT32 * IndexAdjusted
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = Index;
++ gctUINT32_PTR map = Mmu->pageTableLogical;
++
++ gcmkHEADER();
++
++ for (; index < Mmu->pageTableEntries;)
++ {
++ gctUINT32 result = 0;
++ gctUINT32 nodeSize = 0;
++
++ if (index >= Start)
++ {
++ break;
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ nodeSize = 1;
++ break;
++
++ case gcvMMU_FREE:
++ nodeSize = map[index] >> 8;
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
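++ /* If PageCount pages were carved from the end of this node, the allocation
++ ** would start at 'result'; accept the node when that lands at or past Start. */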
++ if (nodeSize > PageCount)
++ {
++ result = index + (nodeSize - PageCount);
++
++ if (result >= Start)
++ {
++ break;
++ }
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ index = map[index] >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ index = map[index + 1];
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ *IndexAdjusted = index;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (sharedPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsSharedPageTable),
++ &pointer));
++ sharedPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(sharedPageTable,
++ sizeof(struct _gcsSharedPageTable)));
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu));
++ }
++ else if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Set page table address. */
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Kernel->hardware, (gctPOINTER) sharedPageTable->mmu->pageTableLogical));
++ }
++
++ *Mmu = sharedPageTable->mmu;
++
++ sharedPageTable->hardwares[sharedPageTable->reference] = Kernel->hardware;
++
++ sharedPageTable->reference++;
++
++ gcmkFOOTER_ARG("sharedPageTable->reference=%lu", sharedPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (sharedPageTable)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(gckMMU_Destroy(sharedPageTable->mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, sharedPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#elif gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (mirrorPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsMirrorPageTable),
++ &pointer));
++ mirrorPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(mirrorPageTable,
++ sizeof(struct _gcsMirrorPageTable)));
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &mirrorPageTableMutex));
++ }
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, Mmu));
++
++ mirrorPageTable->mmus[mirrorPageTable->reference] = *Mmu;
++
++ mirrorPageTable->hardwares[mirrorPageTable->reference] = Kernel->hardware;
++
++ mirrorPageTable->reference++;
++
++ gcmkFOOTER_ARG("mirrorPageTable->reference=%lu", mirrorPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mirrorPageTable && mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, mirrorPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#else
++ return _Construct(Kernel, MmuSize, Mmu);
++#endif
++}
++
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ sharedPageTable->reference--;
++
++ if (sharedPageTable->reference == 0)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(_Destroy(Mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, sharedPageTable));
++ }
++
++ return gcvSTATUS_OK;
++#elif gcdMIRROR_PAGETABLE
++ mirrorPageTable->reference--;
++
++ if (mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTable));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTableMutex));
++ }
++
++ return _Destroy(Mmu);
++#else
++ return _Destroy(Mmu);
++#endif
++}
++
++/*******************************************************************************
++**
++** gckMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS
++_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gctUINT32 index = 0, previous = ~0U, left;
++ gctUINT32_PTR pageTable;
++ gctBOOL gotIt;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ if (PageCount > Mmu->pageTableEntries)
++ {
++ gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
++ __FUNCTION__, __LINE__);
++
++ /* Not enough pages available. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Cast pointer to page table. */
++ for (pageTable = Mmu->pageTableLogical, gotIt = gcvFALSE; !gotIt;)
++ {
++ index = Mmu->heapList;
++
++ if ((Mmu->hardware->mmuVersion == 0) && (Type == gcvSURF_VERTEX))
++ {
++ gcmkONERROR(_AdjustIndex(
++ Mmu,
++ index,
++ PageCount,
++ gcdVERTEX_START / gcmSIZEOF(gctUINT32),
++ &index
++ ));
++ }
++
++ /* Walk the heap list. */
++ for (; !gotIt && (index < Mmu->pageTableEntries);)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Single nodes are valid if we only need 1 page. */
++ if (PageCount == 1)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&pageTable[index]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* Test if the node has enough space. */
++ if (PageCount <= (_ReadPageEntry(&pageTable[index]) >> 8))
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&pageTable[index + 1]);
++ }
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ /* Test if we are out of memory. */
++ if (index >= Mmu->pageTableEntries)
++ {
++ if (Mmu->freeNodes)
++ {
++ /* Time to move out the trash! */
++ gcmkONERROR(_Collect(Mmu));
++ }
++ else
++ {
++ gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
++ __FUNCTION__, __LINE__);
++
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&pageTable[index]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Check how many pages will be left. */
++ left = (_ReadPageEntry(&pageTable[index]) >> 8) - PageCount;
++ switch (left)
++ {
++ case 0:
++ /* The entire node is consumed, just unlink it. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&pageTable[index + 1])));
++ break;
++
++ case 1:
++ /* One page will remain. Convert the node to a single node and
++ ** advance the index. */
++ _WritePageEntry(&pageTable[index], (_ReadPageEntry(&pageTable[index + 1]) << 8) | gcvMMU_SINGLE);
++ index ++;
++ break;
++
++ default:
++ /* Enough pages remain for a new node. However, we will just adjust
++ ** the size of the current node and advance the index. */
++ _WritePageEntry(&pageTable[index], (left << 8) | gcvMMU_FREE);
++ index += left;
++ break;
++ }
++ break;
++ }
++
++ /* Mark node as used. */
++ gcmkONERROR(_FillPageTable(&pageTable[index], PageCount, gcvMMU_USED));
++
++ /* Return pointer to page table. */
++ *PageTable = &pageTable[index];
++
++ /* Build virtual address. */
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, &address));
++ }
++ else
++ {
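++ /* For the v2 MMU the GPU address encodes the MTLB slot in the upper bits
++ ** and the 4KB STLB slot below it, relative to the dynamic mapping area. */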
++ gctUINT32 masterOffset = index / gcdMMU_STLB_4K_ENTRY_NUM
++ + Mmu->dynamicMappingStart;
++ gctUINT32 slaveOffset = index % gcdMMU_STLB_4K_ENTRY_NUM;
++
++ address = (masterOffset << gcdMMU_MTLB_SHIFT)
++ | (slaveOffset << gcdMMU_STLB_4K_SHIFT);
++ }
++
++ if (Address != gcvNULL)
++ {
++ *Address = address;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageTable=0x%x *Address=%08x",
++ *PageTable, gcmOPT_VALUE(Address));
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR pageTable;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ /* Convert the pointer. */
++ pageTable = (gctUINT32_PTR) PageTable;
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdMMU_CLEAR_VALUE
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _FillPageTable(pageTable, PageCount, gcdMMU_CLEAR_VALUE);
++ }
++#endif
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(pageTable,
++ (~((1U<<8)-1)) | gcvMMU_SINGLE
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ | 1 << 1
++#endif
++ );
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(pageTable,
++ (PageCount << 8) | gcvMMU_FREE
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ | 1 << 1
++#endif
++ );
++ _WritePageEntry(pageTable + 1, ~0U);
++
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ gcmkVERIFY_OK(_FillPageTable(pageTable + 2, PageCount - 2, 1 << 1));
++#endif
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckMMU_AllocatePagesEx(
++ Mmu, PageCount, gcvSURF_UNKNOWN, PageTable, Address);
++}
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pageTable;
++ gctUINT32 address;
++ gctINT i;
++ gckMMU mmu;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL allocated = gcvFALSE;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++ acquired = gcvTRUE;
++
++ /* Allocate page table for current MMU. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ if (Mmu == mirrorPageTable->mmus[i])
++ {
++ gcmkONERROR(_AllocatePages(Mmu, PageCount, Type, PageTable, Address));
++ allocated = gcvTRUE;
++ }
++ }
++
++ /* Allocate page table for other MMUs. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (Mmu != mmu)
++ {
++ gcmkONERROR(_AllocatePages(mmu, PageCount, Type, &pageTable, &address));
++ gcmkASSERT(address == *Address);
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ acquired = gcvFALSE;
++
++ return gcvSTATUS_OK;
++OnError:
++
++ if (allocated)
++ {
++ /* Page tables for multiple GPUs are always kept identical, so it is
++ * impossible for the first one to allocate successfully while the others fail.
++ */
++ gcmkASSERT(0);
++ }
++
++ if (acquired)
++ {
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ }
++
++ return status;
++#else
++ return _AllocatePages(Mmu, PageCount, Type, PageTable, Address);
++#endif
++}
++
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctINT i;
++ gctUINT32 offset;
++ gckMMU mmu;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++
++ gcmkVERIFY_OK(_FreePages(Mmu, PageTable, PageCount));
++
++ offset = (gctUINT32)PageTable - (gctUINT32)Mmu->pageTableLogical;
++
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ gcmkVERIFY_OK(_FreePages(mmu, mmu->pageTableLogical + offset/4, PageCount));
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++
++ return gcvSTATUS_OK;
++#else
++ return _FreePages(Mmu, PageTable, PageCount);
++#endif
++}
++
++gceSTATUS
++gckMMU_Enable(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize
++ )
++{
++ gceSTATUS status;
++#if gcdSHARED_PAGETABLE
++ gckHARDWARE hardware;
++ gctINT i;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++#if gcdSHARED_PAGETABLE
++ if (Mmu->enabled)
++ {
++ gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
++ return gcvSTATUS_SKIP;
++ }
++#endif
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
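++ /* The v1 MMU was already programmed in _Construct via gckHARDWARE_SetMMU,
++ ** so there is nothing left to enable here. */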
++ /* Success. */
++ gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
++ return gcvSTATUS_SKIP;
++ }
++ else
++ {
++ if (PhysSize != 0)
++ {
++ gcmkONERROR(_FillFlatMapping(
++ Mmu,
++ PhysBaseAddr,
++ PhysSize
++ ));
++ }
++
++ gcmkONERROR(_SetupDynamicSpace(Mmu));
++
++#if gcdSHARED_PAGETABLE
++ for(i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware != gcvNULL)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ hardware,
++ gcvTRUE,
++ Mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvFALSE
++ ));
++ }
++ }
++#else
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ Mmu->hardware,
++ gcvTRUE,
++ Mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvFALSE
++ ));
++#endif
++
++ Mmu->enabled = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctUINT32_PTR pageEntry;
++ gctINT i;
++ gckMMU mmu;
++ gctUINT32 offset = (gctUINT32)PageEntry - (gctUINT32)Mmu->pageTableLogical;
++#endif
++
++ gctUINT32 data;
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ data = PageAddress;
++ }
++ else
++ {
++ data = _SetPage(PageAddress);
++ }
++
++ _WritePageEntry(PageEntry, data);
++
++#if gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
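++ /* Convert the byte offset into an entry index (entries are 32-bit). */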
++ pageEntry = mmu->pageTableLogical + offset / 4;
++
++ if (mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(pageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(pageEntry, _SetPage(PageAddress));
++ }
++ }
++
++ }
++#endif
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_InsertNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node)
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
++
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ Node->Virtual.next = Mmu->nodeList;
++ Mmu->nodeList = Node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_RemoveNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node)
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcuVIDMEM_NODE_PTR *iter;
++
++ gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
++
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ for (iter = &Mmu->nodeList; *iter; iter = &(*iter)->Virtual.next)
++ {
++ if (*iter == Node)
++ {
++ *iter = Node->Virtual.next;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckMMU Mmu,
++ IN gctUINT32 Pid
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR curr, next;
++
++ gcmkHEADER_ARG("Kernel=0x%x, Mmu=0x%x Pid=%u", Kernel, Mmu, Pid);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ for (curr = Mmu->nodeList; curr != gcvNULL; curr = next)
++ {
++ next = curr->Virtual.next;
++
++ if (curr->Virtual.processID == Pid)
++ {
++ while (curr->Virtual.unlockPendings[Kernel->core] == 0 && curr->Virtual.lockeds[Kernel->core] > 0)
++ {
++ gcmkONERROR(gckVIDMEM_Unlock(Kernel, curr, gcvSURF_TYPE_UNKNOWN, gcvNULL));
++ }
++
++ gcmkVERIFY_OK(gckVIDMEM_Free(curr));
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu
++ )
++{
++ gckHARDWARE hardware;
++#if gcdSHARED_PAGETABLE
++ gctINT i;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ continue;
++ }
++#endif
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware)
++ {
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++ }
++ }
++#elif gcdMIRROR_PAGETABLE
++ gctINT i;
++ for (i = 0; i < mirrorPageTable->reference; i++)
++ {
++ hardware = mirrorPageTable->hardwares[i];
++
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++ }
++#else
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32_PTR pageTable;
++ gctUINT32 index;
++ gctUINT32 mtlb, stlb;
++
++ gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkASSERT(Mmu->hardware->mmuVersion > 0);
++
++ mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++
++ if (Address >= 0x80000000)
++ {
++ pageTable = Mmu->pageTableLogical;
++
++ index = (mtlb - Mmu->dynamicMappingStart)
++ * gcdMMU_STLB_4K_ENTRY_NUM
++ + stlb;
++
++ gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************
++****************************** T E S T C O D E ******************************
++******************************************************************************/
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_mmu_vg.c 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,522 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++/*******************************************************************************
++**
++** gckVGMMU_Construct
++**
++** Construct a new gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGKERNEL Kernel
++** Pointer to an gckVGKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckVGMMU * Mmu
++** Pointer to a variable that receives the gckVGMMU object pointer.
++*/
++gceSTATUS gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckVGMMU * Mmu
++ )
++{
++ gckOS os;
++ gckVGHARDWARE hardware;
++ gceSTATUS status;
++ gckVGMMU mmu;
++ gctUINT32 * pageTable;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckVGHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckVGMMU object. */
++ status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);
++
++ if (status < 0)
++ {
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate gckVGMMU object.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Initialize the gckVGMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++
++ /* Create the mutex. */
++ status = gckOS_CreateMutex(os, &mmu->mutex);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Allocate the page table. */
++ mmu->pageTableSize = MmuSize;
++ status = gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Compute number of entries in page table. */
++ mmu->entryCount = mmu->pageTableSize / sizeof(gctUINT32);
++ mmu->entry = 0;
++
++ /* Mark the entire page table as available. */
++ pageTable = (gctUINT32 *) mmu->pageTableLogical;
++ for (i = 0; i < mmu->entryCount; i++)
++ {
++ pageTable[i] = (gctUINT32)~0;
++ }
++
++ /* Set page table address. */
++ status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
++ mmu->pageTablePhysical,
++ mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not program page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Return the gckVGMMU object pointer. */
++ *Mmu = mmu;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u entries at %p.(0x%08X)\n",
++ __FUNCTION__, __LINE__,
++ mmu->entryCount,
++ mmu->pageTableLogical,
++ mmu->pageTablePhysical
++ );
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_Destroy
++**
++** Destroy a gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex));
++
++ /* Mark the gckVGMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGMMU object. */
++ gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 tail, index, i;
++ gctUINT32 * table;
++ gctBOOL allocated = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x",
++ Mmu, PageCount, PageTable, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ if (PageCount > Mmu->entryCount)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): page table too small for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ gcmkFOOTER_NO();
++ /* Not enough pages available. */
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Grab the mutex. */
++ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE);
++
++ if (status < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): could not acquire mutex.\n"
++ ,__FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Compute the tail for this allocation. */
++ tail = Mmu->entryCount - PageCount;
++
++ /* Walk all entries until we find enough slots. */
++ for (index = Mmu->entry; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++
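++ /* Nothing free past the previous allocation point; wrap around and rescan
++ ** the table from the beginning. */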
++ if (!allocated)
++ {
++ if (status >= 0)
++ {
++ /* Walk all entries until we find enough slots. */
++ for (index = 0; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++ }
++ }
++
++ if (!allocated && (status >= 0))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): not enough free pages for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ /* Not enough empty slots available. */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ if (status >= 0)
++ {
++ /* Build virtual address. */
++ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware,
++ index,
++ 0,
++ Address);
++
++ if (status >= 0)
++ {
++ /* Update current entry into page table. */
++ Mmu->entry = index + PageCount;
++
++ /* Return pointer to page table. */
++ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ index,
++ *Address,
++ *PageTable
++ );
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex));
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32 * table;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=0x%x",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): freeing %u pages at index %u @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ ((gctUINT32 *) PageTable - (gctUINT32 *) Mmu->pageTableLogical),
++ PageTable
++ );
++
++ /* Convert pointer. */
++ table = (gctUINT32 *) PageTable;
++
++ /* Mark the page table entries as available. */
++ while (PageCount-- > 0)
++ {
++ *table++ = (gctUINT32)~0;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ *PageEntry = PageAddress;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ )
++{
++ gckVGHARDWARE hardware;
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_power.c 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_POWER
++
++/******************************************************************************\
++************************ Dynamic Voltage Frequency Setting *********************
++\******************************************************************************/
++#if gcdDVFS
++static gctUINT32
++_GetLoadHistory(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Select,
++ IN gctUINT32 Index
++)
++{
++ return Dvfs->loads[Index];
++}
++
++static void
++_IncreaseScale(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ if (Dvfs->currentScale < 32)
++ {
++ *Scale = Dvfs->currentScale + 8;
++ }
++ else
++ {
++ *Scale = Dvfs->currentScale + 8;
++ *Scale = gcmMIN(64, *Scale);
++ }
++}
++
++static void
++_RecordFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory *history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ if (history->frequency == 0)
++ {
++ history->frequency = Frequency;
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ history->count++;
++ }
++}
++
++static gctUINT32
++_GetFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory * history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ return history->count;
++ }
++
++ return 0;
++}
++
++static void
++_Policy(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ gctUINT8 load[4], nextLoad;
++ gctUINT8 scale;
++
++ /* Last 4 history. */
++ load[0] = (Load & 0xFF);
++ load[1] = (Load & 0xFF00) >> 8;
++ load[2] = (Load & 0xFF0000) >> 16;
++ load[3] = (Load & 0xFF000000) >> 24;
++
++ /* Determine target scale. */
++ if (load[0] > 54)
++ {
++ _IncreaseScale(Dvfs, Load, &scale);
++ }
++ else
++ {
++ nextLoad = (load[0] + load[1] + load[2] + load[3])/4;
++
++ scale = Dvfs->currentScale * (nextLoad) / 54;
++
++ scale = gcmMAX(1, scale);
++ scale = gcmMIN(64, scale);
++ }
++
++ Dvfs->totalConfig++;
++
++ Dvfs->loads[(load[0]-1)/8]++;
++
++ *Scale = scale;
++
++
++ if (Dvfs->totalConfig % 100 == 0)
++ {
++ gcmkPRINT("=======================================================");
++ gcmkPRINT("GPU Load: %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ 8, 16, 24, 32, 40, 48, 56, 64);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ _GetLoadHistory(Dvfs,2, 0),
++ _GetLoadHistory(Dvfs,2, 1),
++ _GetLoadHistory(Dvfs,2, 2),
++ _GetLoadHistory(Dvfs,2, 3),
++ _GetLoadHistory(Dvfs,2, 4),
++ _GetLoadHistory(Dvfs,2, 5),
++ _GetLoadHistory(Dvfs,2, 6),
++ _GetLoadHistory(Dvfs,2, 7)
++ );
++
++ gcmkPRINT("Frequency(MHz) %-8d %-8d %-8d %-8d %-8d",
++ 58, 120, 240, 360, 480);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d",
++ _GetFrequencyHistory(Dvfs, 58),
++ _GetFrequencyHistory(Dvfs,120),
++ _GetFrequencyHistory(Dvfs,240),
++ _GetFrequencyHistory(Dvfs,360),
++ _GetFrequencyHistory(Dvfs,480)
++ );
++ }
++}
++
++static void
++_TimerFunction(
++ gctPOINTER Data
++ )
++{
++ gceSTATUS status;
++ gckDVFS dvfs = (gckDVFS) Data;
++ gckHARDWARE hardware = dvfs->hardware;
++ gctUINT32 value;
++ gctUINT32 frequency;
++ gctUINT8 scale;
++ gctUINT32 t1, t2, consumed;
++
++ gckOS_GetTicks(&t1);
++
++ gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));
++
++    /* Determine target scale. */
++ _Policy(dvfs, value, &scale);
++
++ /* Set frequency and voltage. */
++ gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));
++
++ /* Query real frequency. */
++ gcmkONERROR(
++ gckOS_QueryGPUFrequency(hardware->os,
++ hardware->core,
++ &frequency,
++ &dvfs->currentScale));
++
++ _RecordFrequencyHistory(dvfs, frequency);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
++ "Current frequency = %d",
++ frequency);
++
++ /* Set period. */
++ gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));
++
++OnError:
++ /* Determine next querying time. */
++ gckOS_GetTicks(&t2);
++
++ consumed = gcmMIN(((long)t2 - (long)t1), 5);
++
++ if (dvfs->stop == gcvFALSE)
++ {
++ gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
++ dvfs->timer,
++ dvfs->pollingTime - consumed));
++ }
++
++ return;
++}
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Dvfs
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer;
++ gckDVFS dvfs = gcvNULL;
++ gckOS os = Hardware->os;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ /* Allocate a gckDVFS manager. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS));
++
++ dvfs = pointer;
++
++ /* Initialization. */
++ dvfs->hardware = Hardware;
++ dvfs->pollingTime = gcdDVFS_POLLING_TIME;
++ dvfs->os = Hardware->os;
++ dvfs->currentScale = 64;
++
++ /* Create a polling timer. */
++ gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer));
++
++ /* Initialize frequency and voltage adjustment helper. */
++ gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core));
++
++ /* Return result. */
++ *Dvfs = dvfs;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (dvfs)
++ {
++ if (dvfs->timer)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer));
++ }
++
++ gcmkOS_SAFE_FREE(os, dvfs);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++    /* Deinitialize the helper function. */
++ gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core));
++
++ /* DestroyTimer. */
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer));
++
++ gcmkOS_SAFE_FREE(Dvfs->os, Dvfs);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ gckHARDWARE_InitDVFS(Dvfs->hardware);
++
++ Dvfs->stop = gcvFALSE;
++
++ gckOS_StartTimer(Dvfs->os, Dvfs->timer, Dvfs->pollingTime);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ Dvfs->stop = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_precomp.h 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,29 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_precomp_h_
++#define __gc_hal_kernel_precomp_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++
++#endif /* __gc_hal_kernel_precomp_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.c 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,895 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define ENABLE_VG_TRY_VIRTUAL_MEMORY 0
++
++#define _GC_OBJ_ZONE gcvZONE_VG
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++gceSTATUS gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ )
++{
++ gceSTATUS status;
++ gckVGKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Os,
++ sizeof(struct _gckVGKERNEL),
++ (gctPOINTER *) &kernel
++ ));
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->context = Context;
++ kernel->hardware = gcvNULL;
++ kernel->interrupt = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->mmu = gcvNULL;
++ kernel->kernel = inKernel;
++
++ /* Construct the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckVGHARDWARE_Construct(
++ Os, &kernel->hardware
++ ));
++
++ /* Set pointer to gckKERNEL object in gckVGHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Construct the gckVGINTERRUPT object. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Construct(
++ kernel, &kernel->interrupt
++ ));
++
++ /* Construct the gckVGCOMMAND object. */
++ gcmkERR_BREAK(gckVGCOMMAND_Construct(
++ kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command
++ ));
++
++ /* Construct the gckVGMMU object. */
++ gcmkERR_BREAK(gckVGMMU_Construct(
++ kernel, gcmKB2BYTES(32), &kernel->mmu
++ ));
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (kernel != gcvNULL)
++ {
++ if (kernel->mmu != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->interrupt != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, kernel));
++ }
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ /* Destroy the gckVGMMU object. */
++ if (Kernel->mmu != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGMMU_Destroy(Kernel->mmu));
++ Kernel->mmu = gcvNULL;
++ }
++
++ /* Destroy the gckVGCOMMAND object. */
++ if (Kernel->command != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_Destroy(Kernel->command));
++ Kernel->command = gcvNULL;
++ }
++
++ /* Destroy the gckVGINTERRUPT object. */
++ if (Kernel->interrupt != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Destroy(Kernel->interrupt));
++ Kernel->interrupt = gcvNULL;
++ }
++
++ /* Destroy the gckVGHARDWARE object. */
++ if (Kernel->hardware != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_Destroy(Kernel->hardware));
++ Kernel->hardware = gcvNULL;
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Free(Kernel->os, Kernel));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_AllocateLinearMemory
++**
++** Function walks all required memory pools and allocates the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL * Pool
++**          Pointer to the desired memory pool.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctSIZE_T Alignment
++** Required buffer alignment.
++**
++** gceSURF_TYPE Type
++** Surface type.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to the actual pool where the memory was allocated.
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Allocated node.
++*/
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ break;
++
++ default:
++ break;
++ }
++
++ do
++ {
++ /* Verify the number of bytes to allocate. */
++ if (Bytes == 0)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkERR_BREAK(gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (status == gcvSTATUS_OK)
++ {
++ if(*Pool == gcvPOOL_SYSTEM)
++ Type |= gcvSURF_VG;
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ Node);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Memory allocated. */
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++ else if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++ else if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to virtual memory. */
++#if ENABLE_VG_TRY_VIRTUAL_MEMORY
++ pool = gcvPOOL_VIRTUAL;
++#else
++            /* VG non-contiguous memory support is not ready yet; disable it temporarily. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++#endif
++ }
++ else
++ {
++ /* Out of pools. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++ }
++ /* Loop only for multiple selection pools. */
++ while ((*Pool == gcvPOOL_DEFAULT)
++ || (*Pool == gcvPOOL_LOCAL)
++ || (*Pool == gcvPOOL_UNIFIED)
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Return pool used for allocation. */
++ *Pool = pool;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE * kernelInterface = Interface;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++ gctPOINTER info = gcvNULL;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interface=0x%x ", Kernel, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkERR_BREAK(gckKERNEL_QueryVideoMemory(
++ Kernel, kernelInterface
++ ));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkERR_BREAK(gckVGHARDWARE_QueryChipIdentity(
++ Kernel->vg->hardware,
++ &kernelInterface->u.QueryChipIdentity.chipModel,
++ &kernelInterface->u.QueryChipIdentity.chipRevision,
++ &kernelInterface->u.QueryChipIdentity.chipFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures2
++ ));
++ break;
++
++ case gcvHAL_QUERY_COMMAND_BUFFER:
++ /* Query command buffer information. */
++ gcmkERR_BREAK(gckKERNEL_QueryCommandBuffer(
++ Kernel,
++ &kernelInterface->u.QueryCommandBuffer.information
++ ));
++ break;
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate non-paged memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free non-paged memory. */
++ gcmkERR_BREAK(gckOS_FreeNonPagedMemory(
++ Kernel->os,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate contiguous memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical),
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++ {
++ gctSIZE_T bytes;
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bits;
++
++ /* Align width and height to tiles. */
++ gcmkERR_BREAK(gckVGHARDWARE_AlignToTile(
++ Kernel->vg->hardware,
++ kernelInterface->u.AllocateVideoMemory.type,
++ &kernelInterface->u.AllocateVideoMemory.width,
++ &kernelInterface->u.AllocateVideoMemory.height
++ ));
++
++ /* Convert format into bytes per pixel and bytes per tile. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertFormat(
++ Kernel->vg->hardware,
++ kernelInterface->u.AllocateVideoMemory.format,
++ &bitsPerPixel,
++ gcvNULL
++ ));
++
++ /* Compute number of bits for the allocation. */
++ bits
++ = kernelInterface->u.AllocateVideoMemory.width
++ * kernelInterface->u.AllocateVideoMemory.height
++ * kernelInterface->u.AllocateVideoMemory.depth
++ * bitsPerPixel;
++
++ /* Compute number of bytes for the allocation. */
++ bytes = gcmALIGN(bits, 8) / 8;
++
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel,
++ &kernelInterface->u.AllocateVideoMemory.pool,
++ bytes,
++ 64,
++ kernelInterface->u.AllocateVideoMemory.type,
++ &node
++ ));
++
++ kernelInterface->u.AllocateVideoMemory.node = gcmPTR_TO_UINT64(node);
++ }
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel,
++ &kernelInterface->u.AllocateLinearVideoMemory.pool,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes,
++ kernelInterface->u.AllocateLinearVideoMemory.alignment,
++ kernelInterface->u.AllocateLinearVideoMemory.type,
++ &node
++ ));
++
++ gcmkERR_BREAK(gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node,
++ gcvNULL,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes
++ ));
++
++ kernelInterface->u.AllocateLinearVideoMemory.node = gcmPTR_TO_UINT64(node);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.FreeVideoMemory.node);
++#ifdef __QNXNTO__
++ /* Unmap the video memory */
++
++ if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM) &&
++ (node->VidMem.logical != gcvNULL))
++ {
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ node->VidMem.logical,
++ processID,
++ node->VidMem.bytes);
++ node->VidMem.logical = gcvNULL;
++ }
++#endif /* __QNXNTO__ */
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Free(
++ node
++ ));
++
++ gcmkERR_BREAK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node
++ ));
++
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ /* Map memory. */
++ gcmkERR_BREAK(gckKERNEL_MapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ &logical
++ ));
++ kernelInterface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ /* Unmap memory. */
++ gcmkERR_BREAK(gckKERNEL_UnmapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapMemory.logical)
++ ));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkERR_BREAK(gckOS_MapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapUserMemory.memory),
++ kernelInterface->u.MapUserMemory.physical,
++ (gctSIZE_T) kernelInterface->u.MapUserMemory.size,
++ &info,
++ &kernelInterface->u.MapUserMemory.address
++ ));
++
++ kernelInterface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ /* Unmap user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) kernelInterface->u.UnmapUserMemory.size,
++ gcmNAME_TO_PTR(kernelInterface->u.UnmapUserMemory.info),
++ kernelInterface->u.UnmapUserMemory.address
++ ));
++ gcmRELEASE_NAME(kernelInterface->u.UnmapUserMemory.info);
++ break;
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node);
++
++ /* Lock video memory. */
++ gcmkERR_BREAK(
++ gckVIDMEM_Lock(Kernel,
++ node,
++ gcvFALSE,
++ &Interface->u.LockVideoMemory.address));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ processID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkERR_BREAK(
++ gckKERNEL_MapVideoMemoryEx(Kernel,
++ gcvCORE_VG,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &logical));
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ (gctUINT32)(Interface->u.LockVideoMemory.memory);
++#endif
++ gcmkERR_BREAK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ node = gcmUINT64_TO_PTR(Interface->u.UnlockVideoMemory.node);
++
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(
++ gckVIDMEM_Unlock(Kernel,
++ node,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++ if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
++ {
++            /* There isn't an event to unlock this node; remove the record now. */
++ gcmkERR_BREAK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node));
++ }
++
++ break;
++ case gcvHAL_USER_SIGNAL:
++#if !USE_NEW_LINUX_SIGNAL
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkERR_BREAK(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ /* Destroy the signal. */
++ gcmkERR_BREAK(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkERR_BREAK(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
++ }
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkERR_BREAK(gckVGCOMMAND_Commit(
++ Kernel->vg->command,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.context),
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.queue),
++ kernelInterface->u.VGCommit.entryCount,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.taskTable)
++ ));
++ break;
++ case gcvHAL_VERSION:
++ kernelInterface->u.Version.major = gcvVERSION_MAJOR;
++ kernelInterface->u.Version.minor = gcvVERSION_MINOR;
++ kernelInterface->u.Version.patch = gcvVERSION_PATCH;
++ kernelInterface->u.Version.build = gcvVERSION_BUILD;
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkERR_BREAK(
++ gckOS_GetBaseAddress(Kernel->os,
++ &kernelInterface->u.GetBaseAddress.baseAddress));
++ break;
++ default:
++ /* Invalid command. */
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++OnError:
++ /* Save status. */
++ kernelInterface->status = status;
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryCommandBuffer
++**
++** Query command buffer attributes.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsCOMMAND_BUFFER_INFO_PTR Information
++** Pointer to the information structure to receive buffer attributes.
++*/
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gceSTATUS status;
++
++    gcmkHEADER_ARG("Kernel=0x%x Information=0x%x",
++ Kernel, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the information. */
++ status = gckVGCOMMAND_QueryCommandBuffer(Kernel->vg->command, Information);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_vg.h 2015-05-01 14:57:59.531427001 -0500
+@@ -0,0 +1,85 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_vg_h_
++#define __gc_hal_kernel_vg_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel_hardware.h"
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckKERNEL object. */
++struct _gckVGKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* Pointer to gckINTERRUPT object. */
++ gckVGINTERRUPT interrupt;
++
++ /* Pointer to gckCOMMAND object. */
++ gckVGCOMMAND command;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckVGMMU mmu;
++
++ gckKERNEL kernel;
++};
++
++/* gckMMU object. */
++struct _gckVGMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER mutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctPOINTER pageTableLogical;
++
++ /* Allocation index. */
++ gctUINT32 entryCount;
++ gctUINT32 entry;
++};
++
++#endif /* __gc_hal_kernel_vg_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/gc_hal_kernel_video_memory.c 2015-05-01 14:57:59.535427001 -0500
+@@ -0,0 +1,2264 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_VIDMEM
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _Split
++**
++** Split a node on the required byte boundary.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the node to split.
++**
++** gctSIZE_T Bytes
++** Number of bytes to keep in the node.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gctBOOL
++** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
++** error.
++**
++*/
++static gctBOOL
++_Split(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER pointer = gcvNULL;
++
++ /* Make sure the byte boundary makes sense. */
++ if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate a new gcuVIDMEM_NODE object. */
++ if (gcmIS_ERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ &pointer)))
++ {
++ /* Error. */
++ return gcvFALSE;
++ }
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE structure. */
++ node->VidMem.offset = Node->VidMem.offset + Bytes;
++ node->VidMem.bytes = Node->VidMem.bytes - Bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.locked = 0;
++ node->VidMem.memory = Node->VidMem.memory;
++ node->VidMem.pool = Node->VidMem.pool;
++ node->VidMem.physical = Node->VidMem.physical;
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ gcmkASSERT(Node->VidMem.physical != 0);
++ gcmkASSERT(Node->VidMem.logical != gcvNULL);
++ node->VidMem.processID = Node->VidMem.processID;
++ node->VidMem.physical = Node->VidMem.physical + Bytes;
++ node->VidMem.logical = Node->VidMem.logical + Bytes;
++#else
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++#endif
++
++ /* Insert node behind specified node. */
++ node->VidMem.next = Node->VidMem.next;
++ node->VidMem.prev = Node;
++ Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind specified node. */
++ node->VidMem.nextFree = Node->VidMem.nextFree;
++ node->VidMem.prevFree = Node;
++ Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ /* Adjust size of specified node. */
++ Node->VidMem.bytes = Bytes;
++
++ /* Success. */
++ return gcvTRUE;
++}
++
++/*******************************************************************************
++**
++** _Merge
++**
++** Merge two adjacent nodes together.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the first of the two nodes to merge.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++*/
++static gceSTATUS
++_Merge(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gceSTATUS status;
++
++ /* Save pointer to next node. */
++ node = Node->VidMem.next;
++#if gcdUSE_VIDMEM_PER_PID
++ /* Check if the nodes are adjacent physically. */
++ if ( ((Node->VidMem.physical + Node->VidMem.bytes) != node->VidMem.physical) ||
++ ((Node->VidMem.logical + Node->VidMem.bytes) != node->VidMem.logical) )
++ {
++ /* Can't merge. */
++ return gcvSTATUS_OK;
++ }
++#else
++
++ /* This is a good time to make sure the heap is not corrupted. */
++ if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
++ {
++ /* Corrupted heap. */
++ gcmkASSERT(
++ Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++#endif
++
++ /* Adjust byte count. */
++ Node->VidMem.bytes += node->VidMem.bytes;
++
++ /* Unlink next node from linked list. */
++ Node->VidMem.next = node->VidMem.next;
++ Node->VidMem.nextFree = node->VidMem.nextFree;
++
++ Node->VidMem.next->VidMem.prev =
++ Node->VidMem.nextFree->VidMem.prevFree = Node;
++
++ /* Free next node. */
++ status = gcmkOS_SAFE_FREE(Os, node);
++ return status;
++}
++
++/******************************************************************************\
++******************************* gckVIDMEM API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVIDMEM_ConstructVirtual
++**
++** Construct a new gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T Bytes
++**          Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
++*/
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Kernel=0x%x Contiguous=%d Bytes=%lu", Kernel, Contiguous, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate an gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union for virtual memory. */
++ node->Virtual.kernel = Kernel;
++ node->Virtual.contiguous = Contiguous;
++ node->Virtual.logical = gcvNULL;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.lockeds[i] = 0;
++ node->Virtual.pageTables[i] = gcvNULL;
++ node->Virtual.lockKernels[i] = gcvNULL;
++ }
++
++ node->Virtual.mutex = gcvNULL;
++
++ gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
++
++#ifdef __QNXNTO__
++ node->Virtual.next = gcvNULL;
++ node->Virtual.freePending = gcvFALSE;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.unlockPendings[i] = gcvFALSE;
++ }
++#endif
++
++ node->Virtual.freed = gcvFALSE;
++
++ gcmkONERROR(gckOS_ZeroMemory(&node->Virtual.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
++
++ /* Create the mutex. */
++ gcmkONERROR(
++ gckOS_CreateMutex(os, &node->Virtual.mutex));
++
++ /* Allocate the virtual memory. */
++ gcmkONERROR(
++ gckOS_AllocatePagedMemoryEx(os,
++ node->Virtual.contiguous,
++ node->Virtual.bytes = Bytes,
++ &node->Virtual.physical));
++
++#ifdef __QNXNTO__
++ /* Register. */
++#if gcdENABLE_VG
++ if (Kernel->core != gcvCORE_VG)
++#endif
++ {
++ gckMMU_InsertNode(Kernel->mmu, node);
++ }
++#endif
++
++ /* Return pointer to the gcuVIDMEM_NODE union. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Created virtual node 0x%x for %u bytes @ 0x%x",
++ node, Bytes, node->Virtual.physical);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ if (node->Virtual.mutex != gcvNULL)
++ {
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex));
++ }
++
++ /* Free the structure. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_DestroyVirtual
++**
++** Destroy an gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gckOS os;
++ gctINT i;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++    /* Extract the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++#ifdef __QNXNTO__
++ /* Unregister. */
++#if gcdENABLE_VG
++ if (Node->Virtual.kernel->core != gcvCORE_VG)
++#endif
++ {
++ gcmkVERIFY_OK(
++ gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node));
++ }
++#endif
++
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, Node->Virtual.mutex));
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Node->Virtual.pageTables[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Free the pages. */
++ gcmkVERIFY_OK(gckVGMMU_FreePages(Node->Virtual.lockKernels[i]->vg->mmu,
++ Node->Virtual.pageTables[i],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages. */
++ gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.lockKernels[i]->mmu,
++ Node->Virtual.pageTables[i],
++ Node->Virtual.pageCount));
++ }
++ }
++ }
++
++ /* Delete the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Construct
++**
++** Construct a new gckVIDMEM object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 BaseAddress
++** Base address for the video memory heap.
++**
++** gctSIZE_T Bytes
++** Number of bytes in the video memory heap.
++**
++** gctSIZE_T Threshold
++**          Minimum number of bytes beyond an allocation before the node is
++** split. Can be used as a minimum alignment requirement.
++**
++** gctSIZE_T BankSize
++** Number of bytes per physical memory bank. Used by bank
++** optimization.
++**
++** OUTPUT:
++**
++** gckVIDMEM * Memory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object.
++*/
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T BankSize,
++ OUT gckVIDMEM * Memory
++ )
++{
++ gckVIDMEM memory = gcvNULL;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i, banks = 0;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
++ "BankSize=%lu",
++ Os, BaseAddress, Bytes, Threshold, BankSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Allocate the gckVIDMEM object. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
++
++ memory = pointer;
++
++ /* Initialize the gckVIDMEM object. */
++ memory->object.type = gcvOBJ_VIDMEM;
++ memory->os = Os;
++
++ /* Set video memory heap information. */
++ memory->baseAddress = BaseAddress;
++ memory->bytes = Bytes;
++ memory->freeBytes = Bytes;
++ memory->threshold = Threshold;
++ memory->mutex = gcvNULL;
++#if gcdUSE_VIDMEM_PER_PID
++ gcmkONERROR(gckOS_GetProcessID(&memory->pid));
++#endif
++
++ BaseAddress = 0;
++
++ /* Walk all possible banks. */
++ for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
++ {
++ gctSIZE_T bytes;
++
++ if (BankSize == 0)
++ {
++ /* Use all bytes for the first bank. */
++ bytes = Bytes;
++ }
++ else
++ {
++ /* Compute number of bytes for this bank. */
++ bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;
++
++ if (bytes > Bytes)
++ {
++ /* Make sure we don't exceed the total number of bytes. */
++ bytes = Bytes;
++ }
++ }
++
++ if (bytes == 0)
++ {
++            /* Mark the heap as not used. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = gcvNULL;
++ continue;
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = memory;
++
++ node->VidMem.next =
++ node->VidMem.prev =
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = &memory->sentinel[i];
++
++ node->VidMem.offset = BaseAddress;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = 0;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ gcmkONERROR(gckOS_ZeroMemory(&node->VidMem.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
++
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ node->VidMem.processID = memory->pid;
++ node->VidMem.physical = memory->baseAddress + BaseAddress;
++ gcmkONERROR(gckOS_GetLogicalAddressProcess(Os,
++ node->VidMem.processID,
++ node->VidMem.physical,
++ &node->VidMem.logical));
++#else
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++#endif
++
++ /* Initialize the linked list of nodes. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = node;
++
++ /* Mark sentinel. */
++ memory->sentinel[i].VidMem.bytes = 0;
++
++ /* Adjust address for next bank. */
++ BaseAddress += bytes;
++ Bytes -= bytes;
++ banks ++;
++ }
++
++ /* Assign all the bank mappings. */
++ memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
++ memory->mapping[gcvSURF_BITMAP] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_DEPTH] = banks - 1;
++ memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TEXTURE] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_VERTEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_INDEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
++
++#if gcdENABLE_VG
++ memory->mapping[gcvSURF_IMAGE] = 0;
++ memory->mapping[gcvSURF_MASK] = 0;
++ memory->mapping[gcvSURF_SCISSOR] = 0;
++#endif
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] INDEX: bank %d",
++ memory->mapping[gcvSURF_INDEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] VERTEX: bank %d",
++ memory->mapping[gcvSURF_VERTEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TEXTURE: bank %d",
++ memory->mapping[gcvSURF_TEXTURE]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] RENDER_TARGET: bank %d",
++ memory->mapping[gcvSURF_RENDER_TARGET]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] DEPTH: bank %d",
++ memory->mapping[gcvSURF_DEPTH]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TILE_STATUS: bank %d",
++ memory->mapping[gcvSURF_TILE_STATUS]);
++
++ /* Allocate the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));
++
++ /* Return pointer to the gckVIDMEM object. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (memory != gcvNULL)
++ {
++ if (memory->mutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
++ }
++
++ for (i = 0; i < banks; ++i)
++ {
++ /* Free the heap. */
++ gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
++ }
++
++ /* Free the object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Destroy
++**
++** Destroy an gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ )
++{
++ gcuVIDMEM_NODE_PTR node, next;
++ gctINT i;
++
++ gcmkHEADER_ARG("Memory=0x%x", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++        /* Bail out if the heap is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = next)
++ {
++ /* Save pointer to the next node. */
++ next = node->VidMem.next;
++
++ /* Free the node. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
++ }
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));
++
++ /* Mark the object as unknown. */
++ Memory->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVIDMEM object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Allocate
++**
++** Allocate rectangular memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctUINT Width
++** Width of rectangle to allocate. Make sure the width is properly
++** aligned.
++**
++** gctUINT Height
++** Height of rectangle to allocate. Make sure the height is properly
++** aligned.
++**
++** gctUINT Depth
++**          Depth of rectangle to allocate. This equals the number of
++** rectangles to allocate contiguously (i.e., for cubic maps and volume
++** textures).
++**
++** gctUINT BytesPerPixel
++** Number of bytes per pixel.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++**          Type of surface to allocate (used by bank optimization).
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_Allocate(
++ IN gckVIDMEM Memory,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT BytesPerPixel,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gctSIZE_T bytes;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Memory=0x%x Width=%u Height=%u Depth=%u BytesPerPixel=%u "
++ "Alignment=%u Type=%d",
++ Memory, Width, Height, Depth, BytesPerPixel, Alignment,
++ Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Width > 0);
++ gcmkVERIFY_ARGUMENT(Height > 0);
++ gcmkVERIFY_ARGUMENT(Depth > 0);
++ gcmkVERIFY_ARGUMENT(BytesPerPixel > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Compute linear size. */
++ bytes = Width * Height * Depth * BytesPerPixel;
++
++ /* Allocate through linear function. */
++ gcmkONERROR(
++ gckVIDMEM_AllocateLinear(Memory, bytes, Alignment, Type, Node));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_BANK_ALIGNMENT
++
++#if !gcdBANK_BIT_START
++#error gcdBANK_BIT_START not defined.
++#endif
++
++#if !gcdBANK_BIT_END
++#error gcdBANK_BIT_END not defined.
++#endif
++/*******************************************************************************
++** _GetSurfaceBankAlignment
++**
++** Return the byte offset required to make BaseAddress properly bank
++** aligned.
++**
++** INPUT:
++**
++** gceSURF_TYPE Type
++** Type of allocation.
++**
++** gctUINT32 BaseAddress
++** Base address of current video memory node.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR AlignmentOffset
++** Pointer to a variable that will hold the number of bytes to skip in
++** the current video memory node in order to make the node's base
++** address bank aligned.
++*/
++static gceSTATUS
++_GetSurfaceBankAlignment(
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 BaseAddress,
++ OUT gctUINT32_PTR AlignmentOffset
++ )
++{
++ gctUINT32 bank;
++ /* To retrieve the bank. */
++ static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
++ ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ /* To retrieve the bank and all the lower bytes. */
++ static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
++
++ switch (Type)
++ {
++ case gcvSURF_RENDER_TARGET:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the first bank. */
++ *AlignmentOffset = (bank == 0) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
++ break;
++
++ case gcvSURF_DEPTH:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the third bank. */
++ *AlignmentOffset = (bank == 2) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
++
++ /* Add a channel offset at the channel bit. */
++ *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
++ break;
++
++ default:
++ /* no alignment needed. */
++ *AlignmentOffset = 0;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
++ return gcvSTATUS_OK;
++}
++#endif
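++
++/* Worked example of the masks above (the bit positions are illustrative,
++** not the configured values): with gcdBANK_BIT_START = 12 and
++** gcdBANK_BIT_END = 14, bankMask = 0x00007000 (bits 14..12) and
++** byteMask = 0x00007FFF (bits 14..0). For a render target whose BaseAddress
++** is 0x00803000, bank = 3, so
++** *AlignmentOffset = (1 << 15) - (0x00803000 & 0x7FFF) = 0x8000 - 0x3000
++** = 0x5000 bytes, which skips ahead to the next address that falls in
++** bank 0. */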
++
++static gcuVIDMEM_NODE_PTR
++_FindNode(
++ IN gckVIDMEM Memory,
++ IN gctINT Bank,
++ IN gctSIZE_T Bytes,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Alignment
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++
++#if gcdENABLE_BANK_ALIGNMENT
++ gctUINT32 bankAlignment;
++ gceSTATUS status;
++#endif
++
++ if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
++ {
++ /* No free nodes left. */
++ return gcvNULL;
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++ /* Walk all free nodes until we have one that is big enough or we have
++ ** reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ gcmkONERROR(_GetSurfaceBankAlignment(
++ Type,
++ node->VidMem.memory->baseAddress + node->VidMem.offset,
++ &bankAlignment));
++
++ bankAlignment = gcmALIGN(bankAlignment, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0)
++ ? 0
++ : (*Alignment - (node->VidMem.offset % *Alignment));
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment + bankAlignment;
++ return node;
++ }
++ }
++#endif
++
++ /* Walk all free nodes until we have one that is big enough or we have
++ reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++
++ gctINT modulo = gckMATH_ModuloInt(node->VidMem.offset, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment;
++ return node;
++ }
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++OnError:
++#endif
++ /* Not enough memory. */
++ return gcvNULL;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_AllocateLinear
++**
++** Allocate linear memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to a gckVIDMEM object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++** Type of surface to allocate (used by bank optimization).
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++ gctINT bank, i;
++ gctBOOL acquired = gcvFALSE;
++#if gcdSMALL_BLOCK_SIZE
++ gctBOOL force_allocate = (Type == gcvSURF_TILE_STATUS) || (Type & gcvSURF_VG);
++#endif
++
++ gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
++ Memory, Bytes, Alignment, Type);
++
++ Type &= ~gcvSURF_VG;
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++#if !gcdUSE_VIDMEM_PER_PID
++
++ if (Bytes > Memory->freeBytes)
++ {
++ /* Not enough memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++#if gcdSMALL_BLOCK_SIZE
++ if ((!force_allocate) && (Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
++ && (Bytes >= gcdSMALL_BLOCK_SIZE)
++ )
++ {
++ /* The remaining memory is reserved for small allocations. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++ /* Find the default bank for this surface type. */
++ gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
++ bank = Memory->mapping[Type];
++ alignment = Alignment;
++
++#if gcdUSE_VIDMEM_PER_PID
++ if (Bytes <= Memory->freeBytes)
++ {
++#endif
++ /* Find a free node in the default bank. */
++ node = _FindNode(Memory, bank, Bytes, Type, &alignment);
++
++ /* Out of memory? */
++ if (node == gcvNULL)
++ {
++ /* Walk all lower banks. */
++ for (i = bank - 1; i >= 0; --i)
++ {
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Walk all upper banks. */
++ for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
++ {
++ /* Abort when we reach unused banks. */
++ break;
++ }
++
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++#if gcdUSE_VIDMEM_PER_PID
++ }
++#endif
++
++ if (node == gcvNULL)
++ {
++ /* Out of memory. */
++#if gcdUSE_VIDMEM_PER_PID
++ /* Allocate more memory from shared pool. */
++ gctSIZE_T bytes;
++ gctPHYS_ADDR physical_temp;
++ gctUINT32 physical;
++ gctPOINTER logical;
++
++ bytes = gcmALIGN(Bytes, gcdUSE_VIDMEM_PER_PID_SIZE);
++
++ gcmkONERROR(gckOS_AllocateContiguous(Memory->os,
++ gcvTRUE,
++ &bytes,
++ &physical_temp,
++ &logical));
++
++ /* The physical address is returned as 0 for user space; work around it here. */
++ if (physical_temp == gcvNULL)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(Memory->os, logical, &physical));
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(
++ gckOS_Allocate(Memory->os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ (gctPOINTER *) &node));
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = Memory;
++
++ node->VidMem.offset = 0;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = physical;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#ifdef __QNXNTO__
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++ node->VidMem.logical = logical;
++ gcmkASSERT(logical != gcvNULL);
++#endif
++
++ /* Insert node behind sentinel node. */
++ node->VidMem.next = Memory->sentinel[bank].VidMem.next;
++ node->VidMem.prev = &Memory->sentinel[bank];
++ Memory->sentinel[bank].VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind sentinel node. */
++ node->VidMem.nextFree = Memory->sentinel[bank].VidMem.nextFree;
++ node->VidMem.prevFree = &Memory->sentinel[bank];
++ Memory->sentinel[bank].VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ Memory->freeBytes += bytes;
++#else
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++#endif
++ }
++
++ /* Do we have an alignment? */
++ if (alignment > 0)
++ {
++ /* Split the node so it is aligned. */
++ if (_Split(Memory->os, node, alignment))
++ {
++ /* Successful split, move to aligned node. */
++ node = node->VidMem.next;
++
++ /* Remove alignment. */
++ alignment = 0;
++ }
++ }
++
++ /* Do we have enough memory after the allocation to split it? */
++ if (node->VidMem.bytes - Bytes > Memory->threshold)
++ {
++ /* Adjust the node size. */
++ _Split(Memory->os, node, Bytes);
++ }
++
++ /* Remove the node from the free list. */
++ node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
++ node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = gcvNULL;
++
++ /* Fill in the information. */
++ node->VidMem.alignment = alignment;
++ node->VidMem.memory = Memory;
++#ifdef __QNXNTO__
++#if !gcdUSE_VIDMEM_PER_PID
++ node->VidMem.logical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++#else
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++#endif
++#endif
++
++ /* Adjust the number of free bytes. */
++ Memory->freeBytes -= node->VidMem.bytes;
++
++ node->VidMem.freePending = gcvFALSE;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++
++ /* Return the pointer to the node. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Allocated %u bytes @ 0x%x [0x%08X]",
++ node->VidMem.bytes, node, node->VidMem.offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
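++
++/* Usage sketch (hypothetical caller; "videoMemory" stands for a constructed
++** gckVIDMEM object and the 1 MB size / 4 KB alignment are arbitrary):
++** allocate a linear bitmap buffer and release it again with gckVIDMEM_Free()
++** once it is no longer needed.
++**
++**     gcuVIDMEM_NODE_PTR node;
++**     gceSTATUS status = gckVIDMEM_AllocateLinear(videoMemory,
++**                                                 1 << 20,
++**                                                 4096,
++**                                                 gcvSURF_BITMAP,
++**                                                 &node);
++**     if (status == gcvSTATUS_OK)
++**     {
++**         ... use the node ...
++**         gcmkVERIFY_OK(gckVIDMEM_Free(node));
++**     }
++*/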
++
++/*******************************************************************************
++**
++** gckVIDMEM_Free
++**
++** Free an allocated video memory node.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Free(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gckVIDMEM memory = gcvNULL;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 i, totalLocked;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Node->VidMem.locked > 0)
++ {
++ /* Client still has a lock; defer the free until the lock count reaches 0. */
++ Node->VidMem.freePending = gcvTRUE;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is locked (%d)... deferring free.",
++ Node, Node->VidMem.locked);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract pointer to gckVIDMEM object owning the node. */
++ memory = Node->VidMem.memory;
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
++
++ mutexAcquired = gcvTRUE;
++
++#ifdef __QNXNTO__
++#if !gcdUSE_VIDMEM_PER_PID
++ /* Reset. */
++ Node->VidMem.processID = 0;
++ Node->VidMem.logical = gcvNULL;
++#endif
++
++ /* Don't try to re-free an already freed node. */
++ if ((Node->VidMem.nextFree == gcvNULL)
++ && (Node->VidMem.prevFree == gcvNULL)
++ )
++#endif
++ {
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (Node->VidMem.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->VidMem.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(memory->os,
++ Node->VidMem.kernelVirtual,
++ Node->VidMem.bytes));
++
++ Node->VidMem.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Check if Node is already freed. */
++ if (Node->VidMem.nextFree)
++ {
++ /* Node is already freed. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ /* Update the number of free bytes. */
++ memory->freeBytes += Node->VidMem.bytes;
++
++ /* Find the next free node. */
++ for (node = Node->VidMem.next;
++ node != gcvNULL && node->VidMem.nextFree == gcvNULL;
++ node = node->VidMem.next) ;
++
++ /* Insert this node in the free list. */
++ Node->VidMem.nextFree = node;
++ Node->VidMem.prevFree = node->VidMem.prevFree;
++
++ Node->VidMem.prevFree->VidMem.nextFree =
++ node->VidMem.prevFree = Node;
++
++ /* Is the next node a free node and not the sentinel? */
++ if ((Node->VidMem.next == Node->VidMem.nextFree)
++ && (Node->VidMem.next->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the next node. */
++ gcmkONERROR(_Merge(memory->os, node = Node));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++
++ /* Is the previous node a free node and not the sentinel? */
++ if ((Node->VidMem.prev == Node->VidMem.prevFree)
++ && (Node->VidMem.prev->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the previous node. */
++ gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is freed.",
++ Node);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ /* Get gckKERNEL object. */
++ kernel = Node->Virtual.kernel;
++
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
++
++ /* Get the gckOS object pointer. */
++ os = kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Grab the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ totalLocked += Node->Virtual.lockeds[i];
++ }
++
++ if (totalLocked > 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
++ "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
++ Node, totalLocked);
++
++ /* Set Flag */
++ Node->Virtual.freed = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++ else
++ {
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutexAcquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ memory->os, memory->mutex
++ ));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++#ifdef __QNXNTO__
++/*******************************************************************************
++**
++** gckVIDMEM_FreeHandleMemory
++**
++** Free all allocated video memory nodes for a handle.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gckVIDMEM Memory
++** Pointer to a gckVIDMEM object.
++**
++** gctUINT32 Pid
++** Process ID whose video memory nodes should be freed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctUINT32 Pid
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i;
++ gctUINT32 nodeCount = 0, byteCount = 0;
++ gctBOOL again;
++
++ gcmkHEADER_ARG("Kernel=0x%x, Memory=0x%x Pid=0x%u", Kernel, Memory, Pid);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ /* Bail out if the heap is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ do
++ {
++ again = gcvFALSE;
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.next)
++ {
++ /* Free the node if it was allocated by Handle. */
++ /* Free the node if it was allocated by this process (Pid). */
++ {
++ /* Unlock video memory. */
++ while (node->VidMem.locked > 0)
++ {
++ gckVIDMEM_Unlock(Kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL);
++ }
++
++ nodeCount++;
++ byteCount += node->VidMem.bytes;
++
++ /* Free video memory. */
++ gcmkVERIFY_OK(gckVIDMEM_Free(node));
++
++ /*
++ * Freeing may cause a merge which will invalidate our iteration.
++ * Don't be clever, just restart.
++ */
++ again = gcvTRUE;
++
++ break;
++ }
++#if gcdUSE_VIDMEM_PER_PID
++ else
++ {
++ gcmkASSERT(node->VidMem.processID == Pid);
++ }
++#endif
++ }
++ }
++ while (again);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** _NeedVirtualMapping
++**
++** Determine whether a GPU page table mapping needs to be set up for the
++** video node.
++**
++** INPUT:
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** gceCORE Core
++** Id of current GPU.
++**
++** OUTPUT:
++** gctBOOL * NeedMapping
++** Pointer to a variable that receives whether Node needs to be mapped.
++*/
++static gceSTATUS
++_NeedVirtualMapping(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gcuVIDMEM_NODE_PTR Node,
++ OUT gctBOOL * NeedMapping
++)
++{
++ gceSTATUS status;
++ gctUINT32 phys;
++ gctUINT32 end;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Node=0x%X", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
++
++ if (Node->Virtual.contiguous)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ *NeedMapping = gcvFALSE;
++ }
++ else
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, Node->Virtual.logical, &phys));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(phys >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ phys -= baseAddress;
++
++ /* If part of the region belongs to gcvPOOL_VIRTUAL,
++ ** the whole region has to be mapped. */
++ end = phys + Node->Virtual.bytes - 1;
++
++ gcmkONERROR(gckHARDWARE_SplitMemory(
++ Kernel->hardware, end, &pool, &offset
++ ));
++
++ *NeedMapping = (pool == gcvPOOL_VIRTUAL);
++ }
++ }
++ else
++ {
++ *NeedMapping = gcvTRUE;
++ }
++
++ gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Lock
++**
++** Lock a video memory node and return its hardware specific address.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** gctBOOL Cacheable
++** gcvTRUE to request a cacheable mapping; only honored for virtual
++** memory nodes (rejected for nodes in a gcvOBJ_VIDMEM pool).
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that will hold the hardware specific address.
++*/
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL locked = gcvFALSE;
++ gckOS os = gcvNULL;
++ gctBOOL needMapping;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Cacheable == gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Increment the lock count. */
++ Node->VidMem.locked ++;
++
++ /* Return the physical address of the node. */
++#if !gcdUSE_VIDMEM_PER_PID
++ *Address = Node->VidMem.memory->baseAddress
++ + Node->VidMem.offset
++ + Node->VidMem.alignment;
++#else
++ *Address = Node->VidMem.physical;
++#endif
++
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Convert physical to GPU address for old mmu. */
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ gcmkASSERT(*Address > baseAddress);
++ *Address -= baseAddress;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Locked node 0x%x (%d) @ 0x%08X",
++ Node,
++ Node->VidMem.locked,
++ *Address);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++ /* Extract the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdPAGED_MEMORY_CACHEABLE
++ /* Force video memory cacheable. */
++ Cacheable = gcvTRUE;
++#endif
++
++ gcmkONERROR(
++ gckOS_LockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Cacheable,
++ &Node->Virtual.logical,
++ &Node->Virtual.pageCount));
++
++ /* Increment the lock count. */
++ if (Node->Virtual.lockeds[Kernel->core] ++ == 0)
++ {
++ /* Is this node pending for a final unlock? */
++#ifdef __QNXNTO__
++ if (!Node->Virtual.contiguous && Node->Virtual.unlockPendings[Kernel->core])
++ {
++ /* Make sure we have a page table. */
++ gcmkASSERT(Node->Virtual.pageTables[Kernel->core] != gcvNULL);
++
++ /* Remove pending unlock. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++ }
++
++ /* First lock - create a page table. */
++ gcmkASSERT(Node->Virtual.pageTables[Kernel->core] == gcvNULL);
++
++ /* Make sure we mark our node as not flushed. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++#endif
++
++ locked = gcvTRUE;
++
++ gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, Node, &needMapping));
++
++ if (needMapping == gcvFALSE)
++ {
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(gckVGHARDWARE_ConvertLogical(Kernel->vg->hardware,
++ Node->Virtual.logical,
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Kernel->hardware,
++ Node->Virtual.logical,
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ }
++ else
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckVGMMU_AllocatePages(Kernel->vg->mmu,
++ Node->Virtual.pageCount,
++ &Node->Virtual.pageTables[Kernel->core],
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckMMU_AllocatePagesEx(Kernel->mmu,
++ Node->Virtual.pageCount,
++ Node->Virtual.type,
++ &Node->Virtual.pageTables[Kernel->core],
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++
++ Node->Virtual.lockKernels[Kernel->core] = Kernel;
++
++ /* Map the pages. */
++#ifdef __QNXNTO__
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ Node->Virtual.physical,
++ Node->Virtual.logical,
++ Node->Virtual.pageCount,
++ Node->Virtual.pageTables[Kernel->core]));
++#else
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ Node->Virtual.physical,
++ Node->Virtual.pageCount,
++ Node->Virtual.pageTables[Kernel->core]));
++#endif
++
++#if gcdENABLE_VG
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu));
++ }
++ }
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Mapped virtual node 0x%x to 0x%08X",
++ Node,
++ Node->Virtual.addresses[Kernel->core]);
++ }
++
++ /* Return hardware address. */
++ *Address = Node->Virtual.addresses[Kernel->core];
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ Node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++ /* Unlock the pages. */
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Node->Virtual.logical
++ ));
++
++ Node->Virtual.lockeds[Kernel->core]--;
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Unlock
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** Pointer to a gckKERNEL object.
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a locked gcuVIDMEM_NODE union.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable specifying whether the surface should be
++** unlocked asynchronously or not. If gcvNULL, the unlock completes
++** synchronously.
++**
++** OUTPUT:
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable that receives gcvTRUE when the caller still needs
++** to schedule an event to finish the unlock asynchronously, or gcvFALSE
++** when no event is required.
++*/
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctPOINTER buffer;
++ gctSIZE_T requested, bufferSize;
++ gckCOMMAND command = gcvNULL;
++ gceKERNEL_FLUSH flush;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL commitEntered = gcvFALSE;
++ gctINT32 i, totalLocked;
++
++ gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
++ Node, Type, gcmOPT_VALUE(Asynchroneous));
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Node->VidMem.locked <= 0)
++ {
++ /* The surface was not locked. */
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement the lock count. */
++ Node->VidMem.locked --;
++
++ if (Asynchroneous != gcvNULL)
++ {
++ /* No need for any events. */
++ *Asynchroneous = gcvFALSE;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unlocked node 0x%x (%d)",
++ Node,
++ Node->VidMem.locked);
++
++#ifdef __QNXNTO__
++ /* Unmap the video memory */
++ if ((Node->VidMem.locked == 0) && (Node->VidMem.logical != gcvNULL))
++ {
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ Node->VidMem.logical,
++ Node->VidMem.processID,
++ Node->VidMem.bytes);
++ Node->VidMem.logical = gcvNULL;
++ }
++ }
++#endif /* __QNXNTO__ */
++
++ if (Node->VidMem.freePending && (Node->VidMem.locked == 0))
++ {
++ /* The client has unlocked a node whose free was previously deferred. Free it now. */
++ Node->VidMem.freePending = gcvFALSE;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Deferred-freeing Node 0x%x.",
++ Node);
++ gcmkONERROR(gckVIDMEM_Free(Node));
++ }
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++ /* Verify the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ /* Get the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Grab the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ if (Asynchroneous == gcvNULL)
++ {
++ if (Node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement lock count. */
++ -- Node->Virtual.lockeds[Kernel->core];
++
++ /* See if we can unlock the resources. */
++ if (Node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ /* Free the page table. */
++ if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_FreePages(Kernel->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ /* Mark page table as freed. */
++ Node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++#ifdef __QNXNTO__
++ /* Mark node as unlocked. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++#endif
++ }
++
++ for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ totalLocked += Node->Virtual.lockeds[i];
++ }
++
++ if (totalLocked == 0)
++ {
++ /* The owner has already freed this node
++ ** and we are the last one to unlock it, so
++ ** do the real free. */
++ if (Node->Virtual.freed)
++ {
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(Kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ /* Release mutex before node is destroyed */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++
++ acquired = gcvFALSE;
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++
++ /* Node has been destroyed, so we should not touch it any more */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unmapped virtual node 0x%x from 0x%08X",
++ Node, Node->Virtual.addresses[Kernel->core]);
++
++ }
++
++ else
++ {
++ /* If we need to unlock a node from virtual memory we have to be
++ ** very careful. If the node is still inside the caches we
++ ** might get a bus error later if the cache line needs to be
++ ** replaced. So we have to flush the caches before we do
++ ** anything. */
++
++ /* gckCOMMAND_EnterCommit() can't be called from the interrupt handler because
++ ** of a deadlock situation:
++ ** a process calls gckCOMMAND_Commit() and acquires Command->mutexQueue in
++ ** gckCOMMAND_EnterCommit(), then waits for a signal that the interrupt
++ ** handler has to generate; if the interrupt handler entered
++ ** gckCOMMAND_EnterCommit(), the process would never get the signal. */
++
++ /* So, flush the caches while we are still in process context, and then ask
++ ** the caller to schedule an event. */
++
++ gcmkONERROR(
++ gckOS_UnlockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Node->Virtual.logical));
++
++ if (!Node->Virtual.contiguous
++ && (Node->Virtual.lockeds[Kernel->core] == 1)
++#if gcdENABLE_VG
++ && (Kernel->vg == gcvNULL)
++#endif
++ )
++ {
++ if (Type == gcvSURF_BITMAP)
++ {
++ /* Flush 2D cache. */
++ flush = gcvFLUSH_2D;
++ }
++ else if (Type == gcvSURF_RENDER_TARGET)
++ {
++ /* Flush color cache. */
++ flush = gcvFLUSH_COLOR;
++ }
++ else if (Type == gcvSURF_DEPTH)
++ {
++ /* Flush depth cache. */
++ flush = gcvFLUSH_DEPTH;
++ }
++ else
++ {
++ /* No flush required. */
++ flush = (gceKERNEL_FLUSH) 0;
++ }
++ if(hardware)
++ {
++ gcmkONERROR(
++ gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));
++
++ if (requested != 0)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, requested, &buffer, &bufferSize
++ ));
++
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware, flush, buffer, &bufferSize
++ ));
++
++ /* Mark node as pending. */
++#ifdef __QNXNTO__
++ Node->Virtual.unlockPendings[Kernel->core] = gcvTRUE;
++#endif
++
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ commitEntered = gcvFALSE;
++ }
++ }
++ else
++ {
++ gckOS_Print("Hardware is already freed.\n");
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Scheduled unlock for virtual node 0x%x",
++ Node);
++
++ /* Schedule the surface to be unlocked. */
++ *Asynchroneous = gcvTRUE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++
++ acquired = gcvFALSE;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
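++
++/* Usage sketch (hypothetical; "kernel" is a gckKERNEL object and "node" an
++** allocated gcuVIDMEM_NODE_PTR): lock the node to obtain its GPU address,
++** program the hardware with it, then unlock. When *Asynchroneous comes back
++** gcvTRUE the caller still has to schedule an event so the second, deferred
++** stage of the unlock runs later.
++**
++**     gctUINT32 address;
++**     gctBOOL asynchroneous = gcvFALSE;
++**
++**     status = gckVIDMEM_Lock(kernel, node, gcvFALSE, &address);
++**     ... program the hardware with 'address' and wait for it ...
++**     status = gckVIDMEM_Unlock(kernel, node, gcvSURF_BITMAP, &asynchroneous);
++**     if (asynchroneous)
++**     {
++**         ... schedule an unlock event through the kernel event queue ...
++**     }
++*/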
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_base.h 2015-05-01 14:57:59.535427001 -0500
+@@ -0,0 +1,3896 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_base_h_
++#define __gc_hal_base_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#include "gc_hal_dump.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckOS * gckOS;
++typedef struct _gcoHAL * gcoHAL;
++typedef struct _gcoOS * gcoOS;
++typedef struct _gco2D * gco2D;
++
++#ifndef VIVANTE_NO_3D
++typedef struct _gco3D * gco3D;
++#endif
++
++typedef struct _gcoSURF * gcoSURF;
++typedef struct _gcsSURF_INFO * gcsSURF_INFO_PTR;
++typedef struct _gcsSURF_NODE * gcsSURF_NODE_PTR;
++typedef struct _gcsSURF_FORMAT_INFO * gcsSURF_FORMAT_INFO_PTR;
++typedef struct _gcsPOINT * gcsPOINT_PTR;
++typedef struct _gcsSIZE * gcsSIZE_PTR;
++typedef struct _gcsRECT * gcsRECT_PTR;
++typedef struct _gcsBOUNDARY * gcsBOUNDARY_PTR;
++typedef struct _gcoDUMP * gcoDUMP;
++typedef struct _gcoHARDWARE * gcoHARDWARE;
++typedef union _gcuVIDMEM_NODE * gcuVIDMEM_NODE_PTR;
++
++typedef struct gcsATOM * gcsATOM_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVG * gcoVG;
++typedef struct _gcsCOMPLETION_SIGNAL * gcsCOMPLETION_SIGNAL_PTR;
++typedef struct _gcsCONTEXT_MAP * gcsCONTEXT_MAP_PTR;
++#else
++typedef void * gcoVG;
++#endif
++
++#if gcdSYNC
++typedef struct _gcoFENCE * gcoFENCE;
++typedef struct _gcsSYNC_CONTEXT * gcsSYNC_CONTEXT_PTR;
++#endif
++
++typedef struct _gcoOS_SymbolsList gcoOS_SymbolsList;
++
++/******************************************************************************\
++******************************* Process local storage *************************
++\******************************************************************************/
++typedef struct _gcsPLS * gcsPLS_PTR;
++
++typedef void (* gctPLS_DESTRUCTOR) (
++ gcsPLS_PTR
++ );
++
++typedef struct _gcsPLS
++{
++ /* Global objects. */
++ gcoOS os;
++ gcoHAL hal;
++
++ /* Internal memory pool. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctPOINTER internalLogical;
++
++ /* External memory pool. */
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctPOINTER externalLogical;
++
++ /* Contiguous memory pool. */
++ gctSIZE_T contiguousSize;
++ gctPHYS_ADDR contiguousPhysical;
++ gctPOINTER contiguousLogical;
++
++ /* EGL-specific process-wide objects. */
++ gctPOINTER eglDisplayInfo;
++ gctPOINTER eglSurfaceInfo;
++ gceSURF_FORMAT eglConfigFormat;
++
++ /* ProcessID of the constructor process. */
++ gctUINT32 processID;
++#if gcdFORCE_GAL_LOAD_TWICE
++ /* ThreadID of the constructor process. */
++ gctSIZE_T threadID;
++ /* Flag for calling module destructor. */
++ gctBOOL exiting;
++#endif
++
++ /* Reference count for destructor. */
++ gcsATOM_PTR reference;
++ gctBOOL bKFS;
++#if gcdUSE_NPOT_PATCH
++ gctBOOL bNeedSupportNP2Texture;
++#endif
++
++ /* Destructor for eglDisplayInfo. */
++ gctPLS_DESTRUCTOR destructor;
++}
++gcsPLS;
++
++extern gcsPLS gcPLS;
++
++/******************************************************************************\
++******************************* Thread local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsTLS * gcsTLS_PTR;
++
++typedef void (* gctTLS_DESTRUCTOR) (
++ gcsTLS_PTR
++ );
++
++typedef struct _gcsTLS
++{
++ gceHARDWARE_TYPE currentType;
++ gcoHARDWARE hardware;
++ /* Only for separated 3D and 2D */
++ gcoHARDWARE hardware2D;
++#if gcdENABLE_VG
++ gcoVGHARDWARE vg;
++ gcoVG engineVG;
++#endif /* gcdENABLE_VG */
++ gctPOINTER context;
++ gctTLS_DESTRUCTOR destructor;
++ gctBOOL ProcessExiting;
++
++#ifndef VIVANTE_NO_3D
++ gco3D engine3D;
++#endif
++#if gcdSYNC
++ gctBOOL fenceEnable;
++#endif
++ gco2D engine2D;
++ gctBOOL copied;
++
++#if gcdFORCE_GAL_LOAD_TWICE
++ /* libGAL.so handle */
++ gctHANDLE handle;
++#endif
++}
++gcsTLS;
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++typedef enum _gcePLS_VALUE
++{
++ gcePLS_VALUE_EGL_DISPLAY_INFO,
++ gcePLS_VALUE_EGL_SURFACE_INFO,
++ gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO,
++ gcePLS_VALUE_EGL_DESTRUCTOR_INFO,
++}
++gcePLS_VALUE;
++
++/* Video memory pool type. */
++typedef enum _gcePOOL
++{
++ gcvPOOL_UNKNOWN = 0,
++ gcvPOOL_DEFAULT,
++ gcvPOOL_LOCAL,
++ gcvPOOL_LOCAL_INTERNAL,
++ gcvPOOL_LOCAL_EXTERNAL,
++ gcvPOOL_UNIFIED,
++ gcvPOOL_SYSTEM,
++ gcvPOOL_VIRTUAL,
++ gcvPOOL_USER,
++ gcvPOOL_CONTIGUOUS,
++ gcvPOOL_DEFAULT_FORCE_CONTIGUOUS,
++ gcvPOOL_DEFAULT_FORCE_CONTIGUOUS_CACHEABLE,
++
++ gcvPOOL_NUMBER_OF_POOLS
++}
++gcePOOL;
++
++#ifndef VIVANTE_NO_3D
++/* Blending functions. */
++typedef enum _gceBLEND_FUNCTION
++{
++ gcvBLEND_ZERO,
++ gcvBLEND_ONE,
++ gcvBLEND_SOURCE_COLOR,
++ gcvBLEND_INV_SOURCE_COLOR,
++ gcvBLEND_SOURCE_ALPHA,
++ gcvBLEND_INV_SOURCE_ALPHA,
++ gcvBLEND_TARGET_COLOR,
++ gcvBLEND_INV_TARGET_COLOR,
++ gcvBLEND_TARGET_ALPHA,
++ gcvBLEND_INV_TARGET_ALPHA,
++ gcvBLEND_SOURCE_ALPHA_SATURATE,
++ gcvBLEND_CONST_COLOR,
++ gcvBLEND_INV_CONST_COLOR,
++ gcvBLEND_CONST_ALPHA,
++ gcvBLEND_INV_CONST_ALPHA,
++}
++gceBLEND_FUNCTION;
++
++/* Blending modes. */
++typedef enum _gceBLEND_MODE
++{
++ gcvBLEND_ADD,
++ gcvBLEND_SUBTRACT,
++ gcvBLEND_REVERSE_SUBTRACT,
++ gcvBLEND_MIN,
++ gcvBLEND_MAX,
++}
++gceBLEND_MODE;
++
++/* API flags. */
++typedef enum _gceAPI
++{
++ gcvAPI_D3D = 0x1,
++ gcvAPI_OPENGL = 0x2,
++ gcvAPI_OPENVG = 0x3,
++ gcvAPI_OPENCL = 0x4,
++}
++gceAPI;
++
++/* Depth modes. */
++typedef enum _gceDEPTH_MODE
++{
++ gcvDEPTH_NONE,
++ gcvDEPTH_Z,
++ gcvDEPTH_W,
++}
++gceDEPTH_MODE;
++#endif /* VIVANTE_NO_3D */
++
++typedef enum _gceWHERE
++{
++ gcvWHERE_COMMAND,
++ gcvWHERE_RASTER,
++ gcvWHERE_PIXEL,
++}
++gceWHERE;
++
++typedef enum _gceHOW
++{
++ gcvHOW_SEMAPHORE = 0x1,
++ gcvHOW_STALL = 0x2,
++ gcvHOW_SEMAPHORE_STALL = 0x3,
++}
++gceHOW;
++
++typedef enum _gceSignalHandlerType
++{
++ gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1,
++}
++gceSignalHandlerType;
++
++
++#if gcdENABLE_VG
++/* gcsHAL_Limits*/
++typedef struct _gcsHAL_LIMITS
++{
++ /* chip info */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 featureCount;
++ gctUINT32 *chipFeatures;
++
++ /* target caps */
++ gctUINT32 maxWidth;
++ gctUINT32 maxHeight;
++ gctUINT32 multiTargetCount;
++ gctUINT32 maxSamples;
++
++}gcsHAL_LIMITS;
++#endif
++
++/******************************************************************************\
++*********** Generic Memory Allocation Optimization Using Containers ************
++\******************************************************************************/
++
++/* Generic container definition. */
++typedef struct _gcsCONTAINER_LINK * gcsCONTAINER_LINK_PTR;
++typedef struct _gcsCONTAINER_LINK
++{
++ /* Points to the next container. */
++ gcsCONTAINER_LINK_PTR next;
++}
++gcsCONTAINER_LINK;
++
++typedef struct _gcsCONTAINER_RECORD * gcsCONTAINER_RECORD_PTR;
++typedef struct _gcsCONTAINER_RECORD
++{
++ gcsCONTAINER_RECORD_PTR prev;
++ gcsCONTAINER_RECORD_PTR next;
++}
++gcsCONTAINER_RECORD;
++
++typedef struct _gcsCONTAINER * gcsCONTAINER_PTR;
++typedef struct _gcsCONTAINER
++{
++ gctUINT containerSize;
++ gctUINT recordSize;
++ gctUINT recordCount;
++ gcsCONTAINER_LINK_PTR containers;
++ gcsCONTAINER_RECORD freeList;
++ gcsCONTAINER_RECORD allocList;
++}
++gcsCONTAINER;
++
++gceSTATUS
++gcsCONTAINER_Construct(
++ IN gcsCONTAINER_PTR Container,
++ gctUINT RecordsPerContainer,
++ gctUINT RecordSize
++ );
++
++gceSTATUS
++gcsCONTAINER_Destroy(
++ IN gcsCONTAINER_PTR Container
++ );
++
++gceSTATUS
++gcsCONTAINER_AllocateRecord(
++ IN gcsCONTAINER_PTR Container,
++ OUT gctPOINTER * Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeRecord(
++ IN gcsCONTAINER_PTR Container,
++ IN gctPOINTER Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeAll(
++ IN gcsCONTAINER_PTR Container
++ );
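++
++/* Usage sketch (hypothetical record type "gcsMY_RECORD"): construct a
++** container that hands out fixed-size records in batches of 32, allocate one
++** record, then tear everything down again.
++**
++**     gcsCONTAINER container;
++**     gcsMY_RECORD * record;
++**
++**     status = gcsCONTAINER_Construct(&container, 32, gcmSIZEOF(gcsMY_RECORD));
++**     status = gcsCONTAINER_AllocateRecord(&container, (gctPOINTER *) &record);
++**     ... fill in the record ...
++**     status = gcsCONTAINER_FreeRecord(&container, record);
++**     status = gcsCONTAINER_Destroy(&container);
++*/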
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Construct a new gcoHAL object. */
++gceSTATUS
++gcoHAL_Construct(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Destroy an gcoHAL object. */
++gceSTATUS
++gcoHAL_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Get pointer to gco2D object. */
++gceSTATUS
++gcoHAL_Get2DEngine(
++ IN gcoHAL Hal,
++ OUT gco2D * Engine
++ );
++
++gceSTATUS
++gcoHAL_SetFscaleValue(
++ IN gctUINT FscaleValue
++ );
++
++gceSTATUS
++gcoHAL_GetFscaleValue(
++ OUT gctUINT * FscaleValue,
++ OUT gctUINT * MinFscaleValue,
++ OUT gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gcoHAL_SetBltNP2Texture(
++ gctBOOL enable
++ );
++
++#ifndef VIVANTE_NO_3D
++/* Get pointer to gco3D object. */
++gceSTATUS
++gcoHAL_Get3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++gceSTATUS
++gcoHAL_Query3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++gceSTATUS
++gcoHAL_Set3DEngine(
++ IN gcoHAL Hal,
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gcoHAL_Get3DHardware(
++ IN gcoHAL Hal,
++ OUT gcoHARDWARE * Hardware
++ );
++
++gceSTATUS
++gcoHAL_Set3DHardware(
++ IN gcoHAL Hal,
++ IN gcoHARDWARE Hardware
++ );
++
++
++#endif /* VIVANTE_NO_3D */
++
++/* Verify whether the specified feature is available in hardware. */
++gceSTATUS
++gcoHAL_IsFeatureAvailable(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gcoHAL_QueryChipIdentity(
++ IN gcoHAL Hal,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the minor features of the hardware. */
++gceSTATUS gcoHAL_QueryChipMinorFeatures(
++ IN gcoHAL Hal,
++ OUT gctUINT32* NumFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the amount of video memory. */
++gceSTATUS
++gcoHAL_QueryVideoMemory(
++ IN gcoHAL Hal,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Map video memory. */
++gceSTATUS
++gcoHAL_MapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap video memory. */
++gceSTATUS
++gcoHAL_UnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Schedule an unmap of a buffer mapped through its physical address. */
++gceSTATUS
++gcoHAL_ScheduleUnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoHAL_MapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR GPUAddress
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoHAL_UnmapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 GPUAddress
++ );
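++
++/* Usage sketch (hypothetical; "buffer" is a user allocation of "size" bytes,
++** and passing ~0U as the physical address assumes the buffer has no known
++** physical address, so the driver resolves the pages itself): map the user
++** pages for GPU access and unmap them again when the GPU is done with them.
++**
++**     gctPOINTER info;
++**     gctUINT32 gpuAddress;
++**
++**     status = gcoHAL_MapUserMemory(buffer, ~0U, size, &info, &gpuAddress);
++**     ... submit work that reads or writes 'gpuAddress' and wait for it ...
++**     status = gcoHAL_UnmapUserMemory(buffer, size, info, gpuAddress);
++*/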
++
++/* Schedule an unmap of a user buffer using event mechanism. */
++gceSTATUS
++gcoHAL_ScheduleUnmapUserMemory(
++ IN gcoHAL Hal,
++ IN gctPOINTER Info,
++ IN gctSIZE_T Size,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++/* Commit the current command buffer. */
++gceSTATUS
++gcoHAL_Commit(
++ IN gcoHAL Hal,
++ IN gctBOOL Stall
++ );
++
++/* Query the tile capabilities. */
++gceSTATUS
++gcoHAL_QueryTiled(
++ IN gcoHAL Hal,
++ OUT gctINT32 * TileWidth2D,
++ OUT gctINT32 * TileHeight2D,
++ OUT gctINT32 * TileWidth3D,
++ OUT gctINT32 * TileHeight3D
++ );
++
++gceSTATUS
++gcoHAL_Compact(
++ IN gcoHAL Hal
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoHAL_ProfileStart(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_ProfileEnd(
++ IN gcoHAL Hal,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++/* Power Management */
++gceSTATUS
++gcoHAL_SetPowerManagementState(
++ IN gcoHAL Hal,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gcoHAL_QueryPowerManagementState(
++ IN gcoHAL Hal,
++ OUT gceCHIPPOWERSTATE *State
++ );
++
++/* Set the filter type for filter blit. */
++gceSTATUS
++gcoHAL_SetFilterType(
++ IN gcoHAL Hal,
++ IN gceFILTER_TYPE FilterType
++ );
++
++gceSTATUS
++gcoHAL_GetDump(
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Call the kernel HAL layer. */
++gceSTATUS
++gcoHAL_Call(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++gceSTATUS
++gcoHAL_GetPatchID(
++ IN gcoHAL Hal,
++ OUT gcePATCH_ID * PatchID
++ );
++
++/* Schedule an event. */
++gceSTATUS
++gcoHAL_ScheduleEvent(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Destroy a surface. */
++gceSTATUS
++gcoHAL_DestroySurface(
++ IN gcoHAL Hal,
++ IN gcoSURF Surface
++ );
++
++/* Request a start/stop timestamp. */
++gceSTATUS
++gcoHAL_SetTimer(
++ IN gcoHAL Hal,
++ IN gctUINT32 Index,
++ IN gctBOOL Start
++ );
++
++/* Get Time delta from a Timer in microseconds. */
++gceSTATUS
++gcoHAL_GetTimerTime(
++ IN gcoHAL Hal,
++ IN gctUINT32 Timer,
++ OUT gctINT32_PTR TimeDelta
++ );
++
++/* set timeout value. */
++gceSTATUS
++gcoHAL_SetTimeOut(
++ IN gcoHAL Hal,
++ IN gctUINT32 timeOut
++ );
++
++gceSTATUS
++gcoHAL_SetHardwareType(
++ IN gcoHAL Hal,
++ IN gceHARDWARE_TYPE HardwardType
++ );
++
++gceSTATUS
++gcoHAL_GetHardwareType(
++ IN gcoHAL Hal,
++ OUT gceHARDWARE_TYPE * HardwardType
++ );
++
++gceSTATUS
++gcoHAL_QueryChipCount(
++ IN gcoHAL Hal,
++ OUT gctINT32 * Count
++ );
++
++gceSTATUS
++gcoHAL_QuerySeparated3D2D(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_QuerySpecialHint(
++ IN gceSPECIAL_HINT Hint
++ );
++
++gceSTATUS
++gcoHAL_SetSpecialHintData(
++ IN gcoHARDWARE Hardware
++ );
++
++/* Get pointer to gcoVG object. */
++gceSTATUS
++gcoHAL_GetVGEngine(
++ IN gcoHAL Hal,
++ OUT gcoVG * Engine
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gcoHAL_QueryChipLimits(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ OUT gcsHAL_LIMITS *Limits);
++
++gceSTATUS
++gcoHAL_QueryChipFeature(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gceFEATURE Feature);
++
++#endif
++/******************************************************************************\
++********************************** gcoOS Object *********************************
++\******************************************************************************/
++
++/* Get PLS value for given key */
++gctPOINTER
++gcoOS_GetPLSValue(
++ IN gcePLS_VALUE key
++ );
++
++/* Set PLS value of a given key */
++void
++gcoOS_SetPLSValue(
++ IN gcePLS_VALUE key,
++ OUT gctPOINTER value
++ );
++
++/* Get access to the thread local storage. */
++gceSTATUS
++gcoOS_GetTLS(
++ OUT gcsTLS_PTR * TLS
++ );
++
++/* Copy the TLS from a source thread. */
++gceSTATUS
++gcoOS_CopyTLS(
++ IN gcsTLS_PTR Source
++ );
++
++/* Destroy the objects associated with the current thread. */
++void
++gcoOS_FreeThreadData(
++ IN gctBOOL ProcessExiting
++ );
++
++/* Construct a new gcoOS object. */
++gceSTATUS
++gcoOS_Construct(
++ IN gctPOINTER Context,
++ OUT gcoOS * Os
++ );
++
++/* Destroy an gcoOS object. */
++gceSTATUS
++gcoOS_Destroy(
++ IN gcoOS Os
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gcoOS_GetBaseAddress(
++ IN gcoOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gcoOS_Allocate(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Get allocated memory size. */
++gceSTATUS
++gcoOS_GetMemorySize(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gcoOS_Free(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gcoOS_AllocateContiguous(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gcoOS_FreeContiguous(
++ IN gcoOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate video memory. */
++gceSTATUS
++gcoOS_AllocateVideoMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical,
++ OUT gctPOINTER * Handle
++ );
++
++/* Free video memory. */
++gceSTATUS
++gcoOS_FreeVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle
++ );
++
++gceSTATUS
++gcoSURF_GetBankOffsetBytes(
++ IN gcoSURF Surfce,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Stride,
++ IN gctUINT32_PTR Bytes
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemoryEx(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoOS_UnmapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/* Device I/O Control call to the kernel HAL layer. */
++gceSTATUS
++gcoOS_DeviceControl(
++ IN gcoOS Os,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ IN gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/* Allocate non paged memory. */
++gceSTATUS
++gcoOS_AllocateNonPagedMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non paged memory. */
++gceSTATUS
++gcoOS_FreeNonPagedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++#define gcmOS_SAFE_FREE(os, mem) \
++ gcoOS_Free(os, mem); \
++ mem = gcvNULL
++
++#define gcmkOS_SAFE_FREE(os, mem) \
++ gckOS_Free(os, mem); \
++ mem = gcvNULL
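++
++/* Note that both macros expand to two statements, so when one is used as the
++** body of an "if" or "else" it has to be wrapped in braces, for example:
++**
++**     if (pointer != gcvNULL)
++**     {
++**         gcmOS_SAFE_FREE(os, pointer);
++**     }
++*/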
++
++typedef enum _gceFILE_MODE
++{
++ gcvFILE_CREATE = 0,
++ gcvFILE_APPEND,
++ gcvFILE_READ,
++ gcvFILE_CREATETEXT,
++ gcvFILE_APPENDTEXT,
++ gcvFILE_READTEXT,
++}
++gceFILE_MODE;
++
++/* Open a file. */
++gceSTATUS
++gcoOS_Open(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ IN gceFILE_MODE Mode,
++ OUT gctFILE * File
++ );
++
++/* Close a file. */
++gceSTATUS
++gcoOS_Close(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Read data from a file. */
++gceSTATUS
++gcoOS_Read(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T * ByteRead
++ );
++
++/* Write data to a file. */
++gceSTATUS
++gcoOS_Write(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data to a file. */
++gceSTATUS
++gcoOS_Flush(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
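++
++/* Usage sketch (hypothetical; "os" is the gcoOS object from gcoOS_Construct(),
++** and "/tmp/gal_dump.bin", "data" and "bytes" are placeholder values): create
++** a file, write a buffer to it and close it again.
++**
++**     gctFILE file;
++**
++**     if (gcoOS_Open(os, "/tmp/gal_dump.bin", gcvFILE_CREATE, &file) == gcvSTATUS_OK)
++**     {
++**         gcoOS_Write(os, file, bytes, data);
++**         gcoOS_Close(os, file);
++**     }
++*/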
++
++/* Close a file descriptor. */
++gceSTATUS
++gcoOS_CloseFD(
++ IN gcoOS Os,
++ IN gctINT FD
++ );
++
++/* Dup file descriptor to another. */
++gceSTATUS
++gcoOS_DupFD(
++ IN gcoOS Os,
++ IN gctINT FD,
++ OUT gctINT * FD2
++ );
++
++/* Create an endpoint for communication. */
++gceSTATUS
++gcoOS_Socket(
++ IN gcoOS Os,
++ IN gctINT Domain,
++ IN gctINT Type,
++ IN gctINT Protocol,
++ OUT gctINT *SockFd
++ );
++
++/* Close a socket. */
++gceSTATUS
++gcoOS_CloseSocket(
++ IN gcoOS Os,
++ IN gctINT SockFd
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_Connect(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctCONST_POINTER HostName,
++ IN gctUINT Port);
++
++/* Shut down part of connection on a socket. */
++gceSTATUS
++gcoOS_Shutdown(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT How
++ );
++
++/* Send a message on a socket. */
++gceSTATUS
++gcoOS_Send(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data,
++ IN gctINT Flags
++ );
++
++/* Wait until the socket is ready for sending. */
++gceSTATUS
++gcoOS_WaitForSend(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT Seconds,
++ IN gctINT MicroSeconds);
++
++/* Get environment variable value. */
++gceSTATUS
++gcoOS_GetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ OUT gctSTRING * Value
++ );
++
++/* Set environment variable value. */
++gceSTATUS
++gcoOS_SetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ IN gctSTRING Value
++ );
++
++/* Get current working directory. */
++gceSTATUS
++gcoOS_GetCwd(
++ IN gcoOS Os,
++ IN gctINT SizeInBytes,
++ OUT gctSTRING Buffer
++ );
++
++/* Get file status info. */
++gceSTATUS
++gcoOS_Stat(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ OUT gctPOINTER Buffer
++ );
++
++typedef enum _gceFILE_WHENCE
++{
++ gcvFILE_SEEK_SET,
++ gcvFILE_SEEK_CUR,
++ gcvFILE_SEEK_END
++}
++gceFILE_WHENCE;
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_Seek(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Offset,
++ IN gceFILE_WHENCE Whence
++ );
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_SetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Position
++ );
++
++/* Get the current position of a file. */
++gceSTATUS
++gcoOS_GetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ OUT gctUINT32 * Position
++ );
++
++/* Same as strstr. */
++gceSTATUS
++gcoOS_StrStr(
++ IN gctCONST_STRING String,
++ IN gctCONST_STRING SubString,
++ OUT gctSTRING * Output
++ );
++
++/* Find the last occurrence of a character inside a string. */
++gceSTATUS
++gcoOS_StrFindReverse(
++ IN gctCONST_STRING String,
++ IN gctINT8 Character,
++ OUT gctSTRING * Output
++ );
++
++gceSTATUS
++gcoOS_StrDup(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ OUT gctSTRING * Target
++ );
++
++/* Copy a string. */
++gceSTATUS
++gcoOS_StrCopySafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Append a string. */
++gceSTATUS
++gcoOS_StrCatSafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Compare two strings. */
++gceSTATUS
++gcoOS_StrCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2
++ );
++
++/* Compare characters of two strings. */
++gceSTATUS
++gcoOS_StrNCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2,
++ IN gctSIZE_T Count
++ );
++
++/* Convert string to float. */
++gceSTATUS
++gcoOS_StrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert hex string to integer. */
++gceSTATUS
++gcoOS_HexStrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++/* Convert hex string to float. */
++gceSTATUS
++gcoOS_HexStrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert string to integer. */
++gceSTATUS
++gcoOS_StrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++gceSTATUS
++gcoOS_MemCmp(
++ IN gctCONST_POINTER Memory1,
++ IN gctCONST_POINTER Memory2,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_PrintStrSafe(
++ OUT gctSTRING String,
++ IN gctSIZE_T StringSize,
++ IN OUT gctUINT * Offset,
++ IN gctCONST_STRING Format,
++ ...
++ );
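++
++/* Illustrative sketch of the safe string helpers above (hedged; `width` and
++ * `height` are assumed variables): format into a fixed buffer without
++ * overrunning it, then append to the result.
++ *
++ *     gctCHAR buffer[64];
++ *     gctUINT offset = 0;
++ *
++ *     gcoOS_PrintStrSafe(buffer, sizeof(buffer), &offset, "%d x %d", width, height);
++ *     gcoOS_StrCatSafe(buffer, sizeof(buffer), " pixels");
++ */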
++
++gceSTATUS
++gcoOS_LoadLibrary(
++ IN gcoOS Os,
++ IN gctCONST_STRING Library,
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeLibrary(
++ IN gcoOS Os,
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_GetProcAddress(
++ IN gcoOS Os,
++ IN gctHANDLE Handle,
++ IN gctCONST_STRING Name,
++ OUT gctPOINTER * Function
++ );
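++
++/* Illustrative sketch (an assumption, not from the original docs; the
++ * library and symbol names are hypothetical): resolve a symbol from a
++ * shared library at run time, assuming a valid gcoOS object `os`.
++ *
++ *     gctHANDLE library = gcvNULL;
++ *     gctPOINTER entry = gcvNULL;
++ *
++ *     if (gcoOS_LoadLibrary(os, "libexample.so", &library) == gcvSTATUS_OK)
++ *     {
++ *         gcoOS_GetProcAddress(os, library, "example_entry", &entry);
++ *         gcoOS_FreeLibrary(os, library);
++ *     }
++ */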
++
++gceSTATUS
++gcoOS_Compact(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_AddSignalHandler (
++ IN gceSignalHandlerType SignalHandlerType
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoOS_ProfileStart(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_ProfileEnd(
++ IN gcoOS Os,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_SetProfileSetting(
++ IN gcoOS Os,
++ IN gctBOOL Enable,
++ IN gctCONST_STRING FileName
++ );
++#endif
++
++gctBOOL
++gcoOS_IsNeededSupportNP2Texture(
++ IN gctCHAR* ProcName
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gcoOS_QueryVideoMemory(
++ IN gcoOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Detect if the process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByNamePid(
++ IN gctCONST_STRING Name,
++ IN gctHANDLE Pid
++ );
++
++/* Detect if the current process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByName(
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gcoOS_DetectProcessByEncryptedName(
++ IN gctCONST_STRING Name
++ );
++
++#if defined(ANDROID)
++gceSTATUS
++gcoOS_DetectProgrameByEncryptedSymbols(
++ IN gcoOS_SymbolsList Symbols
++ );
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*----- Atoms ----------------------------------------------------------------*/
++
++/* Construct an atom. */
++gceSTATUS
++gcoOS_AtomConstruct(
++ IN gcoOS Os,
++ OUT gcsATOM_PTR * Atom
++ );
++
++/* Destroy an atom. */
++gceSTATUS
++gcoOS_AtomDestroy(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom
++ );
++
++/* Increment an atom. */
++gceSTATUS
++gcoOS_AtomIncrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++/* Decrement an atom. */
++gceSTATUS
++gcoOS_AtomDecrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
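++
++/* Illustrative sketch of the atom API above (hedged; assumes a valid gcoOS
++ * object `os`): a simple reference count shared between threads.
++ *
++ *     gcsATOM_PTR refCount = gcvNULL;
++ *     gctINT32 oldValue;
++ *
++ *     gcoOS_AtomConstruct(os, &refCount);
++ *     gcoOS_AtomIncrement(os, refCount, &oldValue);    // take a reference
++ *     gcoOS_AtomDecrement(os, refCount, &oldValue);    // release it
++ *     gcoOS_AtomDestroy(os, refCount);
++ */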
++
++gctHANDLE
++gcoOS_GetCurrentProcessID(
++ void
++ );
++
++gctHANDLE
++gcoOS_GetCurrentThreadID(
++ void
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Time -----------------------------------------------------------------*/
++
++/* Get the number of milliseconds since the system started. */
++gctUINT32
++gcoOS_GetTicks(
++ void
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gcoOS_GetTime(
++ gctUINT64_PTR Time
++ );
++
++/* Get CPU usage in microseconds. */
++gceSTATUS
++gcoOS_GetCPUTime(
++ gctUINT64_PTR CPUTime
++ );
++
++/* Get memory usage. */
++gceSTATUS
++gcoOS_GetMemoryUsage(
++ gctUINT32_PTR MaxRSS,
++ gctUINT32_PTR IxRSS,
++ gctUINT32_PTR IdRSS,
++ gctUINT32_PTR IsRSS
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gcoOS_Delay(
++ IN gcoOS Os,
++ IN gctUINT32 Delay
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Threads --------------------------------------------------------------*/
++
++#ifdef _WIN32
++/* Cannot include windows.h here because "near" and "far",
++ * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h.
++ * So, use the real values of DWORD and WINAPI instead.
++ * DWORD is unsigned long, and WINAPI is __stdcall.
++ * If these two are changed in WinDef.h, the following two typedefs
++ * need to be changed, too.
++ */
++typedef unsigned long gctTHREAD_RETURN;
++typedef unsigned long (__stdcall * gcTHREAD_ROUTINE)(void * Argument);
++#else
++typedef void * gctTHREAD_RETURN;
++typedef void * (* gcTHREAD_ROUTINE)(void *);
++#endif
++
++/* Create a new thread. */
++gceSTATUS
++gcoOS_CreateThread(
++ IN gcoOS Os,
++ IN gcTHREAD_ROUTINE Worker,
++ IN gctPOINTER Argument,
++ OUT gctPOINTER * Thread
++ );
++
++/* Close a thread. */
++gceSTATUS
++gcoOS_CloseThread(
++ IN gcoOS Os,
++ IN gctPOINTER Thread
++ );
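++
++/* Illustrative sketch (an assumption, not from the original docs): a worker
++ * routine matching gcTHREAD_ROUTINE, started and released through the thread
++ * API above; `os` is assumed to be a valid gcoOS object.
++ *
++ *     static gctTHREAD_RETURN worker(void * Argument)
++ *     {
++ *         // ... background work ...
++ *         return (gctTHREAD_RETURN) 0;
++ *     }
++ *
++ *     gctPOINTER thread = gcvNULL;
++ *     gcoOS_CreateThread(os, worker, gcvNULL, &thread);
++ *     gcoOS_CloseThread(os, thread);    // release the thread handle
++ */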
++
++/*----------------------------------------------------------------------------*/
++/*----- Mutexes --------------------------------------------------------------*/
++
++/* Create a new mutex. */
++gceSTATUS
++gcoOS_CreateMutex(
++ IN gcoOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gcoOS_DeleteMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gcoOS_AcquireMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gcoOS_ReleaseMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
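++
++/* Illustrative sketch of the mutex API above (hedged; assumes a valid gcoOS
++ * object `os`, and that gcvINFINITE is the usual "no timeout" constant):
++ * guard a critical section.
++ *
++ *     gctPOINTER mutex = gcvNULL;
++ *
++ *     gcoOS_CreateMutex(os, &mutex);
++ *     if (gcoOS_AcquireMutex(os, mutex, gcvINFINITE) == gcvSTATUS_OK)
++ *     {
++ *         // ... critical section ...
++ *         gcoOS_ReleaseMutex(os, mutex);
++ *     }
++ *     gcoOS_DeleteMutex(os, mutex);
++ */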
++
++/*----------------------------------------------------------------------------*/
++/*----- Signals --------------------------------------------------------------*/
++
++/* Create a signal. */
++gceSTATUS
++gcoOS_CreateSignal(
++ IN gcoOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gcoOS_DestroySignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gcoOS_Signal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gcoOS_WaitSignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a signal from another process */
++gceSTATUS
++gcoOS_MapSignal(
++ IN gctSIGNAL RemoteSignal,
++ OUT gctSIGNAL * LocalSignal
++ );
++
++/* Unmap a signal mapped from another process */
++gceSTATUS
++gcoOS_UnmapSignal(
++ IN gctSIGNAL Signal
++ );
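++
++/* Illustrative sketch (an assumption, not from the original docs): a simple
++ * event handshake with the signal API above. One thread waits, another sets
++ * the signal; `os` is assumed to be a valid gcoOS object.
++ *
++ *     gctSIGNAL done = gcvNULL;
++ *
++ *     gcoOS_CreateSignal(os, gcvFALSE, &done);            // auto-reset signal
++ *     // consumer thread:  gcoOS_WaitSignal(os, done, gcvINFINITE);
++ *     // producer thread:  gcoOS_Signal(os, done, gcvTRUE);
++ *     gcoOS_DestroySignal(os, done);
++ */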
++
++/*----------------------------------------------------------------------------*/
++/*----- Android Native Fence -------------------------------------------------*/
++
++/* Create sync point. */
++gceSTATUS
++gcoOS_CreateSyncPoint(
++ IN gcoOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++/* Destroy sync point. */
++gceSTATUS
++gcoOS_DestroySyncPoint(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++/* Create native fence. */
++gceSTATUS
++gcoOS_CreateNativeFence(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++/* Wait on native fence. */
++gceSTATUS
++gcoOS_WaitNativeFence(
++ IN gcoOS Os,
++ IN gctINT FenceFD,
++ IN gctUINT32 Timeout
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Memory Access and Cache ----------------------------------------------*/
++
++/* Write a register. */
++gceSTATUS
++gcoOS_WriteRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Read a register. */
++gceSTATUS
++gcoOS_ReadRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gcoOS_CacheClean(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheFlush(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheInvalidate(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_MemoryBarrier(
++ IN gcoOS Os,
++ IN gctPOINTER Logical
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ );
++
++gceSTATUS
++gcoOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gcoOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++#define _gcmPROFILE_INIT(prefix, freq, start) \
++ do { \
++ prefix ## OS_QueryProfileTickRate(&(freq)); \
++ prefix ## OS_GetProfileTick(&(start)); \
++ } while (gcvFALSE)
++
++#define _gcmPROFILE_QUERY(prefix, start, ticks) \
++ do { \
++ prefix ## OS_GetProfileTick(&(ticks)); \
++ (ticks) = ((ticks) > (start)) ? ((ticks) - (start)) \
++ : (~0ull - (start) + (ticks) + 1); \
++ } while (gcvFALSE)
++
++#if gcdENABLE_PROFILING
++# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start)
++# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks)
++# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start)
++# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks)
++# define gcmPROFILE_ONLY(x) x
++# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ONLY(x) x
++# define gcmPROFILE_DECLARE_ELSE(x) typedef x
++#else
++# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_ELSE(x) x
++# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ELSE(x) x
++#endif
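++
++/* Illustrative sketch (hedged; the variable names are assumptions): measure
++ * an elapsed tick count for a code region with the profile macros above.
++ *
++ *     gctUINT64 freq, start, ticks;
++ *
++ *     gcmPROFILE_INIT(freq, start);
++ *     // ... work to be measured ...
++ *     gcmPROFILE_QUERY(start, ticks);    // ticks now holds the elapsed count
++ */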
++
++/*******************************************************************************
++** gcoMATH object
++*/
++
++#define gcdPI 3.14159265358979323846f
++
++/* Kernel. */
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++/* User. */
++gctUINT32
++gcoMATH_Log2in5dot5(
++ IN gctINT X
++ );
++
++
++gctFLOAT
++gcoMATH_UIntAsFloat(
++ IN gctUINT32 X
++ );
++
++gctUINT32
++gcoMATH_FloatAsUInt(
++ IN gctFLOAT X
++ );
++
++gctBOOL
++gcoMATH_CompareEqualF(
++ IN gctFLOAT X,
++ IN gctFLOAT Y
++ );
++
++gctUINT16
++gcoMATH_UInt8AsFloat16(
++ IN gctUINT8 X
++ );
++
++/******************************************************************************\
++**************************** Coordinate Structures *****************************
++\******************************************************************************/
++
++typedef struct _gcsPOINT
++{
++ gctINT32 x;
++ gctINT32 y;
++}
++gcsPOINT;
++
++typedef struct _gcsSIZE
++{
++ gctINT32 width;
++ gctINT32 height;
++}
++gcsSIZE;
++
++typedef struct _gcsRECT
++{
++ gctINT32 left;
++ gctINT32 top;
++ gctINT32 right;
++ gctINT32 bottom;
++}
++gcsRECT;
++
++typedef union _gcsPIXEL
++{
++ struct
++ {
++ gctFLOAT r, g, b, a;
++ gctFLOAT d, s;
++ } pf;
++
++ struct
++ {
++ gctINT32 r, g, b, a;
++ gctINT32 d, s;
++ } pi;
++
++ struct
++ {
++ gctUINT32 r, g, b, a;
++ gctUINT32 d, s;
++ } pui;
++
++} gcsPIXEL;
++
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- gcoSURF Common ------------------------------*/
++
++/* Color format classes. */
++typedef enum _gceFORMAT_CLASS
++{
++ gcvFORMAT_CLASS_RGBA = 4500,
++ gcvFORMAT_CLASS_YUV,
++ gcvFORMAT_CLASS_INDEX,
++ gcvFORMAT_CLASS_LUMINANCE,
++ gcvFORMAT_CLASS_BUMP,
++ gcvFORMAT_CLASS_DEPTH,
++}
++gceFORMAT_CLASS;
++
++/* Special enums for width field in gcsFORMAT_COMPONENT. */
++typedef enum _gceCOMPONENT_CONTROL
++{
++ gcvCOMPONENT_NOTPRESENT = 0x00,
++ gcvCOMPONENT_DONTCARE = 0x80,
++ gcvCOMPONENT_WIDTHMASK = 0x7F,
++ gcvCOMPONENT_ODD = 0x80
++}
++gceCOMPONENT_CONTROL;
++
++/* Color format component parameters. */
++typedef struct _gcsFORMAT_COMPONENT
++{
++ gctUINT8 start;
++ gctUINT8 width;
++}
++gcsFORMAT_COMPONENT;
++
++/* RGBA color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_RGBA
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT red;
++ gcsFORMAT_COMPONENT green;
++ gcsFORMAT_COMPONENT blue;
++}
++gcsFORMAT_CLASS_TYPE_RGBA;
++
++/* YUV color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_YUV
++{
++ gcsFORMAT_COMPONENT y;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT v;
++}
++gcsFORMAT_CLASS_TYPE_YUV;
++
++/* Index color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_INDEX
++{
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_INDEX;
++
++/* Luminance color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_LUMINANCE;
++
++/* Bump map color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_BUMP
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT l;
++ gcsFORMAT_COMPONENT v;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT q;
++ gcsFORMAT_COMPONENT w;
++}
++gcsFORMAT_CLASS_TYPE_BUMP;
++
++/* Depth and stencil format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH
++{
++ gcsFORMAT_COMPONENT depth;
++ gcsFORMAT_COMPONENT stencil;
++}
++gcsFORMAT_CLASS_TYPE_DEPTH;
++
++/* Format parameters. */
++typedef struct _gcsSURF_FORMAT_INFO
++{
++ /* Format code and class. */
++ gceSURF_FORMAT format;
++ gceFORMAT_CLASS fmtClass;
++
++ /* The size of one pixel in bits. */
++ gctUINT8 bitsPerPixel;
++
++ /* Component swizzle. */
++ gceSURF_SWIZZLE swizzle;
++
++    /* Some formats have two neighbouring pixels interleaved together. */
++    /* To describe such a format, set the flag to 1 and add another    */
++    /* entry like this one describing the odd pixel format.            */
++ gctUINT8 interleaved;
++
++ /* Format components. */
++ union
++ {
++ gcsFORMAT_CLASS_TYPE_BUMP bump;
++ gcsFORMAT_CLASS_TYPE_RGBA rgba;
++ gcsFORMAT_CLASS_TYPE_YUV yuv;
++ gcsFORMAT_CLASS_TYPE_LUMINANCE lum;
++ gcsFORMAT_CLASS_TYPE_INDEX index;
++ gcsFORMAT_CLASS_TYPE_DEPTH depth;
++ } u;
++}
++gcsSURF_FORMAT_INFO;
++
++/* Frame buffer information. */
++typedef struct _gcsSURF_FRAMEBUFFER
++{
++ gctPOINTER logical;
++ gctUINT width, height;
++ gctINT stride;
++ gceSURF_FORMAT format;
++}
++gcsSURF_FRAMEBUFFER;
++
++typedef struct _gcsVIDMEM_NODE_SHARED_INFO
++{
++ gctBOOL tileStatusDisabled;
++ gcsPOINT SrcOrigin;
++ gcsPOINT DestOrigin;
++ gcsSIZE RectSize;
++ gctUINT32 clearValue;
++}
++gcsVIDMEM_NODE_SHARED_INFO;
++
++/* Generic pixel component descriptors. */
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX;
++
++typedef enum _gceORIENTATION
++{
++ gcvORIENTATION_TOP_BOTTOM,
++ gcvORIENTATION_BOTTOM_TOP,
++}
++gceORIENTATION;
++
++
++/* Construct a new gcoSURF object. */
++gceSTATUS
++gcoSURF_Construct(
++ IN gcoHAL Hal,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++/* Destroy a gcoSURF object. */
++gceSTATUS
++gcoSURF_Destroy(
++ IN gcoSURF Surface
++ );
++
++/* Map user-allocated surface. */
++gceSTATUS
++gcoSURF_MapUserSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Query vid mem node info. */
++gceSTATUS
++gcoSURF_QueryVidMemNode(
++ IN gcoSURF Surface,
++ OUT gctUINT64 * Node,
++ OUT gcePOOL * Pool,
++ OUT gctUINT_PTR Bytes
++ );
++
++/* Set the color type of the surface. */
++gceSTATUS
++gcoSURF_SetColorType(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_TYPE ColorType
++ );
++
++/* Get the color type of the surface. */
++gceSTATUS
++gcoSURF_GetColorType(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_TYPE *ColorType
++ );
++
++/* Set the surface rotation angle. */
++gceSTATUS
++gcoSURF_SetRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_SetPreRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_GetPreRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION *Rotation
++ );
++
++gceSTATUS
++gcoSURF_IsValid(
++ IN gcoSURF Surface
++ );
++
++#ifndef VIVANTE_NO_3D
++/* Verify and return the state of the tile status mechanism. */
++gceSTATUS
++gcoSURF_IsTileStatusSupported(
++ IN gcoSURF Surface
++ );
++
++/* Process tile status for the specified surface. */
++gceSTATUS
++gcoSURF_SetTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_EnableTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Disable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_DisableTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++
++gceSTATUS
++gcoSURF_AlignResolveRect(
++ IN gcoSURF Surf,
++ IN gcsPOINT_PTR RectOrigin,
++ IN gcsPOINT_PTR RectSize,
++ OUT gcsPOINT_PTR AlignedOrigin,
++ OUT gcsPOINT_PTR AlignedSize
++ );
++#endif /* VIVANTE_NO_3D */
++
++/* Get surface size. */
++gceSTATUS
++gcoSURF_GetSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctUINT * Depth
++ );
++
++/* Get surface aligned sizes. */
++gceSTATUS
++gcoSURF_GetAlignedSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctINT * Stride
++ );
++
++/* Get alignments. */
++gceSTATUS
++gcoSURF_GetAlignment(
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT * AddressAlignment,
++ OUT gctUINT * XAlignment,
++ OUT gctUINT * YAlignment
++ );
++
++/* Get surface type and format. */
++gceSTATUS
++gcoSURF_GetFormat(
++ IN gcoSURF Surface,
++ OUT gceSURF_TYPE * Type,
++ OUT gceSURF_FORMAT * Format
++ );
++
++/* Get surface tiling. */
++gceSTATUS
++gcoSURF_GetTiling(
++ IN gcoSURF Surface,
++ OUT gceTILING * Tiling
++ );
++
++/* Lock the surface. */
++gceSTATUS
++gcoSURF_Lock(
++ IN gcoSURF Surface,
++ IN OUT gctUINT32 * Address,
++ IN OUT gctPOINTER * Memory
++ );
++
++/* Unlock the surface. */
++gceSTATUS
++gcoSURF_Unlock(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory
++ );
++
++/* Return pixel format parameters. */
++gceSTATUS
++gcoSURF_QueryFormat(
++ IN gceSURF_FORMAT Format,
++ OUT gcsSURF_FORMAT_INFO_PTR * Info
++ );
++
++/* Compute the color pixel mask. */
++gceSTATUS
++gcoSURF_ComputeColorMask(
++ IN gcsSURF_FORMAT_INFO_PTR Format,
++ OUT gctUINT32_PTR ColorMask
++ );
++
++/* Flush the surface. */
++gceSTATUS
++gcoSURF_Flush(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface from its tile status buffer. */
++gceSTATUS
++gcoSURF_FillFromTile(
++ IN gcoSURF Surface
++ );
++
++/* Check if surface needs a filler. */
++gceSTATUS gcoSURF_NeedFiller(IN gcoSURF Surface);
++
++/* Fill surface with a value. */
++gceSTATUS
++gcoSURF_Fill(
++ IN gcoSURF Surface,
++ IN gcsPOINT_PTR Origin,
++ IN gcsSIZE_PTR Size,
++ IN gctUINT32 Value,
++ IN gctUINT32 Mask
++ );
++
++/* Alpha blend two surfaces together. */
++gceSTATUS
++gcoSURF_Blend(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrig,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsSIZE_PTR Size,
++ IN gceSURF_BLEND_MODE Mode
++ );
++
++/* Create a new gcoSURF wrapper object. */
++gceSTATUS
++gcoSURF_ConstructWrapper(
++ IN gcoHAL Hal,
++ OUT gcoSURF * Surface
++ );
++
++/* Set the underlying buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Stride,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Set the underlying video buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetVideoBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Stride,
++ IN gctPOINTER *LogicalPlane1,
++ IN gctUINT32 *PhysicalPlane1
++ );
++
++/* Set the size of the surface in pixels and map the underlying buffer. */
++gceSTATUS
++gcoSURF_SetWindow(
++ IN gcoSURF Surface,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Set the width/height alignment of the surface directly and calculate stride/size. This is currently only used by the DRI backend; use with care. */
++gceSTATUS
++gcoSURF_SetAlignment(
++ IN gcoSURF Surface,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Increase reference count of the surface. */
++gceSTATUS
++gcoSURF_ReferenceSurface(
++ IN gcoSURF Surface
++ );
++
++/* Get surface reference count. */
++gceSTATUS
++gcoSURF_QueryReferenceCount(
++ IN gcoSURF Surface,
++ OUT gctINT32 * ReferenceCount
++ );
++
++/* Set surface orientation. */
++gceSTATUS
++gcoSURF_SetOrientation(
++ IN gcoSURF Surface,
++ IN gceORIENTATION Orientation
++ );
++
++/* Query surface orientation. */
++gceSTATUS
++gcoSURF_QueryOrientation(
++ IN gcoSURF Surface,
++ OUT gceORIENTATION * Orientation
++ );
++
++gceSTATUS
++gcoSURF_SetOffset(
++ IN gcoSURF Surface,
++ IN gctUINT Offset
++ );
++
++gceSTATUS
++gcoSURF_GetOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT *Offset
++ );
++
++gceSTATUS
++gcoSURF_NODE_Cache(
++ IN gcsSURF_NODE_PTR Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Perform CPU cache operation on surface */
++gceSTATUS
++gcoSURF_CPUCacheOperation(
++ IN gcoSURF Surface,
++ IN gceCACHEOPERATION Operation
++ );
++
++
++gceSTATUS
++gcoSURF_SetLinearResolveAddress(
++ IN gcoSURF Surface,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++gceSTATUS
++gcoSURF_Swap(
++    IN gcoSURF Surface1,
++    IN gcoSURF Surface2
++    );
++
++/******************************************************************************\
++********************************* gcoDUMP Object ********************************
++\******************************************************************************/
++
++/* Construct a new gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Construct(
++ IN gcoOS Os,
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Destroy a gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Destroy(
++ IN gcoDUMP Dump
++ );
++
++/* Enable/disable dumping. */
++gceSTATUS
++gcoDUMP_Control(
++ IN gcoDUMP Dump,
++ IN gctSTRING FileName
++ );
++
++gceSTATUS
++gcoDUMP_IsEnabled(
++ IN gcoDUMP Dump,
++ OUT gctBOOL * Enabled
++ );
++
++/* Add surface. */
++gceSTATUS
++gcoDUMP_AddSurface(
++ IN gcoDUMP Dump,
++ IN gctINT32 Width,
++ IN gctINT32 Height,
++ IN gceSURF_FORMAT PixelFormat,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount
++ );
++
++/* Mark the beginning of a frame. */
++gceSTATUS
++gcoDUMP_FrameBegin(
++ IN gcoDUMP Dump
++ );
++
++/* Mark the end of a frame. */
++gceSTATUS
++gcoDUMP_FrameEnd(
++ IN gcoDUMP Dump
++ );
++
++/* Dump data. */
++gceSTATUS
++gcoDUMP_DumpData(
++ IN gcoDUMP Dump,
++ IN gceDUMP_TAG Type,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Delete an address. */
++gceSTATUS
++gcoDUMP_Delete(
++ IN gcoDUMP Dump,
++ IN gctUINT32 Address
++ );
++
++/* Enable or disable dumping. */
++gceSTATUS
++gcoDUMP_SetDumpFlag(
++ IN gctBOOL DumpState
++ );
++
++/******************************************************************************\
++******************************* gcsRECT Structure ******************************
++\******************************************************************************/
++
++/* Initialize rectangle structure. */
++gceSTATUS
++gcsRECT_Set(
++ OUT gcsRECT_PTR Rect,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Return the width of the rectangle. */
++gceSTATUS
++gcsRECT_Width(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Width
++ );
++
++/* Return the height of the rectangle. */
++gceSTATUS
++gcsRECT_Height(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Height
++ );
++
++/* Ensure that the top-left corner is to the left of and above the bottom-right corner. */
++gceSTATUS
++gcsRECT_Normalize(
++ IN OUT gcsRECT_PTR Rect
++ );
++
++/* Compare two rectangles. */
++gceSTATUS
++gcsRECT_IsEqual(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * Equal
++ );
++
++/* Compare the sizes of two rectangles. */
++gceSTATUS
++gcsRECT_IsOfEqualSize(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * EqualSize
++ );
++
++gceSTATUS
++gcsRECT_RelativeRotation(
++ IN gceSURF_ROTATION Orientation,
++ IN OUT gceSURF_ROTATION *Relation);
++
++gceSTATUS
++gcsRECT_Rotate(
++    IN OUT gcsRECT_PTR Rect,
++    IN gceSURF_ROTATION Rotation,
++    IN gceSURF_ROTATION toRotation,
++    IN gctINT32 SurfaceWidth,
++    IN gctINT32 SurfaceHeight
++    );
++
++/******************************************************************************\
++**************************** gcsBOUNDARY Structure *****************************
++\******************************************************************************/
++
++typedef struct _gcsBOUNDARY
++{
++ gctINT x;
++ gctINT y;
++ gctINT width;
++ gctINT height;
++}
++gcsBOUNDARY;
++
++/******************************************************************************\
++********************************* gcoHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gcoHEAP * gcoHEAP;
++
++/* Construct a new gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Construct(
++ IN gcoOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gcoHEAP * Heap
++ );
++
++/* Destroy a gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Destroy(
++ IN gcoHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoHEAP_Allocate(
++ IN gcoHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcoHEAP_GetMemorySize(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoHEAP_Free(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++#if (VIVANTE_PROFILER || gcdDEBUG)
++/* Profile the heap. */
++gceSTATUS
++gcoHEAP_ProfileStart(
++ IN gcoHEAP Heap
++ );
++
++gceSTATUS
++gcoHEAP_ProfileEnd(
++ IN gcoHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++
++/******************************************************************************\
++******************************* Debugging Macros *******************************
++\******************************************************************************/
++
++void
++gcoOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gcoOS_GetDebugLevel(
++ OUT gctUINT32_PTR DebugLevel
++ );
++
++void
++gcoOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_GetDebugZone(
++ IN gctUINT32 Zone,
++ OUT gctUINT32_PTR DebugZone
++ );
++
++void
++gcoOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gcoOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++gctFILE
++gcoOS_ReplaceDebugFile(
++ IN gctFILE fp
++ );
++
++/*******************************************************************************
++**
++** gcmFATAL
++**
++** Print a message to the debugger and execute a break point.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_FATAL)
++# define gcmFATAL gcoOS_DebugFatal
++# define gcmkFATAL gckOS_DebugFatal
++#elif gcdHAS_ELLIPSES
++# define gcmFATAL(...)
++# define gcmkFATAL(...)
++#else
++ gcmINLINE static void
++ __dummy_fatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmFATAL __dummy_fatal
++# define gcmkFATAL __dummy_fatal
++#endif
++
++#define gcmENUM2TEXT(e) case e: return #e
++
++/*******************************************************************************
++**
++** gcmTRACE
++**
++** Print a message to the debugger if the correct level has been set. In
++** retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** level Level of message.
++** message Message.
++** ... Optional arguments.
++*/
++#define gcvLEVEL_NONE -1
++#define gcvLEVEL_ERROR 0
++#define gcvLEVEL_WARNING 1
++#define gcvLEVEL_INFO 2
++#define gcvLEVEL_VERBOSE 3
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE gcoOS_DebugTrace
++# define gcmkTRACE gckOS_DebugTrace
++# define gcmkTRACE_N gckOS_DebugTraceN
++#elif gcdHAS_ELLIPSES
++# define gcmTRACE(...)
++# define gcmkTRACE(...)
++# define gcmkTRACE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_n(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE __dummy_trace
++# define gcmkTRACE __dummy_trace
++# define gcmkTRACE_N __dummy_trace_n
++#endif
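++
++/* Illustrative use of the trace macros above (a hedged sketch; `surface` and
++ * `status` are assumed variables): printf-style diagnostics gated by the
++ * current debug level, compiled out in retail builds.
++ *
++ *     gcmTRACE(gcvLEVEL_WARNING, "surface %p has no tile status", surface);
++ *     gcmkTRACE(gcvLEVEL_ERROR, "command queue failed: status=%d", status);
++ */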
++
++/* Zones common for kernel and user. */
++#define gcvZONE_OS (1 << 0)
++#define gcvZONE_HARDWARE (1 << 1)
++#define gcvZONE_HEAP (1 << 2)
++#define gcvZONE_SIGNAL (1 << 27)
++
++/* Kernel zones. */
++#define gcvZONE_KERNEL (1 << 3)
++#define gcvZONE_VIDMEM (1 << 4)
++#define gcvZONE_COMMAND (1 << 5)
++#define gcvZONE_DRIVER (1 << 6)
++#define gcvZONE_CMODEL (1 << 7)
++#define gcvZONE_MMU (1 << 8)
++#define gcvZONE_EVENT (1 << 9)
++#define gcvZONE_DEVICE (1 << 10)
++#define gcvZONE_DATABASE (1 << 11)
++#define gcvZONE_INTERRUPT (1 << 12)
++#define gcvZONE_POWER (1 << 13)
++
++/* User zones. */
++#define gcvZONE_HAL (1 << 3)
++#define gcvZONE_BUFFER (1 << 4)
++#define gcvZONE_CONTEXT (1 << 5)
++#define gcvZONE_SURFACE (1 << 6)
++#define gcvZONE_INDEX (1 << 7)
++#define gcvZONE_STREAM (1 << 8)
++#define gcvZONE_TEXTURE (1 << 9)
++#define gcvZONE_2D (1 << 10)
++#define gcvZONE_3D (1 << 11)
++#define gcvZONE_COMPILER (1 << 12)
++#define gcvZONE_MEMORY (1 << 13)
++#define gcvZONE_STATE (1 << 14)
++#define gcvZONE_AUX (1 << 15)
++#define gcvZONE_VERTEX (1 << 16)
++#define gcvZONE_CL (1 << 17)
++#define gcvZONE_COMPOSITION (1 << 17)
++#define gcvZONE_VG (1 << 18)
++#define gcvZONE_IMAGE (1 << 19)
++#define gcvZONE_UTILITY (1 << 20)
++#define gcvZONE_PARAMETERS (1 << 21)
++
++/* API definitions. */
++#define gcvZONE_API_HAL (1 << 28)
++#define gcvZONE_API_EGL (2 << 28)
++#define gcvZONE_API_ES11 (3 << 28)
++#define gcvZONE_API_ES20 (4 << 28)
++#define gcvZONE_API_VG11 (5 << 28)
++#define gcvZONE_API_GL (6 << 28)
++#define gcvZONE_API_DFB (7 << 28)
++#define gcvZONE_API_GDI (8 << 28)
++#define gcvZONE_API_D3D (9 << 28)
++#define gcvZONE_API_ES30 (10 << 28)
++
++
++#define gcmZONE_GET_API(zone) ((zone) >> 28)
++/*Set gcdZONE_MASE like 0x0 | gcvZONE_API_EGL
++will enable print EGL module debug info*/
++#define gcdZONE_MASK 0x0FFFFFFF
++
++/* Handy zones. */
++#define gcvZONE_NONE 0
++#define gcvZONE_ALL 0x0FFFFFFF
++
++/*Dump API depth set 1 for API, 2 for API and API behavior*/
++#define gcvDUMP_API_DEPTH 1
++
++/*******************************************************************************
++**
++** gcmTRACE_ZONE
++**
++** Print a message to the debugger if the correct level and zone have been
++** set. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** Level Level of message.
++** Zone Zone of message.
++** Message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE_ZONE gcoOS_DebugTraceZone
++# define gcmkTRACE_ZONE gckOS_DebugTraceZone
++# define gcmkTRACE_ZONE_N gckOS_DebugTraceZoneN
++#elif gcdHAS_ELLIPSES
++# define gcmTRACE_ZONE(...)
++# define gcmkTRACE_ZONE(...)
++# define gcmkTRACE_ZONE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace_zone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_zone_n(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE_N __dummy_trace_zone_n
++#endif
++
++/*******************************************************************************
++**
++** gcmDEBUG_ONLY
++**
++** Execute a statement or function only in DEBUG mode.
++**
++** ARGUMENTS:
++**
++** f Statement or function to execute.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++# define gcmDEBUG_ONLY(f) f
++#else
++# define gcmDEBUG_ONLY(f)
++#endif
++
++/*******************************************************************************
++**
++** gcmSTACK_PUSH
++** gcmSTACK_POP
++** gcmSTACK_DUMP
++**
++** Push or pop a function with entry arguments on the trace stack.
++**
++** ARGUMENTS:
++**
++** Function Name of function.
++** Line Line number.
++** Text Optional text.
++** ... Optional arguments for text.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_STACK)
++ void
++ gcoOS_StackPush(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text,
++ ...
++ );
++ void
++ gcoOS_StackPop(
++ IN gctCONST_STRING Function
++ );
++ void
++ gcoOS_StackDump(
++ void
++ );
++# define gcmSTACK_PUSH gcoOS_StackPush
++# define gcmSTACK_POP gcoOS_StackPop
++# define gcmSTACK_DUMP gcoOS_StackDump
++#elif gcdHAS_ELLIPSES
++# define gcmSTACK_PUSH(...) do { } while (0)
++# define gcmSTACK_POP(Function) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++#else
++ gcmINLINE static void
++ __dummy_stack_push(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text, ...
++ )
++ {
++ }
++# define gcmSTACK_PUSH __dummy_stack_push
++# define gcmSTACK_POP(Function) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++#endif
++
++/******************************************************************************\
++******************************** Logging Macros ********************************
++\******************************************************************************/
++
++#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE
++
++
++#if gcdENABLE_PROFILING
++void
++gcoOS_ProfileDB(
++ IN gctCONST_STRING Function,
++ IN OUT gctBOOL_PTR Initialized
++ );
++
++#define gcmHEADER() \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmHEADER_ARG(...) \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmFOOTER() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_ARG(...) \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(gcvNULL, gcvNULL)
++
++#else /* gcdENABLE_PROFILING */
++
++#if gcdHAS_ELLIPSES
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_header(void)
++ {
++ }
++# define gcmHEADER __dummy_header
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmHEADER_ARG(Text, ...) \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_header_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmHEADER_ARG __dummy_header_arg
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmFOOTER() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmPROFILE_ONLY(gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d) [%llu,%llu]: status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ __ticks__, __total__, \
++ status, gcoOS_DebugStatus2Name(status))); \
++ gcmPROFILE_ELSE(gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ status, gcoOS_DebugStatus2Name(status))); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer(void)
++ {
++ }
++# define gcmFOOTER __dummy_footer
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_no(void)
++ {
++ }
++# define gcmFOOTER_NO __dummy_footer_no
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_kill(void)
++ {
++ }
++# define gcmFOOTER_KILL __dummy_footer_kill
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmFOOTER_ARG(Text, ...) \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmFOOTER_ARG __dummy_footer_arg
++#endif
++
++#endif /* gcdENABLE_PROFILING */
++
++#if gcdHAS_ELLIPSES
++#define gcmkHEADER() \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_kheader(void)
++ {
++ }
++# define gcmkHEADER __dummy_kheader
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmkHEADER_ARG(Text, ...) \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_kheader_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkHEADER_ARG __dummy_kheader_arg
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmkFOOTER() \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, status, gckOS_DebugStatus2Name(status)); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter(void)
++ {
++ }
++# define gcmkFOOTER __dummy_kfooter
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmkFOOTER_NO() \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_no(void)
++ {
++ }
++# define gcmkFOOTER_NO __dummy_kfooter_no
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmkFOOTER_ARG(Text, ...) \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, \
++ __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkFOOTER_ARG __dummy_kfooter_arg
++#endif
++
++#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr))
++#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index])
++#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr))
++#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr))
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#define gcmPRINT gcoOS_Print
++#define gcmkPRINT gckOS_Print
++#define gcmkPRINT_N gckOS_PrintN
++
++#if gcdPRINT_VERSION
++# define gcmPRINT_VERSION() do { \
++ _gcmPRINT_VERSION(gcm); \
++ gcmSTACK_DUMP(); \
++ } while (0)
++# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk)
++# define _gcmPRINT_VERSION(prefix) \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "Vivante HAL version %d.%d.%d build %d %s %s", \
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, \
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME )
++#else
++# define gcmPRINT_VERSION() do { gcmSTACK_DUMP(); } while (gcvFALSE)
++# define gcmkPRINT_VERSION() do { } while (gcvFALSE)
++#endif
++
++typedef enum _gceDUMP_BUFFER
++{
++ gceDUMP_BUFFER_CONTEXT,
++ gceDUMP_BUFFER_USER,
++ gceDUMP_BUFFER_KERNEL,
++ gceDUMP_BUFFER_LINK,
++ gceDUMP_BUFFER_WAITLINK,
++ gceDUMP_BUFFER_FROM_USER,
++}
++gceDUMP_BUFFER;
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ );
++
++#define gcmkDUMPBUFFER gckOS_DumpBuffer
++
++#if gcdDUMP_COMMAND
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) \
++ gcmkDUMPBUFFER(Os, Buffer, Size, Type, CopyMessage)
++#else
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage)
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ );
++
++# define gcmkDEBUGFLUSH(DmaAddress) \
++ gckOS_DebugFlush(__FUNCTION__, __LINE__, DmaAddress)
++#else
++# define gcmkDEBUGFLUSH(DmaAddress)
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_FRAMERATE
++**
++** Print average frame rate
++**
++*/
++#if gcdDUMP_FRAMERATE
++ gceSTATUS
++ gcfDumpFrameRate(
++ void
++ );
++# define gcmDUMP_FRAMERATE gcfDumpFrameRate
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_FRAMERATE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_frame_rate(
++ void
++ )
++ {
++ }
++# define gcmDUMP_FRAMERATE __dummy_dump_frame_rate
++#endif
++
++
++/*******************************************************************************
++**
++** gcmDUMP
++**
++** Print a dump message.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++#if gcdDUMP
++ gceSTATUS
++ gcfDump(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ ...
++ );
++# define gcmDUMP gcfDump
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP(...)
++#else
++ gcmINLINE static void
++ __dummy_dump(
++ IN gcoOS Os,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP __dummy_dump
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_DATA
++**
++** Add data to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++ gceSTATUS
++ gcfDumpData(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_DATA gcfDumpData
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_data(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_DATA __dummy_dump_data
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_BUFFER
++**
++** Print a buffer to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctUINT32 Physical
++** Physical address of buffer.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctUINT32 Offset
++** Offset into buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++gceSTATUS
++gcfDumpBuffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_BUFFER gcfDumpBuffer
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_BUFFER(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_buffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_BUFFER __dummy_dump_buffer
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API
++**
++** Print a dump message for a high level API prefixed by the function name.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++gceSTATUS gcfDumpApi(IN gctCONST_STRING String, ...);
++#if gcdDUMP_API
++# define gcmDUMP_API gcfDumpApi
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP_API __dummy_dump_api
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY
++**
++** Print an array of data.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Size.
++*/
++gceSTATUS gcfDumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY gcfDumpArray
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_ARRAY(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY __dummy_dump_api_array
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY_TOKEN
++**
++** Print an array of data terminated by a token.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Termination.
++*/
++gceSTATUS gcfDumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY_TOKEN gcfDumpArrayToken
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_ARRAY_TOKEN(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array_token(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Termination
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY_TOKEN __dummy_dump_api_array_token
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_DATA
++**
++** Print an array of bytes.
++**
++** ARGUMENTS:
++**
++** gctCONST_POINTER Pointer to array.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfDumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_DATA gcfDumpApiData
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_data(
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Size
++ )
++ {
++ }
++# define gcmDUMP_API_DATA __dummy_dump_api_data
++#endif
++
++/*******************************************************************************
++**
++** gcmTRACE_RELEASE
++**
++** Print a message to the shader debugger.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace
++
++void
++gcoOS_DebugShaderTrace(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_SetDebugShaderFiles(
++ IN gctCONST_STRING VSFileName,
++ IN gctCONST_STRING FSFileName
++ );
++
++void
++gcoOS_SetDebugShaderFileType(
++ IN gctUINT32 ShaderType
++ );
++
++void
++gcoOS_EnableDebugBuffer(
++ IN gctBOOL Enable
++ );
++
++/*******************************************************************************
++**
++** gcmBREAK
++**
++** Break into the debugger. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** None.
++*/
++
++void
++gcoOS_DebugBreak(
++ void
++ );
++
++void
++gckOS_DebugBreak(
++ void
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_BREAK)
++# define gcmBREAK gcoOS_DebugBreak
++# define gcmkBREAK gckOS_DebugBreak
++#else
++# define gcmBREAK()
++# define gcmkBREAK()
++#endif
++
++/*******************************************************************************
++**
++** gcmASSERT
++**
++** Evaluate an expression and break into the debugger if the expression
++** evaluates to false. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define _gcmASSERT(prefix, exp) \
++ do \
++ { \
++ if (!(exp)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ASSERT at %s(%d)", \
++ __FUNCTION__, __LINE__); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "(%s)", #exp); \
++ prefix##BREAK(); \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmASSERT(exp) _gcmASSERT(gcm, exp)
++# define gcmkASSERT(exp) _gcmASSERT(gcmk, exp)
++#else
++# define gcmASSERT(exp)
++# define gcmkASSERT(exp)
++#endif
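++
++/* Illustrative sketch (an assumption, not from the original docs): typical
++ * argument checks with the assert macros above; the checks compile away in
++ * retail builds.
++ *
++ *     gcmASSERT(Surface != gcvNULL);
++ *     gcmkASSERT(Bytes > 0);
++ */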
++
++/*******************************************************************************
++**
++** gcmVERIFY
++**
++** Verify that an expression evaluates to true. If the expression does not
++** evaluate to true, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY(exp) gcmASSERT(exp)
++# define gcmkVERIFY(exp) gcmkASSERT(exp)
++#else
++# define gcmVERIFY(exp) exp
++# define gcmkVERIFY(exp) exp
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_OK
++**
++** Verify that a function returns gcvSTATUS_OK. If the function does not return
++** gcvSTATUS_OK, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++
++void
++gcoOS_Verify(
++ IN gceSTATUS status
++ );
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ gcoOS_Verify(verifyStatus); \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gcmASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++# define gcmkVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmkTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmkVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gckOS_Verify(verifyStatus); \
++ gcmkASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++#else
++# define gcmVERIFY_OK(func) func
++# define gcmkVERIFY_OK(func) func
++#endif
++
++gctCONST_STRING
++gcoOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++/*******************************************************************************
++**
++** gcmERR_BREAK
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func)
++#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmERR_RETURN
++**
++** Executes a return on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func)
++#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func)
++
++
++/*******************************************************************************
++**
++** gcmONERROR
++**
++** Jump to the error handler in case there is an error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmONERROR(func) _gcmONERROR(gcm, func)
++#define gcmkONERROR(func) _gcmkONERROR(gcmk, func)
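++
++/*
++    Usage sketch (illustrative; myAllocate/myMap are hypothetical
++    gceSTATUS-returning calls). gcmONERROR assumes a local 'status'
++    variable and an 'OnError' label where partially completed work can be
++    rolled back:
++
++        gceSTATUS status;
++
++        gcmONERROR(myAllocate(Os, Bytes, &memory));
++        gcmONERROR(myMap(Os, memory, &logical));
++        return gcvSTATUS_OK;
++
++    OnError:
++        (undo any partial work here)
++        return status;
++*/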
++
++/*******************************************************************************
++**
++** gcmVERIFY_LOCK
++**
++** Verifies whether the surface is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface information structure.
++*/
++#define gcmVERIFY_LOCK(surfaceInfo) \
++ if (!surfaceInfo->node.valid) \
++ { \
++ gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \
++ } \
++
++/*******************************************************************************
++**
++** gcmVERIFY_NODE_LOCK
++**
++** Verifies whether the surface node is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface information structure.
++*/
++#define gcmVERIFY_NODE_LOCK(surfaceNode) \
++ if (!(surfaceNode)->valid) \
++ { \
++ status = gcvSTATUS_MEMORY_UNLOCKED; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmBADOBJECT_BREAK
++**
++** Executes a break statement on bad object.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#define gcmBADOBJECT_BREAK(obj, t) \
++ if ((obj == gcvNULL) \
++ || (((gcsOBJECT *)(obj))->type != t) \
++ ) \
++ { \
++ status = gcvSTATUS_INVALID_OBJECT; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmCHECK_STATUS
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gcoOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gckOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func)
++#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT(prefix, arg) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \
++ return gcvSTATUS_INVALID_ARGUMENT; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++
++/*******************************************************************************
++**
++** gcmDEBUG_VERIFY_ARGUMENT
++**
++** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode.
++** Use this to verify arguments inside non-public API functions.
++*/
++#if gcdDEBUG
++# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++#else
++# define gcmDEBUG_VERIFY_ARGUMENT(arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT_RETURN
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT_RETURN(prefix, arg, value) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "gcmVERIFY_ARGUMENT_RETURN failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("value=%d", value); \
++ return value; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcm, arg, value)
++# define gcmkVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcmk, arg, value)
++
++#define MAX_LOOP_COUNT 0x7FFFFFFF
++
++/******************************************************************************\
++****************************** User Debug Option ******************************
++\******************************************************************************/
++
++/* User option. */
++typedef enum _gceDEBUG_MSG
++{
++ gcvDEBUG_MSG_NONE,
++ gcvDEBUG_MSG_ERROR,
++ gcvDEBUG_MSG_WARNING
++}
++gceDEBUG_MSG;
++
++typedef struct _gcsUSER_DEBUG_OPTION
++{
++ gceDEBUG_MSG debugMsg;
++}
++gcsUSER_DEBUG_OPTION;
++
++gcsUSER_DEBUG_OPTION *
++gcGetUserDebugOption(
++ void
++ );
++
++struct _gcoOS_SymbolsList
++{
++ gcePATCH_ID patchId;
++ const char * symList[10];
++};
++
++#if gcdHAS_ELLIPSES
++#define gcmUSER_DEBUG_MSG(level, ...) \
++ do \
++ { \
++ if (level <= gcGetUserDebugOption()->debugMsg) \
++ { \
++ gcoOS_Print(__VA_ARGS__); \
++ } \
++ } while (gcvFALSE)
++
++#define gcmUSER_DEBUG_ERROR_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__)
++#define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warning: " __VA_ARGS__)
++#else
++#define gcmUSER_DEBUG_MSG
++#define gcmUSER_DEBUG_ERROR_MSG
++#define gcmUSER_DEBUG_WARNING_MSG
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_base_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_compiler.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,4298 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++/*
++** Include file that defines the front-end and back-end compilers, as well as the
++** objects they use.
++*/
++
++#ifndef __gc_hal_compiler_h_
++#define __gc_hal_compiler_h_
++
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_types.h"
++#include "gc_hal_engine.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#ifndef GC_ENABLE_LOADTIME_OPT
++#define GC_ENABLE_LOADTIME_OPT 1
++#endif
++
++#define TEMP_OPT_CONSTANT_TEXLD_COORD 0
++
++#define TEMP_SHADER_PATCH 1
++
++#define TEMP_INLINE_ALL_EXPANSION 1
++/******************************* IR VERSION ******************/
++#define gcdSL_IR_VERSION gcmCC('\0','\0','\0','\1')
++
++/******************************************************************************\
++|******************************* SHADER LANGUAGE ******************************|
++\******************************************************************************/
++
++ /* allocator/deallocator function pointer */
++typedef gceSTATUS (*gctAllocatorFunc)(
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++typedef gceSTATUS (*gctDeallocatorFunc)(
++ IN gctPOINTER Memory
++ );
++
++typedef gctBOOL (*compareFunc) (
++ IN void * data,
++ IN void * key
++ );
++
++typedef struct _gcsListNode gcsListNode;
++struct _gcsListNode
++{
++ gcsListNode * next;
++ void * data;
++};
++
++typedef struct _gcsAllocator
++{
++ gctAllocatorFunc allocate;
++ gctDeallocatorFunc deallocate;
++} gcsAllocator;
++
++/* simple map structure */
++typedef struct _SimpleMap SimpleMap;
++struct _SimpleMap
++{
++ gctUINT32 key;
++ gctUINT32 val;
++ SimpleMap *next;
++ gcsAllocator *allocator;
++
++};
++
++/* SimpleMap Operations */
++/* return -1 if not found, otherwise return the mapped value */
++gctUINT32
++gcSimpleMap_Find(
++ IN SimpleMap *Map,
++ IN gctUINT32 Key
++ );
++
++gceSTATUS
++gcSimpleMap_Destory(
++ IN SimpleMap * Map,
++ IN gcsAllocator * Allocator
++ );
++
++/* Add a pair <Key, Val> at the head of the Map; note that the map pointer
++ * always changes when a new node is added:
++ *
++ * gcSimpleMap_AddNode(&theMap, key, val, allocator);
++ *
++ */
++gceSTATUS
++gcSimpleMap_AddNode(
++ IN SimpleMap ** Map,
++ IN gctUINT32 Key,
++ IN gctUINT32 Val,
++ IN gcsAllocator * Allocator
++ );
++
++/* gcsList data structure and related operations */
++typedef struct _gcsList
++{
++ gcsListNode *head;
++ gcsListNode *tail;
++ gctINT count;
++ gcsAllocator *allocator;
++} gcsList;
++
++/* List operations */
++void
++gcList_Init(
++ IN gcsList *list,
++ IN gcsAllocator *allocator
++ );
++
++gceSTATUS
++gcList_CreateNode(
++ IN void * Data,
++ IN gctAllocatorFunc Allocator,
++ OUT gcsListNode ** ListNode
++ );
++
++gceSTATUS
++gcList_Clean(
++ IN gcsList * List,
++ IN gctBOOL FreeData
++ );
++
++gcsListNode *
++gcList_FindNode(
++ IN gcsList * List,
++ IN void * Key,
++ IN compareFunc compare
++ );
++
++gceSTATUS
++gcList_AddNode(
++ IN gcsList * List,
++ IN void * Data
++ );
++
++gceSTATUS
++gcList_RemoveNode(
++ IN gcsList * List,
++ IN gcsListNode * Node
++ );
++
++/* link list structure for code list */
++typedef gcsList gcsCodeList;
++typedef gcsCodeList * gctCodeList;
++typedef gcsListNode gcsCodeListNode;
++
++/* Possible shader language opcodes. */
++typedef enum _gcSL_OPCODE
++{
++ gcSL_NOP, /* 0x00 */
++ gcSL_MOV, /* 0x01 */
++ gcSL_SAT, /* 0x02 */
++ gcSL_DP3, /* 0x03 */
++ gcSL_DP4, /* 0x04 */
++ gcSL_ABS, /* 0x05 */
++ gcSL_JMP, /* 0x06 */
++ gcSL_ADD, /* 0x07 */
++ gcSL_MUL, /* 0x08 */
++ gcSL_RCP, /* 0x09 */
++ gcSL_SUB, /* 0x0A */
++ gcSL_KILL, /* 0x0B */
++ gcSL_TEXLD, /* 0x0C */
++ gcSL_CALL, /* 0x0D */
++ gcSL_RET, /* 0x0E */
++ gcSL_NORM, /* 0x0F */
++ gcSL_MAX, /* 0x10 */
++ gcSL_MIN, /* 0x11 */
++ gcSL_POW, /* 0x12 */
++ gcSL_RSQ, /* 0x13 */
++ gcSL_LOG, /* 0x14 */
++ gcSL_FRAC, /* 0x15 */
++ gcSL_FLOOR, /* 0x16 */
++ gcSL_CEIL, /* 0x17 */
++ gcSL_CROSS, /* 0x18 */
++ gcSL_TEXLDP, /* 0x19 */
++ gcSL_TEXBIAS, /* 0x1A */
++ gcSL_TEXGRAD, /* 0x1B */
++ gcSL_TEXLOD, /* 0x1C */
++ gcSL_SIN, /* 0x1D */
++ gcSL_COS, /* 0x1E */
++ gcSL_TAN, /* 0x1F */
++ gcSL_EXP, /* 0x20 */
++ gcSL_SIGN, /* 0x21 */
++ gcSL_STEP, /* 0x22 */
++ gcSL_SQRT, /* 0x23 */
++ gcSL_ACOS, /* 0x24 */
++ gcSL_ASIN, /* 0x25 */
++ gcSL_ATAN, /* 0x26 */
++ gcSL_SET, /* 0x27 */
++ gcSL_DSX, /* 0x28 */
++ gcSL_DSY, /* 0x29 */
++ gcSL_FWIDTH, /* 0x2A */
++ gcSL_DIV, /* 0x2B */
++ gcSL_MOD, /* 0x2C */
++ gcSL_AND_BITWISE, /* 0x2D */
++ gcSL_OR_BITWISE, /* 0x2E */
++ gcSL_XOR_BITWISE, /* 0x2F */
++ gcSL_NOT_BITWISE, /* 0x30 */
++ gcSL_LSHIFT, /* 0x31 */
++ gcSL_RSHIFT, /* 0x32 */
++ gcSL_ROTATE, /* 0x33 */
++ gcSL_BITSEL, /* 0x34 */
++ gcSL_LEADZERO, /* 0x35 */
++ gcSL_LOAD, /* 0x36 */
++ gcSL_STORE, /* 0x37 */
++ gcSL_BARRIER, /* 0x38 */
++ gcSL_STORE1, /* 0x39 */
++ gcSL_ATOMADD, /* 0x3A */
++ gcSL_ATOMSUB, /* 0x3B */
++ gcSL_ATOMXCHG, /* 0x3C */
++ gcSL_ATOMCMPXCHG, /* 0x3D */
++ gcSL_ATOMMIN, /* 0x3E */
++ gcSL_ATOMMAX, /* 0x3F */
++ gcSL_ATOMOR, /* 0x40 */
++ gcSL_ATOMAND, /* 0x41 */
++ gcSL_ATOMXOR, /* 0x42 */
++ /*gcSL_UNUSED, 0x43 */
++ /*gcSL_UNUSED, 0x44 */
++ /*gcSL_UNUSED, 0x45 */
++ /*gcSL_UNUSED, 0x46 */
++ /*gcSL_UNUSED, 0x47 */
++ /*gcSL_UNUSED, 0x48 */
++ /*gcSL_UNUSED, 0x49 */
++ /*gcSL_UNUSED, 0x4A */
++ /*gcSL_UNUSED, 0x4B */
++ /*gcSL_UNUSED, 0x4C */
++ /*gcSL_UNUSED, 0x4D */
++ /*gcSL_UNUSED, 0x4E */
++ /*gcSL_UNUSED, 0x4F */
++ /*gcSL_UNUSED, 0x50 */
++ /*gcSL_UNUSED, 0x51 */
++ /*gcSL_UNUSED, 0x52 */
++ gcSL_ADDLO = 0x53, /* 0x53 */ /* Float only. */
++ gcSL_MULLO, /* 0x54 */ /* Float only. */
++ gcSL_CONV, /* 0x55 */
++ gcSL_GETEXP, /* 0x56 */
++ gcSL_GETMANT, /* 0x57 */
++ gcSL_MULHI, /* 0x58 */ /* Integer only. */
++ gcSL_CMP, /* 0x59 */
++ gcSL_I2F, /* 0x5A */
++ gcSL_F2I, /* 0x5B */
++ gcSL_ADDSAT, /* 0x5C */ /* Integer only. */
++ gcSL_SUBSAT, /* 0x5D */ /* Integer only. */
++ gcSL_MULSAT, /* 0x5E */ /* Integer only. */
++ gcSL_DP2, /* 0x5F */
++ gcSL_MAXOPCODE
++}
++gcSL_OPCODE;
++
++typedef enum _gcSL_FORMAT
++{
++ gcSL_FLOAT = 0, /* 0 */
++ gcSL_INTEGER = 1, /* 1 */
++ gcSL_INT32 = 1, /* 1 */
++ gcSL_BOOLEAN = 2, /* 2 */
++ gcSL_UINT32 = 3, /* 3 */
++ gcSL_INT8, /* 4 */
++ gcSL_UINT8, /* 5 */
++ gcSL_INT16, /* 6 */
++ gcSL_UINT16, /* 7 */
++ gcSL_INT64, /* 8 */ /* Reserved for future enhancement. */
++ gcSL_UINT64, /* 9 */ /* Reserved for future enhancement. */
++ gcSL_INT128, /* 10 */ /* Reserved for future enhancement. */
++ gcSL_UINT128, /* 11 */ /* Reserved for future enhancement. */
++ gcSL_FLOAT16, /* 12 */
++ gcSL_FLOAT64, /* 13 */ /* Reserved for future enhancement. */
++ gcSL_FLOAT128, /* 14 */ /* Reserved for future enhancement. */
++}
++gcSL_FORMAT;
++
++/* Destination write enable bits. */
++typedef enum _gcSL_ENABLE
++{
++ gcSL_ENABLE_NONE = 0x0, /* none is enabled, error/uninitialized state */
++ gcSL_ENABLE_X = 0x1,
++ gcSL_ENABLE_Y = 0x2,
++ gcSL_ENABLE_Z = 0x4,
++ gcSL_ENABLE_W = 0x8,
++ /* Combinations. */
++ gcSL_ENABLE_XY = gcSL_ENABLE_X | gcSL_ENABLE_Y,
++ gcSL_ENABLE_XYZ = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_Z,
++ gcSL_ENABLE_XYZW = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_XYW = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_W,
++ gcSL_ENABLE_XZ = gcSL_ENABLE_X | gcSL_ENABLE_Z,
++ gcSL_ENABLE_XZW = gcSL_ENABLE_X | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_XW = gcSL_ENABLE_X | gcSL_ENABLE_W,
++ gcSL_ENABLE_YZ = gcSL_ENABLE_Y | gcSL_ENABLE_Z,
++ gcSL_ENABLE_YZW = gcSL_ENABLE_Y | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_YW = gcSL_ENABLE_Y | gcSL_ENABLE_W,
++ gcSL_ENABLE_ZW = gcSL_ENABLE_Z | gcSL_ENABLE_W,
++}
++gcSL_ENABLE;
++
++/* Possible indices. */
++typedef enum _gcSL_INDEXED
++{
++ gcSL_NOT_INDEXED, /* 0 */
++ gcSL_INDEXED_X, /* 1 */
++ gcSL_INDEXED_Y, /* 2 */
++ gcSL_INDEXED_Z, /* 3 */
++ gcSL_INDEXED_W, /* 4 */
++}
++gcSL_INDEXED;
++
++/* Opcode conditions. */
++typedef enum _gcSL_CONDITION
++{
++ gcSL_ALWAYS, /* 0x0 */
++ gcSL_NOT_EQUAL, /* 0x1 */
++ gcSL_LESS_OR_EQUAL, /* 0x2 */
++ gcSL_LESS, /* 0x3 */
++ gcSL_EQUAL, /* 0x4 */
++ gcSL_GREATER, /* 0x5 */
++ gcSL_GREATER_OR_EQUAL, /* 0x6 */
++ gcSL_AND, /* 0x7 */
++ gcSL_OR, /* 0x8 */
++ gcSL_XOR, /* 0x9 */
++ gcSL_NOT_ZERO, /* 0xA */
++}
++gcSL_CONDITION;
++
++/* Possible source operand types. */
++typedef enum _gcSL_TYPE
++{
++ gcSL_NONE, /* 0x0 */
++ gcSL_TEMP, /* 0x1 */
++ gcSL_ATTRIBUTE, /* 0x2 */
++ gcSL_UNIFORM, /* 0x3 */
++ gcSL_SAMPLER, /* 0x4 */
++ gcSL_CONSTANT, /* 0x5 */
++ gcSL_OUTPUT, /* 0x6 */
++ gcSL_PHYSICAL, /* 0x7 */
++}
++gcSL_TYPE;
++
++/* Swizzle generator macro. */
++#define gcmSWIZZLE(Component1, Component2, Component3, Component4) \
++( \
++ (gcSL_SWIZZLE_ ## Component1 << 0) | \
++ (gcSL_SWIZZLE_ ## Component2 << 2) | \
++ (gcSL_SWIZZLE_ ## Component3 << 4) | \
++ (gcSL_SWIZZLE_ ## Component4 << 6) \
++)
++
++#define gcmExtractSwizzle(Swizzle, Index) \
++ ((gcSL_SWIZZLE) ((((Swizzle) >> (Index * 2)) & 0x3)))
++
++#define gcmComposeSwizzle(SwizzleX, SwizzleY, SwizzleZ, SwizzleW) \
++( \
++ ((SwizzleX) << 0) | \
++ ((SwizzleY) << 2) | \
++ ((SwizzleZ) << 4) | \
++ ((SwizzleW) << 6) \
++)
++
++/* Possible swizzle values. */
++typedef enum _gcSL_SWIZZLE
++{
++ gcSL_SWIZZLE_X, /* 0x0 */
++ gcSL_SWIZZLE_Y, /* 0x1 */
++ gcSL_SWIZZLE_Z, /* 0x2 */
++ gcSL_SWIZZLE_W, /* 0x3 */
++ /* Combinations. */
++ gcSL_SWIZZLE_XXXX = gcmSWIZZLE(X, X, X, X),
++ gcSL_SWIZZLE_YYYY = gcmSWIZZLE(Y, Y, Y, Y),
++ gcSL_SWIZZLE_ZZZZ = gcmSWIZZLE(Z, Z, Z, Z),
++ gcSL_SWIZZLE_WWWW = gcmSWIZZLE(W, W, W, W),
++ gcSL_SWIZZLE_XYYY = gcmSWIZZLE(X, Y, Y, Y),
++ gcSL_SWIZZLE_XZZZ = gcmSWIZZLE(X, Z, Z, Z),
++ gcSL_SWIZZLE_XWWW = gcmSWIZZLE(X, W, W, W),
++ gcSL_SWIZZLE_YZZZ = gcmSWIZZLE(Y, Z, Z, Z),
++ gcSL_SWIZZLE_YWWW = gcmSWIZZLE(Y, W, W, W),
++ gcSL_SWIZZLE_ZWWW = gcmSWIZZLE(Z, W, W, W),
++ gcSL_SWIZZLE_XYZZ = gcmSWIZZLE(X, Y, Z, Z),
++ gcSL_SWIZZLE_XYWW = gcmSWIZZLE(X, Y, W, W),
++ gcSL_SWIZZLE_XZWW = gcmSWIZZLE(X, Z, W, W),
++ gcSL_SWIZZLE_YZWW = gcmSWIZZLE(Y, Z, W, W),
++ gcSL_SWIZZLE_XXYZ = gcmSWIZZLE(X, X, Y, Z),
++ gcSL_SWIZZLE_XYZW = gcmSWIZZLE(X, Y, Z, W),
++ gcSL_SWIZZLE_XYXY = gcmSWIZZLE(X, Y, X, Y),
++ gcSL_SWIZZLE_YYZZ = gcmSWIZZLE(Y, Y, Z, Z),
++ gcSL_SWIZZLE_YYWW = gcmSWIZZLE(Y, Y, W, W),
++ gcSL_SWIZZLE_ZZZW = gcmSWIZZLE(Z, Z, Z, W),
++ gcSL_SWIZZLE_XZZW = gcmSWIZZLE(X, Z, Z, W),
++ gcSL_SWIZZLE_YYZW = gcmSWIZZLE(Y, Y, Z, W),
++
++ gcSL_SWIZZLE_INVALID = 0x7FFFFFFF
++}
++gcSL_SWIZZLE;
++
++typedef enum _gcSL_COMPONENT
++{
++ gcSL_COMPONENT_X, /* 0x0 */
++ gcSL_COMPONENT_Y, /* 0x1 */
++ gcSL_COMPONENT_Z, /* 0x2 */
++ gcSL_COMPONENT_W, /* 0x3 */
++ gcSL_COMPONENT_COUNT /* 0x4 */
++} gcSL_COMPONENT;
++
++#define gcmIsComponentEnabled(Enable, Component) (((Enable) & (1 << (Component))) != 0)
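++
++/*
++    Example (illustrative): gcmSWIZZLE packs one 2-bit component selector
++    per channel, so gcmSWIZZLE(X, Y, Z, W) evaluates to 0xE4, the identity
++    swizzle also exposed as gcSL_SWIZZLE_XYZW. Likewise,
++    gcmIsComponentEnabled(gcSL_ENABLE_XZ, gcSL_COMPONENT_Y) evaluates to 0
++    because only the X and Z write enables are set.
++*/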
++
++/******************************************************************************\
++|*********************************** SHADERS **********************************|
++\******************************************************************************/
++
++/* Shader types. */
++typedef enum _gcSHADER_KIND {
++ gcSHADER_TYPE_UNKNOWN = 0,
++ gcSHADER_TYPE_VERTEX,
++ gcSHADER_TYPE_FRAGMENT,
++ gcSHADER_TYPE_CL,
++ gcSHADER_TYPE_PRECOMPILED,
++ gcSHADER_KIND_COUNT
++} gcSHADER_KIND;
++
++typedef enum _gcGL_DRIVER_VERSION {
++ gcGL_DRIVER_ES11, /* OpenGL ES 1.1 */
++ gcGL_DRIVER_ES20, /* OpenGL ES 2.0 */
++ gcGL_DRIVER_ES30 /* OpenGL ES 3.0 */
++} gcGL_DRIVER_VERSION;
++
++/* gcSHADER objects. */
++typedef struct _gcSHADER * gcSHADER;
++typedef struct _gcATTRIBUTE * gcATTRIBUTE;
++typedef struct _gcUNIFORM * gcUNIFORM;
++typedef struct _gcOUTPUT * gcOUTPUT;
++typedef struct _gcsFUNCTION * gcFUNCTION;
++typedef struct _gcsKERNEL_FUNCTION * gcKERNEL_FUNCTION;
++typedef struct _gcsHINT * gcsHINT_PTR;
++typedef struct _gcSHADER_PROFILER * gcSHADER_PROFILER;
++typedef struct _gcVARIABLE * gcVARIABLE;
++
++struct _gcsHINT
++{
++ /* Number of data transfers for Vertex Shader output. */
++ gctUINT32 vsOutputCount;
++
++ /* Flag whether the VS has point size or not. */
++ gctBOOL vsHasPointSize;
++
++#if gcdUSE_WCLIP_PATCH
++ /* Flag whether the VS gl_position.z depends on gl_position.w
++ it's a hint for wclipping */
++ gctBOOL vsPositionZDependsOnW;
++#endif
++
++ gctBOOL clipW;
++
++ /* Flag whether or not the shader has a KILL instruction. */
++ gctBOOL hasKill;
++
++ /* Element count. */
++ gctUINT32 elementCount;
++
++ /* Component count. */
++ gctUINT32 componentCount;
++
++ /* Number of data transfers for Fragment Shader input. */
++ gctUINT32 fsInputCount;
++
++ /* Maximum number of temporary registers used in FS. */
++ gctUINT32 fsMaxTemp;
++
++ /* Maximum number of temporary registers used in VS. */
++ gctUINT32 vsMaxTemp;
++
++ /* Balance minimum. */
++ gctUINT32 balanceMin;
++
++ /* Balance maximum. */
++ gctUINT32 balanceMax;
++
++ /* Auto-shift balancing. */
++ gctBOOL autoShift;
++
++ /* Flag whether the PS outputs the depth value or not. */
++ gctBOOL psHasFragDepthOut;
++
++ /* Flag whether the ThreadWalker is in PS. */
++ gctBOOL threadWalkerInPS;
++
++ /* HW reg number for position of VS */
++ gctUINT32 hwRegNoOfSIVPos;
++
++#if gcdALPHA_KILL_IN_SHADER
++ /* States to set when alpha kill is enabled. */
++ gctUINT32 killStateAddress;
++ gctUINT32 alphaKillStateValue;
++ gctUINT32 colorKillStateValue;
++
++ /* Shader instruction. */
++ gctUINT32 killInstructionAddress;
++ gctUINT32 alphaKillInstruction[3];
++ gctUINT32 colorKillInstruction[3];
++#endif
++
++#if TEMP_SHADER_PATCH
++ gctUINT32 pachedShaderIdentifier;
++#endif
++};
++
++#if TEMP_SHADER_PATCH
++#define INVALID_SHADER_IDENTIFIER 0xFFFFFFFF
++#endif
++
++/* gcSHADER_TYPE enumeration. */
++typedef enum _gcSHADER_TYPE
++{
++ gcSHADER_FLOAT_X1 = 0, /* 0x00 */
++ gcSHADER_FLOAT_X2, /* 0x01 */
++ gcSHADER_FLOAT_X3, /* 0x02 */
++ gcSHADER_FLOAT_X4, /* 0x03 */
++ gcSHADER_FLOAT_2X2, /* 0x04 */
++ gcSHADER_FLOAT_3X3, /* 0x05 */
++ gcSHADER_FLOAT_4X4, /* 0x06 */
++ gcSHADER_BOOLEAN_X1, /* 0x07 */
++ gcSHADER_BOOLEAN_X2, /* 0x08 */
++ gcSHADER_BOOLEAN_X3, /* 0x09 */
++ gcSHADER_BOOLEAN_X4, /* 0x0A */
++ gcSHADER_INTEGER_X1, /* 0x0B */
++ gcSHADER_INTEGER_X2, /* 0x0C */
++ gcSHADER_INTEGER_X3, /* 0x0D */
++ gcSHADER_INTEGER_X4, /* 0x0E */
++ gcSHADER_SAMPLER_1D, /* 0x0F */
++ gcSHADER_SAMPLER_2D, /* 0x10 */
++ gcSHADER_SAMPLER_3D, /* 0x11 */
++ gcSHADER_SAMPLER_CUBIC, /* 0x12 */
++ gcSHADER_FIXED_X1, /* 0x13 */
++ gcSHADER_FIXED_X2, /* 0x14 */
++ gcSHADER_FIXED_X3, /* 0x15 */
++ gcSHADER_FIXED_X4, /* 0x16 */
++ gcSHADER_IMAGE_2D, /* 0x17 */ /* For OCL. */
++ gcSHADER_IMAGE_3D, /* 0x18 */ /* For OCL. */
++ gcSHADER_SAMPLER, /* 0x19 */ /* For OCL. */
++ gcSHADER_FLOAT_2X3, /* 0x1A */
++ gcSHADER_FLOAT_2X4, /* 0x1B */
++ gcSHADER_FLOAT_3X2, /* 0x1C */
++ gcSHADER_FLOAT_3X4, /* 0x1D */
++ gcSHADER_FLOAT_4X2, /* 0x1E */
++ gcSHADER_FLOAT_4X3, /* 0x1F */
++ gcSHADER_ISAMPLER_2D, /* 0x20 */
++ gcSHADER_ISAMPLER_3D, /* 0x21 */
++ gcSHADER_ISAMPLER_CUBIC, /* 0x22 */
++ gcSHADER_USAMPLER_2D, /* 0x23 */
++ gcSHADER_USAMPLER_3D, /* 0x24 */
++ gcSHADER_USAMPLER_CUBIC, /* 0x25 */
++ gcSHADER_SAMPLER_EXTERNAL_OES, /* 0x26 */
++
++ gcSHADER_UINT_X1, /* 0x27 */
++ gcSHADER_UINT_X2, /* 0x28 */
++ gcSHADER_UINT_X3, /* 0x29 */
++ gcSHADER_UINT_X4, /* 0x2A */
++
++ gcSHADER_UNKONWN_TYPE, /* do not add type after this */
++ gcSHADER_TYPE_COUNT /* must change gcvShaderTypeInfo at the
++ * same time if you add any new type! */}
++gcSHADER_TYPE;
++
++typedef enum _gcSHADER_TYPE_KIND
++{
++ gceTK_UNKOWN,
++ gceTK_FLOAT,
++ gceTK_INT,
++ gceTK_UINT,
++ gceTK_BOOL,
++ gceTK_FIXED,
++ gceTK_SAMPLER,
++ gceTK_IMAGE,
++ gceTK_OTHER
++} gcSHADER_TYPE_KIND;
++
++typedef struct _gcSHADER_TYPEINFO
++{
++ gcSHADER_TYPE type; /* e.g. gcSHADER_FLOAT_2X4 */
++ gctINT components; /* e.g. 4 components */
++ gctINT rows; /* e.g. 2 rows */
++ gcSHADER_TYPE componentType; /* e.g. gcSHADER_FLOAT_X4 */
++ gcSHADER_TYPE_KIND kind; /* e.g. gceTK_FLOAT */
++ gctCONST_STRING name; /* e.g. "FLOAT_2X4" */
++} gcSHADER_TYPEINFO;
++
++extern gcSHADER_TYPEINFO gcvShaderTypeInfo[];
++
++#define gcmType_Comonents(Type) (gcvShaderTypeInfo[Type].components)
++#define gcmType_Rows(Type) (gcvShaderTypeInfo[Type].rows)
++#define gcmType_ComonentType(Type) (gcvShaderTypeInfo[Type].componentType)
++#define gcmType_Kind(Type) (gcvShaderTypeInfo[Type].kind)
++#define gcmType_Name(Type) (gcvShaderTypeInfo[Type].name)
++
++#define gcmType_isMatrix(type) (gcmType_Rows(type) > 1)
++
++typedef enum _gcSHADER_VAR_CATEGORY
++{
++ gcSHADER_VAR_CATEGORY_NORMAL = 0, /* primitive type and its array */
++ gcSHADER_VAR_CATEGORY_STRUCT = 1 /* structure */
++}
++gcSHADER_VAR_CATEGORY;
++
++typedef enum _gceTYPE_QUALIFIER
++{
++ gcvTYPE_QUALIFIER_NONE = 0x0, /* unqualified */
++ gcvTYPE_QUALIFIER_VOLATILE = 0x1, /* volatile */
++}gceTYPE_QUALIFIER;
++
++typedef gctUINT16 gctTYPE_QUALIFIER;
++
++#if GC_ENABLE_LOADTIME_OPT
++typedef struct _gcSHADER_TYPE_INFO
++{
++ gcSHADER_TYPE type; /* e.g. gcSHADER_FLOAT_2X3 is the type */
++ gctCONST_STRING name; /* the name of the type: "gcSHADER_FLOAT_2X3" */
++ gcSHADER_TYPE baseType; /* its base type is gcSHADER_FLOAT_2 */
++ gctINT components; /* it has 2 components */
++ gctINT rows; /* and 3 rows */
++ gctINT size; /* the size in byte */
++ gctINT size; /* the size in bytes */
++
++extern gcSHADER_TYPE_INFO shader_type_info[];
++
++enum gceLTCDumpOption {
++ gceLTC_DUMP_UNIFORM = 0x0001,
++ gceLTC_DUMP_EVALUATION = 0x0002,
++ gceLTC_DUMP_EXPESSION = 0x0004,
++ gceLTC_DUMP_COLLECTING = 0x0008,
++};
++
++gctBOOL gcDumpOption(gctINT Opt);
++
++#endif /* GC_ENABLE_LOADTIME_OPT */
++
++#define IS_MATRIX_TYPE(type) \
++ (((type >= gcSHADER_FLOAT_2X2) && (type <= gcSHADER_FLOAT_4X4)) || \
++ ((type >= gcSHADER_FLOAT_2X3) && (type <= gcSHADER_FLOAT_4X3)))
++
++/* gcSHADER_PRECISION enumeration. */
++typedef enum _gcSHADER_PRECISION
++{
++ gcSHADER_PRECISION_DEFAULT, /* 0x00 */
++ gcSHADER_PRECISION_HIGH, /* 0x01 */
++ gcSHADER_PRECISION_MEDIUM, /* 0x02 */
++ gcSHADER_PRECISION_LOW, /* 0x03 */
++}
++gcSHADER_PRECISION;
++
++/* Shader flags. */
++typedef enum _gceSHADER_FLAGS
++{
++ gcvSHADER_NO_OPTIMIZATION = 0x00,
++ gcvSHADER_DEAD_CODE = 0x01,
++ gcvSHADER_RESOURCE_USAGE = 0x02,
++ gcvSHADER_OPTIMIZER = 0x04,
++ gcvSHADER_USE_GL_Z = 0x08,
++ /*
++ The GC family of GPU cores model GC860 and under require the Z
++ to be from 0 <= z <= w.
++ However, OpenGL specifies the Z to be from -w <= z <= w. So we
++ have to do a conversion here:
++
++ z = (z + w) / 2.
++
++ So here we append two instructions to the vertex shader.
++ */
++ gcvSHADER_USE_GL_POSITION = 0x10,
++ gcvSHADER_USE_GL_FACE = 0x20,
++ gcvSHADER_USE_GL_POINT_COORD = 0x40,
++ gcvSHADER_LOADTIME_OPTIMIZER = 0x80,
++#if gcdALPHA_KILL_IN_SHADER
++ gcvSHADER_USE_ALPHA_KILL = 0x100,
++#endif
++
++#if gcdPRE_ROTATION && (ANDROID_SDK_VERSION >= 14)
++ gcvSHADER_VS_PRE_ROTATION = 0x200,
++#endif
++
++#if TEMP_INLINE_ALL_EXPANSION
++ gcvSHADER_INLINE_ALL_EXPANSION = 0x400,
++#endif
++}
++gceSHADER_FLAGS;
++
++gceSTATUS
++gcSHADER_CheckClipW(
++ IN gctCONST_STRING VertexSource,
++ IN gctCONST_STRING FragmentSource,
++ OUT gctBOOL * clipW);
++
++/*******************************************************************************
++** gcSHADER_GetUniformVectorCount
++**
++** Get the number of vectors used by uniforms for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of vectors.
++*/
++gceSTATUS
++gcSHADER_GetUniformVectorCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcOptimizer Data Structures
++*******************************************************************************/
++typedef enum _gceSHADER_OPTIMIZATION
++{
++ /* No optimization. */
++ gcvOPTIMIZATION_NONE,
++
++ /* Flow graph construction. */
++ gcvOPTIMIZATION_CONSTRUCTION = 1 << 0,
++
++ /* Dead code elimination. */
++ gcvOPTIMIZATION_DEAD_CODE = 1 << 1,
++
++ /* Redundant move instruction elimination. */
++ gcvOPTIMIZATION_REDUNDANT_MOVE = 1 << 2,
++
++ /* Inline expansion. */
++ gcvOPTIMIZATION_INLINE_EXPANSION = 1 << 3,
++
++ /* Constant propagation. */
++ gcvOPTIMIZATION_CONSTANT_PROPAGATION = 1 << 4,
++
++ /* Redundant bounds/checking elimination. */
++ gcvOPTIMIZATION_REDUNDANT_CHECKING = 1 << 5,
++
++ /* Loop invariant movement. */
++ gcvOPTIMIZATION_LOOP_INVARIANT = 1 << 6,
++
++ /* Induction variable removal. */
++ gcvOPTIMIZATION_INDUCTION_VARIABLE = 1 << 7,
++
++ /* Common subexpression elimination. */
++ gcvOPTIMIZATION_COMMON_SUBEXPRESSION = 1 << 8,
++
++ /* Control flow/branch optimization. */
++ gcvOPTIMIZATION_CONTROL_FLOW = 1 << 9,
++
++ /* Vector component operation merge. */
++ gcvOPTIMIZATION_VECTOR_INSTRUCTION_MERGE = 1 << 10,
++
++ /* Algebraic simplification. */
++ gcvOPTIMIZATION_ALGEBRAIC_SIMPLIFICATION = 1 << 11,
++
++ /* Pattern matching and replacing. */
++ gcvOPTIMIZATION_PATTERN_MATCHING = 1 << 12,
++
++ /* Interprocedural constant propagation. */
++ gcvOPTIMIZATION_IP_CONSTANT_PROPAGATION = 1 << 13,
++
++ /* Interprocedural register optimization. */
++ gcvOPTIMIZATION_IP_REGISTRATION = 1 << 14,
++
++ /* Optimization option number. */
++ gcvOPTIMIZATION_OPTION_NUMBER = 1 << 15,
++
++ /* Loadtime constant. */
++ gcvOPTIMIZATION_LOADTIME_CONSTANT = 1 << 16,
++
++ /* MAD instruction optimization. */
++ gcvOPTIMIZATION_MAD_INSTRUCTION = 1 << 17,
++
++ /* Special optimization for LOAD SW workaround. */
++ gcvOPTIMIZATION_LOAD_SW_WORKAROUND = 1 << 18,
++
++ /* Move code into conditional block if possible. */
++ gcvOPTIMIZATION_CONDITIONALIZE = 1 << 19,
++
++ /* experimental: power optimization mode
++ 1. add extra dummy texld to tune performance
++ 2. insert NOP after high power instructions
++ 3. split high power vec3/vec4 instruction to vec2/vec1 operations
++ 4. ...
++ */
++ gcvOPTIMIZATION_POWER_OPTIMIZATION = 1 << 20,
++
++ /* optimize varying packing */
++ gcvOPTIMIZATION_VARYINGPACKING = 1 << 22,
++
++#if TEMP_INLINE_ALL_EXPANSION
++ gcvOPTIMIZATION_INLINE_ALL_EXPANSION = 1 << 23,
++#endif
++
++ /* Full optimization. */
++ /* Note that gcvOPTIMIZATION_LOAD_SW_WORKAROUND is off. */
++ gcvOPTIMIZATION_FULL = 0x7FFFFFFF &
++ ~gcvOPTIMIZATION_LOAD_SW_WORKAROUND &
++ ~gcvOPTIMIZATION_INLINE_ALL_EXPANSION &
++ ~gcvOPTIMIZATION_POWER_OPTIMIZATION,
++
++ /* Optimization Unit Test flag. */
++ gcvOPTIMIZATION_UNIT_TEST = 1 << 31
++}
++gceSHADER_OPTIMIZATION;
++
++typedef enum _gceOPTIMIZATION_VaryingPaking
++{
++ gcvOPTIMIZATION_VARYINGPACKING_NONE = 0,
++ gcvOPTIMIZATION_VARYINGPACKING_NOSPLIT,
++ gcvOPTIMIZATION_VARYINGPACKING_SPLIT
++} gceOPTIMIZATION_VaryingPaking;
++
++typedef struct _gcOPTIMIZER_OPTION
++{
++ gceSHADER_OPTIMIZATION optFlags;
++
++ /* debug & dump options:
++
++ VC_OPTION=-DUMP:SRC:OPT|:OPTV|:CG|:CGV:|ALL|ALLV
++
++ SRC: dump shader source code
++ OPT: dump incoming and final IR
++ OPTV: dump result IR in each optimization phase
++ CG: dump generated machine code
++ CGV: dump BE tree and optimization detail
++
++ ALL = SRC|OPT|CG
++ ALLV = SRC|OPT|OPTV|CG|CGV
++ */
++ gctBOOL dumpShaderSource; /* dump shader source code */
++ gctBOOL dumpOptimizer; /* dump incoming and final IR */
++ gctBOOL dumpOptimizerVerbose; /* dump result IR in each optimization phase */
++ gctBOOL dumpBEGenertedCode; /* dump generated machine code */
++ gctBOOL dumpBEVerbose; /* dump BE tree and optimization detail */
++ gctBOOL dumpBEFinalIR; /* dump BE final IR */
++
++ /* Code generation */
++
++ /* Varying Packing:
++
++ VC_OPTION=-PACKVARYING:[0-2]|:T[-]m[,n]|:LshaderIdx,min,max
++
++ 0: turn off varying packing
++ 1: pack varyings, do not split any varying
++ 2: pack varyings, may split to make fully packed output
++
++ Tm: only packing shader pair which vertex shader id is m
++ Tm,n: only packing shader pair which vertex shader id
++ is in range of [m, n]
++ T-m: do not packing shader pair which vertex shader id is m
++ T-m,n: do not packing shader pair which vertex shader id
++ is in range of [m, n]
++
++ LshaderIdx,min,max : set load balance (min, max) for shaderIdx
++ if shaderIdx is -1, all shaders are impacted
++ newMin = origMin * (min/100.);
++ newMax = origMax * (max/100.);
++ */
++ gceOPTIMIZATION_VaryingPaking packVarying;
++ gctINT _triageStart;
++ gctINT _triageEnd;
++ gctINT _loadBalanceShaderIdx;
++ gctINT _loadBalanceMin;
++ gctINT _loadBalanceMax;
++
++ /* Do not generate immediate
++
++ VC_OPTION=-NOIMM
++
++ Force generating immediates even if the machine model doesn't support them,
++ for testing purposes only
++
++ VC_OPTION=-FORCEIMM
++ */
++ gctBOOL noImmediate;
++ gctBOOL forceImmediate;
++
++ /* Power reduction mode options */
++ gctBOOL needPowerOptimization;
++
++ /* Patch TEXLD instruction by adding dummy texld
++ (can be used to tune GPU power usage):
++ for every TEXLD we seen, add n dummy TEXLD
++
++ it can be enabled by environment variable:
++
++ VC_OPTION=-PATCH_TEXLD:M:N
++
++ (for each M texld, add N dummy texld)
++ */
++ gctINT patchEveryTEXLDs;
++ gctINT patchDummyTEXLDs;
++
++ /* Insert NOP after high power consumption instructions
++
++ VC_OPTION="-INSERTNOP:MUL:MULLO:DP3:DP4:SEENTEXLD"
++ */
++ gctBOOL insertNOP;
++ gctBOOL insertNOPAfterMUL;
++ gctBOOL insertNOPAfterMULLO;
++ gctBOOL insertNOPAfterDP3;
++ gctBOOL insertNOPAfterDP4;
++ gctBOOL insertNOPOnlyWhenTexldSeen;
++
++ /* split MAD to MUL and ADD:
++
++ VC_OPTION=-SPLITMAD
++ */
++ gctBOOL splitMAD;
++
++ /* Convert vec3/vec4 operations to multiple vec2/vec1 operations
++
++ VC_OPTION=-SPLITVEC:MUL:MULLO:DP3:DP4
++ */
++ gctBOOL splitVec;
++ gctBOOL splitVec4MUL;
++ gctBOOL splitVec4MULLO;
++ gctBOOL splitVec4DP3;
++ gctBOOL splitVec4DP4;
++
++ /* turn/off features:
++
++ VC_OPTION=-F:n,[0|1]
++ Note: n must be decimal number
++ */
++ gctUINT featureBits;
++
++ /* inline level (default 2 at O1):
++
++ VC_OPTION=-INLINELEVEL:[0-3]
++ 0: no inline
++ 1: only inline functions that are called exactly once, or small functions
++ 2: inline functions called fewer than 5 times, or medium-size functions
++ 3: inline everything possible
++ */
++ gctUINT inlineLevel;
++} gcOPTIMIZER_OPTION;
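++
++/*
++    Example settings (illustrative, following the VC_OPTION syntax
++    documented in the comments above):
++
++        VC_OPTION=-DUMP:SRC:OPT      dump shader source plus incoming/final IR
++        VC_OPTION=-INLINELEVEL:3     inline everything possible
++        VC_OPTION=-PACKVARYING:2     pack varyings, splitting where needed
++*/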
++
++extern gcOPTIMIZER_OPTION theOptimizerOption;
++#define gcmGetOptimizerOption() gcGetOptimizerOption()
++
++#define gcmOPT_DUMP_SHADER_SRC() \
++ (gcmGetOptimizerOption()->dumpShaderSource != 0)
++#define gcmOPT_DUMP_OPTIMIZER() \
++ (gcmGetOptimizerOption()->dumpOptimizer != 0 || \
++ gcmOPT_DUMP_OPTIMIZER_VERBOSE() )
++#define gcmOPT_DUMP_OPTIMIZER_VERBOSE() \
++ (gcmGetOptimizerOption()->dumpOptimizerVerbose != 0)
++#define gcmOPT_DUMP_CODEGEN() \
++ (gcmGetOptimizerOption()->dumpBEGenertedCode != 0 || \
++ gcmOPT_DUMP_CODEGEN_VERBOSE() )
++#define gcmOPT_DUMP_CODEGEN_VERBOSE() \
++ (gcmGetOptimizerOption()->dumpBEVerbose != 0)
++#define gcmOPT_DUMP_FINAL_IR() \
++ (gcmGetOptimizerOption()->dumpBEFinalIR != 0)
++
++#define gcmOPT_SET_DUMP_SHADER_SRC(v) \
++ gcmGetOptimizerOption()->dumpShaderSource = (v)
++
++#define gcmOPT_PATCH_TEXLD() (gcmGetOptimizerOption()->patchDummyTEXLDs != 0)
++#define gcmOPT_INSERT_NOP() (gcmGetOptimizerOption()->insertNOP == gcvTRUE)
++#define gcmOPT_SPLITMAD() (gcmGetOptimizerOption()->splitMAD == gcvTRUE)
++#define gcmOPT_SPLITVEC() (gcmGetOptimizerOption()->splitVec == gcvTRUE)
++
++#define gcmOPT_NOIMMEDIATE() (gcmGetOptimizerOption()->noImmediate == gcvTRUE)
++#define gcmOPT_FORCEIMMEDIATE() (gcmGetOptimizerOption()->forceImmediate == gcvTRUE)
++
++#define gcmOPT_PACKVARYING() (gcmGetOptimizerOption()->packVarying)
++#define gcmOPT_PACKVARYING_triageStart() (gcmGetOptimizerOption()->_triageStart)
++#define gcmOPT_PACKVARYING_triageEnd() (gcmGetOptimizerOption()->_triageEnd)
++
++#define gcmOPT_INLINELEVEL() (gcmGetOptimizerOption()->inlineLevel)
++
++/* Setters */
++#define gcmOPT_SetPatchTexld(m,n) (gcmGetOptimizerOption()->patchEveryTEXLDs = (m),\
++ gcmGetOptimizerOption()->patchDummyTEXLDs = (n))
++#define gcmOPT_SetSplitVecMUL() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4MUL = gcvTRUE)
++#define gcmOPT_SetSplitVecMULLO() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4MULLO = gcvTRUE)
++#define gcmOPT_SetSplitVecDP3() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4DP3 = gcvTRUE)
++#define gcmOPT_SetSplitVecDP4() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4DP4 = gcvTRUE)
++
++#define gcmOPT_SetPackVarying(v) (gcmGetOptimizerOption()->packVarying = v)
++
++#define FB_LIVERANGE_FIX1 0x0001
++
++
++#define PredefinedDummySamplerId 8
++
++/* Function argument qualifier */
++typedef enum _gceINPUT_OUTPUT
++{
++ gcvFUNCTION_INPUT,
++ gcvFUNCTION_OUTPUT,
++ gcvFUNCTION_INOUT
++}
++gceINPUT_OUTPUT;
++
++/* Kernel function property flags. */
++typedef enum _gcePROPERTY_FLAGS
++{
++ gcvPROPERTY_REQD_WORK_GRP_SIZE = 0x01
++}
++gceKERNEL_FUNCTION_PROPERTY_FLAGS;
++
++/* Uniform flags. */
++typedef enum _gceUNIFORM_FLAGS
++{
++ gcvUNIFORM_KERNEL_ARG = 0x01,
++ gcvUNIFORM_KERNEL_ARG_LOCAL = 0x02,
++ gcvUNIFORM_KERNEL_ARG_SAMPLER = 0x04,
++ gcvUNIFORM_LOCAL_ADDRESS_SPACE = 0x08,
++ gcvUNIFORM_PRIVATE_ADDRESS_SPACE = 0x10,
++ gcvUNIFORM_CONSTANT_ADDRESS_SPACE = 0x20,
++ gcvUNIFORM_GLOBAL_SIZE = 0x40,
++ gcvUNIFORM_LOCAL_SIZE = 0x80,
++ gcvUNIFORM_NUM_GROUPS = 0x100,
++ gcvUNIFORM_GLOBAL_OFFSET = 0x200,
++ gcvUNIFORM_WORK_DIM = 0x400,
++ gcvUNIFORM_KERNEL_ARG_CONSTANT = 0x800,
++ gcvUNIFORM_KERNEL_ARG_LOCAL_MEM_SIZE = 0x1000,
++ gcvUNIFORM_KERNEL_ARG_PRIVATE = 0x2000,
++ gcvUNIFORM_LOADTIME_CONSTANT = 0x4000,
++ gcvUNIFORM_IS_ARRAY = 0x8000,
++}
++gceUNIFORM_FLAGS;
++
++#define gcdUNIFORM_KERNEL_ARG_MASK (gcvUNIFORM_KERNEL_ARG | \
++ gcvUNIFORM_KERNEL_ARG_LOCAL | \
++ gcvUNIFORM_KERNEL_ARG_SAMPLER | \
++ gcvUNIFORM_KERNEL_ARG_PRIVATE | \
++ gcvUNIFORM_KERNEL_ARG_CONSTANT)
++
++typedef enum _gceVARIABLE_UPDATE_FLAGS
++{
++ gcvVARIABLE_UPDATE_NOUPDATE = 0,
++ gcvVARIABLE_UPDATE_TEMPREG,
++ gcvVARIABLE_UPDATE_TYPE_QUALIFIER,
++}gceVARIABLE_UPDATE_FLAGS;
++
++typedef struct _gcMACHINE_INST
++{
++ gctUINT state0;
++ gctUINT state1;
++ gctUINT state2;
++ gctUINT state3;
++}gcMACHINE_INST, *gcMACHINE_INST_PTR;
++
++typedef struct _gcMACHINECODE
++{
++ gcMACHINE_INST_PTR pCode; /* machine code */
++ gctUINT instCount; /* 128-bit count */
++ gctUINT maxConstRegNo;
++ gctUINT maxTempRegNo;
++ gctUINT endPCOfMainRoutine;
++}gcMACHINECODE, *gcMACHINECODE_PTR;
++
++typedef enum NP2_ADDRESS_MODE
++{
++ NP2_ADDRESS_MODE_CLAMP = 0,
++ NP2_ADDRESS_MODE_REPEAT = 1,
++ NP2_ADDRESS_MODE_MIRROR = 2
++}NP2_ADDRESS_MODE;
++
++typedef struct _gcNPOT_PATCH_PARAM
++{
++ gctINT samplerSlot;
++ NP2_ADDRESS_MODE addressMode[3];
++ gctINT texDimension; /* 2 or 3 */
++}gcNPOT_PATCH_PARAM, *gcNPOT_PATCH_PARAM_PTR;
++
++typedef struct _gcZBIAS_PATCH_PARAM
++{
++ /* Driver uses this to program uniform that designating zbias */
++ /* Driver uses this to program the uniform designating zbias */
++ gctINT channel;
++}gcZBIAS_PATCH_PARAM, *gcZBIAS_PATCH_PARAM_PTR;
++
++void
++gcGetOptionFromEnv(
++ IN OUT gcOPTIMIZER_OPTION * Option
++ );
++
++void
++gcSetOptimizerOption(
++ IN gceSHADER_FLAGS Flags
++ );
++
++gcOPTIMIZER_OPTION *
++gcGetOptimizerOption();
++
++/*******************************************************************************
++** gcSHADER_SetCompilerVersion
++**
++** Set the compiler version of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to gcSHADER object
++**
++** gctINT *Version
++** Pointer to a two word version
++*/
++gceSTATUS
++gcSHADER_SetCompilerVersion(
++ IN gcSHADER Shader,
++ IN gctUINT32 *Version
++ );
++
++/*******************************************************************************
++** gcSHADER_GetCompilerVersion
++**
++** Get the compiler version of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR *CompilerVersion.
++** Pointer to holder of returned compilerVersion pointer
++*/
++gceSTATUS
++gcSHADER_GetCompilerVersion(
++ IN gcSHADER Shader,
++ OUT gctUINT32_PTR *CompilerVersion
++ );
++
++/*******************************************************************************
++** gcSHADER_GetType
++**
++** Get the gcSHADER object's type.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctINT *Type.
++** Pointer to return shader type.
++*/
++gceSTATUS
++gcSHADER_GetType(
++ IN gcSHADER Shader,
++ OUT gctINT *Type
++ );
++
++gctUINT
++gcSHADER_NextId();
++/*******************************************************************************
++** gcSHADER_Construct
++********************************************************************************
++**
++** Construct a new gcSHADER object.
++**
++** INPUT:
++**
++** gcoOS Hal
++** Pointer to an gcoHAL object.
++**
++** gctINT ShaderType
++** Type of gcSHADER object to create. 'ShaderType' can be one of the
++** following:
++**
++** gcSHADER_TYPE_VERTEX Vertex shader.
++** gcSHADER_TYPE_FRAGMENT Fragment shader.
++**
++** OUTPUT:
++**
++** gcSHADER * Shader
++** Pointer to a variable receiving the gcSHADER object pointer.
++*/
++gceSTATUS
++gcSHADER_Construct(
++ IN gcoHAL Hal,
++ IN gctINT ShaderType,
++ OUT gcSHADER * Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_Destroy
++********************************************************************************
++**
++** Destroy a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Destroy(
++ IN gcSHADER Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_Copy
++********************************************************************************
++**
++** Copy a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSHADER Source
++** Pointer to a gcSHADER object that will be copied.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Copy(
++ IN gcSHADER Shader,
++ IN gcSHADER Source
++ );
++
++/*******************************************************************************
++** gcSHADER_LoadHeader
++**
++** Load a gcSHADER object from a binary buffer. The binary buffer is laid out
++** as follows:
++** // Six word header
++** // Signature, must be 'S','H','D','R'.
++** gctINT8 signature[4];
++** gctUINT32 binFileVersion;
++** gctUINT32 compilerVersion[2];
++** gctUINT32 gcSLVersion;
++** gctUINT32 binarySize;
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++** The shader type will be returned if the type in the shader object is not gcSHADER_TYPE_PRECOMPILED.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++** nothing
++**
++*/
++gceSTATUS
++gcSHADER_LoadHeader(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize,
++ OUT gctUINT32 * ShaderVersion
++ );
++
++/*******************************************************************************
++** gcSHADER_LoadKernel
++**
++** Load a kernel function given by name into gcSHADER object
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSTRING KernelName
++** Pointer to a kernel function name
++**
++** OUTPUT:
++** nothing
++**
++*/
++gceSTATUS
++gcSHADER_LoadKernel(
++ IN gcSHADER Shader,
++ IN gctSTRING KernelName
++ );
++
++/*******************************************************************************
++** gcSHADER_Load
++********************************************************************************
++**
++** Load a gcSHADER object from a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Load(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_Save
++********************************************************************************
++**
++** Save a gcSHADER object to a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer to be used as storage for the gcSHADER
++** object. If 'Buffer' is gcvNULL, the gcSHADER object will not be saved,
++** but the number of bytes required to hold the binary output for the
++** gcSHADER object will be returned.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable holding the number of bytes allocated in
++** 'Buffer'. Only valid if 'Buffer' is not gcvNULL.
++**
++** OUTPUT:
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable receiving the number of bytes required to hold
++** the binary form of the gcSHADER object.
++*/
++gceSTATUS
++gcSHADER_Save(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN OUT gctSIZE_T * BufferSize
++ );
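++
++/*
++    Usage sketch (illustrative) of the two-pass pattern described above:
++    call gcSHADER_Save with a gcvNULL buffer to query the required size,
++    allocate that many bytes (any allocator will do; gcoOS_Allocate from
++    gc_hal_base.h is assumed here), then call it again to fill the buffer.
++
++        gctSIZE_T bytes = 0;
++        gctPOINTER buffer = gcvNULL;
++
++        gcmONERROR(gcSHADER_Save(Shader, gcvNULL, &bytes));
++        gcmONERROR(gcoOS_Allocate(gcvNULL, bytes, &buffer));
++        gcmONERROR(gcSHADER_Save(Shader, buffer, &bytes));
++*/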
++
++/*******************************************************************************
++** gcSHADER_LoadEx
++********************************************************************************
++**
++** Load a gcSHADER object from a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_LoadEx(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_SaveEx
++********************************************************************************
++**
++** Save a gcSHADER object to a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer to be used as storage for the gcSHADER
++** object. If 'Buffer' is gcvNULL, the gcSHADER object will not be saved,
++** but the number of bytes required to hold the binary output for the
++** gcSHADER object will be returned.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable holding the number of bytes allocated in
++** 'Buffer'. Only valid if 'Buffer' is not gcvNULL.
++**
++** OUTPUT:
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable receiving the number of bytes required to hold
++** the binary form of the gcSHADER object.
++*/
++gceSTATUS
++gcSHADER_SaveEx(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN OUT gctSIZE_T * BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateAttributes
++**
++** Reallocate an array of pointers to gcATTRIBUTE objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateAttributes(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddAttribute
++********************************************************************************
++**
++** Add an attribute to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the attribute to add.
++**
++** gcSHADER_TYPE Type
++** Type of the attribute to add.
++**
++** gctSIZE_T Length
++** Array length of the attribute to add. 'Length' must be at least 1.
++**
++** gctBOOL IsTexture
++** gcvTRUE if the attribute is used as a texture coordinate, gcvFALSE if not.
++**
++** OUTPUT:
++**
++** gcATTRIBUTE * Attribute
++** Pointer to a variable receiving the gcATTRIBUTE object pointer.
++*/
++gceSTATUS
++gcSHADER_AddAttribute(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctBOOL IsTexture,
++ OUT gcATTRIBUTE * Attribute
++ );
++
++/*******************************************************************************
++** gcSHADER_GetAttributeCount
++********************************************************************************
++**
++** Get the number of attributes for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of attributes.
++*/
++gceSTATUS
++gcSHADER_GetAttributeCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetAttribute
++********************************************************************************
++**
++** Get the gcATTRIBUTE object pointer for an indexed attribute for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the attribute to retrieve.
++**
++** OUTPUT:
++**
++** gcATTRIBUTE * Attribute
++** Pointer to a variable receiving the gcATTRIBUTE object pointer.
++*/
++gceSTATUS
++gcSHADER_GetAttribute(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcATTRIBUTE * Attribute
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateUniforms
++**
++** Reallocate an array of pointers to gcUNIFORM objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateUniforms(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniform
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddUniform(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddPreRotationUniform
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** gctINT col
++** Which uniform.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddPreRotationUniform(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctINT col,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniformEx
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gcSHADER_PRECISION precision
++** Precision of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddUniformEx(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gcSHADER_PRECISION precision,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniformEx1
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gcSHADER_PRECISION precision
++** Precision of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** gcSHADER_VAR_CATEGORY varCategory
++** Variable category, normal or struct.
++**
++** gctUINT16 numStructureElement
++** If struct, its element number.
++**
++** gctINT16 parent
++** If struct, parent index in gcSHADER.variables.
++**
++** gctINT16 prevSibling
++** If struct, previous sibling index in gcSHADER.variables.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++**
++** gctINT16* ThisUniformIndex
++** Returned index of this uniform in gcSHADER.
++*/
++gceSTATUS
++gcSHADER_AddUniformEx1(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gcSHADER_PRECISION precision,
++ IN gctSIZE_T Length,
++ IN gctINT IsArray,
++ IN gcSHADER_VAR_CATEGORY varCategory,
++ IN gctUINT16 numStructureElement,
++ IN gctINT16 parent,
++ IN gctINT16 prevSibling,
++ OUT gctINT16* ThisUniformIndex,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetUniformCount
++********************************************************************************
++**
++** Get the number of uniforms for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of uniforms.
++*/
++gceSTATUS
++gcSHADER_GetUniformCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetPreRotationUniform
++********************************************************************************
++**
++** Get the preRotate Uniform.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gcUNIFORM ** pUniform
++** Pointer to a preRotation uniforms array.
++*/
++gceSTATUS
++gcSHADER_GetPreRotationUniform(
++ IN gcSHADER Shader,
++ OUT gcUNIFORM ** pUniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetUniform
++********************************************************************************
++**
++** Get the gcUNIFORM object pointer for an indexed uniform for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the uniform to retrieve.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_GetUniform(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcUNIFORM * Uniform
++ );
++
++
++/*******************************************************************************
++** gcSHADER_GetUniformIndexingRange
++********************************************************************************
++**
++** Get the range of uniforms covered when indexing from a start uniform at a given offset.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctINT uniformIndex
++** Index of the start uniform.
++**
++** gctINT offset
++** Offset to indexing.
++**
++** OUTPUT:
++**
++** gctINT * LastUniformIndex
++** Pointer to index of last uniform in indexing range.
++**
++** gctINT * OffsetUniformIndex
++** Pointer to the index of the uniform that is indexed at 'offset'.
++**
++** gctINT * DeviationInOffsetUniform
++** Pointer to the remaining offset within that uniform.
++*/
++gceSTATUS
++gcSHADER_GetUniformIndexingRange(
++ IN gcSHADER Shader,
++ IN gctINT uniformIndex,
++ IN gctINT offset,
++ OUT gctINT * LastUniformIndex,
++ OUT gctINT * OffsetUniformIndex,
++ OUT gctINT * DeviationInOffsetUniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetKernelFunction
++**
++** Get the gcKERNEL_FUNCTION object pointer for an indexed kernel function for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the kernel function to retrieve.
++**
++** OUTPUT:
++**
++** gcKERNEL_FUNCTION * KernelFunction
++** Pointer to a variable receiving the gcKERNEL_FUNCTION object pointer.
++*/
++gceSTATUS
++gcSHADER_GetKernelFunction(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_GetKernelFunctionByName(
++ IN gcSHADER Shader,
++ IN gctSTRING KernelName,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++/*******************************************************************************
++** gcSHADER_GetKernelFunctionCount
++**
++** Get the number of kernel functions for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of kernel functions.
++*/
++gceSTATUS
++gcSHADER_GetKernelFunctionCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
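++
++/* Usage sketch (illustrative only; variable names and the gcvSTATUS_OK
++** success code are assumptions): enumerating the kernel functions of a
++** shader and reading their names.
++**
++**     gctSIZE_T count, i, nameLength;
++**     gctCONST_STRING name;
++**     gcKERNEL_FUNCTION kernel;
++**
++**     if (gcSHADER_GetKernelFunctionCount(Shader, &count) == gcvSTATUS_OK)
++**     {
++**         for (i = 0; i < count; i++)
++**         {
++**             gcSHADER_GetKernelFunction(Shader, (gctUINT) i, &kernel);
++**             gcKERNEL_FUNCTION_GetName(kernel, &nameLength, &name);
++**         }
++**     }
++*/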
++
++/*******************************************************************************
++** gcSHADER_ReallocateOutputs
++**
++** Reallocate an array of pointers to gcOUTPUT objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateOutputs(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOutput
++********************************************************************************
++**
++** Add an output to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the output to add.
++**
++** gcSHADER_TYPE Type
++** Type of the output to add.
++**
++** gctSIZE_T Length
++** Array length of the output to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the output value.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOutput(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister
++ );
++
++gceSTATUS
++gcSHADER_AddOutputIndexed(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gctSIZE_T Index,
++ IN gctUINT16 TempIndex
++ );
++
++/*******************************************************************************
++** gcSHADER_GetOutputCount
++********************************************************************************
++**
++** Get the number of outputs for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of outputs.
++*/
++gceSTATUS
++gcSHADER_GetOutputCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetOutput
++********************************************************************************
++**
++** Get the gcOUTPUT object pointer for an indexed output for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of output to retrieve.
++**
++** OUTPUT:
++**
++** gcOUTPUT * Output
++** Pointer to a variable receiving the gcOUTPUT object pointer.
++*/
++gceSTATUS
++gcSHADER_GetOutput(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcOUTPUT * Output
++ );
++
++
++/*******************************************************************************
++** gcSHADER_GetOutputByName
++********************************************************************************
++**
++** Get the gcOUTPUT object pointer for this shader by output name.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSTRING name
++** Name of output to retrieve.
++**
++** gctSIZE_T nameLength
++** Length of the name to retrieve.
++**
++** OUTPUT:
++**
++** gcOUTPUT * Output
++** Pointer to a variable receiving the gcOUTPUT object pointer.
++*/
++gceSTATUS
++gcSHADER_GetOutputByName(
++ IN gcSHADER Shader,
++ IN gctSTRING name,
++ IN gctSIZE_T nameLength,
++ OUT gcOUTPUT * Output
++ );
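++
++/* Usage sketch (illustrative only; the output name "gl_Position", its length,
++** and the variable names are assumptions for this example):
++**
++**     gcOUTPUT output;
++**
++**     gcSHADER_GetOutputByName(Shader, "gl_Position", 11, &output);
++*/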
++
++/*******************************************************************************
++** gcSHADER_ReallocateVariables
++**
++** Reallocate an array of pointers to gcVARIABLE objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateVariables(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddVariable
++********************************************************************************
++**
++** Add a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the variable to add.
++**
++** gcSHADER_TYPE Type
++** Type of the variable to add.
++**
++** gctSIZE_T Length
++** Array length of the variable to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the variable value.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddVariable(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister
++ );
++
++
++/*******************************************************************************
++** gcSHADER_AddVariableEx
++********************************************************************************
++**
++** Add a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the variable to add.
++**
++** gcSHADER_TYPE Type
++** Type of the variable to add.
++**
++** gctSIZE_T Length
++** Array length of the variable to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the variable value.
++**
++** gcSHADER_VAR_CATEGORY varCategory
++** Variable category, normal or struct.
++**
++** gctUINT16 numStructureElement
++** If struct, the number of its elements.
++**
++** gctINT16 parent
++** If struct, parent index in gcSHADER.variables.
++**
++** gctINT16 prevSibling
++** If struct, previous sibling index in gcSHADER.variables.
++**
++** OUTPUT:
++**
++** gctINT16* ThisVarIndex
++** Pointer to a variable receiving the index of this variable in gcSHADER.
++*/
++gceSTATUS
++gcSHADER_AddVariableEx(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister,
++ IN gcSHADER_VAR_CATEGORY varCategory,
++ IN gctUINT16 numStructureElement,
++ IN gctINT16 parent,
++ IN gctINT16 prevSibling,
++ OUT gctINT16* ThisVarIndex
++ );
++
++/*******************************************************************************
++** gcSHADER_UpdateVariable
++********************************************************************************
++**
++** Update a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of variable to retrieve.
++**
++** gceVARIABLE_UPDATE_FLAGS flag
++** Flag which property of variable will be updated.
++**
++** gctUINT newValue
++** New value to update.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_UpdateVariable(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ IN gceVARIABLE_UPDATE_FLAGS flag,
++ IN gctUINT newValue
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariableCount
++********************************************************************************
++**
++** Get the number of variables for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of variables.
++*/
++gceSTATUS
++gcSHADER_GetVariableCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariable
++********************************************************************************
++**
++** Get the gcVARIABLE object pointer for an indexed variable for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of variable to retrieve.
++**
++** OUTPUT:
++**
++** gcVARIABLE * Variable
++** Pointer to a variable receiving the gcVARIABLE object pointer.
++*/
++gceSTATUS
++gcSHADER_GetVariable(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcVARIABLE * Variable
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariableIndexingRange
++********************************************************************************
++**
++** Get the gcVARIABLE indexing range.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcVARIABLE variable
++** Start variable.
++**
++** gctBOOL whole
++** Indicates whether the maximum indexing range is queried.
++**
++** OUTPUT:
++**
++** gctUINT *Start
++** Pointer to range start (temp register index).
++**
++** gctUINT *End
++** Pointer to range end (temp register index).
++*/
++gceSTATUS
++gcSHADER_GetVariableIndexingRange(
++ IN gcSHADER Shader,
++ IN gcVARIABLE variable,
++ IN gctBOOL whole,
++ OUT gctUINT *Start,
++ OUT gctUINT *End
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcode
++********************************************************************************
++**
++** Add an opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the target
++** of the opcode.
++**
++** gcSL_FORMAT Format
++** Format of the temporary register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcode(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddOpcode2(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeIndexed
++********************************************************************************
++**
++** Add an opcode to a gcSHADER object that writes to a dynamically indexed
++** target.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the
++** target of the opcode.
++**
++** gcSL_INDEXED Mode
++** Location of the dynamic index inside the temporary register. Valid
++** values can be:
++**
++** gcSL_INDEXED_X - Use x component of the temporary register.
++** gcSL_INDEXED_Y - Use y component of the temporary register.
++** gcSL_INDEXED_Z - Use z component of the temporary register.
++** gcSL_INDEXED_W - Use w component of the temporary register.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** gcSL_FORMAT Format
++** Format of the temporary register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionIndexed
++**
++** Add an opcode to a gcSHADER object that writes to a dynamically indexed
++** target.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition to check.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the
++** target of the opcode.
++**
++** gcSL_INDEXED Indexed
++** Location of the dynamic index inside the temporary register. Valid
++** values can be:
++**
++** gcSL_INDEXED_X - Use x component of the temporary register.
++** gcSL_INDEXED_Y - Use y component of the temporary register.
++** gcSL_INDEXED_Z - Use z component of the temporary register.
++** gcSL_INDEXED_W - Use w component of the temporary register.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_INDEXED Indexed,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditional
++********************************************************************************
++**
++** Add a conditional opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditional(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionalFormatted
++**
++** Add a conditional jump or call opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gcSL_FORMAT Format
++** Format of the conditional operands.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionalFormatted(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gcSL_FORMAT Format,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionalFormattedEnable
++**
++** Add a conditional jump or call opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gcSL_FORMAT Format
++** Format of the conditional operands.
++**
++** gctUINT8 Enable
++** Write enable value for the target of the opcode.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionalFormattedEnable(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gcSL_FORMAT Format,
++ IN gctUINT8 Enable,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddLabel
++********************************************************************************
++**
++** Define a label at the current instruction of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Label
++** Label to define.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddLabel(
++ IN gcSHADER Shader,
++ IN gctUINT Label
++ );
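++
++/* Usage sketch (illustrative only): a forward jump that is resolved by a later
++** gcSHADER_AddLabel() call. The gcSL_JMP opcode and gcSL_ALWAYS condition
++** names are assumed from the gcSL enumerations; the label value is arbitrary.
++**
++**     gctUINT label = 100;
++**
++**     gcSHADER_AddOpcodeConditional(Shader, gcSL_JMP, gcSL_ALWAYS, label);
++**
++**     ... instructions skipped when the jump is taken ...
++**
++**     gcSHADER_AddLabel(Shader, label);
++**
++** gcSHADER_Pack() later resolves the label into an instruction position.
++*/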
++
++/*******************************************************************************
++** gcSHADER_AddSource
++********************************************************************************
++**
++** Add a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_TYPE Type
++** Type of the source operand.
++**
++** gctUINT16 SourceIndex
++** Index of the source operand.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gcSL_FORMAT Format
++** Format of the source operand.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSource(
++ IN gcSHADER Shader,
++ IN gcSL_TYPE Type,
++ IN gctUINT16 SourceIndex,
++ IN gctUINT8 Swizzle,
++ IN gcSL_FORMAT Format
++ );
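++
++/* Usage sketch (illustrative only): an instruction is built with one
++** gcSHADER_AddOpcode*() call for the target followed by one
++** gcSHADER_AddSource*() call per source operand. The gcSL_ADD, gcSL_TEMP,
++** gcSL_FLOAT and gcSL_SWIZZLE_XYZW enumerants and the 0xF write-enable value
++** are assumed encodings; register indices are arbitrary.
++**
++**     gcSHADER_AddOpcode(Shader, gcSL_ADD, 3, 0xF, gcSL_FLOAT);
++**     gcSHADER_AddSource(Shader, gcSL_TEMP, 1, gcSL_SWIZZLE_XYZW, gcSL_FLOAT);
++**     gcSHADER_AddSource(Shader, gcSL_TEMP, 2, gcSL_SWIZZLE_XYZW, gcSL_FLOAT);
++*/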
++
++/*******************************************************************************
++** gcSHADER_AddSourceIndexed
++********************************************************************************
++**
++** Add a dynamically indexed source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_TYPE Type
++** Type of the source operand.
++**
++** gctUINT16 SourceIndex
++** Index of the source operand.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gcSL_INDEXED Mode
++** Addressing mode for the index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** gcSL_FORMAT Format
++** Format of the source operand.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_TYPE Type,
++ IN gctUINT16 SourceIndex,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceAttribute
++********************************************************************************
++**
++** Add an attribute as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the attribute in case the attribute is a matrix
++** or array.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceAttribute(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceAttributeIndexed
++********************************************************************************
++**
++** Add an indexed attribute as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the attribute in case the attribute is a matrix
++** or array.
++**
++** gcSL_INDEXED Mode
++** Addressing mode of the dynamic index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceAttributeIndexed(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceUniform
++********************************************************************************
++**
++** Add a uniform as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the uniform in case the uniform is a matrix or
++** array.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceUniform(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceUniformIndexed
++********************************************************************************
++**
++** Add an indexed uniform as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the uniform in case the uniform is a matrix or
++** array.
++**
++** gcSL_INDEXED Mode
++** Addressing mode of the dynamic index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceUniformIndexed(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++gceSTATUS
++gcSHADER_AddSourceSamplerIndexed(
++ IN gcSHADER Shader,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++gceSTATUS
++gcSHADER_AddSourceAttributeFormatted(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceAttributeIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceUniformFormatted(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceUniformIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceSamplerIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceConstant
++********************************************************************************
++**
++** Add a constant floating point value as a source operand to a gcSHADER
++** object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctFLOAT Constant
++** Floating point constant.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceConstant(
++ IN gcSHADER Shader,
++ IN gctFLOAT Constant
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceConstantFormatted
++********************************************************************************
++**
++** Add a constant value as a source operand to a gcSHADER
++** object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** void * Constant
++** Pointer to constant.
++**
++** gcSL_FORMAT Format
++** Format of the constant.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceConstantFormatted(
++ IN gcSHADER Shader,
++ IN void *Constant,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_Pack
++********************************************************************************
++**
++** Pack a dynamically created gcSHADER object by trimming the allocated arrays
++** and resolving all the labeling.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Pack(
++ IN gcSHADER Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_SetOptimizationOption
++********************************************************************************
++**
++** Set optimization option of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT OptimizationOption
++** Optimization option. Can be one of the following:
++**
++** 0 - No optimization.
++** 1 - Full optimization.
++** Other value - For optimizer testing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_SetOptimizationOption(
++ IN gcSHADER Shader,
++ IN gctUINT OptimizationOption
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateFunctions
++**
++** Reallocate an array of pointers to gcFUNCTION objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateFunctions(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcSHADER_AddFunction(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ OUT gcFUNCTION * Function
++ );
++
++gceSTATUS
++gcSHADER_ReallocateKernelFunctions(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcSHADER_AddKernelFunction(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_BeginFunction(
++ IN gcSHADER Shader,
++ IN gcFUNCTION Function
++ );
++
++gceSTATUS
++gcSHADER_EndFunction(
++ IN gcSHADER Shader,
++ IN gcFUNCTION Function
++ );
++
++gceSTATUS
++gcSHADER_BeginKernelFunction(
++ IN gcSHADER Shader,
++ IN gcKERNEL_FUNCTION KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_EndKernelFunction(
++ IN gcSHADER Shader,
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T LocalMemorySize
++ );
++
++gceSTATUS
++gcSHADER_SetMaxKernelFunctionArgs(
++ IN gcSHADER Shader,
++ IN gctUINT32 MaxKernelFunctionArgs
++ );
++
++/*******************************************************************************
++** gcSHADER_SetConstantMemorySize
++**
++** Set the constant memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T ConstantMemorySize
++** Constant memory size in bytes
++**
++** gctCHAR *ConstantMemoryBuffer
++** Constant memory buffer
++*/
++gceSTATUS
++gcSHADER_SetConstantMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T ConstantMemorySize,
++ IN gctCHAR * ConstantMemoryBuffer
++ );
++
++/*******************************************************************************
++** gcSHADER_GetConstantMemorySize
++**
++** Get the constant memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * ConstantMemorySize
++** Pointer to a variable receiving constant memory size in bytes
++**
++** gctCHAR ** ConstantMemoryBuffer
++** Pointer to a variable receiving the shader constant memory buffer.
++*/
++gceSTATUS
++gcSHADER_GetConstantMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * ConstantMemorySize,
++ OUT gctCHAR ** ConstantMemoryBuffer
++ );
++
++/*******************************************************************************
++** gcSHADER_SetPrivateMemorySize
++**
++** Set the private memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T PrivateMemorySize
++** Private memory size in bytes
++*/
++gceSTATUS
++gcSHADER_SetPrivateMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T PrivateMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_GetPrivateMemorySize
++**
++** Get the private memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PrivateMemorySize
++** Pointer to a variable receiving private memory size in bytes
++*/
++gceSTATUS
++gcSHADER_GetPrivateMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * PrivateMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_SetLocalMemorySize
++**
++** Set the local memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T LocalMemorySize
++** Local memory size in bytes
++*/
++gceSTATUS
++gcSHADER_SetLocalMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T LocalMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_GetLocalMemorySize
++**
++** Get the local memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * LocalMemorySize
++** Pointer to a variable receiving local memory size in bytes
++*/
++gceSTATUS
++gcSHADER_GetLocalMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * LocalMemorySize
++ );
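++
++/* Usage sketch (illustrative only; variable names are assumptions): querying
++** the memory requirements recorded in a shader.
++**
++**     gctSIZE_T localSize = 0, privateSize = 0;
++**
++**     gcSHADER_GetLocalMemorySize(Shader, &localSize);
++**     gcSHADER_GetPrivateMemorySize(Shader, &privateSize);
++*/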
++
++
++/*******************************************************************************
++** gcSHADER_CheckValidity
++**
++** Check validity for a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++*/
++gceSTATUS
++gcSHADER_CheckValidity(
++ IN gcSHADER Shader
++ );
++
++#if gcdUSE_WCLIP_PATCH
++gceSTATUS
++gcATTRIBUTE_IsPosition(
++ IN gcATTRIBUTE Attribute,
++ OUT gctBOOL * IsPosition
++ );
++#endif
++
++/*******************************************************************************
++** gcATTRIBUTE_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the attribute. 'Type'
++** can be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** attribute was declared as an array. If the attribute was not
++** declared as an array, the array length will be 1. 'ArrayLength' can
++** be gcvNULL, in which case no array length will be returned.
++*/
++gceSTATUS
++gcATTRIBUTE_GetType(
++ IN gcATTRIBUTE Attribute,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcATTRIBUTE_GetName
++********************************************************************************
++**
++** Get the name of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the attribute name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the attribute name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcATTRIBUTE_GetName(
++ IN gcATTRIBUTE Attribute,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++** gcATTRIBUTE_IsEnabled
++********************************************************************************
++**
++** Query the enabled state of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gctBOOL * Enabled
++** Pointer to a variable receiving the enabled state of the attribute.
++*/
++gceSTATUS
++gcATTRIBUTE_IsEnabled(
++ IN gcATTRIBUTE Attribute,
++ OUT gctBOOL * Enabled
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the uniform. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** uniform was declared as an array. If the uniform was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetType(
++ IN gcUNIFORM Uniform,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetTypeEx
++********************************************************************************
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the uniform. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gcSHADER_PRECISION * Precision
++** Pointer to a variable receiving the precision of the uniform. 'Precision' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** uniform was declared as an array. If the uniform was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetTypeEx(
++ IN gcUNIFORM Uniform,
++ OUT gcSHADER_TYPE * Type,
++ OUT gcSHADER_PRECISION * Precision,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetFlags
++********************************************************************************
++**
++** Get the flags of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gceUNIFORM_FLAGS * Flags
++** Pointer to a variable receiving the flags of the uniform.
++**
++*/
++gceSTATUS
++gcUNIFORM_GetFlags(
++ IN gcUNIFORM Uniform,
++ OUT gceUNIFORM_FLAGS * Flags
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetFlags
++********************************************************************************
++**
++** Set the flags of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gceUNIFORM_FLAGS Flags
++** Flags of the uniform to be set.
++**
++** OUTPUT:
++** Nothing.
++**
++*/
++gceSTATUS
++gcUNIFORM_SetFlags(
++ IN gcUNIFORM Uniform,
++ IN gceUNIFORM_FLAGS Flags
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetName
++********************************************************************************
++**
++** Get the name of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the uniform name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the uniform name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetName(
++ IN gcUNIFORM Uniform,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
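++
++/* Usage sketch (illustrative only; 'uniform' and the other variable names are
++** assumptions): basic introspection of a gcUNIFORM object.
++**
++**     gctSIZE_T nameLength, arrayLength;
++**     gctCONST_STRING name;
++**     gcSHADER_TYPE type;
++**
++**     gcUNIFORM_GetName(uniform, &nameLength, &name);
++**     gcUNIFORM_GetType(uniform, &type, &arrayLength);
++*/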
++
++/*******************************************************************************
++** gcUNIFORM_GetSampler
++********************************************************************************
++**
++** Get the physical sampler number for a sampler gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Sampler
++** Pointer to a variable receiving the physical sampler.
++*/
++gceSTATUS
++gcUNIFORM_GetSampler(
++ IN gcUNIFORM Uniform,
++ OUT gctUINT32 * Sampler
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetFormat
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSL_FORMAT * Format
++** Pointer to a variable receiving the format of each element of the uniform.
++** 'Format' can be gcvNULL, in which case no format will be returned.
++**
++** gctBOOL * IsPointer
++** Pointer to a variable receiving whether the uniform is a pointer.
++** 'IsPointer' can be gcvNULL, in which case no pointer state will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetFormat(
++ IN gcUNIFORM Uniform,
++ OUT gcSL_FORMAT * Format,
++ OUT gctBOOL * IsPointer
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetFormat
++**
++** Set the format and isPointer of a uniform.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gcSL_FORMAT Format
++** Format of each element of the uniform.
++**
++** gctBOOL IsPointer
++** Whether the uniform is a pointer.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetFormat(
++ IN gcUNIFORM Uniform,
++ IN gcSL_FORMAT Format,
++ IN gctBOOL IsPointer
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValue
++********************************************************************************
++**
++** Set the value of a uniform in integer.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctINT * Value
++** Pointer to a buffer holding the integer values for the uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValue(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN const gctINT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValueX
++********************************************************************************
++**
++** Set the value of a uniform in fixed point.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctFIXED_POINT * Value
++** Pointer to a buffer holding the fixed point values for the uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValueX(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN gctFIXED_POINT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValueF
++********************************************************************************
++**
++** Set the value of a uniform in floating point.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctFLOAT * Value
++** Pointer to a buffer holding the floating point values for the
++** uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValueF(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN const gctFLOAT * Value
++ );
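++
++/* Usage sketch (illustrative only; the uniform handle and the value are
++** assumptions): programming one entry of a 4-component uniform.
++**
++**     gctFLOAT value[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
++**
++**     gcUNIFORM_SetValueF(uniform, 1, value);
++*/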
++
++/*******************************************************************************
++** gcUNIFORM_ProgramF
++**
++** Set the value of a uniform in floating point.
++**
++** INPUT:
++**
++** gctUINT32 Address
++** Address of Uniform.
++**
++** gctSIZE_T Row, Col
++** Row and column counts of the uniform value.
++**
++** const gctFLOAT * Value
++** Pointer to a buffer holding the floating point values for the
++** uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_ProgramF(
++ IN gctUINT32 Address,
++ IN gctSIZE_T Row,
++ IN gctSIZE_T Col,
++ IN const gctFLOAT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetModelViewProjMatrix
++********************************************************************************
++**
++** Get the value of uniform modelViewProjMatrix ID if present.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gctUINT
++gcUNIFORM_GetModelViewProjMatrix(
++ IN gcUNIFORM Uniform
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the output. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** output was declared as an array. If the output was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcOUTPUT_GetType(
++ IN gcOUTPUT Output,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetIndex
++********************************************************************************
++**
++** Get the index of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gctUINT * Index
++** Pointer to a variable receiving the temporary register index of the
++** output. 'Index' can be gcvNULL, in which case no index will be
++** returned.
++*/
++gceSTATUS
++gcOUTPUT_GetIndex(
++ IN gcOUTPUT Output,
++ OUT gctUINT * Index
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetName
++********************************************************************************
++**
++** Get the name of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the output name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the output name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcOUTPUT_GetName(
++ IN gcOUTPUT Output,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++*********************************************************** F U N C T I O N S **
++*******************************************************************************/
++
++/*******************************************************************************
++** gcFUNCTION_ReallocateArguments
++**
++** Reallocate an array of gcsFUNCTION_ARGUMENT objects.
++**
++** INPUT:
++**
++** gcFUNCTION Function
++** Pointer to a gcFUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcFUNCTION_ReallocateArguments(
++ IN gcFUNCTION Function,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcFUNCTION_AddArgument(
++ IN gcFUNCTION Function,
++ IN gctUINT16 TempIndex,
++ IN gctUINT8 Enable,
++ IN gctUINT8 Qualifier
++ );
++
++gceSTATUS
++gcFUNCTION_GetArgument(
++ IN gcFUNCTION Function,
++ IN gctUINT16 Index,
++ OUT gctUINT16_PTR Temp,
++ OUT gctUINT8_PTR Enable,
++ OUT gctUINT8_PTR Swizzle
++ );
++
++gceSTATUS
++gcFUNCTION_GetLabel(
++ IN gcFUNCTION Function,
++ OUT gctUINT_PTR Label
++ );
++
++/*******************************************************************************
++************************* K E R N E L P R O P E R T Y F U N C T I O N S **
++*******************************************************************************/
++/*******************************************************************************/
++gceSTATUS
++gcKERNEL_FUNCTION_AddKernelFunctionProperties(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctINT propertyType,
++ IN gctSIZE_T propertySize,
++ IN gctINT * values
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetPropertyCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetProperty(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gctSIZE_T * propertySize,
++ OUT gctINT * propertyType,
++ OUT gctINT * propertyValues
++ );
++
++
++/*******************************************************************************
++*******************************I M A G E S A M P L E R F U N C T I O N S **
++*******************************************************************************/
++/*******************************************************************************
++** gcKERNEL_FUNCTION_ReallocateImageSamplers
++**
++** Reallocate an array of pointers to image sampler pair.
++**
++** INPUT:
++**
++** gcKERNEL_FUNCTION KernelFunction
++** Pointer to a gcKERNEL_FUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateImageSamplers(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddImageSampler(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT8 ImageNum,
++ IN gctBOOL IsConstantSamplerType,
++ IN gctUINT32 SamplerType
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetImageSamplerCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetImageSampler(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gctUINT8 *ImageNum,
++ OUT gctBOOL *IsConstantSamplerType,
++ OUT gctUINT32 *SamplerType
++ );
++
++/*******************************************************************************
++*********************************************K E R N E L F U N C T I O N S **
++*******************************************************************************/
++
++/*******************************************************************************
++** gcKERNEL_FUNCTION_ReallocateArguments
++**
++** Reallocate an array of gcsFUNCTION_ARGUMENT objects.
++**
++** INPUT:
++**
++** gcKERNEL_FUNCTION Function
++** Pointer to a gcKERNEL_FUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateArguments(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddArgument(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctUINT16 TempIndex,
++ IN gctUINT8 Enable,
++ IN gctUINT8 Qualifier
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetArgument(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctUINT16 Index,
++ OUT gctUINT16_PTR Temp,
++ OUT gctUINT8_PTR Enable,
++ OUT gctUINT8_PTR Swizzle
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetLabel(
++ IN gcKERNEL_FUNCTION Function,
++ OUT gctUINT_PTR Label
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetName(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateUniformArguments(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddUniformArgument(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * UniformArgument
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetUniformArgumentCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetUniformArgument(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gcUNIFORM * UniformArgument
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_SetCodeEnd(
++ IN gcKERNEL_FUNCTION KernelFunction
++ );
++
++/*******************************************************************************
++** gcCompileShader
++********************************************************************************
++**
++** Compile a shader.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctINT ShaderType
++** Shader type to compile. Can be one of the following values:
++**
++** gcSHADER_TYPE_VERTEX
++** Compile a vertex shader.
++**
++** gcSHADER_TYPE_FRAGMENT
++** Compile a fragment shader.
++**
++** gctSIZE_T SourceSize
++** Size of the source buffer in bytes.
++**
++** gctCONST_STRING Source
++** Pointer to the buffer containing the shader source code.
++**
++** OUTPUT:
++**
++** gcSHADER * Binary
++** Pointer to a variable receiving the pointer to a gcSHADER object
++** containing the compiled shader code.
++**
++** gctSTRING * Log
++** Pointer to a variable receiving a string pointer containging the
++** compile log.
++*/
++gceSTATUS
++gcCompileShader(
++ IN gcoHAL Hal,
++ IN gctINT ShaderType,
++ IN gctSIZE_T SourceSize,
++ IN gctCONST_STRING Source,
++ OUT gcSHADER * Binary,
++ OUT gctSTRING * Log
++ );
++
++/*******************************************************************************
++** gcOptimizeShader
++********************************************************************************
++**
++** Optimize a shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object holding information about the compiled
++** shader.
++**
++** gctFILE LogFile
++** Pointer to an open FILE object.
++*/
++gceSTATUS
++gcOptimizeShader(
++ IN gcSHADER Shader,
++ IN gctFILE LogFile
++ );
++
++/*******************************************************************************
++** gcLinkShaders
++********************************************************************************
++**
++** Link two shaders and generate a hardware-specific state buffer by compiling
++** the compiler generated code through the resource allocator and code
++** generator.
++**
++** INPUT:
++**
++** gcSHADER VertexShader
++** Pointer to a gcSHADER object holding information about the compiled
++** vertex shader.
++**
++** gcSHADER FragmentShader
++** Pointer to a gcSHADER object holding information about the compiled
++** fragment shader.
++**
++** gceSHADER_FLAGS Flags
++** Compiler flags. Can be any of the following:
++**
++** gcvSHADER_DEAD_CODE - Dead code elimination.
++** gcvSHADER_RESOURCE_USAGE - Resource usage optimization.
++** gcvSHADER_OPTIMIZER - Full optimization.
++** gcvSHADER_USE_GL_Z - Use OpenGL ES Z coordinate.
++** gcvSHADER_USE_GL_POSITION - Use OpenGL ES gl_Position.
++** gcvSHADER_USE_GL_FACE - Use OpenGL ES gl_FaceForward.
++**
++** OUTPUT:
++**
++** gctSIZE_T * StateBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'StateBuffer'.
++**
++** gctPOINTER * StateBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLinkShaders(
++ IN gcSHADER VertexShader,
++ IN gcSHADER FragmentShader,
++ IN gceSHADER_FLAGS Flags,
++ OUT gctSIZE_T * StateBufferSize,
++ OUT gctPOINTER * StateBuffer,
++ OUT gcsHINT_PTR * Hints,
++ OUT gcMACHINECODE_PTR *ppVsMachineCode,
++ OUT gcMACHINECODE_PTR *ppFsMachineCode
++ );
++
++/*******************************************************************************
++** gcLoadShaders
++********************************************************************************
++**
++** Load a pre-compiled and pre-linked shader program into the hardware.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctSIZE_T StateBufferSize
++** The number of bytes in the 'StateBuffer'.
++**
++** gctPOINTER StateBuffer
++** Pointer to the states that make up the shader program.
++**
++** gcsHINT_PTR Hints
++** Pointer to a gcsHINT structure that contains information required
++** when loading the shader states.
++*/
++gceSTATUS
++gcLoadShaders(
++ IN gcoHAL Hal,
++ IN gctSIZE_T StateBufferSize,
++ IN gctPOINTER StateBuffer,
++ IN gcsHINT_PTR Hints
++ );
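++
++/* Usage sketch (illustrative only): the typical compile/link/load flow built
++** from the entry points above. Error handling is omitted; the source buffers,
++** the choice of gcvSHADER_OPTIMIZER, and the variable names are assumptions.
++**
++**     gcSHADER vs, fs;
++**     gctSTRING log;
++**     gctSIZE_T stateSize;
++**     gctPOINTER states;
++**     gcsHINT_PTR hints;
++**     gcMACHINECODE_PTR vsCode, fsCode;
++**
++**     gcCompileShader(Hal, gcSHADER_TYPE_VERTEX, vsSize, vsSource, &vs, &log);
++**     gcCompileShader(Hal, gcSHADER_TYPE_FRAGMENT, fsSize, fsSource, &fs, &log);
++**
++**     gcLinkShaders(vs, fs, gcvSHADER_OPTIMIZER,
++**                   &stateSize, &states, &hints, &vsCode, &fsCode);
++**
++**     gcLoadShaders(Hal, stateSize, states, hints);
++*/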
++
++gceSTATUS
++gcRecompileShaders(
++ IN gcoHAL Hal,
++ IN gcMACHINECODE_PTR pVsMachineCode,
++ IN gcMACHINECODE_PTR pPsMachineCode,
++ /*Recompile variables*/
++ IN OUT gctPOINTER *ppRecompileStateBuffer,
++ IN OUT gctSIZE_T *pRecompileStateBufferSize,
++ IN OUT gcsHINT_PTR *ppRecompileHints,
++ /* native state */
++ IN gctPOINTER pNativeStateBuffer,
++ IN gctSIZE_T nativeStateBufferSize,
++ IN gcsHINT_PTR pNativeHints,
++ /* npt info */
++ IN gctUINT32 Samplers,
++ IN gctUINT32 *SamplerWrapS,
++ IN gctUINT32 *SamplerWrapT
++ );
++
++gceSTATUS
++gcRecompileDepthBias(
++ IN gcoHAL Hal,
++ IN gcMACHINECODE_PTR pVsMachineCode,
++ /*Recompile variables*/
++ IN OUT gctPOINTER *ppRecompileStateBuffer,
++ IN OUT gctSIZE_T *pRecompileStateBufferSize,
++ IN OUT gcsHINT_PTR *ppRecompileHints,
++ /* native state */
++ IN gctPOINTER pNativeStateBuffer,
++ IN gctSIZE_T nativeStateBufferSize,
++ IN gcsHINT_PTR pNativeHints,
++ OUT gctINT * uniformAddr,
++ OUT gctINT * uniformChannel
++ );
++
++/*******************************************************************************
++** gcSaveProgram
++********************************************************************************
++**
++** Save pre-compiled shaders and pre-linked programs to a binary file.
++**
++** INPUT:
++**
++** gcSHADER VertexShader
++** Pointer to vertex shader object.
++**
++** gcSHADER FragmentShader
++** Pointer to fragment shader object.
++**
++** gctSIZE_T ProgramBufferSize
++** Number of bytes in 'ProgramBuffer'.
++**
++** gctPOINTER ProgramBuffer
++** Pointer to buffer containing the program states.
++**
++** gcsHINT_PTR Hints
++** Pointer to HINTS structure for program states.
++**
++** OUTPUT:
++**
++** gctPOINTER * Binary
++** Pointer to a variable receiving the binary data to be saved.
++**
++** gctSIZE_T * BinarySize
++** Pointer to a variable receiving the number of bytes inside 'Binary'.
++*/
++gceSTATUS
++gcSaveProgram(
++ IN gcSHADER VertexShader,
++ IN gcSHADER FragmentShader,
++ IN gctSIZE_T ProgramBufferSize,
++ IN gctPOINTER ProgramBuffer,
++ IN gcsHINT_PTR Hints,
++ OUT gctPOINTER * Binary,
++ OUT gctSIZE_T * BinarySize
++ );
++
++/*******************************************************************************
++** gcLoadProgram
++********************************************************************************
++**
++** Load pre-compiled shaders and pre-linked programs from a binary file.
++**
++** INPUT:
++**
++** gctPOINTER Binary
++** Pointer to the binary data loaded.
++**
++** gctSIZE_T BinarySize
++** Number of bytes in 'Binary'.
++**
++** OUTPUT:
++**
++** gcSHADER VertexShader
++** Pointer to a vertex shader object.
++**
++** gcSHADER FragmentShader
++** Pointer to a fragment shader object.
++**
++** gctSIZE_T * ProgramBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'ProgramBuffer'.
++**
++** gctPOINTER * ProgramBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLoadProgram(
++ IN gctPOINTER Binary,
++ IN gctSIZE_T BinarySize,
++ OUT gcSHADER VertexShader,
++ OUT gcSHADER FragmentShader,
++ OUT gctSIZE_T * ProgramBufferSize,
++ OUT gctPOINTER * ProgramBuffer,
++ OUT gcsHINT_PTR * Hints
++ );
++
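++/* Usage sketch (editorial addition, not part of the original header): saving
++** a linked program to disk so gcLoadProgram can restore it later. Plain stdio
++** is used for the file I/O; how the 'binary' blob is released afterwards is
++** HAL specific and not shown in this header.
++*/
++#if 0
++#include <stdio.h>
++
++static gceSTATUS
++ExampleSaveProgram(
++    IN gcSHADER VertexShader,
++    IN gcSHADER FragmentShader,
++    IN gctSIZE_T StateBufferSize,
++    IN gctPOINTER StateBuffer,
++    IN gcsHINT_PTR Hints,
++    IN const char * Path
++    )
++{
++    gceSTATUS status;
++    gctPOINTER binary = gcvNULL;
++    gctSIZE_T binarySize = 0;
++    FILE * file;
++
++    /* Serialize the shaders, program states and hints into one blob. */
++    status = gcSaveProgram(VertexShader, FragmentShader,
++                           StateBufferSize, StateBuffer, Hints,
++                           &binary, &binarySize);
++
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Write the blob to disk; gcLoadProgram can consume it later. */
++    file = fopen(Path, "wb");
++    if (file != NULL)
++    {
++        fwrite(binary, 1, binarySize, file);
++        fclose(file);
++    }
++
++    return status;
++}
++#endif
++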
++/*******************************************************************************
++** gcCompileKernel
++********************************************************************************
++**
++** Compile an OpenCL kernel shader.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctSIZE_T SourceSize
++** Size of the source buffer in bytes.
++**
++** gctCONST_STRING Source
++** Pointer to the buffer containing the shader source code.
++**
++** OUTPUT:
++**
++** gcSHADER * Binary
++** Pointer to a variable receiving the pointer to a gcSHADER object
++** containing the compiled shader code.
++**
++** gctSTRING * Log
++** Pointer to a variable receiving a string pointer containing the
++** compile log.
++*/
++gceSTATUS
++gcCompileKernel(
++ IN gcoHAL Hal,
++ IN gctSIZE_T SourceSize,
++ IN gctCONST_STRING Source,
++ IN gctCONST_STRING Options,
++ OUT gcSHADER * Binary,
++ OUT gctSTRING * Log
++ );
++
++/*******************************************************************************
++** gcLinkKernel
++********************************************************************************
++**
++** Link OpenCL kernel and generate a hardware specific state buffer by compiling
++** the compiler generated code through the resource allocator and code
++** generator.
++**
++** INPUT:
++**
++** gcSHADER Kernel
++** Pointer to a gcSHADER object holding information about the compiled
++** OpenCL kernel.
++**
++** gceSHADER_FLAGS Flags
++** Compiler flags. Can be any of the following:
++**
++** gcvSHADER_DEAD_CODE - Dead code elimination.
++** gcvSHADER_RESOURCE_USAGE - Resource usage optimization.
++** gcvSHADER_OPTIMIZER - Full optimization.
++** gcvSHADER_USE_GL_Z - Use OpenGL ES Z coordinate.
++** gcvSHADER_USE_GL_POSITION - Use OpenGL ES gl_Position.
++** gcvSHADER_USE_GL_FACE - Use OpenGL ES gl_FaceForward.
++**
++** OUTPUT:
++**
++** gctSIZE_T * StateBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'StateBuffer'.
++**
++** gctPOINTER * StateBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLinkKernel(
++ IN gcSHADER Kernel,
++ IN gceSHADER_FLAGS Flags,
++ OUT gctSIZE_T * StateBufferSize,
++ OUT gctPOINTER * StateBuffer,
++ OUT gcsHINT_PTR * Hints
++ );
++
++/*******************************************************************************
++** gcLoadKernel
++********************************************************************************
++**
++** Load a pre-compiled and pre-linked kernel program into the hardware.
++**
++** INPUT:
++**
++** gctSIZE_T StateBufferSize
++** The number of bytes in the 'StateBuffer'.
++**
++** gctPOINTER StateBuffer
++** Pointer to the states that make up the shader program.
++**
++** gcsHINT_PTR Hints
++** Pointer to a gcsHINT structure that contains information required
++** when loading the shader states.
++*/
++gceSTATUS
++gcLoadKernel(
++ IN gctSIZE_T StateBufferSize,
++ IN gctPOINTER StateBuffer,
++ IN gcsHINT_PTR Hints
++ );
++
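++/* Usage sketch (editorial addition, not part of the original header): the
++** compile/link/load path for an OpenCL kernel, mirroring the GL shader path
++** above. The build log is simply discarded here; a real caller would report
++** it on failure.
++*/
++#if 0
++static gceSTATUS
++ExampleBuildAndLoadKernel(
++    IN gcoHAL Hal,
++    IN gctCONST_STRING Source,
++    IN gctSIZE_T SourceSize,
++    IN gctCONST_STRING Options
++    )
++{
++    gceSTATUS status;
++    gcSHADER kernel = gcvNULL;
++    gctSTRING log = gcvNULL;
++    gctSIZE_T stateBufferSize = 0;
++    gctPOINTER stateBuffer = gcvNULL;
++    gcsHINT_PTR hints = gcvNULL;
++
++    /* Front end: CL source text to a gcSHADER object. */
++    status = gcCompileKernel(Hal, SourceSize, Source, Options, &kernel, &log);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Back end: resource allocation and code generation. */
++    status = gcLinkKernel(kernel, gcvSHADER_OPTIMIZER,
++                          &stateBufferSize, &stateBuffer, &hints);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Download the kernel states into the hardware. */
++    return gcLoadKernel(stateBufferSize, stateBuffer, hints);
++}
++#endif
++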
++gceSTATUS
++gcInvokeThreadWalker(
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++void
++gcTYPE_GetTypeInfo(
++ IN gcSHADER_TYPE Type,
++ OUT gctINT * Components,
++ OUT gctINT * Rows,
++ OUT gctCONST_STRING * Name
++ );
++
++gctBOOL
++gcOPT_doVaryingPackingForShader(
++ IN gcSHADER Shader
++ );
++
++gceSTATUS
++gcSHADER_PatchNPOTForMachineCode(
++ IN gcSHADER_KIND shaderType,
++ IN gcMACHINECODE_PTR pMachineCode,
++ IN gcNPOT_PATCH_PARAM_PTR pPatchParam,
++ IN gctUINT countOfPatchParam,
++ IN gctUINT hwSupportedInstCount,
++ OUT gctPOINTER* ppCmdBuffer,
++ OUT gctUINT32* pByteSizeOfCmdBuffer,
++ IN OUT gcsHINT_PTR pHints /* The caller copies the original hints into this one, then passes it in */
++ );
++
++gceSTATUS
++gcSHADER_PatchZBiasForMachineCodeVS(
++ IN gcMACHINECODE_PTR pMachineCode,
++ IN OUT gcZBIAS_PATCH_PARAM_PTR pPatchParam,
++ IN gctUINT hwSupportedInstCount,
++ OUT gctPOINTER* ppCmdBuffer,
++ OUT gctUINT32* pByteSizeOfCmdBuffer,
++ IN OUT gcsHINT_PTR pHints /* The caller copies the original hints into this one, then passes it in */
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_compiler_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,1051 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_h_
++#define __gc_hal_driver_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_driver_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++#define IOCTL_GCHAL_KERNEL_INTERFACE 30001
++#define IOCTL_GCHAL_TERMINATE 30002
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++typedef enum _gceHAL_COMMAND_CODES
++{
++ /* Generic query. */
++ gcvHAL_QUERY_VIDEO_MEMORY,
++ gcvHAL_QUERY_CHIP_IDENTITY,
++
++ /* Contiguous memory. */
++ gcvHAL_ALLOCATE_NON_PAGED_MEMORY,
++ gcvHAL_FREE_NON_PAGED_MEMORY,
++ gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY,
++ gcvHAL_FREE_CONTIGUOUS_MEMORY,
++
++ /* Video memory allocation. */
++ gcvHAL_ALLOCATE_VIDEO_MEMORY, /* Enforced alignment. */
++ gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, /* No alignment. */
++ gcvHAL_FREE_VIDEO_MEMORY,
++
++ /* Physical-to-logical mapping. */
++ gcvHAL_MAP_MEMORY,
++ gcvHAL_UNMAP_MEMORY,
++
++ /* Logical-to-physical mapping. */
++ gcvHAL_MAP_USER_MEMORY,
++ gcvHAL_UNMAP_USER_MEMORY,
++
++ /* Surface lock/unlock. */
++ gcvHAL_LOCK_VIDEO_MEMORY,
++ gcvHAL_UNLOCK_VIDEO_MEMORY,
++
++ /* Event queue. */
++ gcvHAL_EVENT_COMMIT,
++
++ gcvHAL_USER_SIGNAL,
++ gcvHAL_SIGNAL,
++ gcvHAL_WRITE_DATA,
++
++ gcvHAL_COMMIT,
++ gcvHAL_STALL,
++
++ gcvHAL_READ_REGISTER,
++ gcvHAL_WRITE_REGISTER,
++
++ gcvHAL_GET_PROFILE_SETTING,
++ gcvHAL_SET_PROFILE_SETTING,
++
++ gcvHAL_READ_ALL_PROFILE_REGISTERS,
++ gcvHAL_PROFILE_REGISTERS_2D,
++#if VIVANTE_PROFILER_PERDRAW
++ gcvHAL_READ_PROFILER_REGISTER_SETTING,
++#endif
++
++ /* Power management. */
++ gcvHAL_SET_POWER_MANAGEMENT_STATE,
++ gcvHAL_QUERY_POWER_MANAGEMENT_STATE,
++
++ gcvHAL_GET_BASE_ADDRESS,
++
++ gcvHAL_SET_IDLE, /* reserved */
++
++ /* Queries. */
++ gcvHAL_QUERY_KERNEL_SETTINGS,
++
++ /* Reset. */
++ gcvHAL_RESET,
++
++ /* Map physical address into handle. */
++ gcvHAL_MAP_PHYSICAL,
++
++ /* Debugger stuff. */
++ gcvHAL_DEBUG,
++
++ /* Cache stuff. */
++ gcvHAL_CACHE,
++
++ /* TimeStamp */
++ gcvHAL_TIMESTAMP,
++
++ /* Database. */
++ gcvHAL_DATABASE,
++
++ /* Version. */
++ gcvHAL_VERSION,
++
++ /* Chip info */
++ gcvHAL_CHIP_INFO,
++
++ /* Process attaching/detaching. */
++ gcvHAL_ATTACH,
++ gcvHAL_DETACH,
++
++ /* Composition. */
++ gcvHAL_COMPOSE,
++
++ /* Set timeOut value */
++ gcvHAL_SET_TIMEOUT,
++
++ /* Frame database. */
++ gcvHAL_GET_FRAME_INFO,
++
++ /* Shared info for each process */
++ gcvHAL_GET_SHARED_INFO,
++ gcvHAL_SET_SHARED_INFO,
++ gcvHAL_QUERY_COMMAND_BUFFER,
++
++ gcvHAL_COMMIT_DONE,
++
++ /* GPU and event dump */
++ gcvHAL_DUMP_GPU_STATE,
++ gcvHAL_DUMP_EVENT,
++
++ /* Virtual command buffer. */
++ gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER,
++ gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER,
++
++ /* FSCALE_VAL. */
++ gcvHAL_SET_FSCALE_VALUE,
++ gcvHAL_GET_FSCALE_VALUE,
++
++ /* Reset time stamp. */
++ gcvHAL_QUERY_RESET_TIME_STAMP,
++
++ /* Sync point operations. */
++ gcvHAL_SYNC_POINT,
++
++ /* Create native fence and return its fd. */
++ gcvHAL_CREATE_NATIVE_FENCE,
++
++ /* Video memory database */
++ gcvHAL_VIDMEM_DATABASE,
++}
++gceHAL_COMMAND_CODES;
++
++/******************************************************************************\
++****************************** Interface Structure *****************************
++\******************************************************************************/
++
++#define gcdMAX_PROFILE_FILE_NAME 128
++
++/* Kernel settings. */
++typedef struct _gcsKERNEL_SETTINGS
++{
++ /* Used RealTime signal between kernel and user. */
++ gctINT signal;
++}
++gcsKERNEL_SETTINGS;
++
++
++/* gcvHAL_QUERY_CHIP_IDENTITY */
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY * gcsHAL_QUERY_CHIP_IDENTITY_PTR;
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY
++{
++
++ /* Chip model. */
++ gceCHIPMODEL chipModel;
++
++ /* Revision value.*/
++ gctUINT32 chipRevision;
++
++ /* Supported feature fields. */
++ gctUINT32 chipFeatures;
++
++ /* Supported minor feature fields. */
++ gctUINT32 chipMinorFeatures;
++
++ /* Supported minor feature 1 fields. */
++ gctUINT32 chipMinorFeatures1;
++
++ /* Supported minor feature 2 fields. */
++ gctUINT32 chipMinorFeatures2;
++
++ /* Supported minor feature 3 fields. */
++ gctUINT32 chipMinorFeatures3;
++
++ /* Supported minor feature 4 fields. */
++ gctUINT32 chipMinorFeatures4;
++
++ /* Number of streams supported. */
++ gctUINT32 streamCount;
++
++ /* Total number of temporary registers per thread. */
++ gctUINT32 registerMax;
++
++ /* Maximum number of threads. */
++ gctUINT32 threadCount;
++
++ /* Number of shader cores. */
++ gctUINT32 shaderCoreCount;
++
++ /* Size of the vertex cache. */
++ gctUINT32 vertexCacheSize;
++
++ /* Number of entries in the vertex output buffer. */
++ gctUINT32 vertexOutputBufferSize;
++
++ /* Number of pixel pipes. */
++ gctUINT32 pixelPipes;
++
++ /* Number of instructions. */
++ gctUINT32 instructionCount;
++
++ /* Number of constants. */
++ gctUINT32 numConstants;
++
++ /* Buffer size */
++ gctUINT32 bufferSize;
++
++ /* Number of varyings */
++ gctUINT32 varyingsCount;
++
++ /* Supertile layout style in hardware */
++ gctUINT32 superTileMode;
++
++ /* Special control bits for 2D chip. */
++ gctUINT32 chip2DControl;
++}
++gcsHAL_QUERY_CHIP_IDENTITY;
++
++/* gcvHAL_COMPOSE. */
++typedef struct _gcsHAL_COMPOSE * gcsHAL_COMPOSE_PTR;
++typedef struct _gcsHAL_COMPOSE
++{
++ /* Composition state buffer. */
++ gctUINT64 physical;
++ gctUINT64 logical;
++ gctUINT offset;
++ gctUINT size;
++
++ /* Composition end signal. */
++ gctUINT64 process;
++ gctUINT64 signal;
++
++ /* User signals. */
++ gctUINT64 userProcess;
++ gctUINT64 userSignal1;
++ gctUINT64 userSignal2;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. */
++ gctINT32 coid;
++
++ /* Set by server. */
++ gctINT32 rcvid;
++#endif
++}
++gcsHAL_COMPOSE;
++
++
++typedef struct _gcsHAL_INTERFACE
++{
++ /* Command code. */
++ gceHAL_COMMAND_CODES command;
++
++ /* Hardware type. */
++ gceHARDWARE_TYPE hardwareType;
++
++ /* Status value. */
++ gceSTATUS status;
++
++ /* Handle to this interface channel. */
++ gctUINT64 handle;
++
++ /* Pid of the client. */
++ gctUINT32 pid;
++
++ /* Union of command structures. */
++ union _u
++ {
++ /* gcvHAL_GET_BASE_ADDRESS */
++ struct _gcsHAL_GET_BASE_ADDRESS
++ {
++ /* Physical memory address of internal memory. */
++ OUT gctUINT32 baseAddress;
++ }
++ GetBaseAddress;
++
++ /* gcvHAL_QUERY_VIDEO_MEMORY */
++ struct _gcsHAL_QUERY_VIDEO_MEMORY
++ {
++ /* Physical memory address of internal memory. Just a name. */
++ OUT gctUINT32 internalPhysical;
++
++ /* Size in bytes of internal memory. */
++ OUT gctUINT64 internalSize;
++
++ /* Physical memory address of external memory. Just a name. */
++ OUT gctUINT32 externalPhysical;
++
++ /* Size in bytes of external memory.*/
++ OUT gctUINT64 externalSize;
++
++ /* Physical memory address of contiguous memory. Just a name. */
++ OUT gctUINT32 contiguousPhysical;
++
++ /* Size in bytes of contiguous memory.*/
++ OUT gctUINT64 contiguousSize;
++ }
++ QueryVideoMemory;
++
++ /* gcvHAL_QUERY_CHIP_IDENTITY */
++ gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity;
++
++ /* gcvHAL_MAP_MEMORY */
++ struct _gcsHAL_MAP_MEMORY
++ {
++ /* Physical memory address to map. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to map. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory. */
++ OUT gctUINT64 logical;
++ }
++ MapMemory;
++
++ /* gcvHAL_UNMAP_MEMORY */
++ struct _gcsHAL_UNMAP_MEMORY
++ {
++ /* Physical memory address to unmap. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to unmap. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory to unmap. */
++ IN gctUINT64 logical;
++ }
++ UnmapMemory;
++
++ /* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT bytes;
++
++ /* Buffer alignment. */
++ IN gctUINT alignment;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ OUT gctUINT64 node;
++ }
++ AllocateLinearVideoMemory;
++
++ /* gcvHAL_ALLOCATE_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIDEO_MEMORY
++ {
++ /* Width of rectangle to allocate. */
++ IN OUT gctUINT width;
++
++ /* Height of rectangle to allocate. */
++ IN OUT gctUINT height;
++
++ /* Depth of rectangle to allocate. */
++ IN gctUINT depth;
++
++ /* Format rectangle to allocate in gceSURF_FORMAT. */
++ IN gceSURF_FORMAT format;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ OUT gctUINT64 node;
++ }
++ AllocateVideoMemory;
++
++ /* gcvHAL_FREE_VIDEO_MEMORY */
++ struct _gcsHAL_FREE_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++#ifdef __QNXNTO__
++/* TODO: This is part of the unlock - why is it here? */
++ /* Mapped logical address to unmap in user space. */
++ OUT gctUINT64 memory;
++
++ /* Number of bytes allocated. */
++ OUT gctUINT64 bytes;
++#endif
++ }
++ FreeVideoMemory;
++
++ /* gcvHAL_LOCK_VIDEO_MEMORY */
++ struct _gcsHAL_LOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++ /* Cache configuration. */
++ /* Only gcvPOOL_CONTIGUOUS and gcvPOOL_VIRTUAL
++ ** can be configured */
++ IN gctBOOL cacheable;
++
++ /* Hardware specific address. */
++ OUT gctUINT32 address;
++
++ /* Mapped logical address. */
++ OUT gctUINT64 memory;
++ }
++ LockVideoMemory;
++
++ /* gcvHAL_UNLOCK_VIDEO_MEMORY */
++ struct _gcsHAL_UNLOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++ /* Type of surface. */
++ IN gceSURF_TYPE type;
++
++ /* Flag to unlock the surface asynchronously. */
++ IN OUT gctBOOL asynchroneous;
++ }
++ UnlockVideoMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateNonPagedMemory;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeNonPagedMemory;
++
++ /* gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER */
++ struct _gcsHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateVirtualCommandBuffer;
++
++ /* gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER */
++ struct _gcsHAL_FREE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeVirtualCommandBuffer;
++
++ /* gcvHAL_EVENT_COMMIT. */
++ struct _gcsHAL_EVENT_COMMIT
++ {
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++ }
++ Event;
++
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_COMMIT
++ {
++ /* Context buffer object gckCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command buffer gcoCMDBUF. */
++ IN gctUINT64 commandBuffer;
++
++ /* State delta buffer in gcsSTATE_DELTA. */
++ gctUINT64 delta;
++
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++ }
++ Commit;
++
++ /* gcvHAL_MAP_USER_MEMORY */
++ struct _gcsHAL_MAP_USER_MEMORY
++ {
++ /* Base address of user memory to map. */
++ IN gctUINT64 memory;
++
++ /* Physical address of user memory to map. */
++ IN gctUINT32 physical;
++
++ /* Size of user memory in bytes to map. */
++ IN gctUINT64 size;
++
++ /* Info record required by gcvHAL_UNMAP_USER_MEMORY. Just a name. */
++ OUT gctUINT32 info;
++
++ /* Physical address of mapped memory. */
++ OUT gctUINT32 address;
++ }
++ MapUserMemory;
++
++ /* gcvHAL_UNMAP_USER_MEMORY */
++ struct _gcsHAL_UNMAP_USER_MEMORY
++ {
++ /* Base address of user memory to unmap. */
++ IN gctUINT64 memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctUINT64 size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. Just a name. */
++ IN gctUINT32 info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++ }
++ UnmapUserMemory;
++#if !USE_NEW_LINUX_SIGNAL
++ /* gcvHAL_USER_SIGNAL */
++ struct _gcsHAL_USER_SIGNAL
++ {
++ /* Command. */
++ gceUSER_SIGNAL_COMMAND_CODES command;
++
++ /* Signal ID. */
++ IN OUT gctINT id;
++
++ /* Reset mode. */
++ IN gctBOOL manualReset;
++
++ /* Wait timedout. */
++ /* Wait timeout. */
++
++ /* State. */
++ IN gctBOOL state;
++ }
++ UserSignal;
++#endif
++
++ /* gcvHAL_SIGNAL. */
++ struct _gcsHAL_SIGNAL
++ {
++ /* Signal handle to signal gctSIGNAL. */
++ IN gctUINT64 signal;
++
++ /* Reserved gctSIGNAL. */
++ IN gctUINT64 auxSignal;
++
++ /* Process owning the signal gctHANDLE. */
++ IN gctUINT64 process;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. Set by client in gcoOS_CreateSignal. */
++ IN gctINT32 coid;
++
++ /* Set by server. */
++ IN gctINT32 rcvid;
++#endif
++ /* Where in the pipeline the event is generated. */
++ IN gceKERNEL_WHERE fromWhere;
++ }
++ Signal;
++
++ /* gcvHAL_WRITE_DATA. */
++ struct _gcsHAL_WRITE_DATA
++ {
++ /* Address to write data to. */
++ IN gctUINT32 address;
++
++ /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteData;
++
++ /* gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_ALLOCATE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Hardware address of allocation. */
++ OUT gctUINT32 address;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateContiguousMemory;
++
++ /* gcvHAL_FREE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_FREE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeContiguousMemory;
++
++ /* gcvHAL_READ_REGISTER */
++ struct _gcsHAL_READ_REGISTER
++ {
++ /* Address of the register to read. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ OUT gctUINT32 data;
++ }
++ ReadRegisterData;
++
++ /* gcvHAL_WRITE_REGISTER */
++ struct _gcsHAL_WRITE_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteRegisterData;
++
++#if VIVANTE_PROFILER
++ /* gcvHAL_GET_PROFILE_SETTING */
++ struct _gcsHAL_GET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ OUT gctBOOL enable;
++
++ /* The profile file name */
++ OUT gctCHAR fileName[gcdMAX_PROFILE_FILE_NAME];
++ }
++ GetProfileSetting;
++
++ /* gcvHAL_SET_PROFILE_SETTING */
++ struct _gcsHAL_SET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ IN gctBOOL enable;
++
++ /* The profile file name */
++ IN gctCHAR fileName[gcdMAX_PROFILE_FILE_NAME];
++ }
++ SetProfileSetting;
++
++#if VIVANTE_PROFILER_PERDRAW
++ /* gcvHAL_READ_PROFILER_REGISTER_SETTING */
++ struct _gcsHAL_READ_PROFILER_REGISTER_SETTING
++ {
++ /* Should Clear Register */
++ IN gctBOOL bclear;
++ }
++ SetProfilerRegisterClear;
++#endif
++
++ /* gcvHAL_READ_ALL_PROFILE_REGISTERS */
++ struct _gcsHAL_READ_ALL_PROFILE_REGISTERS
++ {
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++#endif
++ /* Data read. */
++ OUT gcsPROFILER_COUNTERS counters;
++ }
++ RegisterProfileData;
++
++ /* gcvHAL_PROFILE_REGISTERS_2D */
++ struct _gcsHAL_PROFILE_REGISTERS_2D
++ {
++ /* Data read in gcs2D_PROFILE. */
++ OUT gctUINT64 hwProfile2D;
++ }
++ RegisterProfileData2D;
++#endif
++ /* Power management. */
++ /* gcvHAL_SET_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_SET_POWER_MANAGEMENT
++ {
++ /* Power management state to set. */
++ IN gceCHIPPOWERSTATE state;
++ }
++ SetPowerManagement;
++
++ /* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_QUERY_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ OUT gceCHIPPOWERSTATE state;
++
++ /* Idle query. */
++ OUT gctBOOL isIdle;
++ }
++ QueryPowerManagement;
++
++ /* gcvHAL_QUERY_KERNEL_SETTINGS */
++ struct _gcsHAL_QUERY_KERNEL_SETTINGS
++ {
++ /* Settings.*/
++ OUT gcsKERNEL_SETTINGS settings;
++ }
++ QueryKernelSettings;
++
++ /* gcvHAL_MAP_PHYSICAL */
++ struct _gcsHAL_MAP_PHYSICAL
++ {
++ /* gcvTRUE to map, gcvFALSE to unmap. */
++ IN gctBOOL map;
++
++ /* Physical address. */
++ IN OUT gctUINT64 physical;
++ }
++ MapPhysical;
++
++ /* gcvHAL_DEBUG */
++ struct _gcsHAL_DEBUG
++ {
++ /* If gcvTRUE, set the debug information. */
++ IN gctBOOL set;
++ IN gctUINT32 level;
++ IN gctUINT32 zones;
++ IN gctBOOL enable;
++
++ IN gceDEBUG_MESSAGE_TYPE type;
++ IN gctUINT32 messageSize;
++
++ /* Message to print if not empty. */
++ IN gctCHAR message[80];
++ }
++ Debug;
++
++ /* gcvHAL_CACHE */
++ struct _gcsHAL_CACHE
++ {
++ IN gceCACHEOPERATION operation;
++ /* gctHANDLE */
++ IN gctUINT64 process;
++ IN gctUINT64 logical;
++ IN gctUINT64 bytes;
++ /* gcuVIDMEM_NODE_PTR */
++ IN gctUINT64 node;
++ }
++ Cache;
++
++ /* gcvHAL_TIMESTAMP */
++ struct _gcsHAL_TIMESTAMP
++ {
++ /* Timer select. */
++ IN gctUINT32 timer;
++
++ /* Timer request type (0-stop, 1-start, 2-send delta). */
++ IN gctUINT32 request;
++
++ /* Result of delta time in microseconds. */
++ OUT gctINT32 timeDelta;
++ }
++ TimeStamp;
++
++ /* gcvHAL_DATABASE */
++ struct _gcsHAL_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMem;
++ OUT gcuDATABASE_INFO nonPaged;
++ OUT gcuDATABASE_INFO contiguous;
++ OUT gcuDATABASE_INFO gpuIdle;
++ }
++ Database;
++
++ /* gcvHAL_VIDMEM_DATABASE */
++ struct _gcsHAL_VIDMEM_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMemResv;
++ OUT gcuDATABASE_INFO vidMemCont;
++ OUT gcuDATABASE_INFO vidMemVirt;
++ }
++ VidMemDatabase;
++
++ /* gcvHAL_VERSION */
++ struct _gcsHAL_VERSION
++ {
++ /* Major version: N.n.n. */
++ OUT gctINT32 major;
++
++ /* Minor version: n.N.n. */
++ OUT gctINT32 minor;
++
++ /* Patch version: n.n.N. */
++ OUT gctINT32 patch;
++
++ /* Build version. */
++ OUT gctUINT32 build;
++ }
++ Version;
++
++ /* gcvHAL_CHIP_INFO */
++ struct _gcsHAL_CHIP_INFO
++ {
++ /* Chip count. */
++ OUT gctINT32 count;
++
++ /* Chip types. */
++ OUT gceHARDWARE_TYPE types[gcdCHIP_COUNT];
++ }
++ ChipInfo;
++
++ /* gcvHAL_ATTACH */
++ struct _gcsHAL_ATTACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ OUT gctUINT32 context;
++
++ /* Number of states in the buffer. */
++ OUT gctUINT64 stateCount;
++ }
++ Attach;
++
++ /* gcvHAL_DETACH */
++ struct _gcsHAL_DETACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++ }
++ Detach;
++
++ /* gcvHAL_COMPOSE. */
++ gcsHAL_COMPOSE Compose;
++
++ /* gcvHAL_GET_FRAME_INFO. */
++ struct _gcsHAL_GET_FRAME_INFO
++ {
++ /* gcsHAL_FRAME_INFO* */
++ OUT gctUINT64 frameInfo;
++ }
++ GetFrameInfo;
++
++ /* gcvHAL_SET_TIMEOUT. */
++ struct _gcsHAL_SET_TIMEOUT
++ {
++ gctUINT32 timeOut;
++ }
++ SetTimeOut;
++
++#if gcdENABLE_VG
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_VGCOMMIT
++ {
++ /* Context buffer in gcsVGCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command queue in gcsVGCMDQUEUE. */
++ IN gctUINT64 queue;
++
++ /* Number of entries in the queue. */
++ IN gctUINT entryCount;
++
++ /* Task table in gcsTASK_MASTER_TABLE. */
++ IN gctUINT64 taskTable;
++ }
++ VGCommit;
++
++ /* gcvHAL_QUERY_COMMAND_BUFFER */
++ struct _gcsHAL_QUERY_COMMAND_BUFFER
++ {
++ /* Command buffer attributes. */
++ OUT gcsCOMMAND_BUFFER_INFO information;
++ }
++ QueryCommandBuffer;
++
++#endif
++
++ struct _gcsHAL_GET_SHARED_INFO
++ {
++ /* Process id. */
++ IN gctUINT32 pid;
++
++ /* Data id. */
++ IN gctUINT32 dataId;
++
++ /* Data size. */
++ IN gctSIZE_T bytes;
++
++ /* Pointer to save the shared data. */
++ OUT gctPOINTER data;
++ }
++ GetSharedInfo;
++
++ struct _gcsHAL_SET_SHARED_INFO
++ {
++ /* Data id. */
++ IN gctUINT32 dataId;
++
++ /* Data to be shared. */
++ IN gctPOINTER data;
++
++ /* Data size. */
++ IN gctSIZE_T bytes;
++ }
++ SetSharedInfo;
++
++ struct _gcsHAL_SET_FSCALE_VALUE
++ {
++ IN gctUINT value;
++ }
++ SetFscaleValue;
++
++ struct _gcsHAL_GET_FSCALE_VALUE
++ {
++ OUT gctUINT value;
++ OUT gctUINT minValue;
++ OUT gctUINT maxValue;
++ }
++ GetFscaleValue;
++
++ struct _gcsHAL_QUERY_RESET_TIME_STAMP
++ {
++ OUT gctUINT64 timeStamp;
++ }
++ QueryResetTimeStamp;
++
++ struct _gcsHAL_SYNC_POINT
++ {
++ /* Command. */
++ gceSYNC_POINT_COMMAND_CODES command;
++
++ /* Sync point. */
++ IN OUT gctUINT64 syncPoint;
++
++ /* From where. */
++ IN gceKERNEL_WHERE fromWhere;
++
++ /* Signaled state. */
++ OUT gctBOOL state;
++ }
++ SyncPoint;
++
++ struct _gcsHAL_CREATE_NATIVE_FENCE
++ {
++ /* Signal id to dup. */
++ IN gctUINT64 syncPoint;
++
++ /* Native fence file descriptor. */
++ OUT gctINT fenceFD;
++
++ }
++ CreateNativeFence;
++ }
++ u;
++}
++gcsHAL_INTERFACE;
++
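++/* Usage sketch (editorial addition, not part of the original header): filling
++** in a gcsHAL_INTERFACE request for a chip identity query. The ioctl argument
++** wrapper that actually carries this structure to the "galcore" device with
++** IOCTL_GCHAL_INTERFACE is defined by the OS layer, not by this header, so
++** that step is only indicated by a comment; gcvHARDWARE_3D is assumed to come
++** from gc_hal_enum.h.
++*/
++#if 0
++static void
++ExampleQueryChipIdentity(
++    void
++    )
++{
++    gcsHAL_INTERFACE iface;
++
++    iface.command = gcvHAL_QUERY_CHIP_IDENTITY;
++    iface.hardwareType = gcvHARDWARE_3D;
++
++    /* Hand 'iface' to galcore here (ioctl with IOCTL_GCHAL_INTERFACE, wrapped
++    ** in the OS specific argument structure). On return the kernel has filled
++    ** iface.status and iface.u.QueryChipIdentity. */
++
++    if (iface.status == gcvSTATUS_OK)
++    {
++        gceCHIPMODEL model = iface.u.QueryChipIdentity.chipModel;
++        gctUINT32 revision = iface.u.QueryChipIdentity.chipRevision;
++
++        (void) model;
++        (void) revision;
++    }
++}
++#endif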
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_driver_vg.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,270 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_vg_h_
++#define __gc_hal_driver_vg_h_
++
++
++
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++/******************************************************************************\
++********************* Command buffer information structure. ********************
++\******************************************************************************/
++
++typedef struct _gcsCOMMAND_BUFFER_INFO * gcsCOMMAND_BUFFER_INFO_PTR;
++typedef struct _gcsCOMMAND_BUFFER_INFO
++{
++ /* FE command buffer interrupt ID. */
++ gctINT32 feBufferInt;
++
++ /* TS overflow interrupt ID. */
++ gctINT32 tsOverflowInt;
++
++ /* Alignment and mask for the buffer address. */
++ gctUINT addressMask;
++ gctSIZE_T addressAlignment;
++
++ /* Alignment for each command. */
++ gctSIZE_T commandAlignment;
++
++ /* Number of bytes required by the STATE command. */
++ gctSIZE_T stateCommandSize;
++
++ /* Number of bytes required by the RESTART command. */
++ gctSIZE_T restartCommandSize;
++
++ /* Number of bytes required by the FETCH command. */
++ gctSIZE_T fetchCommandSize;
++
++ /* Number of bytes required by the CALL command. */
++ gctSIZE_T callCommandSize;
++
++ /* Number of bytes required by the RETURN command. */
++ gctSIZE_T returnCommandSize;
++
++ /* Number of bytes required by the EVENT command. */
++ gctSIZE_T eventCommandSize;
++
++ /* Number of bytes required by the END command. */
++ gctSIZE_T endCommandSize;
++
++ /* Number of bytes reserved at the tail of a static command buffer. */
++ gctSIZE_T staticTailSize;
++
++ /* Number of bytes reserved at the tail of a dynamic command buffer. */
++ gctSIZE_T dynamicTailSize;
++}
++gcsCOMMAND_BUFFER_INFO;
++
++/******************************************************************************\
++******************************** Task Structures *******************************
++\******************************************************************************/
++
++typedef enum _gceTASK
++{
++ gcvTASK_LINK,
++ gcvTASK_CLUSTER,
++ gcvTASK_INCREMENT,
++ gcvTASK_DECREMENT,
++ gcvTASK_SIGNAL,
++ gcvTASK_LOCKDOWN,
++ gcvTASK_UNLOCK_VIDEO_MEMORY,
++ gcvTASK_FREE_VIDEO_MEMORY,
++ gcvTASK_FREE_CONTIGUOUS_MEMORY,
++ gcvTASK_UNMAP_USER_MEMORY
++}
++gceTASK;
++
++typedef struct _gcsTASK_HEADER * gcsTASK_HEADER_PTR;
++typedef struct _gcsTASK_HEADER
++{
++ /* Task ID. */
++ IN gceTASK id;
++}
++gcsTASK_HEADER;
++
++typedef struct _gcsTASK_LINK * gcsTASK_LINK_PTR;
++typedef struct _gcsTASK_LINK
++{
++ /* Task ID (gcvTASK_LINK). */
++ IN gceTASK id;
++
++ /* Pointer to the next task container. */
++ IN gctPOINTER cotainer;
++
++ /* Pointer to the next task from the next task container. */
++ IN gcsTASK_HEADER_PTR task;
++}
++gcsTASK_LINK;
++
++typedef struct _gcsTASK_CLUSTER * gcsTASK_CLUSTER_PTR;
++typedef struct _gcsTASK_CLUSTER
++{
++ /* Task ID (gcvTASK_CLUSTER). */
++ IN gceTASK id;
++
++ /* Number of tasks in the cluster. */
++ IN gctUINT taskCount;
++}
++gcsTASK_CLUSTER;
++
++typedef struct _gcsTASK_INCREMENT * gcsTASK_INCREMENT_PTR;
++typedef struct _gcsTASK_INCREMENT
++{
++ /* Task ID (gcvTASK_INCREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to increment. */
++ IN gctUINT32 address;
++}
++gcsTASK_INCREMENT;
++
++typedef struct _gcsTASK_DECREMENT * gcsTASK_DECREMENT_PTR;
++typedef struct _gcsTASK_DECREMENT
++{
++ /* Task ID (gcvTASK_DECREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to decrement. */
++ IN gctUINT32 address;
++}
++gcsTASK_DECREMENT;
++
++typedef struct _gcsTASK_SIGNAL * gcsTASK_SIGNAL_PTR;
++typedef struct _gcsTASK_SIGNAL
++{
++ /* Task ID (gcvTASK_SIGNAL). */
++ IN gceTASK id;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ IN gctINT32 coid;
++ IN gctINT32 rcvid;
++#endif
++}
++gcsTASK_SIGNAL;
++
++typedef struct _gcsTASK_LOCKDOWN * gcsTASK_LOCKDOWN_PTR;
++typedef struct _gcsTASK_LOCKDOWN
++{
++ /* Task ID (gcvTASK_LOCKDOWN). */
++ IN gceTASK id;
++
++ /* Address of the user space counter. */
++ IN gctUINT32 userCounter;
++
++ /* Address of the kernel space counter. */
++ IN gctUINT32 kernelCounter;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++}
++gcsTASK_LOCKDOWN;
++
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY * gcsTASK_UNLOCK_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_UNLOCK_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY * gcsTASK_FREE_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_FREE_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY * gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */
++ IN gceTASK id;
++
++ /* Number of bytes allocated. */
++ IN gctSIZE_T bytes;
++
++ /* Physical address of allocation. */
++ IN gctPHYS_ADDR physical;
++
++ /* Logical address of allocation. */
++ IN gctPOINTER logical;
++}
++gcsTASK_FREE_CONTIGUOUS_MEMORY;
++
++typedef struct _gcsTASK_UNMAP_USER_MEMORY * gcsTASK_UNMAP_USER_MEMORY_PTR;
++typedef struct _gcsTASK_UNMAP_USER_MEMORY
++{
++ /* Task ID (gcvTASK_UNMAP_USER_MEMORY). */
++ IN gceTASK id;
++
++ /* Base address of user memory to unmap. */
++ IN gctPOINTER memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctSIZE_T size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. */
++ IN gctPOINTER info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++}
++gcsTASK_UNMAP_USER_MEMORY;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_vg_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_dump.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,88 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_dump_h_
++#define __gc_hal_dump_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++** FILE LAYOUT:
++**
++** gcsDUMP_FILE structure
++**
++** gcsDUMP_DATA frame
++** gcsDUMP_DATA or gcDUMP_DATA_SIZE records rendering the frame
++** gctUINT8 data[length]
++*/
++
++#define gcvDUMP_FILE_SIGNATURE gcmCC('g','c','D','B')
++
++typedef struct _gcsDUMP_FILE
++{
++ gctUINT32 signature; /* File signature */
++ gctSIZE_T length; /* Length of file */
++ gctUINT32 frames; /* Number of frames in file */
++}
++gcsDUMP_FILE;
++
++typedef enum _gceDUMP_TAG
++{
++ gcvTAG_SURFACE = gcmCC('s','u','r','f'),
++ gcvTAG_FRAME = gcmCC('f','r','m',' '),
++ gcvTAG_COMMAND = gcmCC('c','m','d',' '),
++ gcvTAG_INDEX = gcmCC('i','n','d','x'),
++ gcvTAG_STREAM = gcmCC('s','t','r','m'),
++ gcvTAG_TEXTURE = gcmCC('t','e','x','t'),
++ gcvTAG_RENDER_TARGET = gcmCC('r','n','d','r'),
++ gcvTAG_DEPTH = gcmCC('z','b','u','f'),
++ gcvTAG_RESOLVE = gcmCC('r','s','l','v'),
++ gcvTAG_DELETE = gcmCC('d','e','l',' '),
++}
++gceDUMP_TAG;
++
++typedef struct _gcsDUMP_SURFACE
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctUINT32 address; /* Address of the surface. */
++ gctINT16 width; /* Width of surface. */
++ gctINT16 height; /* Height of surface. */
++ gceSURF_FORMAT format; /* Surface pixel format. */
++ gctSIZE_T length; /* Number of bytes inside the surface. */
++}
++gcsDUMP_SURFACE;
++
++typedef struct _gcsDUMP_DATA
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctSIZE_T length; /* Number of bytes of data. */
++ gctUINT32 address; /* Address for the data. */
++}
++gcsDUMP_DATA;
++
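++/* Usage sketch (editorial addition, not part of the original header): walking
++** a dump file laid out as described above - a gcsDUMP_FILE header followed by
++** gcsDUMP_DATA records, each trailed by 'length' bytes of raw data. It assumes
++** the reader and the writer agree on gctSIZE_T width and structure padding.
++*/
++#if 0
++#include <stdio.h>
++
++static void
++ExampleWalkDumpFile(
++    const char * Path
++    )
++{
++    FILE * file = fopen(Path, "rb");
++    gcsDUMP_FILE header;
++    gcsDUMP_DATA record;
++
++    if (file == NULL)
++    {
++        return;
++    }
++
++    if ((fread(&header, sizeof(header), 1, file) == 1)
++    &&  (header.signature == gcvDUMP_FILE_SIGNATURE)
++    )
++    {
++        /* Visit every record; skip over the raw data that follows it. */
++        while (fread(&record, sizeof(record), 1, file) == 1)
++        {
++            fseek(file, (long) record.length, SEEK_CUR);
++        }
++    }
++
++    fclose(file);
++}
++#endif
++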
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_dump_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,627 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_eglplatform_h_
++#define __gc_hal_eglplatform_h_
++
++/* Include VDK types. */
++#include "gc_hal_types.h"
++#include "gc_hal_base.h"
++#include "gc_hal_eglplatform_type.h"
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
++/* Win32 and Windows CE platforms. */
++#include <windows.h>
++typedef HDC HALNativeDisplayType;
++typedef HWND HALNativeWindowType;
++typedef HBITMAP HALNativePixmapType;
++
++typedef struct __BITFIELDINFO{
++ BITMAPINFO bmi;
++ RGBQUAD bmiColors[2];
++} BITFIELDINFO;
++
++#elif defined(LINUX) && defined(EGL_API_DFB) && !defined(__APPLE__)
++#include <directfb.h>
++typedef struct _DFBDisplay * HALNativeDisplayType;
++typedef struct _DFBWindow * HALNativeWindowType;
++typedef struct _DFBPixmap * HALNativePixmapType;
++
++#elif defined(LINUX) && defined(EGL_API_FB) && !defined(__APPLE__)
++
++#if defined(EGL_API_WL)
++/* Wayland platform. */
++#include "wayland-server.h"
++#include <wayland-egl.h>
++
++#define WL_EGL_NUM_BACKBUFFERS 3
++
++typedef struct _gcsWL_VIV_BUFFER
++{
++ struct wl_resource *wl_buffer;
++ gcoSURF surface;
++ gctINT32 width, height;
++} gcsWL_VIV_BUFFER;
++
++typedef struct _gcsWL_EGL_DISPLAY
++{
++ struct wl_display* wl_display;
++ struct wl_viv* wl_viv;
++ struct wl_registry *registry;
++ struct wl_event_queue *wl_queue;
++} gcsWL_EGL_DISPLAY;
++
++typedef struct _gcsWL_EGL_BUFFER_INFO
++{
++ gctINT32 width;
++ gctINT32 height;
++ gctINT32 stride;
++ gceSURF_FORMAT format;
++ gcuVIDMEM_NODE_PTR node;
++ gcePOOL pool;
++ gctUINT bytes;
++ gcoSURF surface;
++ gcoSURF attached_surface;
++ gctINT32 invalidate;
++ gctBOOL locked;
++} gcsWL_EGL_BUFFER_INFO;
++
++typedef struct _gcsWL_EGL_BUFFER
++{
++ struct wl_buffer* wl_buffer;
++ gcsWL_EGL_BUFFER_INFO info;
++} gcsWL_EGL_BUFFER;
++
++typedef struct _gcsWL_EGL_WINDOW_INFO
++{
++ gctINT32 dx;
++ gctINT32 dy;
++ gctUINT width;
++ gctUINT height;
++ gctINT32 attached_width;
++ gctINT32 attached_height;
++ gceSURF_FORMAT format;
++ gctUINT bpp;
++} gcsWL_EGL_WINDOW_INFO;
++
++struct wl_egl_window
++{
++ gcsWL_EGL_DISPLAY* display;
++ gcsWL_EGL_BUFFER backbuffers[WL_EGL_NUM_BACKBUFFERS];
++ gcsWL_EGL_WINDOW_INFO info;
++ gctUINT current;
++ struct wl_surface* surface;
++ struct wl_callback* frame_callback;
++};
++
++typedef void* HALNativeDisplayType;
++typedef void* HALNativeWindowType;
++typedef void* HALNativePixmapType;
++#else
++/* Linux platform for FBDEV. */
++typedef struct _FBDisplay * HALNativeDisplayType;
++typedef struct _FBWindow * HALNativeWindowType;
++typedef struct _FBPixmap * HALNativePixmapType;
++#endif
++#elif defined(__ANDROID__) || defined(ANDROID)
++
++struct egl_native_pixmap_t;
++
++#if ANDROID_SDK_VERSION >= 9
++ #include <android/native_window.h>
++
++ typedef struct ANativeWindow* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t* HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#else
++ struct android_native_window_t;
++ typedef struct android_native_window_t* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t * HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#endif
++
++#elif defined(LINUX) || defined(__APPLE__)
++/* X11 platform. */
++#include <X11/Xlib.h>
++#include <X11/Xutil.h>
++
++typedef Display * HALNativeDisplayType;
++typedef Window HALNativeWindowType;
++
++#ifdef CUSTOM_PIXMAP
++typedef void * HALNativePixmapType;
++#else
++typedef Pixmap HALNativePixmapType;
++#endif /* CUSTOM_PIXMAP */
++
++/* Rename some badly named X defines. */
++#ifdef Status
++# define XStatus int
++# undef Status
++#endif
++#ifdef Always
++# define XAlways 2
++# undef Always
++#endif
++#ifdef CurrentTime
++# undef CurrentTime
++# define XCurrentTime 0
++#endif
++
++#elif defined(__QNXNTO__)
++#include <screen/screen.h>
++
++/* VOID */
++typedef int HALNativeDisplayType;
++typedef screen_window_t HALNativeWindowType;
++typedef screen_pixmap_t HALNativePixmapType;
++
++#else
++
++#error "Platform not recognized"
++
++/* VOID */
++typedef void * HALNativeDisplayType;
++typedef void * HALNativeWindowType;
++typedef void * HALNativePixmapType;
++
++#endif
++
++/* define DUMMY according to the system */
++#if defined(EGL_API_WL)
++# define WL_DUMMY (31415926)
++# define EGL_DUMMY WL_DUMMY
++#elif defined(__ANDROID__) || defined(ANDROID)
++# define ANDROID_DUMMY (31415926)
++# define EGL_DUMMY ANDROID_DUMMY
++#else
++# define EGL_DUMMY (31415926)
++#endif
++
++/*******************************************************************************
++** Display. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_GetDisplay(
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayByIndex(
++ IN gctINT DisplayIndex,
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfo(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctSIZE_T * Physical,
++ OUT gctINT * Stride,
++ OUT gctINT * BitsPerPixel
++ );
++
++
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetNextDisplayInfoExByIndex(
++ IN gctINT Index,
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbuffer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtualEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER Context,
++ IN gcoSURF Surface,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval
++);
++
++gceSTATUS
++gcoOS_GetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT_PTR Min,
++ IN gctINT_PTR Max
++);
++
++gceSTATUS
++gcoOS_DisplayBufferRegions(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT NumRects,
++ IN gctINT_PTR Rects
++ );
++
++gceSTATUS
++gcoOS_DestroyDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_InitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_DeinitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx2(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbufferEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_IsValidDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_GetNativeVisualId(
++ IN HALNativeDisplayType Display,
++ OUT gctINT* nativeVisualId
++ );
++
++gctBOOL
++gcoOS_SynchronousFlip(
++ IN HALNativeDisplayType Display
++ );
++
++/*******************************************************************************
++** Windows. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreateWindow(
++ IN HALNativeDisplayType Display,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height,
++ OUT HALNativeWindowType * Window
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset
++ );
++
++gceSTATUS
++gcoOS_DestroyWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_DrawImage(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_GetImage(
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ OUT gctINT * BitsPerPixel,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_DrawImageEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits,
++ IN gceSURF_FORMAT Format
++ );
++
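++/* Usage sketch (editorial addition, not part of the original header): creating
++** a native window on the default display and querying its geometry. Passing
++** gcvNULL as the display context and 0,0 as the origin is an assumption of
++** this sketch; normally the EGL layer drives these calls.
++*/
++#if 0
++static gceSTATUS
++ExampleCreateWindow(
++    IN gctINT Width,
++    IN gctINT Height
++    )
++{
++    gceSTATUS status;
++    HALNativeDisplayType display;
++    HALNativeWindowType window;
++    gctINT x, y, width, height, bitsPerPixel;
++    gctUINT offset;
++
++    status = gcoOS_GetDisplay(&display, gcvNULL);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    status = gcoOS_CreateWindow(display, 0, 0, Width, Height, &window);
++    if (status != gcvSTATUS_OK)
++    {
++        gcoOS_DestroyDisplay(display);
++        return status;
++    }
++
++    /* Read back what the platform actually gave us. */
++    status = gcoOS_GetWindowInfo(display, window,
++                                 &x, &y, &width, &height,
++                                 &bitsPerPixel, &offset);
++
++    gcoOS_DestroyWindow(display, window);
++    gcoOS_DestroyDisplay(display);
++
++    return status;
++}
++#endif
++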
++/*******************************************************************************
++** Pixmaps. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreatePixmap(
++ IN HALNativeDisplayType Display,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ OUT HALNativePixmapType * Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DrawPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_CopyPixmapBits(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctUINT DstWidth,
++ IN gctUINT DstHeight,
++ IN gctINT DstStride,
++ IN gceSURF_FORMAT DstFormat,
++ OUT gctPOINTER DstBits
++ );
++
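++/* Usage sketch (editorial addition, not part of the original header): an
++** offscreen pixmap with CPU-accessible bits, using only the calls declared
++** above. The 32 bpp request is an arbitrary choice for the sketch.
++*/
++#if 0
++static gceSTATUS
++ExampleCreatePixmap(
++    IN HALNativeDisplayType Display,
++    IN gctINT Width,
++    IN gctINT Height
++    )
++{
++    gceSTATUS status;
++    HALNativePixmapType pixmap;
++    gctINT width, height, bitsPerPixel, stride;
++    gctPOINTER bits;
++
++    status = gcoOS_CreatePixmap(Display, Width, Height, 32, &pixmap);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Query the CPU view of the pixmap (stride and mapped bits). */
++    status = gcoOS_GetPixmapInfo(Display, pixmap,
++                                 &width, &height, &bitsPerPixel,
++                                 &stride, &bits);
++
++    gcoOS_DestroyPixmap(Display, pixmap);
++
++    return status;
++}
++#endif
++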
++/*******************************************************************************
++** OS relative. ****************************************************************
++*/
++gceSTATUS
++gcoOS_LoadEGLLibrary(
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeEGLLibrary(
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_ShowWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_HideWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_SetWindowTitle(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_CapturePointer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_GetEvent(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT halEvent * Event
++ );
++
++gceSTATUS
++gcoOS_CreateClientBuffer(
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT Format,
++ IN gctINT Type,
++ OUT gctPOINTER * ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_GetClientBufferInfo(
++ IN gctPOINTER ClientBuffer,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyClientBuffer(
++ IN gctPOINTER ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_DestroyContext(
++ IN gctPOINTER Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_CreateContext(
++ IN gctPOINTER LocalDisplay,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_MakeCurrent(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType DrawDrawable,
++ IN HALNativeWindowType ReadDrawable,
++ IN gctPOINTER Context,
++ IN gcoSURF ResolveTarget
++ );
++
++gceSTATUS
++gcoOS_CreateDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++
++gceSTATUS
++gcoOS_DestroyDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++gceSTATUS
++gcoOS_SwapBuffers(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height
++ );
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_eglplatform_type.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,286 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_type_h_
++#define __gc_hal_eglplatform_type_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Events. *********************************************************************
++*/
++
++typedef enum _halEventType
++{
++ /* Keyboard event. */
++ HAL_KEYBOARD,
++
++ /* Mouse move event. */
++ HAL_POINTER,
++
++ /* Mouse button event. */
++ HAL_BUTTON,
++
++ /* Application close event. */
++ HAL_CLOSE,
++
++ /* Application window has been updated. */
++ HAL_WINDOW_UPDATE
++}
++halEventType;
++
++/* Scancodes for keyboard. */
++typedef enum _halKeys
++{
++ HAL_UNKNOWN = -1,
++
++ HAL_BACKSPACE = 0x08,
++ HAL_TAB,
++ HAL_ENTER = 0x0D,
++ HAL_ESCAPE = 0x1B,
++
++ HAL_SPACE = 0x20,
++ HAL_SINGLEQUOTE = 0x27,
++ HAL_PAD_ASTERISK = 0x2A,
++ HAL_COMMA = 0x2C,
++ HAL_HYPHEN,
++ HAL_PERIOD,
++ HAL_SLASH,
++ HAL_0,
++ HAL_1,
++ HAL_2,
++ HAL_3,
++ HAL_4,
++ HAL_5,
++ HAL_6,
++ HAL_7,
++ HAL_8,
++ HAL_9,
++ HAL_SEMICOLON = 0x3B,
++ HAL_EQUAL = 0x3D,
++ HAL_A = 0x41,
++ HAL_B,
++ HAL_C,
++ HAL_D,
++ HAL_E,
++ HAL_F,
++ HAL_G,
++ HAL_H,
++ HAL_I,
++ HAL_J,
++ HAL_K,
++ HAL_L,
++ HAL_M,
++ HAL_N,
++ HAL_O,
++ HAL_P,
++ HAL_Q,
++ HAL_R,
++ HAL_S,
++ HAL_T,
++ HAL_U,
++ HAL_V,
++ HAL_W,
++ HAL_X,
++ HAL_Y,
++ HAL_Z,
++ HAL_LBRACKET,
++ HAL_BACKSLASH,
++ HAL_RBRACKET,
++ HAL_BACKQUOTE = 0x60,
++
++ HAL_F1 = 0x80,
++ HAL_F2,
++ HAL_F3,
++ HAL_F4,
++ HAL_F5,
++ HAL_F6,
++ HAL_F7,
++ HAL_F8,
++ HAL_F9,
++ HAL_F10,
++ HAL_F11,
++ HAL_F12,
++
++ HAL_LCTRL,
++ HAL_RCTRL,
++ HAL_LSHIFT,
++ HAL_RSHIFT,
++ HAL_LALT,
++ HAL_RALT,
++ HAL_CAPSLOCK,
++ HAL_NUMLOCK,
++ HAL_SCROLLLOCK,
++ HAL_PAD_0,
++ HAL_PAD_1,
++ HAL_PAD_2,
++ HAL_PAD_3,
++ HAL_PAD_4,
++ HAL_PAD_5,
++ HAL_PAD_6,
++ HAL_PAD_7,
++ HAL_PAD_8,
++ HAL_PAD_9,
++ HAL_PAD_HYPHEN,
++ HAL_PAD_PLUS,
++ HAL_PAD_SLASH,
++ HAL_PAD_PERIOD,
++ HAL_PAD_ENTER,
++ HAL_SYSRQ,
++ HAL_PRNTSCRN,
++ HAL_BREAK,
++ HAL_UP,
++ HAL_LEFT,
++ HAL_RIGHT,
++ HAL_DOWN,
++ HAL_HOME,
++ HAL_END,
++ HAL_PGUP,
++ HAL_PGDN,
++ HAL_INSERT,
++ HAL_DELETE,
++ HAL_LWINDOW,
++ HAL_RWINDOW,
++ HAL_MENU,
++ HAL_POWER,
++ HAL_SLEEP,
++ HAL_WAKE
++}
++halKeys;
++
++/* Structure that defines the keyboard mapping. */
++typedef struct _halKeyMap
++{
++ /* Normal key. */
++ halKeys normal;
++
++ /* Extended key. */
++ halKeys extended;
++}
++halKeyMap;
++
++/* Event structure. */
++typedef struct _halEvent
++{
++ /* Event type. */
++ halEventType type;
++
++ /* Event data union. */
++ union _halEventData
++ {
++ /* Event data for keyboard. */
++ struct _halKeyboard
++ {
++ /* Scancode. */
++ halKeys scancode;
++
++ /* ASCII character of the key pressed. */
++ char key;
++
++ /* Flag whether the key was pressed (1) or released (0). */
++ char pressed;
++ }
++ keyboard;
++
++ /* Event data for pointer. */
++ struct _halPointer
++ {
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ pointer;
++
++ /* Event data for mouse buttons. */
++ struct _halButton
++ {
++ /* Left button state. */
++ int left;
++
++ /* Middle button state. */
++ int middle;
++
++ /* Right button state. */
++ int right;
++
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ button;
++ }
++ data;
++}
++halEvent;
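++
++/*
++** Illustrative usage sketch (not part of the original header): a minimal
++** event-dispatch loop built on halEvent/halEventType. It assumes a display
++** and window obtained elsewhere, uses gcoOS_GetEvent as declared in
++** gc_hal_eglplatform.h, and assumes gcvSTATUS_OK from gc_hal_types.h; the
++** exact return behaviour when no event is pending is driver-specific.
++**
++**     halEvent event;
++**
++**     while (gcoOS_GetEvent(display, window, &event) == gcvSTATUS_OK)
++**     {
++**         switch (event.type)
++**         {
++**         case HAL_KEYBOARD:
++**             if (event.data.keyboard.pressed
++**             &&  (event.data.keyboard.scancode == HAL_ESCAPE))
++**             {
++**                 done = 1;
++**             }
++**             break;
++**
++**         case HAL_POINTER:
++**             cursorX = event.data.pointer.x;
++**             cursorY = event.data.pointer.y;
++**             break;
++**
++**         case HAL_CLOSE:
++**             done = 1;
++**             break;
++**
++**         default:
++**             break;
++**         }
++**     }
++*/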
++
++/* halDISPLAY_INFO structure defining the information returned by
++ vdkGetDisplayInfoEx. */
++typedef struct _halDISPLAY_INFO
++{
++ /* The size of the display in pixels. */
++ int width;
++ int height;
++
++ /* The stride of the display. -1 is returned if the stride is not known
++ ** for the specified display.*/
++ int stride;
++
++ /* The color depth of the display in bits per pixel. */
++ int bitsPerPixel;
++
++ /* The logical pointer to the display memory buffer. NULL is returned
++ ** if the pointer is not known for the specified display. */
++ void * logical;
++
++ /* The physical address of the display memory buffer. ~0 is returned
++ ** if the address is not known for the specified display. */
++ unsigned long physical;
++
++ int wrapFB; /* true if compositor, false otherwise. */
++
++#ifndef __QNXNTO__
++ /* 355_FB_MULTI_BUFFER */
++ int multiBuffer;
++ int backBufferY;
++#endif
++
++ /* The color info of the display. */
++ unsigned int alphaLength;
++ unsigned int alphaOffset;
++ unsigned int redLength;
++ unsigned int redOffset;
++ unsigned int greenLength;
++ unsigned int greenOffset;
++ unsigned int blueLength;
++ unsigned int blueOffset;
++
++ /* Display flip support. */
++ int flip;
++}
++halDISPLAY_INFO;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_type_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,2053 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_h_
++#define __gc_hal_engine_h_
++
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_engine_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoSTREAM * gcoSTREAM;
++typedef struct _gcoVERTEX * gcoVERTEX;
++typedef struct _gcoTEXTURE * gcoTEXTURE;
++typedef struct _gcoINDEX * gcoINDEX;
++typedef struct _gcsVERTEX_ATTRIBUTES * gcsVERTEX_ATTRIBUTES_PTR;
++typedef struct _gcoVERTEXARRAY * gcoVERTEXARRAY;
++
++#define gcdATTRIBUTE_COUNT 16
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++/* Shading format. */
++typedef enum _gceSHADING
++{
++ gcvSHADING_SMOOTH,
++ gcvSHADING_FLAT_D3D,
++ gcvSHADING_FLAT_OPENGL,
++}
++gceSHADING;
++
++/* Culling modes. */
++typedef enum _gceCULL
++{
++ gcvCULL_NONE,
++ gcvCULL_CCW,
++ gcvCULL_CW,
++}
++gceCULL;
++
++/* Fill modes. */
++typedef enum _gceFILL
++{
++ gcvFILL_POINT,
++ gcvFILL_WIRE_FRAME,
++ gcvFILL_SOLID,
++}
++gceFILL;
++
++/* Compare modes. */
++typedef enum _gceCOMPARE
++{
++ gcvCOMPARE_NEVER,
++ gcvCOMPARE_NOT_EQUAL,
++ gcvCOMPARE_LESS,
++ gcvCOMPARE_LESS_OR_EQUAL,
++ gcvCOMPARE_EQUAL,
++ gcvCOMPARE_GREATER,
++ gcvCOMPARE_GREATER_OR_EQUAL,
++ gcvCOMPARE_ALWAYS,
++ gcvCOMPARE_INVALID = -1
++}
++gceCOMPARE;
++
++/* Stencil modes. */
++typedef enum _gceSTENCIL_MODE
++{
++ gcvSTENCIL_NONE,
++ gcvSTENCIL_SINGLE_SIDED,
++ gcvSTENCIL_DOUBLE_SIDED,
++}
++gceSTENCIL_MODE;
++
++/* Stencil operations. */
++typedef enum _gceSTENCIL_OPERATION
++{
++ gcvSTENCIL_KEEP,
++ gcvSTENCIL_REPLACE,
++ gcvSTENCIL_ZERO,
++ gcvSTENCIL_INVERT,
++ gcvSTENCIL_INCREMENT,
++ gcvSTENCIL_DECREMENT,
++ gcvSTENCIL_INCREMENT_SATURATE,
++ gcvSTENCIL_DECREMENT_SATURATE,
++ gcvSTENCIL_OPERATION_INVALID = -1
++}
++gceSTENCIL_OPERATION;
++
++/* Stencil selection. */
++typedef enum _gceSTENCIL_WHERE
++{
++ gcvSTENCIL_FRONT,
++ gcvSTENCIL_BACK,
++}
++gceSTENCIL_WHERE;
++
++/* Texture addressing selection. */
++typedef enum _gceTEXTURE_WHICH
++{
++ gcvTEXTURE_S,
++ gcvTEXTURE_T,
++ gcvTEXTURE_R,
++}
++gceTEXTURE_WHICH;
++
++/* Texture addressing modes. */
++typedef enum _gceTEXTURE_ADDRESSING
++{
++ gcvTEXTURE_WRAP,
++ gcvTEXTURE_CLAMP,
++ gcvTEXTURE_BORDER,
++ gcvTEXTURE_MIRROR,
++ gcvTEXTURE_MIRROR_ONCE,
++}
++gceTEXTURE_ADDRESSING;
++
++/* Texture filters. */
++typedef enum _gceTEXTURE_FILTER
++{
++ gcvTEXTURE_NONE,
++ gcvTEXTURE_POINT,
++ gcvTEXTURE_LINEAR,
++ gcvTEXTURE_ANISOTROPIC,
++}
++gceTEXTURE_FILTER;
++
++/* Primitive types. */
++typedef enum _gcePRIMITIVE
++{
++ gcvPRIMITIVE_POINT_LIST,
++ gcvPRIMITIVE_LINE_LIST,
++ gcvPRIMITIVE_LINE_STRIP,
++ gcvPRIMITIVE_LINE_LOOP,
++ gcvPRIMITIVE_TRIANGLE_LIST,
++ gcvPRIMITIVE_TRIANGLE_STRIP,
++ gcvPRIMITIVE_TRIANGLE_FAN,
++ gcvPRIMITIVE_RECTANGLE,
++}
++gcePRIMITIVE;
++
++/* Index types. */
++typedef enum _gceINDEX_TYPE
++{
++ gcvINDEX_8,
++ gcvINDEX_16,
++ gcvINDEX_32,
++}
++gceINDEX_TYPE;
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Query the target capabilities. */
++gceSTATUS
++gcoHAL_QueryTargetCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MultiTargetCount,
++ OUT gctUINT * MaxSamples
++ );
++
++gceSTATUS
++gcoHAL_SetDepthOnly(
++ IN gcoHAL Hal,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoHAL_QueryShaderCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureMaxAniso(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxAnisoValue
++ );
++
++gceSTATUS
++gcoHAL_QueryStreamCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * MaxAttributes,
++ OUT gctUINT32 * MaxStreamSize,
++ OUT gctUINT32 * NumberOfStreams,
++ OUT gctUINT32 * Alignment
++ );
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*--------------------------------- gcoSURF 3D --------------------------------*/
++
++/* Copy surface. */
++gceSTATUS
++gcoSURF_Copy(
++ IN gcoSURF Surface,
++ IN gcoSURF Source
++ );
++
++/* Clear surface. */
++gceSTATUS
++gcoSURF_Clear(
++ IN gcoSURF Surface,
++ IN gctUINT Flags
++ );
++
++/* Set number of samples for a gcoSURF object. */
++gceSTATUS
++gcoSURF_SetSamples(
++ IN gcoSURF Surface,
++ IN gctUINT Samples
++ );
++
++/* Get the number of samples per pixel. */
++gceSTATUS
++gcoSURF_GetSamples(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR Samples
++ );
++
++/* Clear rectangular surface. */
++gceSTATUS
++gcoSURF_ClearRect(
++ IN gcoSURF Surface,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctUINT Flags
++ );
++
++/* TO BE REMOVED */
++ gceSTATUS
++ depr_gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight
++ );
++
++ gceSTATUS
++ depr_gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resample surface. */
++gceSTATUS
++gcoSURF_Resample(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++/* Resolve surface. */
++gceSTATUS
++gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++gceSTATUS
++gcoSURF_IsHWResolveable(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Set surface resolvability. */
++gceSTATUS
++gcoSURF_SetResolvability(
++ IN gcoSURF Surface,
++ IN gctBOOL Resolvable
++ );
++
++gceSTATUS
++gcoSURF_IsRenderable(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_IsFormatRenderableAsRT(
++ IN gcoSURF Surface
++ );
++
++#if gcdSYNC
++gceSTATUS
++gcoSURF_GetFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_WaitFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSTREAM_GetFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoSTREAM_WaitFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoINDEX_GetFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoINDEX_WaitFence(
++ IN gcoINDEX index
++ );
++#endif
++
++/******************************************************************************\
++******************************** gcoINDEX Object *******************************
++\******************************************************************************/
++
++/* Construct a new gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoINDEX * Index
++ );
++
++/* Destroy a gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Destroy(
++ IN gcoINDEX Index
++ );
++
++/* Lock index in memory. */
++gceSTATUS
++gcoINDEX_Lock(
++ IN gcoINDEX Index,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock index that was previously locked with gcoINDEX_Lock. */
++gceSTATUS
++gcoINDEX_Unlock(
++ IN gcoINDEX Index
++ );
++
++/* Upload index data into memory. */
++gceSTATUS
++gcoINDEX_Load(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE IndexType,
++ IN gctUINT32 IndexCount,
++ IN gctPOINTER IndexBuffer
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_Bind(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type
++ );
++
++/* Bind an index object to the hardware at an offset. */
++gceSTATUS
++gcoINDEX_BindOffset(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset
++ );
++
++/* Free existing index buffer. */
++gceSTATUS
++gcoINDEX_Free(
++ IN gcoINDEX Index
++ );
++
++/* Upload data into an index buffer. */
++gceSTATUS
++gcoINDEX_Upload(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload data into an index buffer starting at an offset. */
++gceSTATUS
++gcoINDEX_UploadOffset(
++ IN gcoINDEX Index,
++ IN gctUINT32 Offset,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Merge Index2 into Index1 starting at offset 0; Index2 must be a subset of Index1. */
++gceSTATUS
++gcoINDEX_Merge(
++ IN gcoINDEX Index1,
++ IN gcoINDEX Index2
++ );
++
++/* Check whether the index buffer is large enough for this draw. */
++gctBOOL
++gcoINDEX_CheckRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctINT Count,
++ IN gctUINT32 Indices
++ );
++
++/* Query the index capabilities. */
++gceSTATUS
++gcoINDEX_QueryCaps(
++ OUT gctBOOL * Index8,
++ OUT gctBOOL * Index16,
++ OUT gctBOOL * Index32,
++ OUT gctUINT * MaxIndex
++ );
++
++/* Determine the index range in the current index buffer. */
++gceSTATUS
++gcoINDEX_GetIndexRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Dynamic buffer management. */
++gceSTATUS
++gcoINDEX_SetDynamic(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++gceSTATUS
++gcoINDEX_UploadDynamic(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes
++ );
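++
++/*
++** Illustrative usage sketch (not part of the original header) of the typical
++** index buffer lifetime: construct, load client indices, bind for indexed
++** draws, destroy. The gcoHAL handle ("hal"), gcvNULL and gcvSTATUS_OK are
++** assumed to come from the surrounding HAL headers.
++**
++**     static unsigned short indices[] = { 0, 1, 2, 2, 1, 3 };
++**     gcoINDEX index = gcvNULL;
++**
++**     if (gcoINDEX_Construct(hal, &index) == gcvSTATUS_OK)
++**     {
++**         gcoINDEX_Load(index, gcvINDEX_16, 6, indices);
++**         gcoINDEX_Bind(index, gcvINDEX_16);
++**
++**         ... issue indexed draws through gco3D ...
++**
++**         gcoINDEX_Destroy(index);
++**     }
++*/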
++
++/******************************************************************************\
++********************************** gco3D Object *********************************
++\******************************************************************************/
++
++/* Clear flags. */
++typedef enum _gceCLEAR
++{
++ gcvCLEAR_COLOR = 0x1,
++ gcvCLEAR_DEPTH = 0x2,
++ gcvCLEAR_STENCIL = 0x4,
++ gcvCLEAR_HZ = 0x8,
++ gcvCLEAR_HAS_VAA = 0x10,
++}
++gceCLEAR;
++
++/* Blending targets. */
++typedef enum _gceBLEND_UNIT
++{
++ gcvBLEND_SOURCE,
++ gcvBLEND_TARGET,
++}
++gceBLEND_UNIT;
++
++/* Construct a new gco3D object. */
++gceSTATUS
++gco3D_Construct(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++/* Destroy a gco3D object. */
++gceSTATUS
++gco3D_Destroy(
++ IN gco3D Engine
++ );
++
++/* Set 3D API type. */
++gceSTATUS
++gco3D_SetAPI(
++ IN gco3D Engine,
++ IN gceAPI ApiType
++ );
++
++/* Set render target. */
++gceSTATUS
++gco3D_SetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset render target. */
++gceSTATUS
++gco3D_UnsetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set depth buffer. */
++gceSTATUS
++gco3D_SetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset depth buffer. */
++gceSTATUS
++gco3D_UnsetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set viewport. */
++gceSTATUS
++gco3D_SetViewport(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set scissors. */
++gceSTATUS
++gco3D_SetScissors(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set clear color. */
++gceSTATUS
++gco3D_SetClearColor(
++ IN gco3D Engine,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++/* Set fixed point clear color. */
++gceSTATUS
++gco3D_SetClearColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point clear color. */
++gceSTATUS
++gco3D_SetClearColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set fixed point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Depth
++ );
++
++/* Set floating point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthF(
++ IN gco3D Engine,
++ IN gctFLOAT Depth
++ );
++
++/* Set clear stencil. */
++gceSTATUS
++gco3D_SetClearStencil(
++ IN gco3D Engine,
++ IN gctUINT32 Stencil
++ );
++
++/* Clear a rectangular sub-surface. */
++gceSTATUS
++gco3D_ClearRect(
++ IN gco3D Engine,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom,
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctUINT32 Flags
++ );
++
++/* Clear surface. */
++gceSTATUS
++gco3D_Clear(
++ IN gco3D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctUINT32 Flags
++ );
++
++
++/* Clear tile status. */
++gceSTATUS
++gco3D_ClearTileStatus(
++ IN gco3D Engine,
++ IN gcsSURF_INFO_PTR Surface,
++ IN gctUINT32 TileStatusAddress,
++ IN gctUINT32 Flags
++ );
++
++/* Set shading mode. */
++gceSTATUS
++gco3D_SetShading(
++ IN gco3D Engine,
++ IN gceSHADING Shading
++ );
++
++/* Enable or disable blending. */
++gceSTATUS
++gco3D_EnableBlending(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set blending function. */
++gceSTATUS
++gco3D_SetBlendFunction(
++ IN gco3D Engine,
++ IN gceBLEND_UNIT Unit,
++ IN gceBLEND_FUNCTION FunctionRGB,
++ IN gceBLEND_FUNCTION FunctionAlpha
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_SetBlendMode(
++ IN gco3D Engine,
++ IN gceBLEND_MODE ModeRGB,
++ IN gceBLEND_MODE ModeAlpha
++ );
++
++/* Set blending color. */
++gceSTATUS
++gco3D_SetBlendColor(
++ IN gco3D Engine,
++ IN gctUINT Red,
++ IN gctUINT Green,
++ IN gctUINT Blue,
++ IN gctUINT Alpha
++ );
++
++/* Set fixed point blending color. */
++gceSTATUS
++gco3D_SetBlendColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point blending color. */
++gceSTATUS
++gco3D_SetBlendColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set culling mode. */
++gceSTATUS
++gco3D_SetCulling(
++ IN gco3D Engine,
++ IN gceCULL Mode
++ );
++
++/* Enable point size */
++gceSTATUS
++gco3D_SetPointSizeEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set point sprite */
++gceSTATUS
++gco3D_SetPointSprite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set fill mode. */
++gceSTATUS
++gco3D_SetFill(
++ IN gco3D Engine,
++ IN gceFILL Mode
++ );
++
++/* Set depth compare mode. */
++gceSTATUS
++gco3D_SetDepthCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Enable depth writing. */
++gceSTATUS
++gco3D_EnableDepthWrite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth mode. */
++gceSTATUS
++gco3D_SetDepthMode(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeX(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFIXED_POINT Near,
++ IN gctFIXED_POINT Far
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeF(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Set last pixel enable */
++gceSTATUS
++gco3D_SetLastPixelEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth scale and bias. */
++gceSTATUS
++gco3D_SetDepthScaleBiasX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT DepthScale,
++ IN gctFIXED_POINT DepthBias
++ );
++
++gceSTATUS
++gco3D_SetDepthScaleBiasF(
++ IN gco3D Engine,
++ IN gctFLOAT DepthScale,
++ IN gctFLOAT DepthBias
++ );
++
++/* Set depth near and far clipping plane. */
++gceSTATUS
++gco3D_SetDepthPlaneF(
++ IN gco3D Engine,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Enable or disable dithering. */
++gceSTATUS
++gco3D_EnableDither(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set color write enable bits. */
++gceSTATUS
++gco3D_SetColorWrite(
++ IN gco3D Engine,
++ IN gctUINT8 Enable
++ );
++
++/* Enable or disable early depth. */
++gceSTATUS
++gco3D_SetEarlyDepth(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModes(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Switch dynamic early depth mode. */
++gceSTATUS
++gco3D_SwitchDynamicEarlyDepthMode(
++ IN gco3D Engine
++ );
++
++/* Enable or disable dynamic early depth mode. */
++gceSTATUS
++gco3D_DisableDynamicEarlyDepthMode(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable depth-only mode. */
++gceSTATUS
++gco3D_SetDepthOnly(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++typedef struct _gcsSTENCIL_INFO * gcsSTENCIL_INFO_PTR;
++typedef struct _gcsSTENCIL_INFO
++{
++ gceSTENCIL_MODE mode;
++
++ gctUINT8 maskFront;
++ gctUINT8 maskBack;
++ gctUINT8 writeMaskFront;
++ gctUINT8 writeMaskBack;
++
++ gctUINT8 referenceFront;
++
++ gceCOMPARE compareFront;
++ gceSTENCIL_OPERATION passFront;
++ gceSTENCIL_OPERATION failFront;
++ gceSTENCIL_OPERATION depthFailFront;
++
++ gctUINT8 referenceBack;
++ gceCOMPARE compareBack;
++ gceSTENCIL_OPERATION passBack;
++ gceSTENCIL_OPERATION failBack;
++ gceSTENCIL_OPERATION depthFailBack;
++}
++gcsSTENCIL_INFO;
++
++/* Set stencil mode. */
++gceSTATUS
++gco3D_SetStencilMode(
++ IN gco3D Engine,
++ IN gceSTENCIL_MODE Mode
++ );
++
++/* Set stencil mask. */
++gceSTATUS
++gco3D_SetStencilMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back mask. */
++gceSTATUS
++gco3D_SetStencilMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil reference. */
++gceSTATUS
++gco3D_SetStencilReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctBOOL Front
++ );
++
++/* Set stencil compare. */
++gceSTATUS
++gco3D_SetStencilCompare(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceCOMPARE Compare
++ );
++
++/* Set stencil operation on pass. */
++gceSTATUS
++gco3D_SetStencilPass(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on fail. */
++gceSTATUS
++gco3D_SetStencilFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on depth fail. */
++gceSTATUS
++gco3D_SetStencilDepthFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set all stencil states at once. */
++gceSTATUS
++gco3D_SetStencilAll(
++ IN gco3D Engine,
++ IN gcsSTENCIL_INFO_PTR Info
++ );
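++
++/*
++** Illustrative usage sketch (not part of the original header): programming a
++** basic single-sided stencil test in one call through gco3D_SetStencilAll.
++** The gco3D handle ("engine") is assumed to come from gco3D_Construct.
++**
++**     gcsSTENCIL_INFO stencil;
++**
++**     stencil.mode           = gcvSTENCIL_SINGLE_SIDED;
++**     stencil.maskFront      = 0xFF;
++**     stencil.maskBack       = 0xFF;
++**     stencil.writeMaskFront = 0xFF;
++**     stencil.writeMaskBack  = 0xFF;
++**     stencil.referenceFront = 0x01;
++**     stencil.referenceBack  = 0x01;
++**     stencil.compareFront   = gcvCOMPARE_EQUAL;
++**     stencil.compareBack    = gcvCOMPARE_ALWAYS;
++**     stencil.passFront      = gcvSTENCIL_KEEP;
++**     stencil.failFront      = gcvSTENCIL_KEEP;
++**     stencil.depthFailFront = gcvSTENCIL_KEEP;
++**     stencil.passBack       = gcvSTENCIL_KEEP;
++**     stencil.failBack       = gcvSTENCIL_KEEP;
++**     stencil.depthFailBack  = gcvSTENCIL_KEEP;
++**
++**     gco3D_SetStencilAll(engine, &stencil);
++*/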
++
++typedef struct _gcsALPHA_INFO * gcsALPHA_INFO_PTR;
++typedef struct _gcsALPHA_INFO
++{
++ /* Alpha test states. */
++ gctBOOL test;
++ gceCOMPARE compare;
++ gctUINT8 reference;
++ gctFLOAT floatReference;
++
++ /* Alpha blending states. */
++ gctBOOL blend;
++
++ gceBLEND_FUNCTION srcFuncColor;
++ gceBLEND_FUNCTION srcFuncAlpha;
++ gceBLEND_FUNCTION trgFuncColor;
++ gceBLEND_FUNCTION trgFuncAlpha;
++
++ gceBLEND_MODE modeColor;
++ gceBLEND_MODE modeAlpha;
++
++ gctUINT32 color;
++}
++gcsALPHA_INFO;
++
++/* Enable or disable alpha test. */
++gceSTATUS
++gco3D_SetAlphaTest(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set alpha test compare. */
++gceSTATUS
++gco3D_SetAlphaCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Set alpha test reference in unsigned integer. */
++gceSTATUS
++gco3D_SetAlphaReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctFLOAT FloatReference
++ );
++
++/* Set alpha test reference in fixed point. */
++gceSTATUS
++gco3D_SetAlphaReferenceX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Reference
++ );
++
++/* Set alpha test reference in floating point. */
++gceSTATUS
++gco3D_SetAlphaReferenceF(
++ IN gco3D Engine,
++ IN gctFLOAT Reference
++ );
++
++/* Enable/Disable anti-alias line. */
++gceSTATUS
++gco3D_SetAntiAliasLine(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set texture slot for anti-alias line. */
++gceSTATUS
++gco3D_SetAALineTexSlot(
++ IN gco3D Engine,
++ IN gctUINT TexSlot
++ );
++
++/* Set anti-alias line width scale. */
++gceSTATUS
++gco3D_SetAALineWidth(
++ IN gco3D Engine,
++ IN gctFLOAT Width
++ );
++
++/* Draw a number of primitives. */
++gceSTATUS
++gco3D_DrawPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT StartVertex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++gceSTATUS
++gco3D_DrawPrimitivesCount(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT* StartVertex,
++ IN gctSIZE_T* VertexCount,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++
++/* Draw a number of primitives using offsets. */
++gceSTATUS
++gco3D_DrawPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives. */
++gceSTATUS
++gco3D_DrawIndexedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT BaseVertex,
++ IN gctINT StartIndex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives using offsets. */
++gceSTATUS
++gco3D_DrawIndexedPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 BaseOffset,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Enable or disable anti-aliasing. */
++gceSTATUS
++gco3D_SetAntiAlias(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Write data into the command buffer. */
++gceSTATUS
++gco3D_WriteBuffer(
++ IN gco3D Engine,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Aligned
++ );
++
++/* Send semaphore and stall until the semaphore is signalled. */
++gceSTATUS
++gco3D_Semaphore(
++ IN gco3D Engine,
++ IN gceWHERE From,
++ IN gceWHERE To,
++ IN gceHOW How);
++
++/* Set the subpixel centers. */
++gceSTATUS
++gco3D_SetCentroids(
++ IN gco3D Engine,
++ IN gctUINT32 Index,
++ IN gctPOINTER Centroids
++ );
++
++gceSTATUS
++gco3D_SetLogicOp(
++ IN gco3D Engine,
++ IN gctUINT8 Rop
++ );
++
++/* OCL thread walker information. */
++typedef struct _gcsTHREAD_WALKER_INFO * gcsTHREAD_WALKER_INFO_PTR;
++typedef struct _gcsTHREAD_WALKER_INFO
++{
++ gctUINT32 dimensions;
++ gctUINT32 traverseOrder;
++ gctUINT32 enableSwathX;
++ gctUINT32 enableSwathY;
++ gctUINT32 enableSwathZ;
++ gctUINT32 swathSizeX;
++ gctUINT32 swathSizeY;
++ gctUINT32 swathSizeZ;
++ gctUINT32 valueOrder;
++
++ gctUINT32 globalSizeX;
++ gctUINT32 globalOffsetX;
++ gctUINT32 globalSizeY;
++ gctUINT32 globalOffsetY;
++ gctUINT32 globalSizeZ;
++ gctUINT32 globalOffsetZ;
++
++ gctUINT32 workGroupSizeX;
++ gctUINT32 workGroupCountX;
++ gctUINT32 workGroupSizeY;
++ gctUINT32 workGroupCountY;
++ gctUINT32 workGroupSizeZ;
++ gctUINT32 workGroupCountZ;
++
++ gctUINT32 threadAllocation;
++}
++gcsTHREAD_WALKER_INFO;
++
++/* Start OCL thread walker. */
++gceSTATUS
++gco3D_InvokeThreadWalker(
++ IN gco3D Engine,
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
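++
++/*
++** Illustrative usage sketch (not part of the original header): dispatching a
++** one-dimensional OpenCL work load of 1024 work-items in groups of 64 through
++** the thread walker. The values are examples only; fields not shown are left
++** zero-initialized.
++**
++**     gcsTHREAD_WALKER_INFO info = { 0 };
++**
++**     info.dimensions      = 1;
++**     info.globalSizeX     = 1024;
++**     info.globalOffsetX   = 0;
++**     info.workGroupSizeX  = 64;
++**     info.workGroupCountX = 1024 / 64;
++**
++**     gco3D_InvokeThreadWalker(engine, &info);
++*/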
++
++/* Set w clip and w plane limit value. */
++gceSTATUS
++gco3D_SetWClipEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetWClipEnable(
++ IN gco3D Engine,
++ OUT gctBOOL * Enable
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitF(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Value
++ );
++
++
++gceSTATUS
++gco3D_SetWPlaneLimit(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- gco3D Fragment Processor ------------------------*/
++
++/* Set the fragment processor configuration. */
++gceSTATUS
++gco3D_SetFragmentConfiguration(
++ IN gco3D Engine,
++ IN gctBOOL ColorFromStream,
++ IN gctBOOL EnableFog,
++ IN gctBOOL EnableSmoothPoint,
++ IN gctUINT32 ClipPlanes
++ );
++
++/* Enable/disable texture stage operation. */
++gceSTATUS
++gco3D_EnableTextureStage(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL Enable
++ );
++
++/* Program the channel enable masks for the color texture function. */
++gceSTATUS
++gco3D_SetTextureColorMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the channel enable masks for the alpha texture function. */
++gceSTATUS
++gco3D_SetTextureAlphaMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the constant fragment color. */
++gceSTATUS
++gco3D_SetFragmentColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFragmentColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant fog color. */
++gceSTATUS
++gco3D_SetFogColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFogColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant texture color. */
++gceSTATUS
++gco3D_SetTetxureColorX(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetTetxureColorF(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Configure color texture function. */
++gceSTATUS
++gco3D_SetColorTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Configure alpha texture function. */
++gceSTATUS
++gco3D_SetAlphaTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Invoke OCL thread walker. */
++gceSTATUS
++gcoHARDWARE_InvokeThreadWalker(
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++/******************************************************************************\
++******************************* gcoTEXTURE Object *******************************
++\******************************************************************************/
++
++/* Cube faces. */
++typedef enum _gceTEXTURE_FACE
++{
++ gcvFACE_NONE,
++ gcvFACE_POSITIVE_X,
++ gcvFACE_NEGATIVE_X,
++ gcvFACE_POSITIVE_Y,
++ gcvFACE_NEGATIVE_Y,
++ gcvFACE_POSITIVE_Z,
++ gcvFACE_NEGATIVE_Z,
++}
++gceTEXTURE_FACE;
++
++#if gcdFORCE_MIPMAP
++typedef enum
++{
++ gcvForceMipDisabled = 0,
++ gcvForceMipEnable = 1,
++ gcvForceMipGenerated = 2,
++ gcvForceMipNever = 3,
++}gceFORCE_MIPMAP;
++#endif
++
++typedef struct _gcsTEXTURE
++{
++ /* Addressing modes. */
++ gceTEXTURE_ADDRESSING s;
++ gceTEXTURE_ADDRESSING t;
++ gceTEXTURE_ADDRESSING r;
++
++ /* Border color. */
++ gctUINT8 border[4];
++
++ /* Filters. */
++ gceTEXTURE_FILTER minFilter;
++ gceTEXTURE_FILTER magFilter;
++ gceTEXTURE_FILTER mipFilter;
++ gctUINT anisoFilter;
++ gctBOOL forceTopLevel;
++ gctBOOL autoMipmap;
++#if gcdFORCE_MIPMAP
++ gceFORCE_MIPMAP forceMipmap;
++#endif
++ /* Level of detail. */
++ gctFIXED_POINT lodBias;
++ gctFIXED_POINT lodMin;
++ gctFIXED_POINT lodMax;
++}
++gcsTEXTURE, * gcsTEXTURE_PTR;
++
++/* Construct a new gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Construct(
++ IN gcoHAL Hal,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Construct a new sized gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ConstructSized(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gctUINT MipMapCount,
++ IN gcePOOL Pool,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Destroy a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Destroy(
++ IN gcoTEXTURE Texture
++ );
++#if gcdFORCE_MIPMAP
++gceSTATUS
++gcoTEXTURE_DestroyForceMipmap(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_GetMipLevels(
++ IN gcoTEXTURE Texture,
++ OUT gctINT * levels
++ );
++#endif
++/* Replace a mipmap in gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ReplaceMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctINT imageFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool
++ );
++
++/* Upload data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Upload(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctINT Stride,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload data to a sub-region of a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadSub(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctINT Stride,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload YUV data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadYUV(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctPOINTER Memory[3],
++ IN gctINT Stride[3],
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload compressed data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressed(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload compressed data to a sub-region of a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressedSub(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT XOffset,
++ IN gctUINT YOffset,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Size
++ );
++
++/* Get the image format of a texture mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetImageFormat(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gctINT * ImageFormat
++ );
++
++/* Get gcoSURF object for a mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gcoSURF * Surface
++ );
++
++/* Get gcoSURF object for a mipmap level and face offset. */
++gceSTATUS
++gcoTEXTURE_GetMipMapFace(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ OUT gcoSURF * Surface,
++ OUT gctUINT32_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT imageFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromClient(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromSurface(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_SetMaxLevel(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Levels
++ );
++
++gceSTATUS
++gcoTEXTURE_SetEndianHint(
++ IN gcoTEXTURE Texture,
++ IN gceENDIAN_HINT EndianHint
++ );
++
++gceSTATUS
++gcoTEXTURE_Disable(
++ IN gcoHAL Hal,
++ IN gctINT Sampler
++ );
++
++gceSTATUS
++gcoTEXTURE_Flush(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_QueryCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoTEXTURE_GetTiling(
++ IN gcoTEXTURE Texture,
++ IN gctINT preferLevel,
++ OUT gceTILING * Tiling
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormat(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderable(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderableEx(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsComplete(
++ IN gcoTEXTURE Texture,
++ IN gctINT MaxLevel
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTexture(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info
++ );
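++
++/*
++** Illustrative usage sketch (not part of the original header): binding an
++** already uploaded texture to sampler 0 with clamped, linearly filtered
++** sampling. The gcoTEXTURE handle ("texture") and the API-specific "target"
++** value are assumed to come from the caller.
++**
++**     gcsTEXTURE info = { 0 };
++**
++**     info.s         = gcvTEXTURE_CLAMP;
++**     info.t         = gcvTEXTURE_CLAMP;
++**     info.r         = gcvTEXTURE_CLAMP;
++**     info.minFilter = gcvTEXTURE_LINEAR;
++**     info.magFilter = gcvTEXTURE_LINEAR;
++**     info.mipFilter = gcvTEXTURE_NONE;
++**
++**     gcoTEXTURE_BindTexture(texture, target, 0, &info);
++*/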
++
++/******************************************************************************\
++******************************* gcoSTREAM Object ******************************
++\******************************************************************************/
++
++typedef enum _gceVERTEX_FORMAT
++{
++ gcvVERTEX_BYTE,
++ gcvVERTEX_UNSIGNED_BYTE,
++ gcvVERTEX_SHORT,
++ gcvVERTEX_UNSIGNED_SHORT,
++ gcvVERTEX_INT,
++ gcvVERTEX_UNSIGNED_INT,
++ gcvVERTEX_FIXED,
++ gcvVERTEX_HALF,
++ gcvVERTEX_FLOAT,
++ gcvVERTEX_UNSIGNED_INT_10_10_10_2,
++ gcvVERTEX_INT_10_10_10_2,
++}
++gceVERTEX_FORMAT;
++
++gceSTATUS
++gcoSTREAM_Construct(
++ IN gcoHAL Hal,
++ OUT gcoSTREAM * Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Destroy(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Upload(
++ IN gcoSTREAM Stream,
++ IN gctCONST_POINTER Buffer,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Dynamic
++ );
++
++gceSTATUS
++gcoSTREAM_SetStride(
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoSTREAM_Lock(
++ IN gcoSTREAM Stream,
++ OUT gctPOINTER * Logical,
++ OUT gctUINT32 * Physical
++ );
++
++gceSTATUS
++gcoSTREAM_Unlock(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Reserve(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoSTREAM_Flush(
++ IN gcoSTREAM Stream
++ );
++
++/* Dynamic buffer API. */
++gceSTATUS
++gcoSTREAM_SetDynamic(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++typedef struct _gcsSTREAM_INFO
++{
++ gctUINT index;
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT components;
++ gctSIZE_T size;
++ gctCONST_POINTER data;
++ gctUINT stride;
++}
++gcsSTREAM_INFO, * gcsSTREAM_INFO_PTR;
++
++gceSTATUS
++gcoSTREAM_UploadDynamic(
++ IN gcoSTREAM Stream,
++ IN gctUINT VertexCount,
++ IN gctUINT InfoCount,
++ IN gcsSTREAM_INFO_PTR Info,
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation(
++ IN gcoSTREAM Stream,
++ IN gceCACHEOPERATION Operation
++ );
++
++/******************************************************************************\
++******************************** gcoVERTEX Object ******************************
++\******************************************************************************/
++
++typedef struct _gcsVERTEX_ATTRIBUTES
++{
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT32 components;
++ gctSIZE_T size;
++ gctUINT32 stream;
++ gctUINT32 offset;
++ gctUINT32 stride;
++}
++gcsVERTEX_ATTRIBUTES;
++
++gceSTATUS
++gcoVERTEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEX * Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Destroy(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Reset(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_EnableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index,
++ IN gceVERTEX_FORMAT Format,
++ IN gctBOOL Normalized,
++ IN gctUINT32 Components,
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoVERTEX_DisableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index
++ );
++
++gceSTATUS
++gcoVERTEX_Bind(
++ IN gcoVERTEX Vertex
++ );
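++
++/*
++** Illustrative usage sketch (not part of the original header): feeding one
++** interleaved position/color stream to the vertex engine. The gcoHAL handle
++** ("hal"), gcvNULL, gcvTRUE and gcvFALSE are assumed to come from
++** gc_hal_types.h; the vertex layout is an example only.
++**
++**     typedef struct { float x, y, z; unsigned int color; } Vertex;
++**     static const Vertex vertices[3] = { ... };
++**
++**     gcoSTREAM stream = gcvNULL;
++**     gcoVERTEX vertex = gcvNULL;
++**
++**     gcoSTREAM_Construct(hal, &stream);
++**     gcoSTREAM_Upload(stream, vertices, 0, sizeof(vertices), gcvFALSE);
++**     gcoSTREAM_SetStride(stream, sizeof(Vertex));
++**
++**     gcoVERTEX_Construct(hal, &vertex);
++**     gcoVERTEX_EnableAttribute(vertex, 0, gcvVERTEX_FLOAT, gcvFALSE, 3,
++**                               stream, 0, sizeof(Vertex));
++**     gcoVERTEX_EnableAttribute(vertex, 1, gcvVERTEX_UNSIGNED_BYTE, gcvTRUE, 4,
++**                               stream, 12, sizeof(Vertex));
++**     gcoVERTEX_Bind(vertex);
++*/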
++
++/*******************************************************************************
++***** gcoVERTEXARRAY Object ***************************************************/
++
++typedef struct _gcsVERTEXARRAY
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctUINT stride;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoSTREAM stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++#if gcdUSE_WCLIP_PATCH
++ gctBOOL isPosition;
++#endif
++}
++gcsVERTEXARRAY,
++* gcsVERTEXARRAY_PTR;
++
++gceSTATUS
++gcoVERTEXARRAY_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEXARRAY * Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Destroy(
++ IN gcoVERTEXARRAY Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++gctUINT
++gcoVERTEXARRAY_GetMaxStream(
++ IN gcoVERTEXARRAY Vertex
++);
++
++gceSTATUS
++gcoVERTEXARRAY_SetMaxStream(
++ IN gcoVERTEXARRAY Vertex,
++ gctUINT maxStreams
++);
++
++/*******************************************************************************
++***** Composition *************************************************************/
++
++typedef enum _gceCOMPOSITION
++{
++ gcvCOMPOSE_CLEAR = 1,
++ gcvCOMPOSE_BLUR,
++ gcvCOMPOSE_DIM,
++ gcvCOMPOSE_LAYER
++}
++gceCOMPOSITION;
++
++typedef struct _gcsCOMPOSITION * gcsCOMPOSITION_PTR;
++typedef struct _gcsCOMPOSITION
++{
++ /* Structure size. */
++ gctUINT structSize;
++
++ /* Composition operation. */
++ gceCOMPOSITION operation;
++
++ /* Layer to be composed. */
++ gcoSURF layer;
++
++ /* Source and target coordinates. */
++ gcsRECT srcRect;
++ gcsRECT trgRect;
++
++ /* Target rectangle */
++ gcsPOINT v0;
++ gcsPOINT v1;
++ gcsPOINT v2;
++
++ /* Blending parameters. */
++ gctBOOL enableBlending;
++ gctBOOL premultiplied;
++ gctUINT8 alphaValue;
++
++ /* Clear color. */
++ gctFLOAT r;
++ gctFLOAT g;
++ gctFLOAT b;
++ gctFLOAT a;
++}
++gcsCOMPOSITION;
++
++gceSTATUS
++gco3D_ProbeComposition(
++ gctBOOL ResetIfEmpty
++ );
++
++gceSTATUS
++gco3D_CompositionBegin(
++ void
++ );
++
++gceSTATUS
++gco3D_ComposeLayer(
++ IN gcsCOMPOSITION_PTR Layer
++ );
++
++gceSTATUS
++gco3D_CompositionSignals(
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal1,
++ IN gctSIGNAL Signal2
++ );
++
++gceSTATUS
++gco3D_CompositionEnd(
++ IN gcoSURF Target,
++ IN gctBOOL Synchronous
++ );
++
++/* Frame Database */
++gceSTATUS
++gcoHAL_AddFrameDB(
++ void
++ );
++
++gceSTATUS
++gcoHAL_DumpFrameDB(
++ gctCONST_STRING Filename OPTIONAL
++ );
++
++gceSTATUS
++gcoHAL_GetSharedInfo(
++ IN gctUINT32 Pid,
++ IN gctUINT32 DataId,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER Data
++ );
++
++gceSTATUS
++gcoHAL_SetSharedInfo(
++ IN gctUINT32 DataId,
++ IN gctPOINTER Data,
++ IN gctSIZE_T Bytes
++ );
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gcoHARDWARE_GetContext(
++ IN gcoHARDWARE Hardware,
++ OUT gctUINT32 * Context
++ );
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_engine_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_engine_vg.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,904 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_vg_h_
++#define __gc_hal_engine_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "gc_hal_types.h"
++
++/******************************************************************************\
++******************************** VG Enumerations *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Tiling modes for painting and imaging.
++**
++** This enumeration defines the tiling modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 tile modes.
++*/
++typedef enum _gceTILE_MODE
++{
++ gcvTILE_FILL,
++ gcvTILE_PAD,
++ gcvTILE_REPEAT,
++ gcvTILE_REFLECT
++}
++gceTILE_MODE;
++
++/******************************************************************************/
++/** @ingroup gcoVG
++**
++** @brief The different paint modes.
++**
++** This enumeration lists the available paint modes.
++*/
++typedef enum _gcePAINT_TYPE
++{
++ /** Solid color. */
++ gcvPAINT_MODE_SOLID,
++
++ /** Linear gradient. */
++ gcvPAINT_MODE_LINEAR,
++
++ /** Radial gradient. */
++ gcvPAINT_MODE_RADIAL,
++
++ /** Pattern. */
++ gcvPAINT_MODE_PATTERN,
++
++ /** Mode count. */
++ gcvPAINT_MODE_COUNT
++}
++gcePAINT_TYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Types of path data supported by HAL.
++**
++** This enumeration defines the types of path data supported by the HAL.
++** This is in fact a one-to-one mapping of the OpenVG 1.1 path types.
++*/
++typedef enum _gcePATHTYPE
++{
++ gcePATHTYPE_UNKNOWN = -1,
++ gcePATHTYPE_INT8,
++ gcePATHTYPE_INT16,
++ gcePATHTYPE_INT32,
++ gcePATHTYPE_FLOAT
++}
++gcePATHTYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Supported path segment commands.
++**
++** This enumeration defines the path segment commands supported by the HAL.
++*/
++typedef enum _gceVGCMD
++{
++ gcvVGCMD_END, /* 0: GCCMD_TS_OPCODE_END */
++ gcvVGCMD_CLOSE, /* 1: GCCMD_TS_OPCODE_CLOSE */
++ gcvVGCMD_MOVE, /* 2: GCCMD_TS_OPCODE_MOVE */
++ gcvVGCMD_MOVE_REL, /* 3: GCCMD_TS_OPCODE_MOVE_REL */
++ gcvVGCMD_LINE, /* 4: GCCMD_TS_OPCODE_LINE */
++ gcvVGCMD_LINE_REL, /* 5: GCCMD_TS_OPCODE_LINE_REL */
++ gcvVGCMD_QUAD, /* 6: GCCMD_TS_OPCODE_QUADRATIC */
++ gcvVGCMD_QUAD_REL, /* 7: GCCMD_TS_OPCODE_QUADRATIC_REL */
++ gcvVGCMD_CUBIC, /* 8: GCCMD_TS_OPCODE_CUBIC */
++ gcvVGCMD_CUBIC_REL, /* 9: GCCMD_TS_OPCODE_CUBIC_REL */
++ gcvVGCMD_BREAK, /* 10: GCCMD_TS_OPCODE_BREAK */
++ gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/
++ gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/
++
++ /* The width of the command recognized by the hardware, in bits. */
++ gcvVGCMD_WIDTH = 5,
++
++ /* Hardware command mask. */
++ gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1,
++
++ /* Command modifiers. */
++ gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */
++ gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */
++ gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */
++ gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */
++
++ /* Emulated LINE commands. */
++ gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */
++ gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */
++ gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */
++ gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */
++
++ /* Emulated SMOOTH commands. */
++ gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */
++ gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */
++ gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */
++ gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */
++
++ /* Emulation ARC commands. */
++ gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */
++ gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */
++ gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */
++ gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */
++}
++gceVGCMD;
++typedef enum _gceVGCMD * gceVGCMD_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Blending modes supported by the HAL.
++**
++** This enumeration defines the blending modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 blending modes.
++*/
++typedef enum _gceVG_BLEND
++{
++ gcvVG_BLEND_SRC,
++ gcvVG_BLEND_SRC_OVER,
++ gcvVG_BLEND_DST_OVER,
++ gcvVG_BLEND_SRC_IN,
++ gcvVG_BLEND_DST_IN,
++ gcvVG_BLEND_MULTIPLY,
++ gcvVG_BLEND_SCREEN,
++ gcvVG_BLEND_DARKEN,
++ gcvVG_BLEND_LIGHTEN,
++ gcvVG_BLEND_ADDITIVE,
++ gcvVG_BLEND_SUBTRACT,
++ gcvVG_BLEND_FILTER
++}
++gceVG_BLEND;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Image modes supported by the HAL.
++**
++** This enumeration defines the image modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition
++** of NO IMAGE.
++*/
++typedef enum _gceVG_IMAGE
++{
++ gcvVG_IMAGE_NONE,
++ gcvVG_IMAGE_NORMAL,
++ gcvVG_IMAGE_MULTIPLY,
++ gcvVG_IMAGE_STENCIL,
++ gcvVG_IMAGE_FILTER
++}
++gceVG_IMAGE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Filter modes for patterns and imaging.
++**
++** This enumeration defines the filter modes supported by the HAL.
++*/
++typedef enum _gceIMAGE_FILTER
++{
++ gcvFILTER_POINT,
++ gcvFILTER_LINEAR,
++ gcvFILTER_BI_LINEAR
++}
++gceIMAGE_FILTER;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Primitive modes supported by the HAL.
++**
++** This enumeration defines the primitive modes supported by the HAL.
++*/
++typedef enum _gceVG_PRIMITIVE
++{
++ gcvVG_SCANLINE,
++ gcvVG_RECTANGLE,
++ gcvVG_TESSELLATED,
++ gcvVG_TESSELLATED_TILED
++}
++gceVG_PRIMITIVE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rendering quality modes supported by the HAL.
++**
++** This enumeration defines the rendering quality modes supported by the HAL.
++*/
++typedef enum _gceRENDER_QUALITY
++{
++ gcvVG_NONANTIALIASED,
++ gcvVG_2X2_MSAA,
++ gcvVG_2X4_MSAA,
++ gcvVG_4X4_MSAA
++}
++gceRENDER_QUALITY;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Fill rules supported by the HAL.
++**
++** This enumeration defines the fill rules supported by the HAL.
++*/
++typedef enum _gceFILL_RULE
++{
++ gcvVG_EVEN_ODD,
++ gcvVG_NON_ZERO
++}
++gceFILL_RULE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Cap styles supported by the HAL.
++**
++** This enumeration defines the cap styles supported by the HAL.
++*/
++typedef enum _gceCAP_STYLE
++{
++ gcvCAP_BUTT,
++ gcvCAP_ROUND,
++ gcvCAP_SQUARE
++}
++gceCAP_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Join styles supported by the HAL.
++**
++** This enumeration defines the join styles supported by the HAL.
++*/
++typedef enum _gceJOIN_STYLE
++{
++ gcvJOIN_MITER,
++ gcvJOIN_ROUND,
++ gcvJOIN_BEVEL
++}
++gceJOIN_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Channel mask values.
++**
++** This enumeration defines the values for channel mask used in image
++** filtering.
++*/
++
++/* Base values for channel mask definitions. */
++#define gcvCHANNEL_X (0)
++#define gcvCHANNEL_R (1 << 0)
++#define gcvCHANNEL_G (1 << 1)
++#define gcvCHANNEL_B (1 << 2)
++#define gcvCHANNEL_A (1 << 3)
++
++typedef enum _gceCHANNEL
++{
++ gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++}
++gceCHANNEL;
++
++/******************************************************************************\
++******************************** VG Structures *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsCOLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFLOAT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFLOAT red;
++
++ /** Green color channel value for the color stop. */
++ gctFLOAT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFLOAT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFLOAT alpha;
++}
++gcsCOLOR_RAMP, * gcsCOLOR_RAMP_PTR;
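++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** a minimal two-stop ramp running from opaque black to opaque white. It
++** assumes gctFLOAT maps to a plain C float and that stop values are
++** normalized to the [0..1] range, as in OpenVG color ramps.
++**
++**     static gcsCOLOR_RAMP blackToWhite[] =
++**     {
++**         (fields: stop, red, green, blue, alpha)
++**         { 0.0f,  0.0f, 0.0f,  0.0f, 1.0f },
++**         { 1.0f,  1.0f, 1.0f,  1.0f, 1.0f },
++**     };
++*/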
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints, in fixed-point form.
++**
++** The gcsFIXED_COLOR_RAMP structure defines the layout of one single color inside
++** a color ramp, in fixed-point format, which is used by gradient paints.
++*/
++typedef struct _gcsFIXED_COLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFIXED_POINT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFIXED_POINT red;
++
++ /** Green color channel value for the color stop. */
++ gctFIXED_POINT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFIXED_POINT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFIXED_POINT alpha;
++}
++gcsFIXED_COLOR_RAMP, * gcsFIXED_COLOR_RAMP_PTR;
++
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rectangle structure used by the gcoVG object.
++**
++** This structure defines the layout of a rectangle. Make sure width and
++** height are larger than 0.
++*/
++typedef struct _gcsVG_RECT * gcsVG_RECT_PTR;
++typedef struct _gcsVG_RECT
++{
++ /** Left location of the rectangle. */
++ gctINT x;
++
++ /** Top location of the rectangle. */
++ gctINT y;
++
++ /** Width of the rectangle. */
++ gctINT width;
++
++ /** Height of the rectangle. */
++ gctINT height;
++}
++gcsVG_RECT;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Path command buffer attribute structure.
++**
++** The gcsPATH_BUFFER_INFO structure contains the specifics about
++** the layout of the path data command buffer.
++*/
++typedef struct _gcsPATH_BUFFER_INFO * gcsPATH_BUFFER_INFO_PTR;
++typedef struct _gcsPATH_BUFFER_INFO
++{
++ gctUINT reservedForHead;
++ gctUINT reservedForTail;
++}
++gcsPATH_BUFFER_INFO;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the path data container structure.
++**
++** The gcsPATH structure defines the layout of the path data container.
++*/
++typedef struct _gcsPATH_DATA * gcsPATH_DATA_PTR;
++typedef struct _gcsPATH_DATA
++{
++ /* Data container in command buffer format. */
++ gcsCMDBUFFER data;
++
++ /* Path data type. */
++ gcePATHTYPE dataType;
++}
++gcsPATH_DATA;
++
++
++/******************************************************************************\
++********************************* gcoHAL Object ********************************
++\******************************************************************************/
++
++/* Query path data storage attributes. */
++gceSTATUS
++gcoHAL_QueryPathStorage(
++ IN gcoHAL Hal,
++ OUT gcsPATH_BUFFER_INFO_PTR Information
++ );
++
++/* Associate a completion signal with the command buffer. */
++gceSTATUS
++gcoHAL_AssociateCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Release the current command buffer completion signal. */
++gceSTATUS
++gcoHAL_DeassociateCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Verify whether the command buffer is still in use. */
++gceSTATUS
++gcoHAL_CheckCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Wait until the command buffer is no longer in use. */
++gceSTATUS
++gcoHAL_WaitCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Flush the pixel cache. */
++gceSTATUS
++gcoHAL_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Split a hardware address into pool and offset. */
++gceSTATUS
++gcoHAL_SplitAddress(
++ IN gcoHAL Hal,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Combine pool and offset into a hardware address. */
++gceSTATUS
++gcoHAL_CombineAddress(
++ IN gcoHAL Hal,
++ IN gcePOOL Pool,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Schedule freeing of allocated linear video memory. */
++gceSTATUS
++gcoHAL_ScheduleVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT64 Node
++ );
++
++/* Free linear video memory allocated with gcoHAL_AllocateLinearVideoMemory. */
++gceSTATUS
++gcoHAL_FreeVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT64 Node
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gcoHAL_QueryCommandBuffer(
++ IN gcoHAL Hal,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++/* Allocate and lock linear video memory. */
++gceSTATUS
++gcoHAL_AllocateLinearVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ IN gcePOOL Pool,
++ OUT gctUINT64 * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
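++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** allocating a small linear buffer and scheduling its release with the
++** functions declared above. The pool enumerator and gcvSTATUS_OK are assumed
++** to come from the other HAL headers; error handling is omitted.
++**
++**     gctUINT64  node;
++**     gctUINT32  address;
++**     gctPOINTER memory;
++**
++**     status = gcoHAL_AllocateLinearVideoMemory(
++**         hal, 4096, 64, gcvPOOL_DEFAULT, &node, &address, &memory);
++**
++**     if (status == gcvSTATUS_OK)
++**     {
++**         .. use the buffer, then schedule it to be freed ..
++**         gcoHAL_ScheduleVideoMemory(hal, node);
++**     }
++*/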
++
++/* Align the specified size according to the hardware requirements. */
++gceSTATUS
++gcoHAL_GetAlignedSurfaceSize(
++ IN gcoHAL Hal,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++gceSTATUS
++gcoHAL_ReserveTask(
++ IN gcoHAL Hal,
++ IN gceBLOCK Block,
++ IN gctUINT TaskCount,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++/******************************************************************************\
++********************************** gcoVG Object ********************************
++\******************************************************************************/
++
++/** @defgroup gcoVG gcoVG
++**
++** The gcoVG object abstracts the VG hardware pipe.
++*/
++
++gctBOOL
++gcoVG_IsMaskSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsTargetSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsImageSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctUINT8 gcoVG_PackColorComponent(
++ gctFLOAT Value
++ );
++
++gceSTATUS
++gcoVG_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVG * Vg
++ );
++
++gceSTATUS
++gcoVG_Destroy(
++ IN gcoVG Vg
++ );
++
++gceSTATUS
++gcoVG_SetTarget(
++ IN gcoVG Vg,
++ IN gcoSURF Target
++ );
++
++gceSTATUS
++gcoVG_UnsetTarget(
++ IN gcoVG Vg,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_SetUserToSurface(
++ IN gcoVG Vg,
++ IN gctFLOAT UserToSurface[9]
++ );
++
++gceSTATUS
++gcoVG_SetSurfaceToImage(
++ IN gcoVG Vg,
++ IN gctFLOAT SurfaceToImage[9]
++ );
++
++gceSTATUS
++gcoVG_EnableMask(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetMask(
++ IN gcoVG Vg,
++ IN gcoSURF Mask
++ );
++
++gceSTATUS
++gcoVG_UnsetMask(
++ IN gcoVG Vg,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_FlushMask(
++ IN gcoVG Vg
++ );
++
++gceSTATUS
++gcoVG_EnableScissor(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetScissor(
++ IN gcoVG Vg,
++ IN gctSIZE_T RectangleCount,
++ IN gcsVG_RECT_PTR Rectangles
++ );
++
++gceSTATUS
++gcoVG_EnableColorTransform(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetColorTransform(
++ IN gcoVG Vg,
++ IN gctFLOAT ColorTransform[8]
++ );
++
++gceSTATUS
++gcoVG_SetTileFillColor(
++ IN gcoVG Vg,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++gceSTATUS
++gcoVG_SetSolidPaint(
++ IN gcoVG Vg,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++gceSTATUS
++gcoVG_SetLinearPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT Constant,
++ IN gctFLOAT StepX,
++ IN gctFLOAT StepY
++ );
++
++gceSTATUS
++gcoVG_SetRadialPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT LinConstant,
++ IN gctFLOAT LinStepX,
++ IN gctFLOAT LinStepY,
++ IN gctFLOAT RadConstant,
++ IN gctFLOAT RadStepX,
++ IN gctFLOAT RadStepY,
++ IN gctFLOAT RadStepXX,
++ IN gctFLOAT RadStepYY,
++ IN gctFLOAT RadStepXY
++ );
++
++gceSTATUS
++gcoVG_SetPatternPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT UConstant,
++ IN gctFLOAT UStepX,
++ IN gctFLOAT UStepY,
++ IN gctFLOAT VConstant,
++ IN gctFLOAT VStepX,
++ IN gctFLOAT VStepY,
++ IN gctBOOL Linear
++ );
++
++gceSTATUS
++gcoVG_SetColorRamp(
++ IN gcoVG Vg,
++ IN gcoSURF ColorRamp,
++ IN gceTILE_MODE ColorRampSpreadMode
++ );
++
++gceSTATUS
++gcoVG_SetPattern(
++ IN gcoVG Vg,
++ IN gcoSURF Pattern,
++ IN gceTILE_MODE TileMode,
++ IN gceIMAGE_FILTER Filter
++ );
++
++gceSTATUS
++gcoVG_SetImageMode(
++ IN gcoVG Vg,
++ IN gceVG_IMAGE Mode
++ );
++
++gceSTATUS
++gcoVG_SetBlendMode(
++ IN gcoVG Vg,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_SetRenderingQuality(
++ IN gcoVG Vg,
++ IN gceRENDER_QUALITY Quality
++ );
++
++gceSTATUS
++gcoVG_SetFillRule(
++ IN gcoVG Vg,
++ IN gceFILL_RULE FillRule
++ );
++
++gceSTATUS
++gcoVG_FinalizePath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++gceSTATUS
++gcoVG_Clear(
++ IN gcoVG Vg,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_DrawPath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gctBOOL SoftwareTesselation
++ );
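++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** a typical call sequence for filling a path with a solid color, using only
++** entry points declared in this header. The gcoHAL handle, the target
++** surface and the finalized gcsPATH_DATA are assumed to have been prepared
++** elsewhere; status checking is omitted.
++**
++**     gcoVG vg;
++**
++**     gcoVG_Construct(hal, &vg);
++**     gcoVG_SetTarget(vg, target);
++**     gcoVG_SetUserToSurface(vg, userToSurface);        .. gctFLOAT[9] ..
++**     gcoVG_SetFillRule(vg, gcvVG_NON_ZERO);
++**     gcoVG_SetSolidPaint(vg, 0xFF, 0x00, 0x00, 0xFF);
++**     gcoVG_DrawPath(vg, pathData, 1.0f, 0.0f, gcvFALSE);
++**     gcoVG_UnsetTarget(vg, target);
++**     gcoVG_Destroy(vg);
++*/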
++
++gceSTATUS
++gcoVG_DrawImage(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctBOOL Mask
++ );
++
++gceSTATUS
++gcoVG_TesselateImage(
++ IN gcoVG Vg,
++ IN gcoSURF Image,
++ IN gcsVG_RECT_PTR Rectangle,
++ IN gceIMAGE_FILTER Filter,
++ IN gctBOOL Mask,
++ IN gctBOOL SoftwareTesselation
++ );
++
++gceSTATUS
++gcoVG_Blit(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gcsVG_RECT_PTR SrcRect,
++ IN gcsVG_RECT_PTR TrgRect,
++ IN gceIMAGE_FILTER Filter,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_ColorMatrix(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN const gctFLOAT * Matrix,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_SeparableConvolve(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT KernelWidth,
++ IN gctINT KernelHeight,
++ IN gctINT ShiftX,
++ IN gctINT ShiftY,
++ IN const gctINT16 * KernelX,
++ IN const gctINT16 * KernelY,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_GaussianBlur(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctFLOAT StdDeviationX,
++ IN gctFLOAT StdDeviationY,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_EnableDither(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_vg_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_enum.h 2015-05-01 14:57:59.539427001 -0500
+@@ -0,0 +1,965 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_enum_h_
++#define __gc_hal_enum_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Chip models. */
++typedef enum _gceCHIPMODEL
++{
++ gcv300 = 0x0300,
++ gcv320 = 0x0320,
++ gcv350 = 0x0350,
++ gcv355 = 0x0355,
++ gcv400 = 0x0400,
++ gcv410 = 0x0410,
++ gcv420 = 0x0420,
++ gcv450 = 0x0450,
++ gcv500 = 0x0500,
++ gcv530 = 0x0530,
++ gcv600 = 0x0600,
++ gcv700 = 0x0700,
++ gcv800 = 0x0800,
++ gcv860 = 0x0860,
++ gcv880 = 0x0880,
++ gcv1000 = 0x1000,
++ gcv2000 = 0x2000,
++ gcv2100 = 0x2100,
++ gcv4000 = 0x4000,
++}
++gceCHIPMODEL;
++
++/* Chip features. */
++typedef enum _gceFEATURE
++{
++ gcvFEATURE_PIPE_2D = 0,
++ gcvFEATURE_PIPE_3D,
++ gcvFEATURE_PIPE_VG,
++ gcvFEATURE_DC,
++ gcvFEATURE_HIGH_DYNAMIC_RANGE,
++ gcvFEATURE_MODULE_CG,
++ gcvFEATURE_MIN_AREA,
++ gcvFEATURE_BUFFER_INTERLEAVING,
++ gcvFEATURE_BYTE_WRITE_2D,
++ gcvFEATURE_ENDIANNESS_CONFIG,
++ gcvFEATURE_DUAL_RETURN_BUS,
++ gcvFEATURE_DEBUG_MODE,
++ gcvFEATURE_YUY2_RENDER_TARGET,
++ gcvFEATURE_FRAGMENT_PROCESSOR,
++ gcvFEATURE_2DPE20,
++ gcvFEATURE_FAST_CLEAR,
++ gcvFEATURE_YUV420_TILER,
++ gcvFEATURE_YUY2_AVERAGING,
++ gcvFEATURE_FLIP_Y,
++ gcvFEATURE_EARLY_Z,
++ gcvFEATURE_Z_COMPRESSION,
++ gcvFEATURE_MSAA,
++ gcvFEATURE_SPECIAL_ANTI_ALIASING,
++ gcvFEATURE_SPECIAL_MSAA_LOD,
++ gcvFEATURE_422_TEXTURE_COMPRESSION,
++ gcvFEATURE_DXT_TEXTURE_COMPRESSION,
++ gcvFEATURE_ETC1_TEXTURE_COMPRESSION,
++ gcvFEATURE_CORRECT_TEXTURE_CONVERTER,
++ gcvFEATURE_TEXTURE_8K,
++ gcvFEATURE_SCALER,
++ gcvFEATURE_YUV420_SCALER,
++ gcvFEATURE_SHADER_HAS_W,
++ gcvFEATURE_SHADER_HAS_SIGN,
++ gcvFEATURE_SHADER_HAS_FLOOR,
++ gcvFEATURE_SHADER_HAS_CEIL,
++ gcvFEATURE_SHADER_HAS_SQRT,
++ gcvFEATURE_SHADER_HAS_TRIG,
++ gcvFEATURE_VAA,
++ gcvFEATURE_HZ,
++ gcvFEATURE_CORRECT_STENCIL,
++ gcvFEATURE_VG20,
++ gcvFEATURE_VG_FILTER,
++ gcvFEATURE_VG21,
++ gcvFEATURE_VG_DOUBLE_BUFFER,
++ gcvFEATURE_MC20,
++ gcvFEATURE_SUPER_TILED,
++ gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND,
++ gcvFEATURE_2D_DITHER,
++ gcvFEATURE_2D_A8_TARGET,
++ gcvFEATURE_2D_FILTERBLIT_FULLROTATION,
++ gcvFEATURE_2D_BITBLIT_FULLROTATION,
++ gcvFEATURE_WIDE_LINE,
++ gcvFEATURE_FC_FLUSH_STALL,
++ gcvFEATURE_FULL_DIRECTFB,
++ gcvFEATURE_HALF_FLOAT_PIPE,
++ gcvFEATURE_LINE_LOOP,
++ gcvFEATURE_2D_YUV_BLIT,
++ gcvFEATURE_2D_TILING,
++ gcvFEATURE_NON_POWER_OF_TWO,
++ gcvFEATURE_3D_TEXTURE,
++ gcvFEATURE_TEXTURE_ARRAY,
++ gcvFEATURE_TILE_FILLER,
++ gcvFEATURE_LOGIC_OP,
++ gcvFEATURE_COMPOSITION,
++ gcvFEATURE_MIXED_STREAMS,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT,
++ gcvFEATURE_END_EVENT,
++ gcvFEATURE_VERTEX_10_10_10_2,
++ gcvFEATURE_TEXTURE_10_10_10_2,
++ gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING,
++ gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT,
++ gcvFEATURE_2D_ROTATION_STALL_FIX,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX,
++ gcvFEATURE_BUG_FIXES10,
++ gcvFEATURE_2D_MINOR_TILING,
++ /* Supertiled compressed textures are supported. */
++ gcvFEATURE_TEX_COMPRRESSION_SUPERTILED,
++ gcvFEATURE_FAST_MSAA,
++ gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP,
++ gcvFEATURE_TEXTURE_TILED_READ,
++ gcvFEATURE_DEPTH_BIAS_FIX,
++ gcvFEATURE_RECT_PRIMITIVE,
++ gcvFEATURE_BUG_FIXES11,
++ gcvFEATURE_SUPERTILED_TEXTURE,
++ gcvFEATURE_2D_NO_COLORBRUSH_INDEX8,
++ gcvFEATURE_RS_YUV_TARGET,
++ gcvFEATURE_2D_FC_SOURCE,
++ gcvFEATURE_PE_DITHER_FIX,
++ gcvFEATURE_2D_YUV_SEPARATE_STRIDE,
++ gcvFEATURE_FRUSTUM_CLIP_FIX,
++ gcvFEATURE_TEXTURE_LINEAR,
++ gcvFEATURE_TEXTURE_YUV_ASSEMBLER,
++ gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING,
++ gcvFEATURE_BUGFIX15,
++ gcvFEATURE_2D_GAMMA,
++ gcvFEATURE_2D_COLOR_SPACE_CONVERSION,
++ gcvFEATURE_2D_SUPER_TILE_VERSION,
++ gcvFEATURE_2D_MIRROR_EXTENSION,
++ gcvFEATURE_2D_SUPER_TILE_V1,
++ gcvFEATURE_2D_SUPER_TILE_V2,
++ gcvFEATURE_2D_SUPER_TILE_V3,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2,
++ gcvFEATURE_ELEMENT_INDEX_UINT,
++ gcvFEATURE_2D_COMPRESSION,
++ gcvFEATURE_2D_OPF_YUV_OUTPUT,
++ gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT,
++ gcvFEATURE_2D_YUV_MODE,
++ gcvFEATURE_DECOMPRESS_Z16,
++ gcvFEATURE_LINEAR_RENDER_TARGET,
++ gcvFEATURE_BUG_FIXES8,
++ gcvFEATURE_HALTI2,
++ gcvFEATURE_MMU,
++}
++gceFEATURE;
++
++/* Chip Power Status. */
++typedef enum _gceCHIPPOWERSTATE
++{
++ gcvPOWER_ON = 0,
++ gcvPOWER_OFF,
++ gcvPOWER_IDLE,
++ gcvPOWER_SUSPEND,
++ gcvPOWER_SUSPEND_ATPOWERON,
++ gcvPOWER_OFF_ATPOWERON,
++ gcvPOWER_IDLE_BROADCAST,
++ gcvPOWER_SUSPEND_BROADCAST,
++ gcvPOWER_OFF_BROADCAST,
++ gcvPOWER_OFF_RECOVERY,
++ gcvPOWER_OFF_TIMEOUT,
++ gcvPOWER_ON_AUTO
++}
++gceCHIPPOWERSTATE;
++
++/* CPU cache operations */
++typedef enum _gceCACHEOPERATION
++{
++ gcvCACHE_CLEAN = 0x01,
++ gcvCACHE_INVALIDATE = 0x02,
++ gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE,
++ gcvCACHE_MEMORY_BARRIER = 0x04
++}
++gceCACHEOPERATION;
++
++/* Surface types. */
++typedef enum _gceSURF_TYPE
++{
++ gcvSURF_TYPE_UNKNOWN = 0,
++ gcvSURF_INDEX,
++ gcvSURF_VERTEX,
++ gcvSURF_TEXTURE,
++ gcvSURF_RENDER_TARGET,
++ gcvSURF_DEPTH,
++ gcvSURF_BITMAP,
++ gcvSURF_TILE_STATUS,
++ gcvSURF_IMAGE,
++ gcvSURF_MASK,
++ gcvSURF_SCISSOR,
++ gcvSURF_HIERARCHICAL_DEPTH,
++ gcvSURF_NUM_TYPES, /* Make sure this is the last one! */
++
++ /* Combinations. */
++ gcvSURF_NO_TILE_STATUS = 0x100,
++ gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node.
++ In Android, vidmem node is allocated by another process. */
++ gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */
++    gcvSURF_FLIP             = 0x800, /* The resolve target that will be flip-resolved from the RT. */
++ gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */
++
++ gcvSURF_LINEAR = 0x2000,
++ gcvSURF_VG = 0x4000,
++
++ gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ /* Supported surface types with no vidmem node. */
++ gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP
++ | gcvSURF_NO_VIDMEM,
++
++ gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE
++ | gcvSURF_NO_VIDMEM,
++
++ /* Cacheable surface types with no vidmem node. */
++ gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_FLIP_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_FLIP,
++}
++gceSURF_TYPE;
++
++typedef enum _gceSURF_USAGE
++{
++ gcvSURF_USAGE_UNKNOWN,
++ gcvSURF_USAGE_RESOLVE_AFTER_CPU,
++ gcvSURF_USAGE_RESOLVE_AFTER_3D
++}
++gceSURF_USAGE;
++
++typedef enum _gceSURF_COLOR_TYPE
++{
++ gcvSURF_COLOR_UNKNOWN = 0,
++ gcvSURF_COLOR_LINEAR = 0x01,
++ gcvSURF_COLOR_ALPHA_PRE = 0x02,
++}
++gceSURF_COLOR_TYPE;
++
++/* Rotation. */
++typedef enum _gceSURF_ROTATION
++{
++ gcvSURF_0_DEGREE = 0,
++ gcvSURF_90_DEGREE,
++ gcvSURF_180_DEGREE,
++ gcvSURF_270_DEGREE,
++ gcvSURF_FLIP_X,
++ gcvSURF_FLIP_Y,
++
++ gcvSURF_POST_FLIP_X = 0x40000000,
++ gcvSURF_POST_FLIP_Y = 0x80000000,
++}
++gceSURF_ROTATION;
++
++typedef enum _gceMIPMAP_IMAGE_FORMAT
++{
++ gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2
++}
++gceMIPMAP_IMAGE_FORMAT;
++
++
++/* Surface formats. */
++typedef enum _gceSURF_FORMAT
++{
++ /* Unknown format. */
++ gcvSURF_UNKNOWN = 0,
++
++ /* Palettized formats. */
++ gcvSURF_INDEX1 = 100,
++ gcvSURF_INDEX4,
++ gcvSURF_INDEX8,
++
++ /* RGB formats. */
++ gcvSURF_A2R2G2B2 = 200,
++ gcvSURF_R3G3B2,
++ gcvSURF_A8R3G3B2,
++ gcvSURF_X4R4G4B4,
++ gcvSURF_A4R4G4B4,
++ gcvSURF_R4G4B4A4,
++ gcvSURF_X1R5G5B5,
++ gcvSURF_A1R5G5B5,
++ gcvSURF_R5G5B5A1,
++ gcvSURF_R5G6B5,
++ gcvSURF_R8G8B8,
++ gcvSURF_X8R8G8B8,
++ gcvSURF_A8R8G8B8,
++ gcvSURF_R8G8B8A8,
++ gcvSURF_G8R8G8B8,
++ gcvSURF_R8G8B8G8,
++ gcvSURF_X2R10G10B10,
++ gcvSURF_A2R10G10B10,
++ gcvSURF_X12R12G12B12,
++ gcvSURF_A12R12G12B12,
++ gcvSURF_X16R16G16B16,
++ gcvSURF_A16R16G16B16,
++ gcvSURF_A32R32G32B32,
++ gcvSURF_R8G8B8X8,
++ gcvSURF_R5G5B5X1,
++ gcvSURF_R4G4B4X4,
++
++ /* BGR formats. */
++ gcvSURF_A4B4G4R4 = 300,
++ gcvSURF_A1B5G5R5,
++ gcvSURF_B5G6R5,
++ gcvSURF_B8G8R8,
++ gcvSURF_B16G16R16,
++ gcvSURF_X8B8G8R8,
++ gcvSURF_A8B8G8R8,
++ gcvSURF_A2B10G10R10,
++ gcvSURF_X16B16G16R16,
++ gcvSURF_A16B16G16R16,
++ gcvSURF_B32G32R32,
++ gcvSURF_X32B32G32R32,
++ gcvSURF_A32B32G32R32,
++ gcvSURF_B4G4R4A4,
++ gcvSURF_B5G5R5A1,
++ gcvSURF_B8G8R8X8,
++ gcvSURF_B8G8R8A8,
++ gcvSURF_X4B4G4R4,
++ gcvSURF_X1B5G5R5,
++ gcvSURF_B4G4R4X4,
++ gcvSURF_B5G5R5X1,
++ gcvSURF_X2B10G10R10,
++
++ /* Compressed formats. */
++ gcvSURF_DXT1 = 400,
++ gcvSURF_DXT2,
++ gcvSURF_DXT3,
++ gcvSURF_DXT4,
++ gcvSURF_DXT5,
++ gcvSURF_CXV8U8,
++ gcvSURF_ETC1,
++ gcvSURF_R11_EAC,
++ gcvSURF_SIGNED_R11_EAC,
++ gcvSURF_RG11_EAC,
++ gcvSURF_SIGNED_RG11_EAC,
++ gcvSURF_RGB8_ETC2,
++ gcvSURF_SRGB8_ETC2,
++ gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_RGBA8_ETC2_EAC,
++ gcvSURF_SRGB8_ALPHA8_ETC2_EAC,
++
++ /* YUV formats. */
++ gcvSURF_YUY2 = 500,
++ gcvSURF_UYVY,
++ gcvSURF_YV12,
++ gcvSURF_I420,
++ gcvSURF_NV12,
++ gcvSURF_NV21,
++ gcvSURF_NV16,
++ gcvSURF_NV61,
++ gcvSURF_YVYU,
++ gcvSURF_VYUY,
++
++ /* Depth formats. */
++ gcvSURF_D16 = 600,
++ gcvSURF_D24S8,
++ gcvSURF_D32,
++ gcvSURF_D24X8,
++
++ /* Alpha formats. */
++ gcvSURF_A4 = 700,
++ gcvSURF_A8,
++ gcvSURF_A12,
++ gcvSURF_A16,
++ gcvSURF_A32,
++ gcvSURF_A1,
++
++ /* Luminance formats. */
++ gcvSURF_L4 = 800,
++ gcvSURF_L8,
++ gcvSURF_L12,
++ gcvSURF_L16,
++ gcvSURF_L32,
++ gcvSURF_L1,
++
++ /* Alpha/Luminance formats. */
++ gcvSURF_A4L4 = 900,
++ gcvSURF_A2L6,
++ gcvSURF_A8L8,
++ gcvSURF_A4L12,
++ gcvSURF_A12L12,
++ gcvSURF_A16L16,
++
++ /* Bump formats. */
++ gcvSURF_L6V5U5 = 1000,
++ gcvSURF_V8U8,
++ gcvSURF_X8L8V8U8,
++ gcvSURF_Q8W8V8U8,
++ gcvSURF_A2W10V10U10,
++ gcvSURF_V16U16,
++ gcvSURF_Q16W16V16U16,
++
++ /* R/RG/RA formats. */
++ gcvSURF_R8 = 1100,
++ gcvSURF_X8R8,
++ gcvSURF_G8R8,
++ gcvSURF_X8G8R8,
++ gcvSURF_A8R8,
++ gcvSURF_R16,
++ gcvSURF_X16R16,
++ gcvSURF_G16R16,
++ gcvSURF_X16G16R16,
++ gcvSURF_A16R16,
++ gcvSURF_R32,
++ gcvSURF_X32R32,
++ gcvSURF_G32R32,
++ gcvSURF_X32G32R32,
++ gcvSURF_A32R32,
++ gcvSURF_RG16,
++
++ /* Floating point formats. */
++ gcvSURF_R16F = 1200,
++ gcvSURF_X16R16F,
++ gcvSURF_G16R16F,
++ gcvSURF_X16G16R16F,
++ gcvSURF_B16G16R16F,
++ gcvSURF_X16B16G16R16F,
++ gcvSURF_A16B16G16R16F,
++ gcvSURF_R32F,
++ gcvSURF_X32R32F,
++ gcvSURF_G32R32F,
++ gcvSURF_X32G32R32F,
++ gcvSURF_B32G32R32F,
++ gcvSURF_X32B32G32R32F,
++ gcvSURF_A32B32G32R32F,
++ gcvSURF_A16F,
++ gcvSURF_L16F,
++ gcvSURF_A16L16F,
++ gcvSURF_A16R16F,
++ gcvSURF_A32F,
++ gcvSURF_L32F,
++ gcvSURF_A32L32F,
++ gcvSURF_A32R32F,
++
++}
++gceSURF_FORMAT;
++
++/* Pixel swizzle modes. */
++typedef enum _gceSURF_SWIZZLE
++{
++ gcvSURF_NOSWIZZLE = 0,
++ gcvSURF_ARGB,
++ gcvSURF_ABGR,
++ gcvSURF_RGBA,
++ gcvSURF_BGRA
++}
++gceSURF_SWIZZLE;
++
++/* Transparency modes. */
++typedef enum _gceSURF_TRANSPARENCY
++{
++ /* Valid only for PE 1.0 */
++ gcvSURF_OPAQUE = 0,
++ gcvSURF_SOURCE_MATCH,
++ gcvSURF_SOURCE_MASK,
++ gcvSURF_PATTERN_MASK,
++}
++gceSURF_TRANSPARENCY;
++
++/* Surface Alignment. */
++typedef enum _gceSURF_ALIGNMENT
++{
++ gcvSURF_FOUR = 0,
++ gcvSURF_SIXTEEN,
++ gcvSURF_SUPER_TILED,
++ gcvSURF_SPLIT_TILED,
++ gcvSURF_SPLIT_SUPER_TILED,
++}
++gceSURF_ALIGNMENT;
++
++
++/* Surface Addressing. */
++typedef enum _gceSURF_ADDRESSING
++{
++ gcvSURF_NO_STRIDE_TILED = 0,
++ gcvSURF_NO_STRIDE_LINEAR,
++ gcvSURF_STRIDE_TILED,
++ gcvSURF_STRIDE_LINEAR
++}
++gceSURF_ADDRESSING;
++
++/* Transparency modes. */
++typedef enum _gce2D_TRANSPARENCY
++{
++ /* Valid only for PE 2.0 */
++ gcv2D_OPAQUE = 0,
++ gcv2D_KEYED,
++ gcv2D_MASKED
++}
++gce2D_TRANSPARENCY;
++
++/* Mono packing modes. */
++typedef enum _gceSURF_MONOPACK
++{
++ gcvSURF_PACKED8 = 0,
++ gcvSURF_PACKED16,
++ gcvSURF_PACKED32,
++ gcvSURF_UNPACKED,
++}
++gceSURF_MONOPACK;
++
++/* Blending modes. */
++typedef enum _gceSURF_BLEND_MODE
++{
++ /* Porter-Duff blending modes. */
++ /* Fsrc Fdst */
++ gcvBLEND_CLEAR = 0, /* 0 0 */
++ gcvBLEND_SRC, /* 1 0 */
++ gcvBLEND_DST, /* 0 1 */
++ gcvBLEND_SRC_OVER_DST, /* 1 1 - Asrc */
++ gcvBLEND_DST_OVER_SRC, /* 1 - Adst 1 */
++ gcvBLEND_SRC_IN_DST, /* Adst 0 */
++ gcvBLEND_DST_IN_SRC, /* 0 Asrc */
++ gcvBLEND_SRC_OUT_DST, /* 1 - Adst 0 */
++ gcvBLEND_DST_OUT_SRC, /* 0 1 - Asrc */
++ gcvBLEND_SRC_ATOP_DST, /* Adst 1 - Asrc */
++ gcvBLEND_DST_ATOP_SRC, /* 1 - Adst Asrc */
++ gcvBLEND_SRC_XOR_DST, /* 1 - Adst 1 - Asrc */
++
++ /* Special blending modes. */
++ gcvBLEND_SET, /* DST = 1 */
++ gcvBLEND_SUB /* DST = DST * (1 - SRC) */
++}
++gceSURF_BLEND_MODE;
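++
++/*
++** Illustrative note (documentation aid, not part of the original header):
++** with the source/destination factors listed above, the blended color is
++** C = Csrc * Fsrc + Cdst * Fdst. For gcvBLEND_SRC_OVER_DST (Fsrc = 1,
++** Fdst = 1 - Asrc) this gives the usual "source over" result
++** C = Csrc + Cdst * (1 - Asrc), assuming premultiplied alpha.
++*/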
++
++/* Per-pixel alpha modes. */
++typedef enum _gceSURF_PIXEL_ALPHA_MODE
++{
++ gcvSURF_PIXEL_ALPHA_STRAIGHT = 0,
++ gcvSURF_PIXEL_ALPHA_INVERSED
++}
++gceSURF_PIXEL_ALPHA_MODE;
++
++/* Global alpha modes. */
++typedef enum _gceSURF_GLOBAL_ALPHA_MODE
++{
++ gcvSURF_GLOBAL_ALPHA_OFF = 0,
++ gcvSURF_GLOBAL_ALPHA_ON,
++ gcvSURF_GLOBAL_ALPHA_SCALE
++}
++gceSURF_GLOBAL_ALPHA_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gceSURF_PIXEL_COLOR_MODE
++{
++ gcvSURF_COLOR_STRAIGHT = 0,
++ gcvSURF_COLOR_MULTIPLY
++}
++gceSURF_PIXEL_COLOR_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_COLOR_MULTIPLY_ENABLE
++}
++gce2D_PIXEL_COLOR_MULTIPLY_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR
++}
++gce2D_GLOBAL_COLOR_MULTIPLY_MODE;
++
++/* Alpha blending factor modes. */
++typedef enum _gceSURF_BLEND_FACTOR_MODE
++{
++ gcvSURF_BLEND_ZERO = 0,
++ gcvSURF_BLEND_ONE,
++ gcvSURF_BLEND_STRAIGHT,
++ gcvSURF_BLEND_INVERSED,
++ gcvSURF_BLEND_COLOR,
++ gcvSURF_BLEND_COLOR_INVERSED,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED,
++ gcvSURF_BLEND_STRAIGHT_NO_CROSS,
++ gcvSURF_BLEND_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_COLOR_NO_CROSS,
++ gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS
++}
++gceSURF_BLEND_FACTOR_MODE;
++
++/* Alpha blending Porter-Duff rules. */
++typedef enum _gce2D_PORTER_DUFF_RULE
++{
++ gcvPD_CLEAR = 0,
++ gcvPD_SRC,
++ gcvPD_SRC_OVER,
++ gcvPD_DST_OVER,
++ gcvPD_SRC_IN,
++ gcvPD_DST_IN,
++ gcvPD_SRC_OUT,
++ gcvPD_DST_OUT,
++ gcvPD_SRC_ATOP,
++ gcvPD_DST_ATOP,
++ gcvPD_ADD,
++ gcvPD_XOR,
++ gcvPD_DST
++}
++gce2D_PORTER_DUFF_RULE;
++
++/* YUV color space conversion modes. */
++typedef enum _gce2D_YUV_COLOR_MODE
++{
++    gcv2D_YUV_601 = 0,
++ gcv2D_YUV_709,
++ gcv2D_YUV_USER_DEFINED,
++ gcv2D_YUV_USER_DEFINED_CLAMP,
++
++ /* Default setting is for src. gcv2D_YUV_DST
++ can be ORed to set dst.
++ */
++ gcv2D_YUV_DST = 0x80000000,
++}
++gce2D_YUV_COLOR_MODE;
++
++typedef enum _gce2D_COMMAND
++{
++ gcv2D_CLEAR = 0,
++ gcv2D_LINE,
++ gcv2D_BLT,
++ gcv2D_STRETCH,
++ gcv2D_HOR_FILTER,
++ gcv2D_VER_FILTER,
++ gcv2D_MULTI_SOURCE_BLT,
++}
++gce2D_COMMAND;
++
++typedef enum _gce2D_TILE_STATUS_CONFIG
++{
++ gcv2D_TSC_DISABLE = 0,
++ gcv2D_TSC_ENABLE = 0x00000001,
++ gcv2D_TSC_COMPRESSED = 0x00000002,
++ gcv2D_TSC_DOWN_SAMPLER = 0x00000004,
++ gcv2D_TSC_2D_COMPRESSED = 0x00000008,
++}
++gce2D_TILE_STATUS_CONFIG;
++
++typedef enum _gce2D_QUERY
++{
++ gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0,
++ gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN,
++ gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN,
++ gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN,
++}
++gce2D_QUERY;
++
++typedef enum _gce2D_SUPER_TILE_VERSION
++{
++ gcv2D_SUPER_TILE_VERSION_V1 = 1,
++ gcv2D_SUPER_TILE_VERSION_V2 = 2,
++ gcv2D_SUPER_TILE_VERSION_V3 = 3,
++}
++gce2D_SUPER_TILE_VERSION;
++
++typedef enum _gce2D_STATE
++{
++ gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1,
++ gcv2D_STATE_SUPER_TILE_VERSION,
++ gcv2D_STATE_EN_GAMMA,
++ gcv2D_STATE_DE_GAMMA,
++ gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT,
++ gcv2D_STATE_XRGB_ENABLE,
++
++ gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001,
++ gcv2D_STATE_ARRAY_DE_GAMMA,
++ gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB,
++ gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV,
++}
++gce2D_STATE;
++
++#ifndef VIVANTE_NO_3D
++/* Texture functions. */
++typedef enum _gceTEXTURE_FUNCTION
++{
++ gcvTEXTURE_DUMMY = 0,
++ gcvTEXTURE_REPLACE = 0,
++ gcvTEXTURE_MODULATE,
++ gcvTEXTURE_ADD,
++ gcvTEXTURE_ADD_SIGNED,
++ gcvTEXTURE_INTERPOLATE,
++ gcvTEXTURE_SUBTRACT,
++ gcvTEXTURE_DOT3
++}
++gceTEXTURE_FUNCTION;
++
++/* Texture sources. */
++typedef enum _gceTEXTURE_SOURCE
++{
++ gcvCOLOR_FROM_TEXTURE = 0,
++ gcvCOLOR_FROM_CONSTANT_COLOR,
++ gcvCOLOR_FROM_PRIMARY_COLOR,
++ gcvCOLOR_FROM_PREVIOUS_COLOR
++}
++gceTEXTURE_SOURCE;
++
++/* Texture source channels. */
++typedef enum _gceTEXTURE_CHANNEL
++{
++ gcvFROM_COLOR = 0,
++ gcvFROM_ONE_MINUS_COLOR,
++ gcvFROM_ALPHA,
++ gcvFROM_ONE_MINUS_ALPHA
++}
++gceTEXTURE_CHANNEL;
++#endif /* VIVANTE_NO_3D */
++
++/* Filter types. */
++typedef enum _gceFILTER_TYPE
++{
++ gcvFILTER_SYNC = 0,
++ gcvFILTER_BLUR,
++ gcvFILTER_USER
++}
++gceFILTER_TYPE;
++
++/* Filter pass types. */
++typedef enum _gceFILTER_PASS_TYPE
++{
++ gcvFILTER_HOR_PASS = 0,
++ gcvFILTER_VER_PASS
++}
++gceFILTER_PASS_TYPE;
++
++/* Endian hints. */
++typedef enum _gceENDIAN_HINT
++{
++ gcvENDIAN_NO_SWAP = 0,
++ gcvENDIAN_SWAP_WORD,
++ gcvENDIAN_SWAP_DWORD
++}
++gceENDIAN_HINT;
++
++/* Tiling modes. */
++typedef enum _gceTILING
++{
++ gcvLINEAR = 0,
++ gcvTILED,
++ gcvSUPERTILED,
++ gcvMULTI_TILED,
++ gcvMULTI_SUPERTILED,
++ gcvMINORTILED,
++}
++gceTILING;
++
++/* 2D pattern type. */
++typedef enum _gce2D_PATTERN
++{
++ gcv2D_PATTERN_SOLID = 0,
++ gcv2D_PATTERN_MONO,
++ gcv2D_PATTERN_COLOR,
++ gcv2D_PATTERN_INVALID
++}
++gce2D_PATTERN;
++
++/* 2D source type. */
++typedef enum _gce2D_SOURCE
++{
++ gcv2D_SOURCE_MASKED = 0,
++ gcv2D_SOURCE_MONO,
++ gcv2D_SOURCE_COLOR,
++ gcv2D_SOURCE_INVALID
++}
++gce2D_SOURCE;
++
++/* Pipes. */
++typedef enum _gcePIPE_SELECT
++{
++ gcvPIPE_INVALID = ~0,
++ gcvPIPE_3D = 0,
++ gcvPIPE_2D
++}
++gcePIPE_SELECT;
++
++/* Hardware type. */
++typedef enum _gceHARDWARE_TYPE
++{
++ gcvHARDWARE_INVALID = 0x00,
++ gcvHARDWARE_3D = 0x01,
++ gcvHARDWARE_2D = 0x02,
++ gcvHARDWARE_VG = 0x04,
++
++ gcvHARDWARE_3D2D = gcvHARDWARE_3D | gcvHARDWARE_2D
++}
++gceHARDWARE_TYPE;
++
++#define gcdCHIP_COUNT 3
++
++typedef enum _gceMMU_MODE
++{
++ gcvMMU_MODE_1K,
++ gcvMMU_MODE_4K,
++} gceMMU_MODE;
++
++/* User signal command codes. */
++typedef enum _gceUSER_SIGNAL_COMMAND_CODES
++{
++ gcvUSER_SIGNAL_CREATE,
++ gcvUSER_SIGNAL_DESTROY,
++ gcvUSER_SIGNAL_SIGNAL,
++ gcvUSER_SIGNAL_WAIT,
++ gcvUSER_SIGNAL_MAP,
++ gcvUSER_SIGNAL_UNMAP,
++}
++gceUSER_SIGNAL_COMMAND_CODES;
++
++/* Sync point command codes. */
++typedef enum _gceSYNC_POINT_COMMAND_CODES
++{
++ gcvSYNC_POINT_CREATE,
++ gcvSYNC_POINT_DESTROY,
++ gcvSYNC_POINT_SIGNAL,
++}
++gceSYNC_POINT_COMMAND_CODES;
++
++/* Event locations. */
++typedef enum _gceKERNEL_WHERE
++{
++ gcvKERNEL_COMMAND,
++ gcvKERNEL_VERTEX,
++ gcvKERNEL_TRIANGLE,
++ gcvKERNEL_TEXTURE,
++ gcvKERNEL_PIXEL,
++}
++gceKERNEL_WHERE;
++
++#if gcdENABLE_VG
++/* Hardware blocks. */
++typedef enum _gceBLOCK
++{
++ gcvBLOCK_COMMAND,
++ gcvBLOCK_TESSELLATOR,
++ gcvBLOCK_TESSELLATOR2,
++ gcvBLOCK_TESSELLATOR3,
++ gcvBLOCK_RASTER,
++ gcvBLOCK_VG,
++ gcvBLOCK_VG2,
++ gcvBLOCK_VG3,
++ gcvBLOCK_PIXEL,
++
++ /* Number of defined blocks. */
++ gcvBLOCK_COUNT
++}
++gceBLOCK;
++#endif
++
++/* gcdDUMP message type. */
++typedef enum _gceDEBUG_MESSAGE_TYPE
++{
++ gcvMESSAGE_TEXT,
++ gcvMESSAGE_DUMP
++}
++gceDEBUG_MESSAGE_TYPE;
++
++typedef enum _gceSPECIAL_HINT
++{
++ gceSPECIAL_HINT0,
++ gceSPECIAL_HINT1,
++ gceSPECIAL_HINT2,
++ gceSPECIAL_HINT3,
++    /* For disabling dynamic stream/index. */
++ gceSPECIAL_HINT4
++}
++gceSPECIAL_HINT;
++
++typedef enum _gceMACHINECODE
++{
++ gcvMACHINECODE_HOVERJET0 = 0x0,
++ gcvMACHINECODE_HOVERJET1 ,
++
++ gcvMACHINECODE_TAIJI0 ,
++ gcvMACHINECODE_TAIJI1 ,
++ gcvMACHINECODE_TAIJI2 ,
++
++ gcvMACHINECODE_ANTUTU0 ,
++
++ gcvMACHINECODE_GLB27_RELEASE_0,
++ gcvMACHINECODE_GLB27_RELEASE_1,
++
++ gcvMACHINECODE_WAVESCAPE0 ,
++ gcvMACHINECODE_WAVESCAPE1 ,
++
++ gcvMACHINECODE_NENAMARKV2_4_0 ,
++ gcvMACHINECODE_NENAMARKV2_4_1 ,
++
++ gcvMACHINECODE_GLB25_RELEASE_0,
++ gcvMACHINECODE_GLB25_RELEASE_1,
++ gcvMACHINECODE_GLB25_RELEASE_2,
++}
++gceMACHINECODE;
++
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckCONTEXT * gckCONTEXT;
++typedef struct _gcoCMDBUF * gcoCMDBUF;
++typedef struct _gcsSTATE_DELTA * gcsSTATE_DELTA_PTR;
++typedef struct _gcsQUEUE * gcsQUEUE_PTR;
++typedef struct _gcoQUEUE * gcoQUEUE;
++typedef struct _gcsHAL_INTERFACE * gcsHAL_INTERFACE_PTR;
++typedef struct _gcs2D_PROFILE * gcs2D_PROFILE_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVGHARDWARE * gcoVGHARDWARE;
++typedef struct _gcoVGBUFFER * gcoVGBUFFER;
++typedef struct _gckVGHARDWARE * gckVGHARDWARE;
++typedef struct _gcsVGCONTEXT * gcsVGCONTEXT_PTR;
++typedef struct _gcsVGCONTEXT_MAP * gcsVGCONTEXT_MAP_PTR;
++typedef struct _gcsVGCMDQUEUE * gcsVGCMDQUEUE_PTR;
++typedef struct _gcsTASK_MASTER_TABLE * gcsTASK_MASTER_TABLE_PTR;
++typedef struct _gckVGKERNEL * gckVGKERNEL;
++typedef void * gctTHREAD;
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_enum_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,2661 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_h_
++#define __gc_hal_h_
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++#include "gc_hal_profiler.h"
++#include "gc_hal_driver.h"
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_statistics.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* Alignment Macros *******************************
++\******************************************************************************/
++
++#define gcmALIGN(n, align) \
++( \
++ ((n) + ((align) - 1)) & ~((align) - 1) \
++)
++
++#define gcmALIGN_BASE(n, align) \
++( \
++ ((n) & ~((align) - 1)) \
++)
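++
++/*
++** Illustrative note (documentation aid, not part of the original header):
++** as written, both macros only behave as intended when 'align' is a power of
++** two. A few worked values:
++**
++**     gcmALIGN(100, 64)      == 128   .. round up to the next 64 boundary ..
++**     gcmALIGN(128, 64)      == 128   .. already aligned, unchanged ..
++**     gcmALIGN_BASE(100, 64) == 64    .. round down to the previous boundary ..
++*/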
++
++/******************************************************************************\
++***************************** Element Count Macro *****************************
++\******************************************************************************/
++
++#define gcmSIZEOF(a) \
++( \
++ (gctSIZE_T) (sizeof(a)) \
++)
++
++#define gcmCOUNTOF(a) \
++( \
++ sizeof(a) / sizeof(a[0]) \
++)
++
++/******************************************************************************\
++********************************* Cast Macro **********************************
++\******************************************************************************/
++#define gcmNAME_TO_PTR(na) \
++ gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmPTR_TO_NAME(ptr) \
++ gckKERNEL_AllocateNameFromPointer(kernel, ptr)
++
++#define gcmRELEASE_NAME(na) \
++ gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na))
++
++#ifdef __LP64__
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (gctUINTPTR_T) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (u)\
++)
++
++#else /* 32 bit */
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (gctUINTPTR_T) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (gctUINTPTR_T) (u)\
++)
++
++#endif
++
++#define gcmUINT64_TO_TYPE(u, t) \
++( \
++ (t) (gctUINTPTR_T) (u)\
++)
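++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** the cast macros above can round-trip a pointer through a 64-bit field,
++** e.g. the gctUINT64 node handles used elsewhere in the HAL:
++**
++**     gctUINT64  handle = gcmPTR_TO_UINT64(logical);
++**     gctPOINTER back   = gcmUINT64_TO_PTR(handle);
++**
++** On both 32-bit and __LP64__ builds 'back' equals 'logical' again.
++*/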
++
++/******************************************************************************\
++******************************** Useful Macro *********************************
++\******************************************************************************/
++
++#define gcvINVALID_ADDRESS ~0U
++
++#define gcmGET_PRE_ROTATION(rotate) \
++ ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)))
++
++#define gcmGET_POST_ROTATION(rotate) \
++ ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))
++
++/******************************************************************************\
++******************************** gcsOBJECT Object *******************************
++\******************************************************************************/
++
++/* Type of objects. */
++typedef enum _gceOBJECT_TYPE
++{
++ gcvOBJ_UNKNOWN = 0,
++ gcvOBJ_2D = gcmCC('2','D',' ',' '),
++ gcvOBJ_3D = gcmCC('3','D',' ',' '),
++ gcvOBJ_ATTRIBUTE = gcmCC('A','T','T','R'),
++ gcvOBJ_BRUSHCACHE = gcmCC('B','R','U','$'),
++ gcvOBJ_BRUSHNODE = gcmCC('B','R','U','n'),
++ gcvOBJ_BRUSH = gcmCC('B','R','U','o'),
++ gcvOBJ_BUFFER = gcmCC('B','U','F','R'),
++ gcvOBJ_COMMAND = gcmCC('C','M','D',' '),
++ gcvOBJ_COMMANDBUFFER = gcmCC('C','M','D','B'),
++ gcvOBJ_CONTEXT = gcmCC('C','T','X','T'),
++ gcvOBJ_DEVICE = gcmCC('D','E','V',' '),
++ gcvOBJ_DUMP = gcmCC('D','U','M','P'),
++ gcvOBJ_EVENT = gcmCC('E','V','N','T'),
++ gcvOBJ_FUNCTION = gcmCC('F','U','N','C'),
++ gcvOBJ_HAL = gcmCC('H','A','L',' '),
++ gcvOBJ_HARDWARE = gcmCC('H','A','R','D'),
++ gcvOBJ_HEAP = gcmCC('H','E','A','P'),
++ gcvOBJ_INDEX = gcmCC('I','N','D','X'),
++ gcvOBJ_INTERRUPT = gcmCC('I','N','T','R'),
++ gcvOBJ_KERNEL = gcmCC('K','E','R','N'),
++ gcvOBJ_KERNEL_FUNCTION = gcmCC('K','F','C','N'),
++ gcvOBJ_MEMORYBUFFER = gcmCC('M','E','M','B'),
++ gcvOBJ_MMU = gcmCC('M','M','U',' '),
++ gcvOBJ_OS = gcmCC('O','S',' ',' '),
++ gcvOBJ_OUTPUT = gcmCC('O','U','T','P'),
++ gcvOBJ_PAINT = gcmCC('P','N','T',' '),
++ gcvOBJ_PATH = gcmCC('P','A','T','H'),
++ gcvOBJ_QUEUE = gcmCC('Q','U','E',' '),
++ gcvOBJ_SAMPLER = gcmCC('S','A','M','P'),
++ gcvOBJ_SHADER = gcmCC('S','H','D','R'),
++ gcvOBJ_STREAM = gcmCC('S','T','R','M'),
++ gcvOBJ_SURF = gcmCC('S','U','R','F'),
++ gcvOBJ_TEXTURE = gcmCC('T','X','T','R'),
++ gcvOBJ_UNIFORM = gcmCC('U','N','I','F'),
++ gcvOBJ_VARIABLE = gcmCC('V','A','R','I'),
++ gcvOBJ_VERTEX = gcmCC('V','R','T','X'),
++ gcvOBJ_VIDMEM = gcmCC('V','M','E','M'),
++ gcvOBJ_VG = gcmCC('V','G',' ',' '),
++}
++gceOBJECT_TYPE;
++
++/* gcsOBJECT object definition. */
++typedef struct _gcsOBJECT
++{
++ /* Type of an object. */
++ gceOBJECT_TYPE type;
++}
++gcsOBJECT;
++
++typedef struct _gckHARDWARE * gckHARDWARE;
++
++/* CORE flags. */
++typedef enum _gceCORE
++{
++ gcvCORE_MAJOR = 0x0,
++ gcvCORE_2D = 0x1,
++ gcvCORE_VG = 0x2
++}
++gceCORE;
++
++#define gcdMAX_GPU_COUNT 3
++
++/*******************************************************************************
++**
++** gcmVERIFY_OBJECT
++**
++** Assert if an object is invalid or is not of the specified type. If the
++** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT
++** will be returned from the current function. In retail mode this macro
++** does nothing.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define _gcmVERIFY_OBJECT(prefix, obj, t) \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ }
++
++# define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t)
++# define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t)
++#else
++# define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++#endif
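++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** a kernel-side function would typically validate its object arguments on
++** entry, for example:
++**
++**     gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++**
++** In debug builds a NULL pointer or a type mismatch makes the current
++** function return gcvSTATUS_INVALID_OBJECT; in retail builds the macro
++** expands to an empty statement.
++*/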
++
++/******************************************************************************/
++/*VERIFY_OBJECT if special return expected*/
++/******************************************************************************/
++#ifndef EGL_API_ANDROID
++# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \
++ do \
++ { \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal)
++#else
++#   define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) do {} while (gcvFALSE)
++#   define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************\
++********************************** gckOS Object *********************************
++\******************************************************************************/
++
++/* Construct a new gckOS object. */
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ );
++
++/* Destroy a gckOS object. */
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckOS_QueryVideoMemory(
++ IN gckOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Wrapper for allocating memory. */
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Wrapper for freeing memory. */
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Lock pages. */
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Logical,
++#endif
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Logical,
++#endif
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Unlock pages. */
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Free paged memory. */
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate non-paged memory. */
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non-paged memory. */
++gceSTATUS
++gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get the number of bytes per page. */
++gceSTATUS
++gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ );
++
++/* Map physical memory. */
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap previously mapped physical memory. */
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a 32-bit memory location. */
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ );
++
++/* Map physical memory into the process space. */
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap physical memory from the specified process space. */
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ );
++
++/* Unmap physical memory from the process space. */
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Unmap user logical memory out of physical memory.
++ * This function is currently only supported on Linux.
++ */
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Create a new mutex. */
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
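++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** the usual create / acquire / release / delete pattern. gcvINFINITE is
++** assumed to be defined in the base HAL headers as the "wait forever"
++** timeout value.
++**
++**     gctPOINTER mutex;
++**
++**     gckOS_CreateMutex(Os, &mutex);
++**
++**     gckOS_AcquireMutex(Os, mutex, gcvINFINITE);
++**     .. protected section ..
++**     gckOS_ReleaseMutex(Os, mutex);
++**
++**     gckOS_DeleteMutex(Os, mutex);
++*/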
++
++/* Atomically exchange a pair of 32-bit values. */
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ );
++
++/* Atomically exchange a pair of pointers. */
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ );
++
++#if gcdSMP
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++#endif
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ );
++
++
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++**          Pointer to a variable that receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++**          Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++**          Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
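++
++/*
++** Illustrative sketch (documentation aid, not part of the original header):
++** using the atom functions above as a simple reference counter. 'oldValue'
++** receives the value the atom held before the increment or decrement.
++**
++**     gctPOINTER refCount;
++**     gctINT32   oldValue;
++**
++**     gckOS_AtomConstruct(Os, &refCount);
++**     gckOS_AtomSet(Os, refCount, 1);
++**
++**     gckOS_AtomIncrement(Os, refCount, &oldValue);    .. take a reference ..
++**     gckOS_AtomDecrement(Os, refCount, &oldValue);    .. drop a reference ..
++**
++**     gckOS_AtomDestroy(Os, refCount);
++*/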
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ );
++
++/* Get time in milliseconds. */
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ );
++
++/* Compare time value. */
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ );
++
++/* Memory barrier. */
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ );
++
++/* Map user pointer. */
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Unmap user pointer. */
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ );
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++**  Query whether the memory can be accessed or mapped directly, or whether it
++**  has to be copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++**          gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
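++/* Illustrative usage sketch (not part of the original HAL sources): how the
++   copy/map helpers above are typically combined when kernel code needs to
++   read a user-space structure. 'Os', 'processID', 'userPtr' and RECORD_SIZE
++   are placeholders; error checking is omitted for brevity.
++
++       gctBOOL needCopy;
++       gctPOINTER kernelPtr;
++       gctUINT8 record[RECORD_SIZE];
++
++       gckOS_QueryNeedCopy(Os, processID, &needCopy);
++
++       if (needCopy)
++       {
++           // The user buffer cannot be touched directly: copy it in.
++           gckOS_CopyFromUserData(Os, record, userPtr, RECORD_SIZE);
++           kernelPtr = record;
++       }
++       else
++       {
++           // The user buffer can be mapped into the kernel address space.
++           gckOS_MapUserPointer(Os, userPtr, RECORD_SIZE, &kernelPtr);
++       }
++
++       // ... use kernelPtr, then undo the mapping if one was made ...
++
++       if (!needCopy)
++       {
++           gckOS_UnmapUserPointer(Os, userPtr, RECORD_SIZE, kernelPtr);
++       }
++*/
++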
++#ifdef __QNXNTO__
++/* Map user physical address. */
++gceSTATUS
++gckOS_MapUserPhysical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Phys,
++ OUT gctPOINTER * KernelPointer
++ );
++#endif
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Perform a memory copy. */
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ );
++
++/* Zero memory. */
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Device I/O control to the kernel HAL layer. */
++gceSTATUS
++gckOS_DeviceControl(
++ IN gckOS Os,
++ IN gctBOOL FromUser,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ OUT gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++gceSTATUS
++gckOS_GetCurrentProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ );
++
++/******************************************************************************\
++********************************** Signal Object *********************************
++\******************************************************************************/
++
++/* Create a signal. */
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
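++/* Illustrative usage sketch (not part of the original HAL sources): the basic
++   life cycle of a kernel signal, assuming a valid gckOS object. The wait
++   argument is a timeout; the infinite-wait constant is not declared in this
++   excerpt, so a plain number is shown.
++
++       gctSIGNAL signal;
++
++       gckOS_CreateSignal(Os, gcvFALSE, &signal);   // auto-reset signal
++
++       // Producer side (e.g. from an event handler):
++       gckOS_Signal(Os, signal, gcvTRUE);
++
++       // Consumer side: block until signaled or until the timeout expires.
++       gckOS_WaitSignal(Os, signal, 1000);
++
++       gckOS_DestroySignal(Os, signal);
++*/
++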
++/* Map a user signal to the kernel space. */
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ );
++
++/* Unmap a user signal. */
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Map user memory. */
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/******************************************************************************\
++************************** Android Native Fence Sync ***************************
++\******************************************************************************/
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ );
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ );
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ );
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++#if !USE_NEW_LINUX_SIGNAL
++/* Create signal to be used in the user space. */
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ );
++
++/* Destroy signal used in the user space. */
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ );
++
++/* Wait for signal used in the user space. */
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ );
++
++/* Signal a signal used in the user space. */
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ );
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++/* Set a signal owned by a process. */
++#if defined(__QNXNTO__)
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctINT Recvid,
++ IN gctINT Coid
++ );
++#else
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ );
++#endif
++
++/******************************************************************************\
++** Cache Support
++*/
++
++gceSTATUS
++gckOS_CacheClean(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheFlush(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheInvalidate(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++/******************************************************************************\
++** Debug Support
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gckOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++/*******************************************************************************
++** Broadcast interface.
++*/
++
++typedef enum _gceBROADCAST
++{
++ /* GPU might be idle. */
++ gcvBROADCAST_GPU_IDLE,
++
++ /* A commit is going to happen. */
++ gcvBROADCAST_GPU_COMMIT,
++
++ /* GPU seems to be stuck. */
++ gcvBROADCAST_GPU_STUCK,
++
++ /* First process gets attached. */
++ gcvBROADCAST_FIRST_PROCESS,
++
++ /* Last process gets detached. */
++ gcvBROADCAST_LAST_PROCESS,
++
++ /* AXI bus error. */
++ gcvBROADCAST_AXI_BUS_ERROR,
++}
++gceBROADCAST;
++
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ );
++
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ );
++
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ );
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ );
++
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ );
++
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ );
++
++/*******************************************************************************
++** Semaphores.
++*/
++
++/* Create a new semaphore. */
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++#endif
++
++/* Delete a semaphore. */
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Acquire a semaphore. */
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Try to acquire a semaphore. */
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Release a semaphore. */
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
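++/* Illustrative usage sketch (not part of the original HAL sources): guarding a
++   critical section with the semaphore API above, assuming a valid gckOS
++   object.
++
++       gctPOINTER sem;
++
++       gckOS_CreateSemaphore(Os, &sem);
++
++       gckOS_AcquireSemaphore(Os, sem);
++       // ... critical section ...
++       gckOS_ReleaseSemaphore(Os, sem);
++
++       gckOS_DestroySemaphore(Os, sem);
++*/
++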
++/*******************************************************************************
++** Timer API.
++*/
++
++typedef void (*gctTIMERFUNCTION)(gctPOINTER);
++
++/* Create a timer. */
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ );
++
++/* Destroy a timer. */
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/* Start a timer. */
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ );
++
++/* Stop a timer. */
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
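++/* Illustrative usage sketch (not part of the original HAL sources): a one-shot
++   timer that fires a callback after a delay. 'Os', 'context' and the delay
++   unit are placeholders/assumptions.
++
++       static void _OnTimeout(gctPOINTER Data)
++       {
++           // ... react to the timeout ...
++       }
++
++       gctPOINTER timer;
++
++       gckOS_CreateTimer(Os, _OnTimeout, context, &timer);
++       gckOS_StartTimer(Os, timer, 500);
++
++       // ...
++
++       gckOS_StopTimer(Os, timer);
++       gckOS_DestroyTimer(Os, timer);
++*/
++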
++/******************************************************************************\
++********************************* gckHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gckHEAP * gckHEAP;
++
++/* Construct a new gckHEAP object. */
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ );
++
++/* Destroy a gckHEAP object. */
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++/* Profile the heap. */
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ );
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++
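++/* Illustrative usage sketch (not part of the original HAL sources): a heap
++   serving small kernel-side allocations, assuming a valid gckOS object. The
++   65536 passed to the constructor is a placeholder allocation size.
++
++       gckHEAP heap;
++       gctPOINTER node;
++
++       gckHEAP_Construct(Os, 65536, &heap);
++
++       gckHEAP_Allocate(heap, 128, &node);
++       // ... use node ...
++       gckHEAP_Free(heap, node);
++
++       gckHEAP_Destroy(heap);
++*/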
++
++/******************************************************************************\
++******************************** gckVIDMEM Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVIDMEM * gckVIDMEM;
++typedef struct _gckKERNEL * gckKERNEL;
++typedef struct _gckDB * gckDB;
++typedef struct _gckDVFS * gckDVFS;
++
++/* Construct a new gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T Banking,
++ OUT gckVIDMEM * Memory
++ );
++
++/* Destroy a gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ );
++
++/* Allocate rectangular memory. */
++gceSTATUS
++gckVIDMEM_Allocate(
++ IN gckVIDMEM Memory,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT BytesPerPixel,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Allocate linear memory. */
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckVIDMEM_Free(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/* Lock memory. */
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address
++ );
++
++/* Unlock memory. */
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchronous
++ );
++
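++/* Illustrative usage sketch (not part of the original HAL sources): allocating
++   a linear buffer from a video memory heap and obtaining its GPU address.
++   'Memory', 'Kernel' and 'surfType' stand in for a constructed gckVIDMEM
++   object, a gckKERNEL object and a gceSURF_TYPE value.
++
++       gcuVIDMEM_NODE_PTR node;
++       gctUINT32 gpuAddress;
++       gctBOOL async;
++
++       gckVIDMEM_AllocateLinear(Memory, 4096, 64, surfType, &node);
++
++       gckVIDMEM_Lock(Kernel, node, gcvFALSE, &gpuAddress);
++       // ... hand gpuAddress to the hardware ...
++       gckVIDMEM_Unlock(Kernel, node, surfType, &async);
++
++       gckVIDMEM_Free(node);
++*/
++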
++/* Construct a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Destroy a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/******************************************************************************\
++******************************** gckKERNEL Object ******************************
++\******************************************************************************/
++
++struct _gcsHAL_INTERFACE;
++
++/* Notifications. */
++typedef enum _gceNOTIFY
++{
++ gcvNOTIFY_INTERRUPT,
++ gcvNOTIFY_COMMAND_QUEUE,
++}
++gceNOTIFY;
++
++/* Flush flags. */
++typedef enum _gceKERNEL_FLUSH
++{
++ gcvFLUSH_COLOR = 0x01,
++ gcvFLUSH_DEPTH = 0x02,
++ gcvFLUSH_TEXTURE = 0x04,
++ gcvFLUSH_2D = 0x08,
++ gcvFLUSH_ALL = gcvFLUSH_COLOR
++ | gcvFLUSH_DEPTH
++ | gcvFLUSH_TEXTURE
++ | gcvFLUSH_2D,
++}
++gceKERNEL_FLUSH;
++
++/* Construct a new gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ );
++
++/* Destroy a gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Lookup the gckVIDMEM object for a pool. */
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++#if gcdUSE_VIDMEM_PER_PID
++gceSTATUS
++gckKERNEL_GetVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ IN gctUINT32 Pid,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_CreateVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ IN gctUINT32 Pid,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_RemoveVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM VideoMemory
++ );
++#endif
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++#ifdef __QNXNTO__
++/* Unmap video memory. */
++gceSTATUS
++gckKERNEL_UnmapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++/* Map memory. */
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Notification of events. */
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ );
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ );
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ );
++
++/* Set the timeout value for HW operations. */
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ );
++
++/* Get access to the user data. */
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Release resources associated with the user data connection. */
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
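++/* Illustrative usage sketch (not part of the original HAL sources): the usual
++   bracket around a user-supplied buffer inside a dispatch routine. 'Kernel',
++   'Os', 'processID', 'userPtr' and the 64-byte size are placeholders, and
++   FlushData is assumed to control whether modifications are written back to
++   user space.
++
++       gctBOOL needCopy;
++       gctPOINTER kernelPtr;
++       gctUINT8 staticRecord[64];
++
++       gckOS_QueryNeedCopy(Os, processID, &needCopy);
++
++       gckKERNEL_OpenUserData(Kernel, needCopy, staticRecord,
++                              userPtr, 64, &kernelPtr);
++
++       // ... read or modify the data through kernelPtr ...
++
++       gckKERNEL_CloseUserData(Kernel, needCopy, gcvTRUE,
++                               userPtr, 64, &kernelPtr);
++*/
++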
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Frequency
++ );
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ );
++
++/******************************************************************************\
++******************************* gckHARDWARE Object *****************************
++\******************************************************************************/
++
++/* Construct a new gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ );
++
++/* Destroy a gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ );
++
++/* Get hardware type. */
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * Alignment,
++ OUT gctSIZE_T * ReservedHead,
++ OUT gctSIZE_T * ReservedTail
++ );
++
++/* Add a WAIT/LINK pair in the command queue. */
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctSIZE_T * WaitBytes
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Physical,
++ IN gctBOOL PhysicalAddresses,
++#endif
++ IN gctSIZE_T Bytes
++ );
++
++/* Add an END command in the command queue. */
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a NOP command in the command queue. */
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a WAIT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Wait(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Count,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a PIPESELECT command in the command queue. */
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a LINK command in the command queue. */
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER FetchAddress,
++ IN gctSIZE_T FetchSize,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add an EVENT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ );
++
++/* Query the shader support. */
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++/* Split a hardware-specific address into a pool and offset. */
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Update command queue tail pointer. */
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++#ifdef __QNXNTO__
++/* Convert physical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertPhysical(
++ IN gckHARDWARE Hardware,
++ IN gctPHYS_ADDR Physical,
++ OUT gctUINT32 * Address
++ );
++#endif
++
++/* Interrupt manager. */
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL InterruptValid
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ );
++
++/* Set the page table base address. */
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ );
++
++gceSTATUS
++gckHARDWARE_ReadInterrupt(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ );
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++);
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++);
++#endif
++
++/* Profile 2D Engine. */
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OUT gcs2D_PROFILE_PTR Profile
++ );
++
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ );
++
++typedef gceSTATUS (*gctISRMANAGERFUNC)(gctPOINTER Context, gceCORE Core);
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ );
++
++/* Start a composition. */
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ );
++
++/* Check for Hardware features. */
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ );
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Frequency
++ );
++
++#if !gcdENABLE_VG
++/******************************************************************************\
++***************************** gckINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckINTERRUPT * gckINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckINTERRUPT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_Destroy(
++ IN gckINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_SetHandler(
++ IN gckINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckINTERRUPT_Notify(
++ IN gckINTERRUPT Interrupt,
++ IN gctBOOL Valid
++ );
++#endif
++/******************************************************************************\
++******************************** gckEVENT Object *******************************
++\******************************************************************************/
++
++typedef struct _gckEVENT * gckEVENT;
++
++/* Construct a new gckEVENT object. */
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ );
++
++/* Destroy a gckEVENT object. */
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ );
++
++/* Add a new event to the list of events. */
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ );
++
++/* Schedule a FreeNonPagedMemory event. */
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeContiguousMemory event. */
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVideoMemory event. */
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a signal event. */
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule an Unlock event. */
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++/* Schedule a FreeVirtualCommandBuffer event. */
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++#endif
++
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ );
++
++/* Commit an event queue. */
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ );
++
++/* Schedule a composition event. */
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ );
++/******************************************************************************\
++******************************* gckCOMMAND Object ******************************
++\******************************************************************************/
++
++typedef struct _gckCOMMAND * gckCOMMAND;
++
++/* Construct a new gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ );
++
++/* Destroy a gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ );
++
++/* Acquire command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Release command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Start the command queue. */
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ );
++
++/* Stop the command queue. */
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ );
++
++/* Reserve space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctSIZE_T * BufferSize
++ );
++
++/* Execute reserved space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes
++ );
++
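++/* Illustrative usage sketch (not part of the original HAL sources): reserving
++   space in the kernel command queue, filling it, and kicking it off. Whether
++   the commit lock must be taken around this sequence depends on the calling
++   context; command encoding itself is hardware specific and only indicated.
++
++       gctPOINTER buffer;
++       gctSIZE_T bufferSize;
++
++       gckCOMMAND_EnterCommit(Command, gcvFALSE);
++
++       gckCOMMAND_Reserve(Command, 64, &buffer, &bufferSize);
++       // ... write up to 64 bytes of commands into 'buffer' ...
++       gckCOMMAND_Execute(Command, 64);
++
++       gckCOMMAND_ExitCommit(Command, gcvFALSE);
++*/
++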
++/* Stall the command queue. */
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Attach user process. */
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ );
++
++/* Detach user process. */
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ );
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ );
++#endif
++
++/******************************************************************************\
++********************************* gckMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckMMU * gckMMU;
++
++/* Construct a new gckMMU object. */
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ );
++
++/* Destroy a gckMMU object. */
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ );
++
++/* Enable the MMU. */
++gceSTATUS
++gckMMU_Enable(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
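++/* Illustrative usage sketch (not part of the original HAL sources): reserving
++   a GPU-visible address range for a number of pages and releasing it again.
++   Programming the individual entries (gckMMU_SetPage above) is only indicated
++   because the entry layout is hardware specific.
++
++       gctPOINTER pageTable;
++       gctUINT32 gpuAddress;
++
++       gckMMU_AllocatePages(Mmu, 16, &pageTable, &gpuAddress);
++
++       // ... fill in the 16 page table entries for 'gpuAddress' ...
++
++       gckMMU_FreePages(Mmu, pageTable, 16);
++*/
++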
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_InsertNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node);
++
++gceSTATUS
++gckMMU_RemoveNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node);
++#endif
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckMMU Mmu,
++ IN gctUINT32 Pid
++ );
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu
++ );
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ );
++
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Clear,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Clear,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ );
++#endif
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ );
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ gckHARDWARE Hardware
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#if gcdENABLE_VG
++#include "gc_hal_vg.h"
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_kernel_buffer.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,185 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_buffer_h_
++#define __gc_hal_kernel_buffer_h_
++
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++************************ Command Buffer and Event Objects **********************
++\******************************************************************************/
++
++/* The number of context buffers per user. */
++#define gcdCONTEXT_BUFFER_COUNT 2
++
++/* State delta record. */
++typedef struct _gcsSTATE_DELTA_RECORD * gcsSTATE_DELTA_RECORD_PTR;
++typedef struct _gcsSTATE_DELTA_RECORD
++{
++ /* State address. */
++ gctUINT address;
++
++ /* State mask. */
++ gctUINT32 mask;
++
++ /* State data. */
++ gctUINT32 data;
++}
++gcsSTATE_DELTA_RECORD;
++
++/* State delta. */
++typedef struct _gcsSTATE_DELTA
++{
++ /* For debugging: the sequence number of the delta in order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Main state delta ID. Every time state delta structure gets reinitialized,
++ main ID is incremented. If main state ID overflows, all map entry IDs get
++ reinitialized to make sure there is no potential erroneous match after
++ the overflow.*/
++ gctUINT id;
++
++ /* The number of contexts pending modification by the delta. */
++ gctINT refCount;
++
++ /* Vertex element count for the delta buffer. */
++ gctUINT elementCount;
++
++ /* Number of states currently stored in the record array. */
++ gctUINT recordCount;
++
++ /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */
++ gctUINT64 recordArray;
++
++ /* Map entry ID is used for map entry validation. If map entry ID does not
++ match the main state delta ID, the entry and the corresponding state are
++ considered not in use. */
++ gctUINT64 mapEntryID;
++ gctUINT mapEntryIDSize;
++
++ /* If the map entry ID matches the main state delta ID, index points to
++ the state record in the record array. */
++ gctUINT64 mapEntryIndex;
++
++ /* Previous and next state deltas in gcsSTATE_DELTA. */
++ gctUINT64 prev;
++ gctUINT64 next;
++}
++gcsSTATE_DELTA;
++
++/* Command buffer object. */
++struct _gcoCMDBUF
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Command buffer entry and exit pipes. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Feature usage flags. */
++ gctBOOL using2D;
++ gctBOOL using3D;
++ gctBOOL usingFilterBlit;
++ gctBOOL usingPalette;
++
++ /* Physical address of command buffer. Just a name. */
++ gctUINT32 physical;
++
++ /* Logical address of command buffer. */
++ gctUINT64 logical;
++
++ /* Number of bytes in command buffer. */
++ gctUINT bytes;
++
++ /* Start offset into the command buffer. */
++ gctUINT startOffset;
++
++ /* Current offset into the command buffer. */
++ gctUINT offset;
++
++ /* Number of free bytes in command buffer. */
++ gctUINT free;
++
++ /* Location of the last reserved area. */
++ gctUINT64 lastReserve;
++ gctUINT lastOffset;
++
++#if gcdSECURE_USER
++ /* Hint array for the current command buffer. */
++ gctUINT hintArraySize;
++ gctUINT64 hintArray;
++ gctUINT64 hintArrayTail;
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Last load state command location and hardware address. */
++ gctUINT64 lastLoadStatePtr;
++ gctUINT32 lastLoadStateAddress;
++ gctUINT32 lastLoadStateCount;
++#endif
++};
++
++typedef struct _gcsQUEUE
++{
++ /* Pointer to next gcsQUEUE structure in gcsQUEUE. */
++ gctUINT64 next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE iface;
++}
++gcsQUEUE;
++
++/* Event queue. */
++struct _gcoQUEUE
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to current event queue. */
++ gcsQUEUE_PTR head;
++ gcsQUEUE_PTR tail;
++
++#ifdef __QNXNTO__
++ /* Buffer for records. */
++ gcsQUEUE_PTR records;
++ gctUINT32 freeBytes;
++ gctUINT32 offset;
++#else
++ /* List of free records. */
++ gcsQUEUE_PTR freeList;
++#endif
++ #define gcdIN_QUEUE_RECORD_LIMIT 16
++ /* Number of records currently in queue */
++ gctUINT32 recordCount;
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_buffer_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_mem.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,530 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/*
++** Include file for the local memory management.
++*/
++
++#ifndef __gc_hal_mem_h_
++#define __gc_hal_mem_h_
++#ifndef VIVANTE_NO_3D
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Usage:
++
++ The macros to declare MemPool type and functions are
++ gcmMEM_DeclareFSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareVSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix)
++
++ The data structures for MemPool are
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++
++ The MemPool constructor and destructor functions are
++ gcfMEM_InitFSMemPool(gcsMEM_FS_MEM_POOL *, gcoOS, gctUINT, gctUINT);
++ gcfMEM_FreeFSMemPool(gcsMEM_FS_MEM_POOL *);
++ gcfMEM_InitVSMemPool(gcsMEM_VS_MEM_POOL *, gcoOS, gctUINT, gctBOOL);
++ gcfMEM_FreeVSMemPool(gcsMEM_VS_MEM_POOL *);
++ gcfMEM_InitAFSMemPool(gcsMEM_AFS_MEM_POOL *, gcoOS, gctUINT);
++ gcfMEM_FreeAFSMemPool(gcsMEM_AFS_MEM_POOL *);
++
++ FS: for Fixed-Size data structures
++ VS: for Variable-size data structures
++ AFS: for Array of Fixed-Size data structures
++
++
++ // Example 1: For a fixed-size data structure, struct gcsNode.
++ // It is used locally in a file, so the functions are static without prefix.
++ // At top level, declare allocate and free functions.
++ // The first argument is the data type.
++ // The second argument is the short name used in the functions.
++ gcmMEM_DeclareFSMemPool(struct gcsNode, Node, );
++
++ // The previous macro creates two inline functions,
++ // _AllocateNode and _FreeNode.
++
++ // In function or struct
++ gcsMEM_FS_MEM_POOL nodeMemPool;
++
++ // In function,
++ struct gcsNode * node;
++ gceSTATUS status;
++
++ // Before using the memory pool, initialize it.
++ // The second argument is the gcoOS object.
++ // The third argument is the number of data structures to allocate for each chunk.
++ status = gcfMEM_InitFSMemPool(&nodeMemPool, os, 100, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node.
++ status = _AllocateNode(nodeMemPool, &node);
++ ...
++ // Free a node.
++ _FreeNode(nodeMemPool, node);
++
++ // After using the memory pool, free it.
++ gcfMEM_FreeFSMemPool(&nodeMemPool);
++
++
++ // Example 2: For array of fixed-size data structures, struct gcsNode.
++ // It is used in several files, so the functions are extern with prefix.
++ // At top level, declare allocate and free functions.
++ // The first argument is the data type, and the second one is the short name
++ // used in the functions.
++ gcmMEM_DeclareAFSMemPool(struct gcsNode, NodeArray, gcfOpt);
++
++ // The previous macro creates two inline functions,
++ // gcfOpt_AllocateNodeArray and gcfOpt_FreeNodeArray.
++
++ // In function or struct
++ gcsMEM_AFS_MEM_POOL nodeArrayMemPool;
++
++ // In function,
++ struct gcsNode * nodeArray;
++ gceSTATUS status;
++
++ // Before using the array memory pool, initialize it.
++ // The second argument is the gcoOS object, the third is the number of data
++ // structures to allocate for each chunk.
++ status = gcfMEM_InitAFSMemPool(&nodeArrayMemPool, os, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node array of size 100.
++ status = gcfOpt_AllocateNodeArray(nodeArrayMemPool, &nodeArray, 100);
++ ...
++ // Free a node array.
++ gcfOpt_FreeNodeArray(&nodeArrayMemPool, nodeArray);
++
++ // After using the array memory pool, free it.
++ gcfMEM_FreeAFSMemPool(&nodeArrayMemPool);
++
++*******************************************************************************/
++
++/*******************************************************************************
++** To switch back to use gcoOS_Allocate and gcoOS_Free, add
++** #define USE_LOCAL_MEMORY_POOL 0
++** before including this file.
++*******************************************************************************/
++#ifndef USE_LOCAL_MEMORY_POOL
++/*
++ USE_LOCAL_MEMORY_POOL
++
++ This define enables the local memory management to improve performance.
++*/
++#define USE_LOCAL_MEMORY_POOL 1
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Structures
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++#else
++ typedef gcoOS gcsMEM_FS_MEM_POOL;
++ typedef gcoOS gcsMEM_VS_MEM_POOL;
++ typedef gcoOS gcsMEM_AFS_MEM_POOL;
++#endif
++
++/*******************************************************************************
++** Memory Pool Macros
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ return(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_FSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName##List( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * FirstPointer, \
++ Type * LastPointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x FirstPointer=0x%x LastPointer=0x%x", MemPool, FirstPointer, LastPointer); \
++ status = gcfMEM_FSMemPoolFreeAList(MemPool, (gctPOINTER) FirstPointer, (gctPOINTER) LastPointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status;\
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++ Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pinter); \
++ status = gcfMEM_VSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_AFSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#else
++
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Functions
++*******************************************************************************/
++gceSTATUS
++gcfMEM_InitFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolGetANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeAList(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER FirstNode,
++ IN gctPOINTER LastNode
++ );
++
++gceSTATUS
++gcfMEM_InitVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT BlockSize,
++ IN gctBOOL RecycleFreeNode
++ );
++
++gceSTATUS
++gcfMEM_FreeVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolGetANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolFreeANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_InitAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolGetANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctUINT Count,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolFreeANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_mem_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_options.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,947 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_options_h_
++#define __gc_hal_options_h_
++
++/*
++ gcdPRINT_VERSION
++
++ Print HAL version.
++*/
++#ifndef gcdPRINT_VERSION
++# define gcdPRINT_VERSION 0
++#endif
++
++/*
++ USE_NEW_LINUX_SIGNAL
++
++ This define enables the Linux kernel signaling between kernel and user.
++*/
++#ifndef USE_NEW_LINUX_SIGNAL
++# define USE_NEW_LINUX_SIGNAL 0
++#endif
++
++/*
++ VIVANTE_PROFILER
++
++ This define enables the profiler.
++*/
++#ifndef VIVANTE_PROFILER
++# define VIVANTE_PROFILER 1
++#endif
++
++#ifndef VIVANTE_PROFILER_PERDRAW
++# define VIVANTE_PROFILER_PERDRAW 0
++#endif
++
++/*
++ VIVANTE_PROFILER_CONTEXT
++
++ This define enables per-hardware-context profiling.
++*/
++#ifndef VIVANTE_PROFILER_CONTEXT
++# define VIVANTE_PROFILER_CONTEXT 1
++#endif
++
++/*
++ gcdUSE_VG
++
++ Enable VG HAL layer (only for GC350).
++*/
++#ifndef gcdUSE_VG
++# define gcdUSE_VG 0
++#endif
++
++/*
++ USE_SW_FB
++
++ Set to 1 if the frame buffer memory cannot be accessed by the GPU.
++*/
++#ifndef USE_SW_FB
++# define USE_SW_FB 0
++#endif
++
++/*
++ USE_SUPER_SAMPLING
++
++ This define enables super-sampling support.
++*/
++#define USE_SUPER_SAMPLING 0
++
++/*
++ PROFILE_HAL_COUNTERS
++
++ This define enables HAL counter profiling support. HW and SHADER
++ counter profiling depend on this.
++*/
++#ifndef PROFILE_HAL_COUNTERS
++# define PROFILE_HAL_COUNTERS 1
++#endif
++
++/*
++ PROFILE_HW_COUNTERS
++
++ This define enables HW counter profiling support.
++*/
++#ifndef PROFILE_HW_COUNTERS
++# define PROFILE_HW_COUNTERS 1
++#endif
++
++/*
++ PROFILE_SHADER_COUNTERS
++
++ This define enables SHADER counter profiling support.
++*/
++#ifndef PROFILE_SHADER_COUNTERS
++# define PROFILE_SHADER_COUNTERS 1
++#endif
++
++/*
++ COMMAND_PROCESSOR_VERSION
++
++ The version of the command buffer and task manager.
++*/
++#define COMMAND_PROCESSOR_VERSION 1
++
++/*
++ gcdDUMP_KEY
++
++ Set this to a string that appears in 'cat /proc/<pid>/cmdline'. E.g. 'camera'.
++ HAL will create dumps for the processes matching this key.
++*/
++#ifndef gcdDUMP_KEY
++# define gcdDUMP_KEY "process"
++#endif
++
++/*
++ gcdDUMP_PATH
++
++ The dump file location. Some processes cannot write to the sdcard.
++ Try apps' data dir, e.g. /data/data/com.android.launcher
++*/
++#ifndef gcdDUMP_PATH
++#if defined(ANDROID)
++# define gcdDUMP_PATH "/mnt/sdcard/"
++#else
++# define gcdDUMP_PATH "./"
++#endif
++#endif
++
++/*
++ gcdDUMP
++
++ When set to 1, a dump of all states and memory uploads, as well as other
++ hardware-related execution, will be printed to the debug console. This
++ data can be used for playing back applications.
++*/
++#ifndef gcdDUMP
++# define gcdDUMP 0
++#endif
++
++/*
++ gcdDUMP_API
++
++ When set to 1, a high-level dump of the EGL and GL/VG APIs is
++ captured.
++*/
++#ifndef gcdDUMP_API
++# define gcdDUMP_API 0
++#endif
++
++/*
++ gcdDUMP_FRAMERATE
++ When set to a value other than zero, the average frame rate will be dumped.
++ The value set is the starting frame from which the average is calculated.
++ This is needed because the first few frames are sometimes too slow to be
++ included in the average. Frame count starts from 1.
++*/
++#ifndef gcdDUMP_FRAMERATE
++# define gcdDUMP_FRAMERATE 0
++#endif
++
++/*
++ gcdVIRTUAL_COMMAND_BUFFER
++ When set to 1, user command buffer and context buffer will be allocated
++ from gcvPOOL_VIRTUAL.
++*/
++#ifndef gcdVIRTUAL_COMMAND_BUFFER
++# define gcdVIRTUAL_COMMAND_BUFFER 0
++#endif
++
++/*
++ gcdENABLE_FSCALE_VAL_ADJUST
++ When non-zero, FSCALE_VAL when gcvPOWER_ON can be adjusted externally.
++ */
++#ifndef gcdENABLE_FSCALE_VAL_ADJUST
++# define gcdENABLE_FSCALE_VAL_ADJUST 1
++#endif
++
++/*
++ gcdDUMP_IN_KERNEL
++
++ When set to 1, all dumps will happen in the kernel. This is handy if
++ you want the kernel to dump its command buffers as well and the data
++ needs to be in sync.
++*/
++#ifndef gcdDUMP_IN_KERNEL
++# define gcdDUMP_IN_KERNEL 0
++#endif
++
++/*
++ gcdDUMP_COMMAND
++
++ When set to non-zero, the command queue will dump all incoming command
++ and context buffers as well as all other modifications to the command
++ queue.
++*/
++#ifndef gcdDUMP_COMMAND
++# define gcdDUMP_COMMAND 0
++#endif
++
++/*
++ gcdDUMP_FRAME_TGA
++
++ When set to a value other than 0, a dump of the frame specified by the value
++ will be done into frame.tga. Frame count starts from 1.
++ */
++#ifndef gcdDUMP_FRAME_TGA
++#define gcdDUMP_FRAME_TGA 0
++#endif
++/*
++ gcdNULL_DRIVER
++
++ Set to 1 for infinite speed hardware.
++ Set to 2 for bypassing the HAL.
++ Set to 3 for bypassing the drivers.
++*/
++#ifndef gcdNULL_DRIVER
++# define gcdNULL_DRIVER 0
++#endif
++
++/*
++ gcdENABLE_TIMEOUT_DETECTION
++
++ Enable timeout detection.
++*/
++#ifndef gcdENABLE_TIMEOUT_DETECTION
++# define gcdENABLE_TIMEOUT_DETECTION 0
++#endif
++
++/*
++ gcdCMD_BUFFER_SIZE
++
++ Number of bytes in a command buffer.
++*/
++#ifndef gcdCMD_BUFFER_SIZE
++# define gcdCMD_BUFFER_SIZE (128 << 10)
++#endif
++
++/*
++ gcdCMD_BUFFERS
++
++ Number of command buffers to use per client.
++*/
++#ifndef gcdCMD_BUFFERS
++# define gcdCMD_BUFFERS 2
++#endif
++
++/*
++ gcdMAX_CMD_BUFFERS
++
++ Maximum number of command buffers to use per client.
++*/
++#ifndef gcdMAX_CMD_BUFFERS
++# define gcdMAX_CMD_BUFFERS 8
++#endif
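A rough sketch of what these defaults imply (assuming the values above are left unchanged): each client starts with gcdCMD_BUFFERS command buffers of gcdCMD_BUFFER_SIZE bytes and may grow to gcdMAX_CMD_BUFFERS, i.e. from 2 x 128 KB = 256 KB up to 8 x 128 KB = 1 MB of command-buffer memory per client.

/* Illustrative arithmetic only, not part of the patch. */
enum {
    CMD_BUFFER_BYTES  = 128 << 10,          /* gcdCMD_BUFFER_SIZE          */
    CMD_BYTES_DEFAULT = 2 * (128 << 10),    /* gcdCMD_BUFFERS     -> 256 KB */
    CMD_BYTES_MAXIMUM = 8 * (128 << 10)     /* gcdMAX_CMD_BUFFERS -> 1 MB   */
};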
++
++/*
++ gcdCOMMAND_QUEUES
++
++ Number of command queues in the kernel.
++*/
++#ifndef gcdCOMMAND_QUEUES
++# define gcdCOMMAND_QUEUES 2
++#endif
++
++/*
++ gcdPOWER_CONTROL_DELAY
++
++ The delay in milliseconds required to wait until the GPU has woken up
++ from a suspend or power-down state. This is system dependent because
++ the bus clock also needs to stabilize.
++*/
++#ifndef gcdPOWER_CONTROL_DELAY
++# define gcdPOWER_CONTROL_DELAY 0
++#endif
++
++/*
++ gcdMIRROR_PAGETABLE
++
++ Enable it when GPUs with the old MMU and the new MMU exist on the same SoC.
++ It makes each GPU use the same virtual address to access the same physical
++ memory.
++*/
++#ifndef gcdMIRROR_PAGETABLE
++# define gcdMIRROR_PAGETABLE 0
++#endif
++
++/*
++ gcdMMU_SIZE
++
++ Size of the MMU page table in bytes. Each 4-byte entry maps 4 kB of
++ virtual address space.
++*/
++#ifndef gcdMMU_SIZE
++#if gcdMIRROR_PAGETABLE
++# define gcdMMU_SIZE 0x200000
++#else
++# define gcdMMU_SIZE (2048 << 10)
++#endif
++#endif
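Since each 4-byte page-table entry maps 4 kB, the default 2 MB table (2048 << 10, or 0x200000 in the mirrored case) covers (2 MB / 4) * 4 kB = 2 GB of GPU virtual address space. A minimal sketch of that calculation, purely for illustration:

/* Illustration only: bytes of GPU virtual address space covered by a page
 * table of 'table_bytes' bytes, with one 4-byte entry per 4 kB page. */
static inline unsigned long long mmu_covered_bytes(unsigned long table_bytes)
{
    return (unsigned long long)(table_bytes / 4) * 4096ull;   /* 2 MB -> 2 GB */
}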
++
++/*
++ gcdSECURE_USER
++
++ Use logical addresses instead of physical addresses in user land. In
++ this case a hint table is created for both command buffers and context
++ buffers, and that hint table will be used to patch up those buffers in
++ the kernel when they are ready to submit.
++*/
++#ifndef gcdSECURE_USER
++# define gcdSECURE_USER 0
++#endif
++
++/*
++ gcdSECURE_CACHE_SLOTS
++
++ Number of slots in the logical to DMA address cache table. Each time a
++ logical address needs to be translated into a DMA address for the GPU,
++ this cache will be walked. The replacement scheme is LRU.
++*/
++#ifndef gcdSECURE_CACHE_SLOTS
++# define gcdSECURE_CACHE_SLOTS 1024
++#endif
++
++/*
++ gcdSECURE_CACHE_METHOD
++
++ Replacement scheme used for Secure Cache. The following options are
++ available:
++
++ gcdSECURE_CACHE_LRU
++ A standard LRU cache.
++
++ gcdSECURE_CACHE_LINEAR
++ A linear walker with the idea that an application will always
++ render the scene in a similar way, so the next entry in the
++ cache should be a hit most of the time.
++
++ gcdSECURE_CACHE_HASH
++ A 256-entry hash table.
++
++ gcdSECURE_CACHE_TABLE
++ A simple cache but with the potential for a lot of cache replacement.
++*/
++#ifndef gcdSECURE_CACHE_METHOD
++# define gcdSECURE_CACHE_METHOD gcdSECURE_CACHE_HASH
++#endif
++
++/*
++ gcdREGISTER_ACCESS_FROM_USER
++
++ Set to 1 to allow IOCTL calls to get through from user land. This
++ should only be enabled in debug or development drops.
++*/
++#ifndef gcdREGISTER_ACCESS_FROM_USER
++# define gcdREGISTER_ACCESS_FROM_USER 1
++#endif
++
++/*
++ gcdUSER_HEAP_ALLOCATOR
++
++ Set to 1 to enable the user-mode heap allocator for fast memory allocation
++ and freeing. Otherwise, memory allocation/freeing in user mode is managed
++ directly by the system. Linux only for now.
++*/
++#ifndef gcdUSER_HEAP_ALLOCATOR
++# define gcdUSER_HEAP_ALLOCATOR 1
++#endif
++
++/*
++ gcdHEAP_SIZE
++
++ Set the allocation size for the internal heaps. Each time a heap is
++ full, a new heap will be allocated with this minimum amount of bytes.
++ The bigger this size, the fewer heaps need to be allocated and the better
++ the performance. However, a heap won't be freed until it is completely
++ empty, so a larger size may waste more memory.
++*/
++#ifndef gcdHEAP_SIZE
++# define gcdHEAP_SIZE (64 << 10)
++#endif
++
++/*
++ gcdPOWER_SUSNPEND_WHEN_IDLE
++
++ Set to 1 to make the GPU enter gcvPOWER_SUSPEND when idle is detected;
++ otherwise the GPU will enter gcvPOWER_IDLE.
++*/
++#ifndef gcdPOWER_SUSNPEND_WHEN_IDLE
++# define gcdPOWER_SUSNPEND_WHEN_IDLE 1
++#endif
++
++/*
++ gcdFPGA_BUILD
++
++ This define enables workarounds for FPGA images.
++*/
++#ifndef gcdFPGA_BUILD
++# define gcdFPGA_BUILD 0
++#endif
++
++/*
++ gcdGPU_TIMEOUT
++
++ This define specifies the number of milliseconds the system will wait
++ before it broadcasts that the GPU is stuck. In other words, it defines
++ the timeout of any operation that needs to wait for the GPU.
++
++ If the value is 0, no timeout will be checked for.
++*/
++#ifndef gcdGPU_TIMEOUT
++#if gcdFPGA_BUILD
++# define gcdGPU_TIMEOUT 0
++# else
++# define gcdGPU_TIMEOUT 20000
++# endif
++#endif
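A value of 0 means "wait forever". A hypothetical wait loop honouring that convention might look like the sketch below (the function and callback names are made up for illustration; the real driver code is not in this header):

/* Hypothetical sketch, not driver code: poll until idle, honouring a
 * timeout of 0 as "no timeout". */
static int wait_for_gpu_idle(int (*is_idle)(void),
                             void (*sleep_ms)(unsigned int),
                             unsigned int timeout_ms)
{
    unsigned int waited = 0;

    while (!is_idle())
    {
        if (timeout_ms != 0 && waited >= timeout_ms)
            return -1;      /* caller would broadcast "GPU stuck" here */

        sleep_ms(10);
        waited += 10;
    }

    return 0;
}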
++
++/*
++ gcdGPU_ADVANCETIMER
++
++ This is the advance timer.
++*/
++#ifndef gcdGPU_ADVANCETIMER
++# define gcdGPU_ADVANCETIMER 250
++#endif
++
++/*
++ gcdSTATIC_LINK
++
++ This define disables static linking.
++*/
++#ifndef gcdSTATIC_LINK
++# define gcdSTATIC_LINK 0
++#endif
++
++/*
++ gcdUSE_NEW_HEAP
++
++ Setting this define to 1 enables the new heap.
++*/
++#ifndef gcdUSE_NEW_HEAP
++# define gcdUSE_NEW_HEAP 0
++#endif
++
++/*
++ gcdCMD_NO_2D_CONTEXT
++
++ This define enables no-context 2D command buffer.
++*/
++#ifndef gcdCMD_NO_2D_CONTEXT
++# define gcdCMD_NO_2D_CONTEXT 1
++#endif
++
++/*
++ gcdENABLE_BANK_ALIGNMENT
++
++ When enabled, video memory is allocated bank aligned. The vendor can modify
++ _GetSurfaceBankAlignment() and gcoSURF_GetBankOffsetBytes() to define how
++ different types of allocations are bank and channel aligned.
++ When disabled (default), no bank alignment is done.
++*/
++#ifndef gcdENABLE_BANK_ALIGNMENT
++# define gcdENABLE_BANK_ALIGNMENT 0
++#endif
++
++/*
++ gcdBANK_BIT_START
++
++ Specifies the start bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_START
++# define gcdBANK_BIT_START 12
++#endif
++
++/*
++ gcdBANK_BIT_END
++
++ Specifies the end bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_END
++# define gcdBANK_BIT_END 14
++#endif
++
++/*
++ gcdBANK_CHANNEL_BIT
++
++ When set, bank-aligned video memory is allocated such that render and
++ depth buffer addresses alternate on the specified channel bit.
++ This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled.
++ When disabled (default), no alternation is done.
++*/
++#ifndef gcdBANK_CHANNEL_BIT
++# define gcdBANK_CHANNEL_BIT 7
++#endif
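With the defaults above, the bank is taken from address bits 12..14 (inclusive, so 8 banks) and the channel from bit 7. A small sketch of that decoding, assuming a plain physical address and purely for illustration:

/* Illustration only: extract the bank and channel fields implied by
 * gcdBANK_BIT_START = 12, gcdBANK_BIT_END = 14 and gcdBANK_CHANNEL_BIT = 7. */
static inline unsigned int bank_of(unsigned long address)
{
    return (unsigned int)((address >> 12) & 0x7);    /* 3 bits -> 8 banks */
}

static inline unsigned int channel_of(unsigned long address)
{
    return (unsigned int)((address >> 7) & 0x1);
}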
++
++/*
++ gcdDYNAMIC_SPEED
++
++ When non-zero, it tells the kernel driver to use the speed throttling
++ broadcast functions to inform the system that the GPU should be sped up
++ or slowed down. A slowdown broadcast is sent every interval, in
++ milliseconds, specified by this define
++ (gckOS_BroadcastCalibrateSpeed).
++*/
++#ifndef gcdDYNAMIC_SPEED
++# define gcdDYNAMIC_SPEED 2000
++#endif
++
++/*
++ gcdDYNAMIC_EVENT_THRESHOLD
++
++ When non-zero, it specifies the maximum number of available events at
++ which the kernel driver will issue a broadcast to speed up the GPU
++ (gckOS_BroadcastHurry).
++*/
++#ifndef gcdDYNAMIC_EVENT_THRESHOLD
++# define gcdDYNAMIC_EVENT_THRESHOLD 5
++#endif
++
++/*
++ gcdENABLE_PROFILING
++
++ Enable profiling macros.
++*/
++#ifndef gcdENABLE_PROFILING
++# define gcdENABLE_PROFILING 0
++#endif
++
++/*
++ gcdENABLE_128B_MERGE
++
++ Enable 128B merge for the BUS control.
++*/
++#ifndef gcdENABLE_128B_MERGE
++# define gcdENABLE_128B_MERGE 0
++#endif
++
++/*
++ gcdFRAME_DB
++
++ When non-zero, it specifies the number of frames inside the frame
++ database. The frame DB will collect per-frame timestamps and hardware
++ counters.
++*/
++#ifndef gcdFRAME_DB
++# define gcdFRAME_DB 0
++# define gcdFRAME_DB_RESET 0
++# define gcdFRAME_DB_NAME "/var/log/frameDB.log"
++#endif
++
++/*
++ gcdENABLE_VG
++ Enable the 2D OpenVG.
++*/
++
++#ifndef gcdENABLE_VG
++# define gcdENABLE_VG 0
++#endif
++
++/*
++ gcdDYNAMIC_MAP_RESERVED_MEMORY
++
++ When gcvPOOL_SYSTEM is constructed from RESERVED memory, the
++ driver can map the whole reserved memory into kernel space at
++ initialization, or map only the pieces it needs when accessing
++ them.
++
++ Notice:
++ - It's only for the 2D openVG. For other cores, there is
++ _NO_ need to map reserved memory to kernel.
++ - It has no effect when memory is allocated by
++ gckOS_AllocateContiguous; in that case, memory is always
++ mapped by the system when allocated.
++*/
++#ifndef gcdDYNAMIC_MAP_RESERVED_MEMORY
++# define gcdDYNAMIC_MAP_RESERVED_MEMORY 1
++#endif
++
++/*
++ gcdPAGED_MEMORY_CACHEABLE
++
++ When non-zero, paged memory will be cacheable.
++
++ Normally, the driver determines whether a video memory
++ allocation is cacheable or not. When cacheability is not necessary,
++ it will be write-combined.
++
++ This option is only for SoCs which can't enable
++ write-combining without enabling cacheability.
++*/
++
++#ifndef gcdPAGED_MEMORY_CACHEABLE
++# define gcdPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_CACHEABLE
++
++ When non-zero, non-paged memory will be cacheable.
++*/
++
++#ifndef gcdNONPAGED_MEMORY_CACHEABLE
++# define gcdNONPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_BUFFERABLE
++
++ When non-zero, non-paged memory will be bufferable.
++ gcdNONPAGED_MEMORY_BUFFERABLE and gcdNONPAGED_MEMORY_CACHEABLE
++ can't both be set to 1 at the same time.
++*/
++
++#ifndef gcdNONPAGED_MEMORY_BUFFERABLE
++# define gcdNONPAGED_MEMORY_BUFFERABLE 1
++#endif
++
++/*
++ gcdENABLE_INFINITE_SPEED_HW
++ Enable the infinite-speed HW; this is for 2D OpenVG.
++*/
++
++#ifndef gcdENABLE_INFINITE_SPEED_HW
++# define gcdENABLE_INFINITE_SPEED_HW 0
++#endif
++
++/*
++ gcdENABLE_TS_DOUBLE_BUFFER
++ Enable the TS double buffer; this is for 2D OpenVG.
++*/
++
++#ifndef gcdENABLE_TS_DOUBLE_BUFFER
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#endif
++
++/*
++ gcd6000_SUPPORT
++
++ Temporary define to enable/disable 6000 support.
++ */
++#ifndef gcd6000_SUPPORT
++# define gcd6000_SUPPORT 0
++#endif
++
++/*
++ gcdPOWEROFF_TIMEOUT
++
++ When non-zero, the GPU will power off automatically from the
++ idle state, and gcdPOWEROFF_TIMEOUT is also the default
++ timeout in milliseconds.
++ */
++
++#ifndef gcdPOWEROFF_TIMEOUT
++# define gcdPOWEROFF_TIMEOUT 300
++#endif
++
++/*
++ gcdUSE_VIDMEM_PER_PID
++*/
++#ifndef gcdUSE_VIDMEM_PER_PID
++# define gcdUSE_VIDMEM_PER_PID 0
++#endif
++
++/*
++ QNX_SINGLE_THREADED_DEBUGGING
++*/
++#ifndef QNX_SINGLE_THREADED_DEBUGGING
++# define QNX_SINGLE_THREADED_DEBUGGING 0
++#endif
++
++/*
++ gcdENABLE_RECOVERY
++
++ This define enables the recovery code.
++*/
++#ifndef gcdENABLE_RECOVERY
++# define gcdENABLE_RECOVERY 1
++#endif
++
++/*
++ gcdRENDER_THREADS
++
++ Number of render threads. If set to zero, there will be no render
++ threads.
++*/
++#ifndef gcdRENDER_THREADS
++# define gcdRENDER_THREADS 0
++#endif
++
++/*
++ gcdSMP
++
++ This define enables SMP support.
++
++ Currently, it only works on Linux/Android;
++ Kbuild will configure it according to whether
++ CONFIG_SMP is set.
++
++*/
++#ifndef gcdSMP
++# define gcdSMP 0
++#endif
++
++/*
++ gcdSUPPORT_SWAP_RECTANGLE
++
++ Support swap with a specific rectangle.
++
++ Set the rectangle with the eglSetSwapRectangleANDROID API.
++*/
++#ifndef gcdSUPPORT_SWAP_RECTANGLE
++# define gcdSUPPORT_SWAP_RECTANGLE 0
++#endif
++
++/*
++ gcdGPU_LINEAR_BUFFER_ENABLED
++
++ Use linear buffer for GPU apps so HWC can do 2D composition.
++*/
++#ifndef gcdGPU_LINEAR_BUFFER_ENABLED
++# define gcdGPU_LINEAR_BUFFER_ENABLED 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW
++
++ Enable the Render-Into-Window (i.e., No-Resolve) feature on Android.
++ NOTE that even if enabled, it still depends on the hardware feature and
++ Android application behavior. When the hardware feature or application
++ behavior cannot support render-into-window mode, it will fall back
++ to normal mode.
++ When Render-Into-Window is actually used, the window back buffer of Android
++ applications will be allocated to match the render target tiling format.
++ Otherwise, buffer tiling is decided by the option
++ 'gcdGPU_LINEAR_BUFFER_ENABLED' above.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW
++# define gcdENABLE_RENDER_INTO_WINDOW 1
++#endif
++
++/*
++ gcdSHARED_RESOLVE_BUFFER_ENABLED
++
++ Use shared resolve buffer for all app buffers.
++*/
++#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED
++# define gcdSHARED_RESOLVE_BUFFER_ENABLED 0
++#endif
++
++/*
++ gcdUSE_TRIANGLE_STRIP_PATCH
++ */
++#ifndef gcdUSE_TRIANGLE_STRIP_PATCH
++# define gcdUSE_TRIANGLE_STRIP_PATCH 1
++#endif
++
++/*
++ gcdENABLE_OUTER_CACHE_PATCH
++
++ Enable the outer cache patch.
++*/
++#ifndef gcdENABLE_OUTER_CACHE_PATCH
++# define gcdENABLE_OUTER_CACHE_PATCH 0
++#endif
++
++#ifndef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# ifdef ANDROID
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 1
++# else
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 0
++# endif
++#endif
++
++#ifndef gcdENABLE_PE_DITHER_FIX
++# define gcdENABLE_PE_DITHER_FIX 1
++#endif
++
++#ifndef gcdSHARED_PAGETABLE
++# define gcdSHARED_PAGETABLE 1
++#endif
++#ifndef gcdUSE_PVR
++# define gcdUSE_PVR 1
++#endif
++
++/*
++ gcdSMALL_BLOCK_SIZE
++
++ When non-zero, a part of VIDMEM will be reserved for requests
++ whose size is less than gcdSMALL_BLOCK_SIZE.
++
++ For Linux, it's the size of a page. If such a request falls back
++ to gcvPOOL_CONTIGUOUS or gcvPOOL_VIRTUAL, memory will be wasted
++ because those pools allocate at least a page.
++ */
++#ifndef gcdSMALL_BLOCK_SIZE
++# define gcdSMALL_BLOCK_SIZE 4096
++# define gcdRATIO_FOR_SMALL_MEMORY 32
++#endif
++
++/*
++ gcdCONTIGUOUS_SIZE_LIMIT
++ When non-zero, the size of a video node from gcvPOOL_CONTIGUOUS is
++ limited to gcdCONTIGUOUS_SIZE_LIMIT.
++ */
++#ifndef gcdCONTIGUOUS_SIZE_LIMIT
++# define gcdCONTIGUOUS_SIZE_LIMIT 0
++#endif
++
++#ifndef gcdDISALBE_EARLY_EARLY_Z
++# define gcdDISALBE_EARLY_EARLY_Z 1
++#endif
++
++#ifndef gcdSHADER_SRC_BY_MACHINECODE
++# define gcdSHADER_SRC_BY_MACHINECODE 1
++#endif
++
++/*
++ gcdLINK_QUEUE_SIZE
++
++ When non-zero, the driver maintains a queue recording information about
++ the most recently linked context buffers and command buffers. Data in
++ this queue is used for debugging.
++*/
++#ifndef gcdLINK_QUEUE_SIZE
++# define gcdLINK_QUEUE_SIZE 0
++#endif
++
++/* gcdALPHA_KILL_IN_SHADER
++ *
++ * Enable alpha kill inside the shader. This will be set automatically by the
++ * HAL if certain states match the criteria.
++ */
++#ifndef gcdALPHA_KILL_IN_SHADER
++# define gcdALPHA_KILL_IN_SHADER 1
++#endif
++
++/* gcdHIGH_PRECISION_DELAY_ENABLE
++ *
++ * Enable high-precision schedule delay with a 1 ms unit; otherwise the schedule delay can be up to 10 ms.
++ * Browser app performance drops noticeably without this enabled.
++ */
++#ifndef gcdHIGH_PRECISION_DELAY_ENABLE
++# define gcdHIGH_PRECISION_DELAY_ENABLE 1
++#endif
++
++#ifndef gcdUSE_WCLIP_PATCH
++# define gcdUSE_WCLIP_PATCH 1
++#endif
++
++#ifndef gcdHZ_L2_DISALBE
++# define gcdHZ_L2_DISALBE 1
++#endif
++
++#ifndef gcdBUGFIX15_DISABLE
++# define gcdBUGFIX15_DISABLE 1
++#endif
++
++#ifndef gcdDISABLE_HZ_FAST_CLEAR
++# define gcdDISABLE_HZ_FAST_CLEAR 1
++#endif
++
++#ifndef gcdUSE_NPOT_PATCH
++#define gcdUSE_NPOT_PATCH 1
++#endif
++
++#ifndef gcdSYNC
++# define gcdSYNC 1
++#endif
++
++#ifndef gcdENABLE_SPECIAL_HINT3
++# define gcdENABLE_SPECIAL_HINT3 1
++#endif
++
++#if defined(ANDROID)
++#ifndef gcdPRE_ROTATION
++# define gcdPRE_ROTATION 1
++#endif
++#endif
++
++/*
++ gcdDVFS
++
++ When non-zero, software will make use of the dynamic voltage and
++ frequency scaling (DVFS) feature.
++ */
++#ifndef gcdDVFS
++# define gcdDVFS 0
++# define gcdDVFS_ANAYLSE_WINDOW 4
++# define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4)
++#endif
++
++/*
++ gcdANDROID_NATIVE_FENCE_SYNC
++
++ Enable Android native fence sync. It was introduced in Jelly Bean 4.2.
++ Depends on linux kernel option: CONFIG_SYNC.
++
++ 0: Disabled
++ 1: Build framework for native fence sync feature, and EGL extension
++ 2: Enable async swap buffers for client
++ * Native fence sync for client 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for layer in compositor side.
++ 3: Enable async hwcomposer composition.
++ * 'releaseFenceFd' for layer in compositor side, which is native
++ fence sync when client 'dequeueBuffer'
++ * Native fence sync for compositor 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for framebuffer target for DC
++ */
++#ifndef gcdANDROID_NATIVE_FENCE_SYNC
++# define gcdANDROID_NATIVE_FENCE_SYNC 0
++#endif
++
++#ifndef gcdFORCE_MIPMAP
++# define gcdFORCE_MIPMAP 0
++#endif
++
++/*
++ gcdFORCE_GAL_LOAD_TWICE
++
++ When non-zero, each thread except the main one will load libGAL.so twice to avoid a potential segmentation fault when the app uses dlopen/dlclose.
++ If threads exit arbitrarily, libGAL.so may not be unloaded until the process quits.
++ */
++#ifndef gcdFORCE_GAL_LOAD_TWICE
++# define gcdFORCE_GAL_LOAD_TWICE 0
++#endif
++
++#endif /* __gc_hal_options_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_profiler.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,584 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_profiler_h_
++#define __gc_hal_profiler_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define GLVERTEX_OBJECT 10
++#define GLVERTEX_OBJECT_BYTES 11
++
++#define GLINDEX_OBJECT 20
++#define GLINDEX_OBJECT_BYTES 21
++
++#define GLTEXTURE_OBJECT 30
++#define GLTEXTURE_OBJECT_BYTES 31
++
++#if VIVANTE_PROFILER
++#define gcmPROFILE_GC(Enum, Value) gcoPROFILER_Count(gcvNULL, Enum, Value)
++#else
++#define gcmPROFILE_GC(Enum, Value) do { } while (gcvFALSE)
++#endif
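A minimal usage sketch of gcmPROFILE_GC (the helper function below is hypothetical, for illustration only): it forwards to gcoPROFILER_Count when VIVANTE_PROFILER is enabled and compiles to a no-op otherwise.

/* Hypothetical example: count one vertex-buffer object and its byte size. */
static void profile_vertex_upload(gctINT Bytes)
{
    gcmPROFILE_GC(GLVERTEX_OBJECT, 1);
    gcmPROFILE_GC(GLVERTEX_OBJECT_BYTES, Bytes);
}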
++
++#ifndef gcdNEW_PROFILER_FILE
++#define gcdNEW_PROFILER_FILE 1
++#endif
++
++#define ES11_CALLS 151
++#define ES11_DRAWCALLS (ES11_CALLS + 1)
++#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1)
++#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1)
++#define ES11_LINECOUNT (ES11_POINTCOUNT + 1)
++#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1)
++
++#define ES20_CALLS 159
++#define ES20_DRAWCALLS (ES20_CALLS + 1)
++#define ES20_STATECHANGECALLS (ES20_DRAWCALLS + 1)
++#define ES20_POINTCOUNT (ES20_STATECHANGECALLS + 1)
++#define ES20_LINECOUNT (ES20_POINTCOUNT + 1)
++#define ES20_TRIANGLECOUNT (ES20_LINECOUNT + 1)
++
++#define VG11_CALLS 88
++#define VG11_DRAWCALLS (VG11_CALLS + 1)
++#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1)
++#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1)
++#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1)
++/* End of Driver API ID Definitions. */
++
++/* HAL & MISC IDs. */
++#define HAL_VERTBUFNEWBYTEALLOC 1
++#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1)
++#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1)
++#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1)
++#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1)
++#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1)
++#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1)
++#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1)
++#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1)
++#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1)
++#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1)
++#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1)
++
++#define GPU_CYCLES 1
++#define GPU_READ64BYTE (GPU_CYCLES + 1)
++#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1)
++#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1)
++#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1)
++
++#define VS_INSTCOUNT 1
++#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1)
++#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1)
++#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1)
++#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1)
++
++#define PS_INSTCOUNT 1
++#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1)
++#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1)
++#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1)
++#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1)
++
++#define PA_INVERTCOUNT 1
++#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1)
++#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1)
++#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1)
++#define PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1)
++#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1)
++
++#define SE_TRIANGLECOUNT 1
++#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1)
++
++#define RA_VALIDPIXCOUNT 1
++#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1)
++#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1)
++#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1)
++#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1)
++#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1)
++#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1)
++
++#define TX_TOTBILINEARREQ 1
++#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1)
++#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1)
++#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1)
++#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1)
++#define TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1)
++#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1)
++#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1)
++#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1)
++
++#define PE_KILLEDBYCOLOR 1
++#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1)
++#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1)
++#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1)
++
++#define MC_READREQ8BPIPE 1
++#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1)
++#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1)
++
++#define AXI_READREQSTALLED 1
++#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1)
++#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1)
++
++#define PVS_INSTRCOUNT 1
++#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1)
++#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1)
++#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1)
++#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1)
++#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1)
++#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1)
++
++#define PPS_INSTRCOUNT 1
++#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1)
++#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1)
++#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1)
++#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1)
++#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1)
++#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1)
++/* End of MISC Counter IDs. */
++
++#ifdef gcdNEW_PROFILER_FILE
++
++/* Category Constants. */
++#define VPHEADER 0x010000
++#define VPG_INFO 0x020000
++#define VPG_TIME 0x030000
++#define VPG_MEM 0x040000
++#define VPG_ES11 0x050000
++#define VPG_ES20 0x060000
++#define VPG_VG11 0x070000
++#define VPG_HAL 0x080000
++#define VPG_HW 0x090000
++#define VPG_GPU 0x0a0000
++#define VPG_VS 0x0b0000
++#define VPG_PS 0x0c0000
++#define VPG_PA 0x0d0000
++#define VPG_SETUP 0x0e0000
++#define VPG_RA 0x0f0000
++#define VPG_TX 0x100000
++#define VPG_PE 0x110000
++#define VPG_MC 0x120000
++#define VPG_AXI 0x130000
++#define VPG_PROG 0x140000
++#define VPG_PVS 0x150000
++#define VPG_PPS 0x160000
++#define VPG_ES11_TIME 0x170000
++#define VPG_ES20_TIME 0x180000
++#define VPG_FRAME 0x190000
++#define VPG_ES11_DRAW 0x200000
++#define VPG_ES20_DRAW 0x210000
++#define VPG_END 0xff0000
++
++/* Info. */
++#define VPC_INFOCOMPANY (VPG_INFO + 1)
++#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1)
++#define VPC_INFORENDERER (VPC_INFOVERSION + 1)
++#define VPC_INFOREVISION (VPC_INFORENDERER + 1)
++#define VPC_INFODRIVER (VPC_INFOREVISION + 1)
++#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1)
++#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1)
++
++/* Counter Constants. */
++#define VPC_ELAPSETIME (VPG_TIME + 1)
++#define VPC_CPUTIME (VPC_ELAPSETIME + 1)
++
++#define VPC_MEMMAXRES (VPG_MEM + 1)
++#define VPC_MEMSHARED (VPC_MEMMAXRES + 1)
++#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1)
++#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1)
++
++/* OpenGL ES11 Statistics Counter IDs. */
++#define VPC_ES11CALLS (VPG_ES11 + ES11_CALLS)
++#define VPC_ES11DRAWCALLS (VPG_ES11 + ES11_DRAWCALLS)
++#define VPC_ES11STATECHANGECALLS (VPG_ES11 + ES11_STATECHANGECALLS)
++#define VPC_ES11POINTCOUNT (VPG_ES11 + ES11_POINTCOUNT)
++#define VPC_ES11LINECOUNT (VPG_ES11 + ES11_LINECOUNT)
++#define VPC_ES11TRIANGLECOUNT (VPG_ES11 + ES11_TRIANGLECOUNT)
++
++/* OpenGL ES20 Statistics Counter IDs. */
++#define VPC_ES20CALLS (VPG_ES20 + ES20_CALLS)
++#define VPC_ES20DRAWCALLS (VPG_ES20 + ES20_DRAWCALLS)
++#define VPC_ES20STATECHANGECALLS (VPG_ES20 + ES20_STATECHANGECALLS)
++#define VPC_ES20POINTCOUNT (VPG_ES20 + ES20_POINTCOUNT)
++#define VPC_ES20LINECOUNT (VPG_ES20 + ES20_LINECOUNT)
++#define VPC_ES20TRIANGLECOUNT (VPG_ES20 + ES20_TRIANGLECOUNT)
++
++/* OpenVG Statistics Counter IDs. */
++#define VPC_VG11CALLS (VPG_VG11 + VG11_CALLS)
++#define VPC_VG11DRAWCALLS (VPG_VG11 + VG11_DRAWCALLS)
++#define VPC_VG11STATECHANGECALLS (VPG_VG11 + VG11_STATECHANGECALLS)
++#define VPC_VG11FILLCOUNT (VPG_VG11 + VG11_FILLCOUNT)
++#define VPC_VG11STROKECOUNT (VPG_VG11 + VG11_STROKECOUNT)
++
++/* HAL Counters. */
++#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC)
++#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC)
++#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC)
++#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC)
++#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC)
++#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC)
++#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC)
++#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC)
++#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC)
++#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC)
++#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC)
++#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC)
++
++/* HW: GPU Counters. */
++#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES)
++#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE)
++#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE)
++#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES)
++#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES)
++
++/* HW: Shader Counters. */
++#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT)
++#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT)
++#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT)
++#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT)
++/* HW: PS Count. */
++#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT)
++#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT)
++#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT)
++#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT)
++
++
++/* HW: PA Counters. */
++#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT)
++#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT)
++#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT)
++#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT)
++#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT)
++#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT)
++
++/* HW: Setup Counters. */
++#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT)
++#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT)
++
++/* HW: RA Counters. */
++#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT)
++#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT)
++#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ)
++#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT)
++#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT)
++#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT)
++#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT)
++
++/* HW: TEX Counters. */
++#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ)
++#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ)
++#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ)
++#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ)
++#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT)
++#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT)
++#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT)
++#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT)
++#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT)
++
++/* HW: PE Counters. */
++#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR)
++#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH)
++#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR)
++#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH)
++
++/* HW: MC Counters. */
++#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE)
++#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP)
++#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE)
++
++/* HW: AXI Counters. */
++#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED)
++#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED)
++#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED)
++
++/* PROGRAM: Shader program counters. */
++#define VPC_PVSINSTRCOUNT (VPG_PVS + PVS_INSTRCOUNT)
++#define VPC_PVSALUINSTRCOUNT (VPG_PVS + PVS_ALUINSTRCOUNT)
++#define VPC_PVSTEXINSTRCOUNT (VPG_PVS + PVS_TEXINSTRCOUNT)
++#define VPC_PVSATTRIBCOUNT (VPG_PVS + PVS_ATTRIBCOUNT)
++#define VPC_PVSUNIFORMCOUNT (VPG_PVS + PVS_UNIFORMCOUNT)
++#define VPC_PVSFUNCTIONCOUNT (VPG_PVS + PVS_FUNCTIONCOUNT)
++#define VPC_PVSSOURCE (VPG_PVS + PVS_SOURCE)
++
++#define VPC_PPSINSTRCOUNT (VPG_PPS + PPS_INSTRCOUNT)
++#define VPC_PPSALUINSTRCOUNT (VPG_PPS + PPS_ALUINSTRCOUNT)
++#define VPC_PPSTEXINSTRCOUNT (VPG_PPS + PPS_TEXINSTRCOUNT)
++#define VPC_PPSATTRIBCOUNT (VPG_PPS + PPS_ATTRIBCOUNT)
++#define VPC_PPSUNIFORMCOUNT (VPG_PPS + PPS_UNIFORMCOUNT)
++#define VPC_PPSFUNCTIONCOUNT (VPG_PPS + PPS_FUNCTIONCOUNT)
++#define VPC_PPSSOURCE (VPG_PPS + PPS_SOURCE)
++
++#define VPC_PROGRAMHANDLE (VPG_PROG + 1)
++
++#define VPG_ES20_DRAW_NO (VPG_ES20_DRAW + 1)
++#define VPG_ES11_DRAW_NO (VPG_ES11_DRAW + 1)
++
++#define VPG_FRAME_USEVBO (VPG_FRAME + 1)
++
++#endif
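Each VPC_* identifier is formed by adding a small per-category counter index to a VPG_* category constant held in the upper bits, so a reader of the profile stream can split an ID back into its parts. A sketch of that split, assuming the layout above (illustration only):

/* Illustration only: recover the VPG_* category and the per-category
 * counter index from a combined VPC_* identifier. */
#define VP_CATEGORY(Id)   ((Id) & 0xff0000)
#define VP_COUNTER(Id)    ((Id) & 0x00ffff)
/* e.g. VP_CATEGORY(VPC_ES20DRAWCALLS) == VPG_ES20 and
 *      VP_COUNTER(VPC_ES20DRAWCALLS)  == ES20_DRAWCALLS. */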
++
++
++/* HW profile information. */
++typedef struct _gcsPROFILER_COUNTERS
++{
++ /* HW static counters. */
++ gctUINT32 gpuClock;
++ gctUINT32 axiClock;
++ gctUINT32 shaderClock;
++
++ /* HW variable counters. */
++ gctUINT32 gpuClockStart;
++ gctUINT32 gpuClockEnd;
++
++ /* HW variable counters. */
++ gctUINT32 gpuCyclesCounter;
++ gctUINT32 gpuTotalCyclesCounter;
++ gctUINT32 gpuIdleCyclesCounter;
++ gctUINT32 gpuTotalRead64BytesPerFrame;
++ gctUINT32 gpuTotalWrite64BytesPerFrame;
++
++ /* PE */
++ gctUINT32 pe_pixel_count_killed_by_color_pipe;
++ gctUINT32 pe_pixel_count_killed_by_depth_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_color_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_depth_pipe;
++
++ /* SH */
++ gctUINT32 ps_inst_counter;
++ gctUINT32 rendered_pixel_counter;
++ gctUINT32 vs_inst_counter;
++ gctUINT32 rendered_vertice_counter;
++ gctUINT32 vtx_branch_inst_counter;
++ gctUINT32 vtx_texld_inst_counter;
++ gctUINT32 pxl_branch_inst_counter;
++ gctUINT32 pxl_texld_inst_counter;
++
++ /* PA */
++ gctUINT32 pa_input_vtx_counter;
++ gctUINT32 pa_input_prim_counter;
++ gctUINT32 pa_output_prim_counter;
++ gctUINT32 pa_depth_clipped_counter;
++ gctUINT32 pa_trivial_rejected_counter;
++ gctUINT32 pa_culled_counter;
++
++ /* SE */
++ gctUINT32 se_culled_triangle_count;
++ gctUINT32 se_culled_lines_count;
++
++ /* RA */
++ gctUINT32 ra_valid_pixel_count;
++ gctUINT32 ra_total_quad_count;
++ gctUINT32 ra_valid_quad_count_after_early_z;
++ gctUINT32 ra_total_primitive_count;
++ gctUINT32 ra_pipe_cache_miss_counter;
++ gctUINT32 ra_prefetch_cache_miss_counter;
++ gctUINT32 ra_eez_culled_counter;
++
++ /* TX */
++ gctUINT32 tx_total_bilinear_requests;
++ gctUINT32 tx_total_trilinear_requests;
++ gctUINT32 tx_total_discarded_texture_requests;
++ gctUINT32 tx_total_texture_requests;
++ gctUINT32 tx_mem_read_count;
++ gctUINT32 tx_mem_read_in_8B_count;
++ gctUINT32 tx_cache_miss_count;
++ gctUINT32 tx_cache_hit_texel_count;
++ gctUINT32 tx_cache_miss_texel_count;
++
++ /* MC */
++ gctUINT32 mc_total_read_req_8B_from_pipeline;
++ gctUINT32 mc_total_read_req_8B_from_IP;
++ gctUINT32 mc_total_write_req_8B_from_pipeline;
++
++ /* HI */
++ gctUINT32 hi_axi_cycles_read_request_stalled;
++ gctUINT32 hi_axi_cycles_write_request_stalled;
++ gctUINT32 hi_axi_cycles_write_data_stalled;
++}
++gcsPROFILER_COUNTERS;
++
++/* HAL profile information. */
++typedef struct _gcsPROFILER
++{
++ gctUINT32 enable;
++ gctBOOL enableHal;
++ gctBOOL enableHW;
++ gctBOOL enableSH;
++ gctBOOL isSyncMode;
++
++ gctBOOL useSocket;
++ gctINT sockFd;
++
++ gctFILE file;
++
++ /* Aggregate Information */
++
++ /* Clock Info */
++ gctUINT64 frameStart;
++ gctUINT64 frameEnd;
++
++ /* Current frame information */
++ gctUINT32 frameNumber;
++ gctUINT64 frameStartTimeusec;
++ gctUINT64 frameEndTimeusec;
++ gctUINT64 frameStartCPUTimeusec;
++ gctUINT64 frameEndCPUTimeusec;
++
++#if PROFILE_HAL_COUNTERS
++ gctUINT32 vertexBufferTotalBytesAlloc;
++ gctUINT32 vertexBufferNewBytesAlloc;
++ int vertexBufferTotalObjectsAlloc;
++ int vertexBufferNewObjectsAlloc;
++
++ gctUINT32 indexBufferTotalBytesAlloc;
++ gctUINT32 indexBufferNewBytesAlloc;
++ int indexBufferTotalObjectsAlloc;
++ int indexBufferNewObjectsAlloc;
++
++ gctUINT32 textureBufferTotalBytesAlloc;
++ gctUINT32 textureBufferNewBytesAlloc;
++ int textureBufferTotalObjectsAlloc;
++ int textureBufferNewObjectsAlloc;
++
++ gctUINT32 numCommits;
++ gctUINT32 drawPointCount;
++ gctUINT32 drawLineCount;
++ gctUINT32 drawTriangleCount;
++ gctUINT32 drawVertexCount;
++ gctUINT32 redundantStateChangeCalls;
++#endif
++
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++
++ char* psSource;
++ char* vsSource;
++
++}
++gcsPROFILER;
++
++/* Memory profile information. */
++struct _gcsMemProfile
++{
++ /* Memory Usage */
++ gctUINT32 videoMemUsed;
++ gctUINT32 systemMemUsed;
++ gctUINT32 commitBufferSize;
++ gctUINT32 contextBufferCopyBytes;
++};
++
++/* Shader profile information. */
++struct _gcsSHADER_PROFILER
++{
++ gctUINT32 shaderLength;
++ gctUINT32 shaderALUCycles;
++ gctUINT32 shaderTexLoadCycles;
++ gctUINT32 shaderTempRegCount;
++ gctUINT32 shaderSamplerRegCount;
++ gctUINT32 shaderInputRegCount;
++ gctUINT32 shaderOutputRegCount;
++};
++
++/* Initialize the gcsProfiler. */
++gceSTATUS
++gcoPROFILER_Initialize(
++ IN gcoHAL Hal
++ );
++
++/* Destroy the gcProfiler. */
++gceSTATUS
++gcoPROFILER_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Write data to profiler. */
++gceSTATUS
++gcoPROFILER_Write(
++ IN gcoHAL Hal,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data out. */
++gceSTATUS
++gcoPROFILER_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of frame. */
++gceSTATUS
++gcoPROFILER_EndFrame(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of draw. */
++gceSTATUS
++gcoPROFILER_EndDraw(
++ IN gcoHAL Hal,
++ IN gctBOOL FirstDraw
++ );
++
++/* Increase profile counter Enum by Value. */
++gceSTATUS
++gcoPROFILER_Count(
++ IN gcoHAL Hal,
++ IN gctUINT32 Enum,
++ IN gctINT Value
++ );
++
++gceSTATUS
++gcoPROFILER_ShaderSourceFS(
++ IN gcoHAL Hal,
++ IN char* source
++ );
++
++gceSTATUS
++gcoPROFILER_ShaderSourceVS(
++ IN gcoHAL Hal,
++ IN char* source
++ );
++
++/* Profile input vertex shader. */
++gceSTATUS
++gcoPROFILER_ShaderVS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Vs
++ );
++
++/* Profile input fragment shader. */
++gceSTATUS
++gcoPROFILER_ShaderFS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Fs
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_profiler_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_raster.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,1010 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_raster_h_
++#define __gc_hal_raster_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoBRUSH * gcoBRUSH;
++typedef struct _gcoBRUSH_CACHE * gcoBRUSH_CACHE;
++
++/******************************************************************************\
++******************************** gcoBRUSH Object *******************************
++\******************************************************************************/
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructSingleColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructMonochrome(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Destroy a gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_Destroy(
++ IN gcoBRUSH Brush
++ );
++
++/******************************************************************************\
++******************************** gcoSURF Object *******************************
++\******************************************************************************/
++
++/* Set clipping rectangle. */
++gceSTATUS
++gcoSURF_SetClipping(
++ IN gcoSURF Surface
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gcoSURF_Clear2D(
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 LoColor,
++ IN gctUINT32 HiColor
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gcoSURF_Line(
++ IN gcoSURF Surface,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Generic rectangular blit. */
++gceSTATUS
++gcoSURF_Blit(
++ IN OPTIONAL gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN OPTIONAL gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN OPTIONAL gceSURF_TRANSPARENCY Transparency,
++ IN OPTIONAL gctUINT32 TransparencyColor,
++ IN OPTIONAL gctPOINTER Mask,
++ IN OPTIONAL gceSURF_MONOPACK MaskPack
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gcoSURF_MonoBlit(
++ IN gcoSURF DestSurface,
++ IN gctPOINTER Source,
++ IN gceSURF_MONOPACK SourcePack,
++ IN gcsPOINT_PTR SourceSize,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Filter blit. */
++gceSTATUS
++gcoSURF_FilterBlit(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gcoSURF_EnableAlphaBlend(
++ IN gcoSURF Surface,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gcoSURF_DisableAlphaBlend(
++ IN gcoSURF Surface
++ );
++
++/* Copy a rectangular area with format conversion. */
++gceSTATUS
++gcoSURF_CopyPixels(
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++/* Read surface pixel. */
++gceSTATUS
++gcoSURF_ReadPixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ OUT gctPOINTER PixelValue
++ );
++
++/* Write surface pixel. */
++gceSTATUS
++gcoSURF_WritePixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ IN gctPOINTER PixelValue
++ );
++
++gceSTATUS
++gcoSURF_SetDither(
++ IN gcoSURF Surface,
++ IN gctBOOL Dither
++ );
++/******************************************************************************\
++********************************** gco2D Object *********************************
++\******************************************************************************/
++
++/* Construct a new gco2D object. */
++gceSTATUS
++gco2D_Construct(
++ IN gcoHAL Hal,
++ OUT gco2D * Hardware
++ );
++
++/* Destroy a gco2D object. */
++gceSTATUS
++gco2D_Destroy(
++ IN gco2D Hardware
++ );
++
++/* Sets the maximum number of brushes in the brush cache. */
++gceSTATUS
++gco2D_SetBrushLimit(
++ IN gco2D Hardware,
++ IN gctUINT MaxCount
++ );
++
++/* Flush the brush. */
++gceSTATUS
++gco2D_FlushBrush(
++ IN gco2D Engine,
++ IN gcoBRUSH Brush,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Program the specified solid color brush. */
++gceSTATUS
++gco2D_LoadSolidBrush(
++ IN gco2D Engine,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask
++ );
++
++/* Configure monochrome source. */
++gceSTATUS
++gco2D_SetMonochromeSource(
++ IN gco2D Engine,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_MONOPACK DataPack,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source extension for full rotation. */
++gceSTATUS
++gco2D_SetColorSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSourceAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative
++ );
++
++gceSTATUS
++gco2D_SetColorSourceN(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctUINT32 SurfaceNumber
++ );
++
++/* Configure masked color source. */
++gceSTATUS
++gco2D_SetMaskedSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack
++ );
++
++/* Configure masked color source extension for full rotation. */
++gceSTATUS
++gco2D_SetMaskedSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Setup the source rectangle. */
++gceSTATUS
++gco2D_SetSource(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect
++ );
++
++/* Set clipping rectangle. */
++gceSTATUS
++gco2D_SetClipping(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++/* Configure destination. */
++gceSTATUS
++gco2D_SetTarget(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth
++ );
++
++/* Configure destination extension for full rotation. */
++gceSTATUS
++gco2D_SetTargetEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Calculate and program the stretch factors. */
++gceSTATUS
++gco2D_CalcStretchFactor(
++ IN gco2D Engine,
++ IN gctINT32 SrcSize,
++ IN gctINT32 DestSize,
++ OUT gctUINT32_PTR Factor
++ );
++
++gceSTATUS
++gco2D_SetStretchFactors(
++ IN gco2D Engine,
++ IN gctUINT32 HorFactor,
++ IN gctUINT32 VerFactor
++ );
++
++/* Calculate and program the stretch factors based on the rectangles. */
++gceSTATUS
++gco2D_SetStretchRectFactors(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect
++ );
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructSingleColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gco2D_Clear(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gco2D_Line(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines based on the 32-bit color. */
++gceSTATUS
++gco2D_ColorLine(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Generic blit. */
++gceSTATUS
++gco2D_Blit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_Blend(
++ IN gco2D Engine,
++ IN gctUINT32 SrcCount,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Batch blit. */
++gceSTATUS
++gco2D_BatchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Stretch blit. */
++gceSTATUS
++gco2D_StretchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gco2D_MonoBlit(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gcsPOINT_PTR StreamSize,
++ IN gcsRECT_PTR StreamRect,
++ IN gceSURF_MONOPACK SrcStreamPack,
++ IN gceSURF_MONOPACK DestStreamPack,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 FgRop,
++ IN gctUINT32 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_MonoBlitEx(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gctINT32 StreamStride,
++ IN gctINT32 StreamWidth,
++ IN gctINT32 StreamHeight,
++ IN gctINT32 StreamX,
++ IN gctINT32 StreamY,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DstRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Set kernel size. */
++gceSTATUS
++gco2D_SetKernelSize(
++ IN gco2D Engine,
++ IN gctUINT8 HorKernelSize,
++ IN gctUINT8 VerKernelSize
++ );
++
++/* Set filter type. */
++gceSTATUS
++gco2D_SetFilterType(
++ IN gco2D Engine,
++ IN gceFILTER_TYPE FilterType
++ );
++
++/* Set the filter kernel by user. */
++gceSTATUS
++gco2D_SetUserFilterKernel(
++ IN gco2D Engine,
++ IN gceFILTER_PASS_TYPE PassType,
++ IN gctUINT16_PTR KernelArray
++ );
++
++/* Select the pass(es) to be done for user defined filter. */
++gceSTATUS
++gco2D_EnableUserFilterPasses(
++ IN gco2D Engine,
++ IN gctBOOL HorPass,
++ IN gctBOOL VerPass
++ );
++
++/* Frees the temporary buffer allocated by filter blit operation. */
++gceSTATUS
++gco2D_FreeFilterBuffer(
++ IN gco2D Engine
++ );
++
++/* Filter blit. */
++gceSTATUS
++gco2D_FilterBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Filter blit extension for full rotation. */
++gceSTATUS
++gco2D_FilterBlitEx(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++gceSTATUS
++gco2D_FilterBlitEx2(
++ IN gco2D Engine,
++ IN gctUINT32_PTR SrcAddresses,
++ IN gctUINT32 SrcAddressNum,
++ IN gctUINT32_PTR SrcStrides,
++ IN gctUINT32 SrcStrideNum,
++ IN gceTILING SrcTiling,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32_PTR DestAddresses,
++ IN gctUINT32 DestAddressNum,
++ IN gctUINT32_PTR DestStrides,
++ IN gctUINT32 DestStrideNum,
++ IN gceTILING DestTiling,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gco2D_EnableAlphaBlend(
++ IN gco2D Engine,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Enable alpha blending engine in the hardware. */
++gceSTATUS
++gco2D_EnableAlphaBlendAdvanced(
++ IN gco2D Engine,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode
++ );
++
++/* Enable alpha blending engine with Porter Duff rule. */
++gceSTATUS
++gco2D_SetPorterDuffBlending(
++ IN gco2D Engine,
++ IN gce2D_PORTER_DUFF_RULE Rule
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gco2D_DisableAlphaBlend(
++ IN gco2D Engine
++ );
++
++/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */
++gctUINT32
++gco2D_GetMaximumDataCount(
++ void
++ );
++
++/* Retrieve the maximum number of rectangles that can be passed in a single DE command. */
++gctUINT32
++gco2D_GetMaximumRectCount(
++ void
++ );
++
++/* Returns the pixel alignment of the surface. */
++gceSTATUS
++gco2D_GetPixelAlignment(
++ gceSURF_FORMAT Format,
++ gcsPOINT_PTR Alignment
++ );
++
++/* Retrieve monochrome stream pack size. */
++gceSTATUS
++gco2D_GetPackSize(
++ IN gceSURF_MONOPACK StreamPack,
++ OUT gctUINT32 * PackWidth,
++ OUT gctUINT32 * PackHeight
++ );
++
++/* Flush the 2D pipeline. */
++gceSTATUS
++gco2D_Flush(
++ IN gco2D Engine
++ );
++
++/* Load 256-entry color table for INDEX8 source surfaces. */
++gceSTATUS
++gco2D_LoadPalette(
++ IN gco2D Engine,
++ IN gctUINT FirstIndex,
++ IN gctUINT IndexCount,
++ IN gctPOINTER ColorTable,
++ IN gctBOOL ColorConvert
++ );
++
++/* Enable/disable 2D BitBlt mirroring. */
++gceSTATUS
++gco2D_SetBitBlitMirror(
++ IN gco2D Engine,
++ IN gctBOOL HorizontalMirror,
++ IN gctBOOL VerticalMirror
++ );
++
++/*
++ * Set the transparency for source, destination and pattern.
++ * It also enables or disables the DFB color key mode.
++ */
++gceSTATUS
++gco2D_SetTransparencyAdvancedEx(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency,
++ IN gctBOOL EnableDFBColorKeyMode
++ );
++
++/* Set the transparency for source, destination and pattern. */
++gceSTATUS
++gco2D_SetTransparencyAdvanced(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency
++ );
++
++/* Set the source color key. */
++gceSTATUS
++gco2D_SetSourceColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the source color key range. */
++gceSTATUS
++gco2D_SetSourceColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the target color key. */
++gceSTATUS
++gco2D_SetTargetColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the target color key range. */
++gceSTATUS
++gco2D_SetTargetColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the YUV color space mode. */
++gceSTATUS
++gco2D_SetYUVColorMode(
++ IN gco2D Engine,
++ IN gce2D_YUV_COLOR_MODE Mode
++ );
++
++/* Setup the source global color value in ARGB8 format. */
++gceSTATUS
++gco2D_SetSourceGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the target global color value in ARGB8 format. */
++gceSTATUS
++gco2D_SetTargetGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the source and target pixel multiply modes. */
++gceSTATUS
++gco2D_SetPixelMultiplyModeAdvanced(
++ IN gco2D Engine,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha,
++ IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha
++ );
++
++/* Set the number of GPU clock cycles after which an idle engine auto-flushes. */
++gceSTATUS
++gco2D_SetAutoFlushCycles(
++ IN gco2D Engine,
++ IN gctUINT32 Cycles
++ );
++
++#if VIVANTE_PROFILER
++/* Read the profile registers available in the 2D engine and set them in the
++ profile. The function also resets the pixelsRendered counter on every call.
++*/
++gceSTATUS
++gco2D_ProfileEngine(
++ IN gco2D Engine,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ );
++#endif
++
++/* Enable or disable 2D dithering. */
++gceSTATUS
++gco2D_EnableDither(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetGenericSource(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetGenericTarget(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetCurrentSourceIndex(
++ IN gco2D Engine,
++ IN gctUINT32 SrcIndex
++ );
++
++gceSTATUS
++gco2D_MultiSourceBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SourceMask,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 RectCount
++ );
++
++gceSTATUS
++gco2D_SetROP(
++ IN gco2D Engine,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++gceSTATUS
++gco2D_SetGdiStretchMode(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetSourceTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TSControl,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_SetTargetTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TileStatusConfig,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_QueryU32(
++ IN gco2D Engine,
++ IN gce2D_QUERY Item,
++ OUT gctUINT32_PTR Value
++ );
++
++gceSTATUS
++gco2D_SetStateU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32 Value
++ );
++
++gceSTATUS
++gco2D_SetStateArrayI32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetStateArrayU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetTargetRect(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_raster_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_rename.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,248 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_rename_h_
++#define __gc_hal_rename_h_
++
++
++#if defined(_HAL2D_APPENDIX)
++
++#define _HAL2D_RENAME_2(api, appendix) api ## appendix
++#define _HAL2D_RENAME_1(api, appendix) _HAL2D_RENAME_2(api, appendix)
++#define gcmHAL2D(api) _HAL2D_RENAME_1(api, _HAL2D_APPENDIX)
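++
++/* Illustrative note, not vendor text: the extra _HAL2D_RENAME_1 level forces
++   _HAL2D_APPENDIX to be macro-expanded before the token paste. With, say,
++   -D_HAL2D_APPENDIX=_2D, gcmHAL2D(gckOS_Construct) expands to
++   gckOS_Construct_2D, which is what the #define list below relies on. A
++   single-level "api ## _HAL2D_APPENDIX" would paste the literal token and
++   yield gckOS_Construct_HAL2D_APPENDIX instead. */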
++
++
++#define gckOS_Construct gcmHAL2D(gckOS_Construct)
++#define gckOS_Destroy gcmHAL2D(gckOS_Destroy)
++#define gckOS_QueryVideoMemory gcmHAL2D(gckOS_QueryVideoMemory)
++#define gckOS_Allocate gcmHAL2D(gckOS_Allocate)
++#define gckOS_Free gcmHAL2D(gckOS_Free)
++#define gckOS_AllocateMemory gcmHAL2D(gckOS_AllocateMemory)
++#define gckOS_FreeMemory gcmHAL2D(gckOS_FreeMemory)
++#define gckOS_AllocatePagedMemory gcmHAL2D(gckOS_AllocatePagedMemory)
++#define gckOS_AllocatePagedMemoryEx gcmHAL2D(gckOS_AllocatePagedMemoryEx)
++#define gckOS_LockPages gcmHAL2D(gckOS_LockPages)
++#define gckOS_MapPages gcmHAL2D(gckOS_MapPages)
++#define gckOS_UnlockPages gcmHAL2D(gckOS_UnlockPages)
++#define gckOS_FreePagedMemory gcmHAL2D(gckOS_FreePagedMemory)
++#define gckOS_AllocateNonPagedMemory gcmHAL2D(gckOS_AllocateNonPagedMemory)
++#define gckOS_FreeNonPagedMemory gcmHAL2D(gckOS_FreeNonPagedMemory)
++#define gckOS_AllocateContiguous gcmHAL2D(gckOS_AllocateContiguous)
++#define gckOS_FreeContiguous gcmHAL2D(gckOS_FreeContiguous)
++#define gckOS_GetPageSize gcmHAL2D(gckOS_GetPageSize)
++#define gckOS_GetPhysicalAddress gcmHAL2D(gckOS_GetPhysicalAddress)
++#define gckOS_GetPhysicalAddressProcess gcmHAL2D(gckOS_GetPhysicalAddressProcess)
++#define gckOS_MapPhysical gcmHAL2D(gckOS_MapPhysical)
++#define gckOS_UnmapPhysical gcmHAL2D(gckOS_UnmapPhysical)
++#define gckOS_ReadRegister gcmHAL2D(gckOS_ReadRegister)
++#define gckOS_WriteRegister gcmHAL2D(gckOS_WriteRegister)
++#define gckOS_WriteMemory gcmHAL2D(gckOS_WriteMemory)
++#define gckOS_MapMemory gcmHAL2D(gckOS_MapMemory)
++#define gckOS_UnmapMemory gcmHAL2D(gckOS_UnmapMemory)
++#define gckOS_UnmapMemoryEx gcmHAL2D(gckOS_UnmapMemoryEx)
++#define gckOS_CreateMutex gcmHAL2D(gckOS_CreateMutex)
++#define gckOS_DeleteMutex gcmHAL2D(gckOS_DeleteMutex)
++#define gckOS_AcquireMutex gcmHAL2D(gckOS_AcquireMutex)
++#define gckOS_ReleaseMutex gcmHAL2D(gckOS_ReleaseMutex)
++#define gckOS_AtomicExchange gcmHAL2D(gckOS_AtomicExchange)
++#define gckOS_AtomicExchangePtr gcmHAL2D(gckOS_AtomicExchangePtr)
++#define gckOS_AtomConstruct gcmHAL2D(gckOS_AtomConstruct)
++#define gckOS_AtomDestroy gcmHAL2D(gckOS_AtomDestroy)
++#define gckOS_AtomGet gcmHAL2D(gckOS_AtomGet)
++#define gckOS_AtomIncrement gcmHAL2D(gckOS_AtomIncrement)
++#define gckOS_AtomDecrement gcmHAL2D(gckOS_AtomDecrement)
++#define gckOS_Delay gcmHAL2D(gckOS_Delay)
++#define gckOS_GetTime gcmHAL2D(gckOS_GetTime)
++#define gckOS_MemoryBarrier gcmHAL2D(gckOS_MemoryBarrier)
++#define gckOS_MapUserPointer gcmHAL2D(gckOS_MapUserPointer)
++#define gckOS_UnmapUserPointer gcmHAL2D(gckOS_UnmapUserPointer)
++#define gckOS_QueryNeedCopy gcmHAL2D(gckOS_QueryNeedCopy)
++#define gckOS_CopyFromUserData gcmHAL2D(gckOS_CopyFromUserData)
++#define gckOS_CopyToUserData gcmHAL2D(gckOS_CopyToUserData)
++#define gckOS_MapUserPhysical gcmHAL2D(gckOS_MapUserPhysical)
++#define gckOS_SuspendInterrupt gcmHAL2D(gckOS_SuspendInterrupt)
++#define gckOS_ResumeInterrupt gcmHAL2D(gckOS_ResumeInterrupt)
++#define gckOS_GetBaseAddress gcmHAL2D(gckOS_GetBaseAddress)
++#define gckOS_MemCopy gcmHAL2D(gckOS_MemCopy)
++#define gckOS_ZeroMemory gcmHAL2D(gckOS_ZeroMemory)
++#define gckOS_DeviceControl gcmHAL2D(gckOS_DeviceControl)
++#define gckOS_GetProcessID gcmHAL2D(gckOS_GetProcessID)
++#define gckOS_GetThreadID gcmHAL2D(gckOS_GetThreadID)
++#define gckOS_CreateSignal gcmHAL2D(gckOS_CreateSignal)
++#define gckOS_DestroySignal gcmHAL2D(gckOS_DestroySignal)
++#define gckOS_Signal gcmHAL2D(gckOS_Signal)
++#define gckOS_WaitSignal gcmHAL2D(gckOS_WaitSignal)
++#define gckOS_MapSignal gcmHAL2D(gckOS_MapSignal)
++#define gckOS_MapUserMemory gcmHAL2D(gckOS_MapUserMemory)
++#define gckOS_UnmapUserMemory gcmHAL2D(gckOS_UnmapUserMemory)
++#define gckOS_CreateUserSignal gcmHAL2D(gckOS_CreateUserSignal)
++#define gckOS_DestroyUserSignal gcmHAL2D(gckOS_DestroyUserSignal)
++#define gckOS_WaitUserSignal gcmHAL2D(gckOS_WaitUserSignal)
++#define gckOS_SignalUserSignal gcmHAL2D(gckOS_SignalUserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_CacheClean gcmHAL2D(gckOS_CacheClean)
++#define gckOS_CacheFlush gcmHAL2D(gckOS_CacheFlush)
++#define gckOS_SetDebugLevel gcmHAL2D(gckOS_SetDebugLevel)
++#define gckOS_SetDebugZone gcmHAL2D(gckOS_SetDebugZone)
++#define gckOS_SetDebugLevelZone gcmHAL2D(gckOS_SetDebugLevelZone)
++#define gckOS_SetDebugZones gcmHAL2D(gckOS_SetDebugZones)
++#define gckOS_SetDebugFile gcmHAL2D(gckOS_SetDebugFile)
++#define gckOS_Broadcast gcmHAL2D(gckOS_Broadcast)
++#define gckOS_SetGPUPower gcmHAL2D(gckOS_SetGPUPower)
++#define gckOS_CreateSemaphore gcmHAL2D(gckOS_CreateSemaphore)
++#define gckOS_DestroySemaphore gcmHAL2D(gckOS_DestroySemaphore)
++#define gckOS_AcquireSemaphore gcmHAL2D(gckOS_AcquireSemaphore)
++#define gckOS_ReleaseSemaphore gcmHAL2D(gckOS_ReleaseSemaphore)
++#define gckHEAP_Construct gcmHAL2D(gckHEAP_Construct)
++#define gckHEAP_Destroy gcmHAL2D(gckHEAP_Destroy)
++#define gckHEAP_Allocate gcmHAL2D(gckHEAP_Allocate)
++#define gckHEAP_Free gcmHAL2D(gckHEAP_Free)
++#define gckHEAP_ProfileStart gcmHAL2D(gckHEAP_ProfileStart)
++#define gckHEAP_ProfileEnd gcmHAL2D(gckHEAP_ProfileEnd)
++#define gckHEAP_Test gcmHAL2D(gckHEAP_Test)
++#define gckVIDMEM_Construct gcmHAL2D(gckVIDMEM_Construct)
++#define gckVIDMEM_Destroy gcmHAL2D(gckVIDMEM_Destroy)
++#define gckVIDMEM_Allocate gcmHAL2D(gckVIDMEM_Allocate)
++#define gckVIDMEM_AllocateLinear gcmHAL2D(gckVIDMEM_AllocateLinear)
++#define gckVIDMEM_Free gcmHAL2D(gckVIDMEM_Free)
++#define gckVIDMEM_Lock gcmHAL2D(gckVIDMEM_Lock)
++#define gckVIDMEM_Unlock gcmHAL2D(gckVIDMEM_Unlock)
++#define gckVIDMEM_ConstructVirtual gcmHAL2D(gckVIDMEM_ConstructVirtual)
++#define gckVIDMEM_DestroyVirtual gcmHAL2D(gckVIDMEM_DestroyVirtual)
++#define gckKERNEL_Construct gcmHAL2D(gckKERNEL_Construct)
++#define gckKERNEL_Destroy gcmHAL2D(gckKERNEL_Destroy)
++#define gckKERNEL_Dispatch gcmHAL2D(gckKERNEL_Dispatch)
++#define gckKERNEL_QueryVideoMemory gcmHAL2D(gckKERNEL_QueryVideoMemory)
++#define gckKERNEL_GetVideoMemoryPool gcmHAL2D(gckKERNEL_GetVideoMemoryPool)
++#define gckKERNEL_MapVideoMemory gcmHAL2D(gckKERNEL_MapVideoMemory)
++#define gckKERNEL_UnmapVideoMemory gcmHAL2D(gckKERNEL_UnmapVideoMemory)
++#define gckKERNEL_MapMemory gcmHAL2D(gckKERNEL_MapMemory)
++#define gckKERNEL_UnmapMemory gcmHAL2D(gckKERNEL_UnmapMemory)
++#define gckKERNEL_Notify gcmHAL2D(gckKERNEL_Notify)
++#define gckKERNEL_QuerySettings gcmHAL2D(gckKERNEL_QuerySettings)
++#define gckKERNEL_Recovery gcmHAL2D(gckKERNEL_Recovery)
++#define gckKERNEL_OpenUserData gcmHAL2D(gckKERNEL_OpenUserData)
++#define gckKERNEL_CloseUserData gcmHAL2D(gckKERNEL_CloseUserData)
++#define gckHARDWARE_Construct gcmHAL2D(gckHARDWARE_Construct)
++#define gckHARDWARE_Destroy gcmHAL2D(gckHARDWARE_Destroy)
++#define gckHARDWARE_QuerySystemMemory gcmHAL2D(gckHARDWARE_QuerySystemMemory)
++#define gckHARDWARE_BuildVirtualAddress gcmHAL2D(gckHARDWARE_BuildVirtualAddress)
++#define gckHARDWARE_QueryCommandBuffer gcmHAL2D(gckHARDWARE_QueryCommandBuffer)
++#define gckHARDWARE_WaitLink gcmHAL2D(gckHARDWARE_WaitLink)
++#define gckHARDWARE_Execute gcmHAL2D(gckHARDWARE_Execute)
++#define gckHARDWARE_End gcmHAL2D(gckHARDWARE_End)
++#define gckHARDWARE_Nop gcmHAL2D(gckHARDWARE_Nop)
++#define gckHARDWARE_Wait gcmHAL2D(gckHARDWARE_Wait)
++#define gckHARDWARE_PipeSelect gcmHAL2D(gckHARDWARE_PipeSelect)
++#define gckHARDWARE_Link gcmHAL2D(gckHARDWARE_Link)
++#define gckHARDWARE_Event gcmHAL2D(gckHARDWARE_Event)
++#define gckHARDWARE_QueryMemory gcmHAL2D(gckHARDWARE_QueryMemory)
++#define gckHARDWARE_QueryChipIdentity gcmHAL2D(gckHARDWARE_QueryChipIdentity)
++#define gckHARDWARE_QueryChipSpecs gcmHAL2D(gckHARDWARE_QueryChipSpecs)
++#define gckHARDWARE_QueryShaderCaps gcmHAL2D(gckHARDWARE_QueryShaderCaps)
++#define gckHARDWARE_ConvertFormat gcmHAL2D(gckHARDWARE_ConvertFormat)
++#define gckHARDWARE_SplitMemory gcmHAL2D(gckHARDWARE_SplitMemory)
++#define gckHARDWARE_AlignToTile gcmHAL2D(gckHARDWARE_AlignToTile)
++#define gckHARDWARE_UpdateQueueTail gcmHAL2D(gckHARDWARE_UpdateQueueTail)
++#define gckHARDWARE_ConvertLogical gcmHAL2D(gckHARDWARE_ConvertLogical)
++#define gckHARDWARE_ConvertPhysical gcmHAL2D(gckHARDWARE_ConvertPhysical)
++#define gckHARDWARE_Interrupt gcmHAL2D(gckHARDWARE_Interrupt)
++#define gckHARDWARE_SetMMU gcmHAL2D(gckHARDWARE_SetMMU)
++#define gckHARDWARE_FlushMMU gcmHAL2D(gckHARDWARE_FlushMMU)
++#define gckHARDWARE_GetIdle gcmHAL2D(gckHARDWARE_GetIdle)
++#define gckHARDWARE_Flush gcmHAL2D(gckHARDWARE_Flush)
++#define gckHARDWARE_SetFastClear gcmHAL2D(gckHARDWARE_SetFastClear)
++#define gckHARDWARE_ReadInterrupt gcmHAL2D(gckHARDWARE_ReadInterrupt)
++#define gckHARDWARE_SetPowerManagementState gcmHAL2D(gckHARDWARE_SetPowerManagementState)
++#define gckHARDWARE_QueryPowerManagementState gcmHAL2D(gckHARDWARE_QueryPowerManagementState)
++#define gckHARDWARE_ProfileEngine2D gcmHAL2D(gckHARDWARE_ProfileEngine2D)
++#define gckHARDWARE_InitializeHardware gcmHAL2D(gckHARDWARE_InitializeHardware)
++#define gckHARDWARE_Reset gcmHAL2D(gckHARDWARE_Reset)
++#define gckINTERRUPT_Construct gcmHAL2D(gckINTERRUPT_Construct)
++#define gckINTERRUPT_Destroy gcmHAL2D(gckINTERRUPT_Destroy)
++#define gckINTERRUPT_SetHandler gcmHAL2D(gckINTERRUPT_SetHandler)
++#define gckINTERRUPT_Notify gcmHAL2D(gckINTERRUPT_Notify)
++#define gckEVENT_Construct gcmHAL2D(gckEVENT_Construct)
++#define gckEVENT_Destroy gcmHAL2D(gckEVENT_Destroy)
++#define gckEVENT_AddList gcmHAL2D(gckEVENT_AddList)
++#define gckEVENT_FreeNonPagedMemory gcmHAL2D(gckEVENT_FreeNonPagedMemory)
++#define gckEVENT_FreeContiguousMemory gcmHAL2D(gckEVENT_FreeContiguousMemory)
++#define gckEVENT_FreeVideoMemory gcmHAL2D(gckEVENT_FreeVideoMemory)
++#define gckEVENT_Signal gcmHAL2D(gckEVENT_Signal)
++#define gckEVENT_Unlock gcmHAL2D(gckEVENT_Unlock)
++#define gckEVENT_Submit gcmHAL2D(gckEVENT_Submit)
++#define gckEVENT_Commit gcmHAL2D(gckEVENT_Commit)
++#define gckEVENT_Notify gcmHAL2D(gckEVENT_Notify)
++#define gckEVENT_Interrupt gcmHAL2D(gckEVENT_Interrupt)
++#define gckCOMMAND_Construct gcmHAL2D(gckCOMMAND_Construct)
++#define gckCOMMAND_Destroy gcmHAL2D(gckCOMMAND_Destroy)
++#define gckCOMMAND_EnterCommit gcmHAL2D(gckCOMMAND_EnterCommit)
++#define gckCOMMAND_ExitCommit gcmHAL2D(gckCOMMAND_ExitCommit)
++#define gckCOMMAND_Start gcmHAL2D(gckCOMMAND_Start)
++#define gckCOMMAND_Stop gcmHAL2D(gckCOMMAND_Stop)
++#define gckCOMMAND_Commit gcmHAL2D(gckCOMMAND_Commit)
++#define gckCOMMAND_Reserve gcmHAL2D(gckCOMMAND_Reserve)
++#define gckCOMMAND_Execute gcmHAL2D(gckCOMMAND_Execute)
++#define gckCOMMAND_Stall gcmHAL2D(gckCOMMAND_Stall)
++#define gckCOMMAND_Attach gcmHAL2D(gckCOMMAND_Attach)
++#define gckCOMMAND_Detach gcmHAL2D(gckCOMMAND_Detach)
++#define gckMMU_Construct gcmHAL2D(gckMMU_Construct)
++#define gckMMU_Destroy gcmHAL2D(gckMMU_Destroy)
++#define gckMMU_AllocatePages gcmHAL2D(gckMMU_AllocatePages)
++#define gckMMU_FreePages gcmHAL2D(gckMMU_FreePages)
++#define gckMMU_InsertNode gcmHAL2D(gckMMU_InsertNode)
++#define gckMMU_RemoveNode gcmHAL2D(gckMMU_RemoveNode)
++#define gckMMU_FreeHandleMemory gcmHAL2D(gckMMU_FreeHandleMemory)
++#define gckMMU_Test gcmHAL2D(gckMMU_Test)
++#define gckHARDWARE_QueryProfileRegisters gcmHAL2D(gckHARDWARE_QueryProfileRegisters)
++
++
++#define FindMdlMap gcmHAL2D(FindMdlMap)
++#define OnProcessExit gcmHAL2D(OnProcessExit)
++
++#define gckGALDEVICE_Destroy gcmHAL2D(gckGALDEVICE_Destroy)
++#define gckOS_Print gcmHAL2D(gckOS_Print)
++#define gckGALDEVICE_FreeMemory gcmHAL2D(gckGALDEVICE_FreeMemory)
++#define gckGALDEVICE_AllocateMemory gcmHAL2D(gckGALDEVICE_AllocateMemory)
++#define gckOS_DebugBreak gcmHAL2D(gckOS_DebugBreak)
++#define gckGALDEVICE_Release_ISR gcmHAL2D(gckGALDEVICE_Release_ISR)
++#define gckOS_Verify gcmHAL2D(gckOS_Verify)
++#define gckCOMMAND_Release gcmHAL2D(gckCOMMAND_Release)
++#define gckGALDEVICE_Stop gcmHAL2D(gckGALDEVICE_Stop)
++#define gckGALDEVICE_Construct gcmHAL2D(gckGALDEVICE_Construct)
++#define gckOS_DebugFatal gcmHAL2D(gckOS_DebugFatal)
++#define gckOS_DebugTrace gcmHAL2D(gckOS_DebugTrace)
++#define gckHARDWARE_GetBaseAddress gcmHAL2D(gckHARDWARE_GetBaseAddress)
++#define gckGALDEVICE_Setup_ISR gcmHAL2D(gckGALDEVICE_Setup_ISR)
++#define gckKERNEL_AttachProcess gcmHAL2D(gckKERNEL_AttachProcess)
++#define gckKERNEL_AttachProcessEx gcmHAL2D(gckKERNEL_AttachProcessEx)
++#define gckGALDEVICE_Start_Thread gcmHAL2D(gckGALDEVICE_Start_Thread)
++#define gckHARDWARE_QueryIdle gcmHAL2D(gckHARDWARE_QueryIdle)
++#define gckGALDEVICE_Start gcmHAL2D(gckGALDEVICE_Start)
++#define gckOS_GetKernelLogical gcmHAL2D(gckOS_GetKernelLogical)
++#define gckOS_DebugTraceZone gcmHAL2D(gckOS_DebugTraceZone)
++#define gckGALDEVICE_Stop_Thread gcmHAL2D(gckGALDEVICE_Stop_Thread)
++#define gckHARDWARE_NeedBaseAddress gcmHAL2D(gckHARDWARE_NeedBaseAddress)
++
++#endif
++
++#endif /* __gc_hal_rename_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_statistics.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,115 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_statistics_h_
++#define __gc_hal_statistics_h_
++
++
++#define VIV_STAT_ENABLE_STATISTICS 0
++
++/* Total number of frames for which the frame time is accounted. We keep
++ storage for the frame times of this many most recent frames.
++*/
++#define VIV_STAT_FRAME_BUFFER_SIZE 30
++
++/*
++ Total number of frames sampled for a mode. This means
++
++ # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ +
++ --------------------------------------------------------
++ : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed
++
++ IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE
++*/
++#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7
++#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2
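++
++/* Illustrative note, not vendor code: with the defaults above the sampling
++   budget is 2 * 7 = 14 frames, which fits inside the 30-entry frame-time
++   storage. A build could enforce the constraint with a compile-time check
++   such as the hypothetical one below. */
++#if 0
++typedef char vivStatEarlyZFitsFrameBuffer
++    [(2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES < VIV_STAT_FRAME_BUFFER_SIZE) ? 1 : -1];
++#endif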
++
++/* Multiplication factor for the previous HZ-off mode. Make it more than 1.0 to advertise HZ on. */
++#define VIV_STAT_EARLY_Z_FACTOR (1.05f)
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS
++{
++ gcvFRAME_FPS = 1,
++}
++gceSTATISTICS;
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS_EARLYZ
++{
++ gctUINT switchBackCount;
++ gctUINT nextCheckPoint;
++ gctBOOL disabled;
++}
++gcsSTATISTICS_EARLYZ;
++
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS_Call
++{
++ gcvSTAT_ES11_GLDRAWELEMENTS = 1,
++}
++gceSTATISTICS_Call;
++
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS
++{
++ gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE];
++ gctUINT64 previousFrameTime;
++ gctUINT frame;
++ gcsSTATISTICS_EARLYZ earlyZ;
++ gctUINT ES11_drawElementsCount;
++ gctBOOL applyRTestVAFix;
++}
++gcsSTATISTICS;
++
++
++/* Add frame-based data to the current statistics. */
++void
++gcfSTATISTICS_AddData(
++ IN gceSTATISTICS Key,
++ IN gctUINT Value
++ );
++
++/* Marks the frame end and triggers statistical calculations and decisions. */
++void
++gcfSTATISTICS_MarkFrameEnd (
++ void
++ );
++
++/* Sets whether the dynamic HZ is disabled or not. */
++void
++gcfSTATISTICS_DisableDynamicEarlyZ (
++ IN gctBOOL Disabled
++ );
++
++/* Checks whether or not a glDrawArray function call will be discarded. */
++gctBOOL
++gcfSTATISTICS_DiscardCall(
++ gceSTATISTICS_Call Function
++ );
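++
++/* Illustrative sketch, not vendor code: a hypothetical per-frame hook-up of
++   the entry points above, assuming it runs once per presented frame in the
++   user-mode driver (the exact Value semantics depend on the key). */
++#if 0
++    /* Record a frame-level sample and close out the current frame. */
++    gcfSTATISTICS_AddData(gcvFRAME_FPS, 1);
++    gcfSTATISTICS_MarkFrameEnd();
++
++    /* Ask whether an ES1.1 glDrawElements call should be dropped. */
++    if (gcfSTATISTICS_DiscardCall(gcvSTAT_ES11_GLDRAWELEMENTS))
++    {
++        return;
++    }
++#endif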
++
++
++#endif /*__gc_hal_statistics_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_types.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,1080 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_types_h_
++#define __gc_hal_types_h_
++
++#include "gc_hal_version.h"
++#include "gc_hal_options.h"
++
++#ifdef _WIN32
++#pragma warning(disable:4127) /* Conditional expression is constant (do { }
++ ** while(0)). */
++#pragma warning(disable:4100) /* Unreferenced formal parameter. */
++#pragma warning(disable:4204) /* Non-constant aggregate initializer (C99). */
++#pragma warning(disable:4131) /* Uses old-style declarator (for Bison and
++ ** Flex generated files). */
++#pragma warning(disable:4206) /* Translation unit is empty. */
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++** Platform macros.
++*/
++
++#if defined(__GNUC__)
++# define gcdHAS_ELLIPSES 1 /* GCC always has it. */
++#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
++# define gcdHAS_ELLIPSES 1 /* C99 has it. */
++#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
++# define gcdHAS_ELLIPSES 1 /* MSVC 2007+ has it. */
++#elif defined(UNDER_CE)
++#if UNDER_CE >= 600
++# define gcdHAS_ELLIPSES 1
++# else
++# define gcdHAS_ELLIPSES 0
++# endif
++#else
++# error "gcdHAS_ELLIPSES: Platform could not be determined"
++#endif
++
++/******************************************************************************\
++************************************ Keyword ***********************************
++\******************************************************************************/
++
++#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))
++# define gcmINLINE inline /* C99 keyword. */
++#elif defined(__GNUC__)
++# define gcmINLINE __inline__ /* GNU keyword. */
++#elif defined(_MSC_VER) || defined(UNDER_CE)
++# define gcmINLINE __inline /* Internal keyword. */
++#else
++# error "gcmINLINE: Platform could not be determined"
++#endif
++
++/* Possible debug flags. */
++#define gcdDEBUG_NONE 0
++#define gcdDEBUG_ALL (1 << 0)
++#define gcdDEBUG_FATAL (1 << 1)
++#define gcdDEBUG_TRACE (1 << 2)
++#define gcdDEBUG_BREAK (1 << 3)
++#define gcdDEBUG_ASSERT (1 << 4)
++#define gcdDEBUG_CODE (1 << 5)
++#define gcdDEBUG_STACK (1 << 6)
++
++#define gcmIS_DEBUG(flag) ( gcdDEBUG & (flag | gcdDEBUG_ALL) )
++
++#ifndef gcdDEBUG
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG)
++# define gcdDEBUG gcdDEBUG_ALL
++# else
++# define gcdDEBUG gcdDEBUG_NONE
++# endif
++#endif
++
++#ifdef _USRDLL
++#ifdef _MSC_VER
++#ifdef HAL_EXPORTS
++# define HALAPI __declspec(dllexport)
++# else
++# define HALAPI __declspec(dllimport)
++# endif
++# define HALDECL __cdecl
++# else
++#ifdef HAL_EXPORTS
++# define HALAPI
++# else
++# define HALAPI extern
++# endif
++# endif
++#else
++# define HALAPI
++# define HALDECL
++#endif
++
++/******************************************************************************\
++********************************** Common Types ********************************
++\******************************************************************************/
++
++#define gcvFALSE 0
++#define gcvTRUE 1
++
++#define gcvINFINITE ((gctUINT32) ~0U)
++
++#define gcvINVALID_HANDLE ((gctHANDLE) ~0U)
++
++typedef int gctBOOL;
++typedef gctBOOL * gctBOOL_PTR;
++
++typedef int gctINT;
++typedef long gctLONG;
++typedef signed char gctINT8;
++typedef signed short gctINT16;
++typedef signed int gctINT32;
++typedef signed long long gctINT64;
++
++typedef gctINT * gctINT_PTR;
++typedef gctINT8 * gctINT8_PTR;
++typedef gctINT16 * gctINT16_PTR;
++typedef gctINT32 * gctINT32_PTR;
++typedef gctINT64 * gctINT64_PTR;
++
++typedef unsigned int gctUINT;
++typedef unsigned char gctUINT8;
++typedef unsigned short gctUINT16;
++typedef unsigned int gctUINT32;
++typedef unsigned long long gctUINT64;
++typedef unsigned long gctUINTPTR_T;
++
++typedef gctUINT * gctUINT_PTR;
++typedef gctUINT8 * gctUINT8_PTR;
++typedef gctUINT16 * gctUINT16_PTR;
++typedef gctUINT32 * gctUINT32_PTR;
++typedef gctUINT64 * gctUINT64_PTR;
++
++typedef unsigned long gctSIZE_T;
++typedef gctSIZE_T * gctSIZE_T_PTR;
++
++#ifdef __cplusplus
++# define gcvNULL 0
++#else
++# define gcvNULL ((void *) 0)
++#endif
++
++typedef float gctFLOAT;
++typedef signed int gctFIXED_POINT;
++typedef float * gctFLOAT_PTR;
++
++typedef void * gctPHYS_ADDR;
++typedef void * gctHANDLE;
++typedef void * gctFILE;
++typedef void * gctSIGNAL;
++typedef void * gctWINDOW;
++typedef void * gctIMAGE;
++typedef void * gctSYNC_POINT;
++
++typedef void * gctSEMAPHORE;
++
++typedef void * gctPOINTER;
++typedef const void * gctCONST_POINTER;
++
++typedef char gctCHAR;
++typedef char * gctSTRING;
++typedef const char * gctCONST_STRING;
++
++typedef struct _gcsCOUNT_STRING
++{
++ gctSIZE_T Length;
++ gctCONST_STRING String;
++}
++gcsCOUNT_STRING;
++
++typedef union _gcuFLOAT_UINT32
++{
++ gctFLOAT f;
++ gctUINT32 u;
++}
++gcuFLOAT_UINT32;
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
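++
++/* Illustrative note, not vendor code: the constants above use 16.16 fixed
++   point, so one unit is 1/65536 and gcvONE_X (0x00010000) is 1.0. A plain
++   conversion helper would look like the hypothetical sketch below. */
++#if 0
++static gcmINLINE gctFLOAT vivFixedToFloat(gctFIXED_POINT X)
++{
++    /* gcvHALF_X (0x00008000) converts to 0.5f, gcvNEGONE_X to -1.0f. */
++    return (gctFLOAT) X / 65536.0f;
++}
++#endif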
++
++/* Stringizing macro. */
++#define gcmSTRING(Value) #Value
++
++/******************************************************************************\
++******************************* Fixed Point Math *******************************
++\******************************************************************************/
++
++#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2)
++#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2)
++#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3)
++
++/* 2D Engine profile. */
++typedef struct _gcs2D_PROFILE
++{
++ /* Cycle count.
++ 32bit counter incremented every 2D clock cycle.
++ Wraps back to 0 when the counter overflows.
++ */
++ gctUINT32 cycleCount;
++
++ /* Pixels rendered by the 2D engine.
++ Resets to 0 every time it is read. */
++ gctUINT32 pixelsRendered;
++}
++gcs2D_PROFILE;
++
++/* Macro to combine four characters into a Character Code. */
++#define gcmCC(c1, c2, c3, c4) \
++( \
++ (char) (c1) \
++ | \
++ ((char) (c2) << 8) \
++ | \
++ ((char) (c3) << 16) \
++ | \
++ ((char) (c4) << 24) \
++)
++
++#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? (c) : ' ') : ' ')
++
++#define gcmCC_PRINT(cc) \
++ gcmPRINTABLE((char) ( (cc) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 24) & 0xFF))
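++
++/* Illustrative sketch, not vendor code: gcmCC packs four characters with the
++   first argument in the least significant byte, and gcmCC_PRINT expands to
++   the four printable arguments expected by a "%c%c%c%c" format. The print
++   helper below is only a stand-in for any printf-style routine. */
++#if 0
++    gctUINT32 tag = gcmCC('S', 'U', 'R', 'F');        /* 'S' in the low byte. */
++    SomePrintf("tag = %c%c%c%c\n", gcmCC_PRINT(tag)); /* prints "tag = SURF"  */
++#endif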
++
++/******************************************************************************\
++****************************** Function Parameters *****************************
++\******************************************************************************/
++
++#define IN
++#define OUT
++#define OPTIONAL
++
++/******************************************************************************\
++********************************* Status Codes *********************************
++\******************************************************************************/
++
++typedef enum _gceSTATUS
++{
++ gcvSTATUS_OK = 0,
++ gcvSTATUS_FALSE = 0,
++ gcvSTATUS_TRUE = 1,
++ gcvSTATUS_NO_MORE_DATA = 2,
++ gcvSTATUS_CACHED = 3,
++ gcvSTATUS_MIPMAP_TOO_LARGE = 4,
++ gcvSTATUS_NAME_NOT_FOUND = 5,
++ gcvSTATUS_NOT_OUR_INTERRUPT = 6,
++ gcvSTATUS_MISMATCH = 7,
++ gcvSTATUS_MIPMAP_TOO_SMALL = 8,
++ gcvSTATUS_LARGER = 9,
++ gcvSTATUS_SMALLER = 10,
++ gcvSTATUS_CHIP_NOT_READY = 11,
++ gcvSTATUS_NEED_CONVERSION = 12,
++ gcvSTATUS_SKIP = 13,
++ gcvSTATUS_DATA_TOO_LARGE = 14,
++ gcvSTATUS_INVALID_CONFIG = 15,
++ gcvSTATUS_CHANGED = 16,
++ gcvSTATUS_NOT_SUPPORT_DITHER = 17,
++ gcvSTATUS_EXECUTED = 18,
++ gcvSTATUS_TERMINATE = 19,
++
++ gcvSTATUS_CONVERT_TO_SINGLE_STREAM = 20,
++
++ gcvSTATUS_INVALID_ARGUMENT = -1,
++ gcvSTATUS_INVALID_OBJECT = -2,
++ gcvSTATUS_OUT_OF_MEMORY = -3,
++ gcvSTATUS_MEMORY_LOCKED = -4,
++ gcvSTATUS_MEMORY_UNLOCKED = -5,
++ gcvSTATUS_HEAP_CORRUPTED = -6,
++ gcvSTATUS_GENERIC_IO = -7,
++ gcvSTATUS_INVALID_ADDRESS = -8,
++ gcvSTATUS_CONTEXT_LOSSED = -9,
++ gcvSTATUS_TOO_COMPLEX = -10,
++ gcvSTATUS_BUFFER_TOO_SMALL = -11,
++ gcvSTATUS_INTERFACE_ERROR = -12,
++ gcvSTATUS_NOT_SUPPORTED = -13,
++ gcvSTATUS_MORE_DATA = -14,
++ gcvSTATUS_TIMEOUT = -15,
++ gcvSTATUS_OUT_OF_RESOURCES = -16,
++ gcvSTATUS_INVALID_DATA = -17,
++ gcvSTATUS_INVALID_MIPMAP = -18,
++ gcvSTATUS_NOT_FOUND = -19,
++ gcvSTATUS_NOT_ALIGNED = -20,
++ gcvSTATUS_INVALID_REQUEST = -21,
++ gcvSTATUS_GPU_NOT_RESPONDING = -22,
++ gcvSTATUS_TIMER_OVERFLOW = -23,
++ gcvSTATUS_VERSION_MISMATCH = -24,
++ gcvSTATUS_LOCKED = -25,
++ gcvSTATUS_INTERRUPTED = -26,
++ gcvSTATUS_DEVICE = -27,
++ gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28,
++
++ /* Linker errors. */
++ gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000,
++ gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001,
++ gcvSTATUS_TOO_MANY_UNIFORMS = -1002,
++ gcvSTATUS_TOO_MANY_VARYINGS = -1003,
++ gcvSTATUS_UNDECLARED_VARYING = -1004,
++ gcvSTATUS_VARYING_TYPE_MISMATCH = -1005,
++ gcvSTATUS_MISSING_MAIN = -1006,
++ gcvSTATUS_NAME_MISMATCH = -1007,
++ gcvSTATUS_INVALID_INDEX = -1008,
++ gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1009,
++
++ /* Compiler errors. */
++ gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000,
++ gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001,
++}
++gceSTATUS;
++
++/******************************************************************************\
++********************************* Status Macros ********************************
++\******************************************************************************/
++
++#define gcmIS_ERROR(status) (status < 0)
++#define gcmNO_ERROR(status) (status >= 0)
++#define gcmIS_SUCCESS(status) (status == gcvSTATUS_OK)
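++
++/* Illustrative sketch, not vendor code: negative gceSTATUS values are
++   failures while zero and positive values carry information, so callers
++   usually branch on the sign rather than comparing against gcvSTATUS_OK
++   alone ("engine" is assumed to exist). */
++#if 0
++    gceSTATUS status = gco2D_Flush(engine);
++
++    if (gcmIS_ERROR(status))
++    {
++        /* Hard failure, e.g. gcvSTATUS_OUT_OF_MEMORY. */
++    }
++    else if (!gcmIS_SUCCESS(status))
++    {
++        /* Informational result, e.g. gcvSTATUS_SKIP; not an error. */
++    }
++#endif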
++
++/******************************************************************************\
++********************************* Field Macros *********************************
++\******************************************************************************/
++
++#define __gcmSTART(reg_field) \
++ (0 ? reg_field)
++
++#define __gcmEND(reg_field) \
++ (1 ? reg_field)
++
++#define __gcmGETSIZE(reg_field) \
++ (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1)
++
++#define __gcmALIGN(data, reg_field) \
++ (((gctUINT32) (data)) << __gcmSTART(reg_field))
++
++#define __gcmMASK(reg_field) \
++ ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg_field)))))
++
++/*******************************************************************************
++**
++** gcmFIELDMASK
++**
++** Get aligned field mask.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMASK(reg, field) \
++( \
++ __gcmALIGN(__gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETFIELD
++**
++** Extract the value of a field from specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETFIELD(data, reg, field) \
++( \
++ ((((gctUINT32) (data)) >> __gcmSTART(reg##_##field)) \
++ & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELD
++**
++** Set the value of a field within specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETFIELD(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN((gctUINT32) (value) \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELDVALUE
++**
++** Set the value of a field within specified data with a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmSETFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN(reg##_##field##_##value \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
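++
++/* Illustrative sketch, not vendor code: the reg##_##field token is expected
++   to expand to an "msb : lsb" pair (see __gcmSTART/__gcmEND above). With a
++   hypothetical 3-bit MODE field occupying bits 10..8 of a MY_CTRL register: */
++#if 0
++#define MY_CTRL_MODE        10 : 8
++#define MY_CTRL_MODE_FAST   0x5
++
++    gctUINT32 ctrl = 0;
++    gctUINT32 mode;
++
++    ctrl = gcmSETFIELD(ctrl, MY_CTRL, MODE, 0x3);       /* ctrl == 0x00000300 */
++    ctrl = gcmSETFIELDVALUE(ctrl, MY_CTRL, MODE, FAST); /* ctrl == 0x00000500 */
++    mode = gcmGETFIELD(ctrl, MY_CTRL, MODE);            /* mode == 0x5        */
++#endif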
++
++/*******************************************************************************
++**
++** gcmGETMASKEDFIELDMASK
++**
++** Determine field mask of a masked field.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETMASKEDFIELDMASK(reg, field) \
++( \
++ gcmSETFIELD(0, reg, field, ~0) | \
++ gcmSETFIELD(0, reg, MASK_ ## field, ~0) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELD
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELD(reg, field, value) \
++( \
++ gcmSETFIELD (~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELDVALUE
++**
++** Set the value of a masked field with a predefined value.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELDVALUE(reg, field, value) \
++( \
++ gcmSETFIELDVALUE(~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDVALUE
++**
++** Verify if the value of a field within specified data equals a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmVERIFYFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) >> __gcmSTART(reg##_##field) & \
++ __gcmMASK(reg##_##field)) \
++ == \
++ (reg##_##field##_##value & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++** Bit field macros.
++*/
++
++#define __gcmSTARTBIT(Field) \
++ ( 1 ? Field )
++
++#define __gcmBITSIZE(Field) \
++ ( 0 ? Field )
++
++#define __gcmBITMASK(Field) \
++( \
++ (1 << __gcmBITSIZE(Field)) - 1 \
++)
++
++#define gcmGETBITS(Value, Type, Field) \
++( \
++ ( ((Type) (Value)) >> __gcmSTARTBIT(Field) ) \
++ & \
++ __gcmBITMASK(Field) \
++)
++
++#define gcmSETBITS(Value, Type, Field, NewValue) \
++( \
++ ( ((Type) (Value)) \
++ & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field)) \
++ ) \
++ | \
++ ( ( ((Type) (NewValue)) \
++ & __gcmBITMASK(Field) \
++ ) << __gcmSTARTBIT(Field) \
++ ) \
++)
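++
++/* Illustrative sketch, not vendor code: unlike the register field macros
++   above, these take the field as a "start_bit : bit_count" pair
++   (__gcmSTARTBIT picks the first number, __gcmBITSIZE the second). For a
++   hypothetical 4-bit field starting at bit 12: */
++#if 0
++#define MY_FLAGS_LEVEL      12 : 4
++
++    gctUINT32 flags;
++
++    flags = gcmSETBITS(0, gctUINT32, MY_FLAGS_LEVEL, 0x9);     /* 0x00009000 */
++    flags = gcmGETBITS(flags, gctUINT32, MY_FLAGS_LEVEL);      /* 0x9        */
++#endif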
++
++/*******************************************************************************
++**
++** gcmISINREGRANGE
++**
++** Verify whether the specified address is in the register range.
++**
++** ARGUMENTS:
++**
++** Address Address to be verified.
++** Name Name of a register.
++*/
++
++#define gcmISINREGRANGE(Address, Name) \
++( \
++ ((Address & (~0U << Name ## _LSB)) == (Name ## _Address >> 2)) \
++)
++
++/*******************************************************************************
++**
++** A set of macros to aid state loading.
++**
++** ARGUMENTS:
++**
++** CommandBuffer Pointer to a gcoCMDBUF object.
++** StateDelta Pointer to a gcsSTATE_DELTA state delta structure.
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** PartOfContext Whether or not the state is a part of the context.
++** FixedPoint Whether or not the state is of the fixed point format.
++** Count Number of consecutive states to be loaded.
++** Address State address.
++** Data Data to be set to the state.
++*/
++
++/*----------------------------------------------------------------------------*/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \
++ CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \
++ CommandBuffer->lastLoadStateAddress = Address; \
++ CommandBuffer->lastLoadStateCount = Count
++
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \
++ gcmASSERT( \
++ (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \
++ == \
++ (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \
++ ); \
++ \
++ gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \
++ \
++ CommandBuffer->lastLoadStateCount -= 1
++
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \
++ gcmASSERT(CommandBuffer->lastLoadStateCount == 0)
++
++#else
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count)
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address)
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer)
++
++#endif
++
++#if gcdSECURE_USER
++
++# define gcmDEFINESECUREUSER() \
++ gctUINT __secure_user_offset__; \
++ gctUINT32_PTR __secure_user_hintArray__;
++
++# define gcmBEGINSECUREUSER() \
++ __secure_user_offset__ = reserve->lastOffset; \
++ \
++ __secure_user_hintArray__ = gcmUINT64_TO_PTR(reserve->hintArrayTail)
++
++# define gcmENDSECUREUSER() \
++ reserve->hintArrayTail = gcmPTR_TO_UINT64(__secure_user_hintArray__)
++
++# define gcmSKIPSECUREUSER() \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32)
++
++# define gcmUPDATESECUREUSER() \
++ *__secure_user_hintArray__ = __secure_user_offset__; \
++ \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32); \
++ __secure_user_hintArray__ += 1
++
++#else
++
++# define gcmDEFINESECUREUSER()
++# define gcmBEGINSECUREUSER()
++# define gcmENDSECUREUSER()
++# define gcmSKIPSECUREUSER()
++# define gcmUPDATESECUREUSER()
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#if gcdDUMP
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) \
++ if (FixedPoint) \
++ { \
++ gcmDUMP(gcvNULL, "@[state.x 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ } \
++ else \
++ { \
++ gcmDUMP(gcvNULL, "@[state 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ }
++#else
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data)
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, &CommandBuffer \
++ )); \
++ \
++ Memory = gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmENDSTATEBUFFER(CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ gcmASSERT( \
++ gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \
++ == \
++ (gctUINT8_PTR) Memory \
++ ); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, FixedPoint, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++
++/*******************************************************************************
++**
++** gcmSETSTARTDECOMMAND
++**
++** Form a START_DE command.
++**
++** ARGUMENTS:
++**
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** Count Number of the rectangles.
++*/
++
++#define gcmSETSTARTDECOMMAND(Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++}
++
++/******************************************************************************\
++******************************** Ceiling Macro ********************************
++\******************************************************************************/
++#define gcmCEIL(x) ((x - (gctUINT32)x) == 0 ? (gctUINT32)x : (gctUINT32)x + 1)
++
++/******************************************************************************\
++******************************** Min/Max Macros ********************************
++\******************************************************************************/
++
++#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y))
++#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y))
++#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : \
++ ((x) > (max)) ? (max) : (x))
++#define gcmABS(x) (((x) < 0) ? -(x) : (x))
++#define gcmNEG(x) (((x) < 0) ? (x) : -(x))
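++
++/* Illustrative note, not vendor code: like most function-style macros, the
++   helpers above evaluate their arguments more than once, so arguments with
++   side effects should be avoided. */
++#if 0
++    gctUINT32 a = gcmCEIL(2.25f);          /* 3                                */
++    gctUINT32 b = gcmCEIL(2.0f);           /* 2                                */
++    gctINT    c = gcmCLAMP(150, 0, 100);   /* 100                              */
++    gctINT    d = gcmMIN(x++, y);          /* with int x, y: x++ may run twice */
++#endif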
++
++/*******************************************************************************
++**
++** gcmPTR2INT
++**
++** Convert a pointer to an integer value.
++**
++** ARGUMENTS:
++**
++** p Pointer value.
++*/
++#if defined(_WIN32) || (defined(__LP64__) && __LP64__)
++# define gcmPTR2INT(p) \
++ ( \
++ (gctUINT32) (gctUINT64) (p) \
++ )
++#else
++# define gcmPTR2INT(p) \
++ ( \
++ (gctUINT32) (p) \
++ )
++#endif
++
++/*******************************************************************************
++**
++** gcmINT2PTR
++**
++** Convert an integer value into a pointer.
++**
++** ARGUMENTS:
++**
++** v Integer value.
++*/
++#ifdef __LP64__
++# define gcmINT2PTR(i) \
++ ( \
++ (gctPOINTER) (gctINT64) (i) \
++ )
++#else
++# define gcmINT2PTR(i) \
++ ( \
++ (gctPOINTER) (i) \
++ )
++#endif
++
++/*******************************************************************************
++**
++** gcmOFFSETOF
++**
++** Compute the byte offset of a field inside a structure.
++**
++** ARGUMENTS:
++**
++** s Structure name.
++** field Field name.
++*/
++#define gcmOFFSETOF(s, field) \
++( \
++ gcmPTR2INT(& (((struct s *) 0)->field)) \
++)
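++
++/* Illustrative sketch, not vendor code: gcmOFFSETOF takes the struct tag (the
++   name after "struct"), so for the gcsCOUNT_STRING type declared earlier in
++   this header the offset of its String member is: */
++#if 0
++    gctUINT32 offset = gcmOFFSETOF(_gcsCOUNT_STRING, String);
++    /* Equals sizeof(gctSIZE_T): 4 on 32-bit builds, 8 on LP64 builds. */
++#endif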
++
++#define gcmSWAB32(x) ((gctUINT32)( \
++ (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \
++ (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24)))
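++
++/* Illustrative note, not vendor code: gcmSWAB32 reverses the byte order of a
++   32-bit value, e.g. gcmSWAB32(0x12345678) yields 0x78563412. */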
++
++/*******************************************************************************
++***** Database ****************************************************************/
++
++typedef struct _gcsDATABASE_COUNTERS
++{
++ /* Number of currently allocated bytes. */
++ gctUINT64 bytes;
++
++ /* Maximum number of bytes allocated (memory footprint). */
++ gctUINT64 maxBytes;
++
++ /* Total number of bytes allocated. */
++ gctUINT64 totalBytes;
++}
++gcsDATABASE_COUNTERS;
++
++typedef struct _gcuDATABASE_INFO
++{
++ /* Counters. */
++ gcsDATABASE_COUNTERS counters;
++
++ /* Time value. */
++ gctUINT64 time;
++}
++gcuDATABASE_INFO;
++
++/*******************************************************************************
++***** Frame database **********************************************************/
++
++/* gcsHAL_FRAME_INFO */
++typedef struct _gcsHAL_FRAME_INFO
++{
++ /* Current timer tick. */
++ OUT gctUINT64 ticks;
++
++ /* Bandwidth counters. */
++ OUT gctUINT readBytes8[8];
++ OUT gctUINT writeBytes8[8];
++
++ /* Counters. */
++ OUT gctUINT cycles[8];
++ OUT gctUINT idleCycles[8];
++ OUT gctUINT mcCycles[8];
++ OUT gctUINT readRequests[8];
++ OUT gctUINT writeRequests[8];
++
++ /* FE counters. */
++ OUT gctUINT drawCount;
++ OUT gctUINT vertexOutCount;
++ OUT gctUINT vertexMissCount;
++
++ /* 3D counters. */
++ OUT gctUINT vertexCount;
++ OUT gctUINT primitiveCount;
++ OUT gctUINT rejectedPrimitives;
++ OUT gctUINT culledPrimitives;
++ OUT gctUINT clippedPrimitives;
++ OUT gctUINT droppedPrimitives;
++ OUT gctUINT frustumClippedPrimitives;
++ OUT gctUINT outPrimitives;
++ OUT gctUINT inPrimitives;
++ OUT gctUINT culledQuadCount;
++ OUT gctUINT totalQuadCount;
++ OUT gctUINT quadCount;
++ OUT gctUINT totalPixelCount;
++
++ /* PE counters. */
++ OUT gctUINT colorKilled[8];
++ OUT gctUINT colorDrawn[8];
++ OUT gctUINT depthKilled[8];
++ OUT gctUINT depthDrawn[8];
++
++ /* Shader counters. */
++ OUT gctUINT shaderCycles;
++ OUT gctUINT vsInstructionCount;
++ OUT gctUINT vsTextureCount;
++ OUT gctUINT vsBranchCount;
++ OUT gctUINT vsVertices;
++ OUT gctUINT psInstructionCount;
++ OUT gctUINT psTextureCount;
++ OUT gctUINT psBranchCount;
++ OUT gctUINT psPixels;
++
++ /* Texture counters. */
++ OUT gctUINT bilinearRequests;
++ OUT gctUINT trilinearRequests;
++ OUT gctUINT txBytes8[2];
++ OUT gctUINT txHitCount;
++ OUT gctUINT txMissCount;
++}
++gcsHAL_FRAME_INFO;
++
++typedef enum _gcePATCH_ID
++{
++ gcePATCH_UNKNOWN = 0xFFFFFFFF,
++
++ /* Benchmark list*/
++ gcePATCH_GLB11 = 0x0,
++ gcePATCH_GLB21,
++ gcePATCH_GLB25,
++ gcePATCH_GLB27,
++
++ gcePATCH_BM21,
++ gcePATCH_MM,
++ gcePATCH_MM06,
++ gcePATCH_MM07,
++ gcePATCH_QUADRANT,
++ gcePATCH_ANTUTU,
++ gcePATCH_SMARTBENCH,
++ gcePATCH_JPCT,
++ gcePATCH_NENAMARK,
++ gcePATCH_NENAMARK2,
++ gcePATCH_NEOCORE,
++ gcePATCH_GLB,
++ gcePATCH_GB,
++ gcePATCH_RTESTVA,
++ gcePATCH_BMX,
++ gcePATCH_BMGUI,
++
++ /* Game list */
++ gcePATCH_NBA2013,
++ gcePATCH_BARDTALE,
++ gcePATCH_BUSPARKING3D,
++ gcePATCH_FISHBOODLE,
++ gcePATCH_SUBWAYSURFER,
++ gcePATCH_HIGHWAYDRIVER,
++ gcePATCH_PREMIUM,
++ gcePATCH_RACEILLEGAL,
++ gcePATCH_BLABLA,
++ gcePATCH_MEGARUN,
++ gcePATCH_GALAXYONFIRE2,
++ gcePATCH_GLOFTR3HM,
++ gcePATCH_GLOFTSXHM,
++ gcePATCH_GLOFTF3HM,
++ gcePATCH_GLOFTGANG,
++ gcePATCH_XRUNNER,
++ gcePATCH_WP,
++ gcePATCH_DEVIL,
++ gcePATCH_HOLYARCH,
++ gcePATCH_MUSE,
++ gcePATCH_SG,
++ gcePATCH_SIEGECRAFT,
++ gcePATCH_CARCHALLENGE,
++ gcePATCH_HEROESCALL,
++ gcePATCH_MONOPOLY,
++ gcePATCH_CTGL20,
++ gcePATCH_FIREFOX,
++ gcePATCH_CHORME,
++ gcePATCH_DUOKANTV,
++ gcePATCH_TESTAPP,
++ gcePATCH_GOOGLEEARTH,
++
++ /* Count enum*/
++ gcePATCH_COUNT,
++}
++gcePATCH_ID;
++
++#if gcdLINK_QUEUE_SIZE
++typedef struct _gckLINKDATA * gckLINKDATA;
++struct _gckLINKDATA
++{
++ gctUINT32 start;
++ gctUINT32 end;
++ gctINT pid;
++};
++
++typedef struct _gckLINKQUEUE * gckLINKQUEUE;
++struct _gckLINKQUEUE
++{
++ struct _gckLINKDATA data[gcdLINK_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_types_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_version.h 2015-05-01 14:57:59.543427001 -0500
+@@ -0,0 +1,37 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_version_h_
++#define __gc_hal_version_h_
++
++#define gcvVERSION_MAJOR 4
++
++#define gcvVERSION_MINOR 6
++
++#define gcvVERSION_PATCH 9
++
++#define gcvVERSION_BUILD 9754
++
++#define gcvVERSION_DATE __DATE__
++
++#define gcvVERSION_TIME __TIME__
++
++#endif /* __gc_hal_version_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/kernel/inc/gc_hal_vg.h 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,913 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_vg_h_
++#define __gc_hal_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++
++#if gcdENABLE_VG
++
++/* Thread routine type. */
++#if defined(LINUX)
++ typedef gctINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#elif defined(WIN32)
++ typedef gctUINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE __stdcall
++#elif defined(__QNXNTO__)
++ typedef void * gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#endif
++
++typedef gctTHREADFUNCRESULT (gctTHREADFUNCTYPE * gctTHREADFUNC) (
++ gctTHREADFUNCPARAMETER ThreadParameter
++ );
++
++
++#if defined(gcvDEBUG)
++# undef gcvDEBUG
++#endif
++
++#define gcdFORCE_DEBUG 0
++#define gcdFORCE_MESSAGES 0
++
++
++#if DBG || defined(DEBUG) || defined(_DEBUG) || gcdFORCE_DEBUG
++# define gcvDEBUG 1
++#else
++# define gcvDEBUG 0
++#endif
++
++#define _gcmERROR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++
++#define gcmERROR_RETURN(func) _gcmERROR_RETURN(gcm, func)
++
++#define gcmLOG_LOCATION()
++
++#define gcmkIS_ERROR(status) (status < 0)
++
++#define gcmALIGNDOWN(n, align) \
++( \
++ (n) & ~((align) - 1) \
++)
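++
++/* Example (illustrative only, assumes 'align' is a power of two):
++   gcmALIGNDOWN(0x1234, 0x100) == 0x1200. */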
++
++#define gcmIS_VALID_INDEX(Index, Array) \
++ (((gctUINT) (Index)) < gcmCOUNTOF(Array))
++
++
++#define gcmIS_NAN(x) \
++( \
++ ((* (gctUINT32_PTR) &(x)) & 0x7FFFFFFF) == 0x7FFFFFFF \
++)
++
++#define gcmLERP(v1, v2, w) \
++ ((v1) * (w) + (v2) * (1.0f - (w)))
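++
++/* Example (illustrative only): gcmLERP(10.0f, 20.0f, 0.25f) ==
++   10.0f * 0.25f + 20.0f * 0.75f == 17.5f; 'w' weights the first operand. */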
++
++#define gcmINTERSECT(Start1, Start2, Length) \
++ (gcmABS((Start1) - (Start2)) < (Length))
++
++/*******************************************************************************
++**
++** gcmERR_GOTO
++**
++**      Prints a message and jumps to the 'ErrorHandler' label on error.
++**
++** ASSUMPTIONS:
++**
++**      'status' variable of gceSTATUS type must be defined.
++**      An 'ErrorHandler' label must be present in the calling function.
++**
++** ARGUMENTS:
++**
++** Function
++** Function to evaluate.
++*/
++
++#define gcmERR_GOTO(Function) \
++ status = Function; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmERR_GOTO: status=%d @ line=%d in function %s.\n", \
++ status, __LINE__, __FUNCTION__ \
++ ); \
++ goto ErrorHandler; \
++ }
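++
++/* Illustrative usage sketch only (not part of the driver): it assumes a local
++   'status' variable and an 'ErrorHandler' label, as gcmERR_GOTO requires. The
++   call gcoOS_Allocate is used merely as an example of a gceSTATUS-returning
++   function and may not match the exact API; the block is compiled out. */
++#if 0
++static gceSTATUS _ExampleCaller(gcoOS Os)
++{
++    gceSTATUS status;
++    gctPOINTER buffer = gcvNULL;
++
++    /* On failure, gcmERR_GOTO logs the error and jumps to ErrorHandler. */
++    gcmERR_GOTO(gcoOS_Allocate(Os, 64, &buffer));
++
++    return gcvSTATUS_OK;
++
++ErrorHandler:
++    return status;
++}
++#endif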
++
++#if gcvDEBUG || gcdFORCE_MESSAGES
++# define gcmVERIFY_BOOLEAN(Expression) \
++ gcmASSERT( \
++ ( (Expression) == gcvFALSE ) || \
++ ( (Expression) == gcvTRUE ) \
++ )
++#else
++# define gcmVERIFY_BOOLEAN(Expression)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDFIT
++**
++** Verify whether the value fits in the field.
++**
++** ARGUMENTS:
++**
++**      reg     Name of register.
++**      field   Name of field within register.
++**      value   Value for field.
++*/
++#define gcmVERIFYFIELDFIT(reg, field, value) \
++ gcmASSERT( \
++ (value) <= gcmFIELDMAX(reg, field) \
++ )
++/*******************************************************************************
++**
++** gcmFIELDMAX
++**
++** Get field maximum value.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMAX(reg, field) \
++( \
++ (gctUINT32) \
++ ( \
++ (__gcmGETSIZE(reg##_##field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg##_##field))) \
++ ) \
++)
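++
++/* Example (illustrative only): for a 12-bit field gcmFIELDMAX yields 0xFFF;
++   for a full 32-bit field the special case yields ~0 (0xFFFFFFFF). */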
++
++
++/* ANSI C does not have the 'f' functions; define replacements here. */
++#define gcmSINF(x) ((gctFLOAT) sin(x))
++#define gcmCOSF(x) ((gctFLOAT) cos(x))
++#define gcmASINF(x) ((gctFLOAT) asin(x))
++#define gcmACOSF(x) ((gctFLOAT) acos(x))
++#define gcmSQRTF(x) ((gctFLOAT) sqrt(x))
++#define gcmFABSF(x) ((gctFLOAT) fabs(x))
++#define gcmFMODF(x, y) ((gctFLOAT) fmod((x), (y)))
++#define gcmCEILF(x) ((gctFLOAT) ceil(x))
++#define gcmFLOORF(x) ((gctFLOAT) floor(x))
++
++
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Integer constants. */
++#define gcvMAX_POS_INT ((gctINT) 0x7FFFFFFF)
++#define gcvMAX_NEG_INT ((gctINT) 0x80000000)
++
++/* Float constants. */
++#define gcvMAX_POS_FLOAT ((gctFLOAT) 3.4028235e+038)
++#define gcvMAX_NEG_FLOAT ((gctFLOAT) -3.4028235e+038)
++
++/******************************************************************************\
++***************************** Miscellaneous Macro ******************************
++\******************************************************************************/
++
++#define gcmKB2BYTES(Kilobyte) \
++( \
++ (Kilobyte) << 10 \
++)
++
++#define gcmMB2BYTES(Megabyte) \
++( \
++ (Megabyte) << 20 \
++)
++
++#define gcmMAT(Matrix, Row, Column) \
++( \
++ (Matrix) [(Row) * 3 + (Column)] \
++)
++
++#define gcmMAKE2CHAR(Char1, Char2) \
++( \
++ ((gctUINT16) (gctUINT8) (Char1) << 0) | \
++ ((gctUINT16) (gctUINT8) (Char2) << 8) \
++)
++
++#define gcmMAKE4CHAR(Char1, Char2, Char3, Char4) \
++( \
++ ((gctUINT32)(gctUINT8) (Char1) << 0) | \
++ ((gctUINT32)(gctUINT8) (Char2) << 8) | \
++ ((gctUINT32)(gctUINT8) (Char3) << 16) | \
++ ((gctUINT32)(gctUINT8) (Char4) << 24) \
++)
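++
++/* Example (illustrative only): gcmMAKE4CHAR('A', 'B', 'C', 'D') == 0x44434241,
++   i.e. 'A' (0x41) is packed into the least significant byte. */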
++
++/* Some platforms need to fix the physical address for HW to access. */
++#define gcmFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++#define gcmkFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++/******************************************************************************\
++****************************** Kernel Debug Macro ******************************
++\******************************************************************************/
++
++/* Set signal to signaled state for specified process. */
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----------------------------- Semaphore Object -----------------------------*/
++
++/* Increment the value of a semaphore. */
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++/* Decrement the value of a semaphore (waiting might occur). */
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- Thread Object ------------------------------*/
++
++/* Start a thread. */
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ );
++
++/* Stop a thread. */
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++/* Verify whether the thread is still running. */
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++
++/* Construct a new gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ );
++
++/* Destroy a gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ );
++
++/* Allocate linear video memory. */
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++gceSTATUS
++gckOS_MapReservedMemoryToKernel(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctINT Bytes,
++ IN OUT gctPOINTER *Virtual
++ );
++
++gceSTATUS
++gckOS_UnmapReservedMemoryFromKernel(
++ IN gctPOINTER Virtual
++ );
++#endif
++
++/******************************************************************************\
++******************************* gckVGHARDWARE Object ******************************
++\******************************************************************************/
++
++/* Construct a new gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ );
++
++/* Destroy a gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures1
++ );
++
++/* Convert an API format. */
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ );
++
++/* Split a hardware specific address into a pool and an offset. */
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Align size to tile boundary. */
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckVGHARDWARE_Flush(
++ IN gckVGHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ );
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++/******************************************************************************\
++*************************** Command Buffer Structures **************************
++\******************************************************************************/
++
++/* Vacant command buffer marker. */
++#define gcvVACANT_BUFFER ((gcsCOMPLETION_SIGNAL_PTR) (1))
++
++/* Command buffer header. */
++typedef struct _gcsCMDBUFFER * gcsCMDBUFFER_PTR;
++typedef struct _gcsCMDBUFFER
++{
++ /* Pointer to the completion signal. */
++ gcsCOMPLETION_SIGNAL_PTR completion;
++
++    /* The user sets this to the node of the container buffer within which
++ this particular command buffer resides. The kernel sets this to the
++ node of the internally allocated buffer. */
++ gctUINT64 node;
++
++ /* Command buffer hardware address. */
++ gctUINT32 address;
++
++ /* The offset of the buffer from the beginning of the header. */
++ gctUINT32 bufferOffset;
++
++ /* Size of the area allocated for the data portion of this particular
++ command buffer (headers and tail reserves are excluded). */
++ gctSIZE_T size;
++
++ /* Offset into the buffer [0..size]; reflects exactly how much data has
++ been put into the command buffer. */
++ gctUINT offset;
++
++ /* The number of command units in the buffer for the hardware to
++ execute. */
++ gctSIZE_T dataCount;
++
++ /* MANAGED BY : user HAL (gcoBUFFER object).
++ USED BY : user HAL (gcoBUFFER object).
++ Points to the immediate next allocated command buffer. */
++ gcsCMDBUFFER_PTR nextAllocated;
++
++ /* MANAGED BY : user layers (HAL and drivers).
++ USED BY : kernel HAL (gcoBUFFER object).
++       Points to the next subbuffer, if any. A family of subbuffers is chained
++       together and is meant to be executed inseparably as a unit, meaning
++       that context switching cannot occur while a chain of subbuffers is being
++       executed. */
++ gcsCMDBUFFER_PTR nextSubBuffer;
++}
++gcsCMDBUFFER;
++
++/* Command queue element. */
++typedef struct _gcsVGCMDQUEUE
++{
++ /* Pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Dynamic vs. static command buffer state. */
++ gctBOOL dynamic;
++}
++gcsVGCMDQUEUE;
++
++/* Context map entry. */
++typedef struct _gcsVGCONTEXT_MAP
++{
++ /* State index. */
++ gctUINT32 index;
++
++ /* New state value. */
++ gctUINT32 data;
++
++ /* Points to the next entry in the mod list. */
++ gcsVGCONTEXT_MAP_PTR next;
++}
++gcsVGCONTEXT_MAP;
++
++/* gcsVGCONTEXT structure that holds the current context. */
++typedef struct _gcsVGCONTEXT
++{
++ /* Context ID. */
++ gctUINT64 id;
++
++    /* State caching enable flag. */
++ gctBOOL stateCachingEnabled;
++
++ /* Current pipe. */
++ gctUINT32 currentPipe;
++
++ /* State map/mod buffer. */
++ gctSIZE_T mapFirst;
++ gctSIZE_T mapLast;
++#ifdef __QNXNTO__
++ gctSIZE_T mapContainerSize;
++#endif
++ gcsVGCONTEXT_MAP_PTR mapContainer;
++ gcsVGCONTEXT_MAP_PTR mapPrev;
++ gcsVGCONTEXT_MAP_PTR mapCurr;
++ gcsVGCONTEXT_MAP_PTR firstPrevMap;
++ gcsVGCONTEXT_MAP_PTR firstCurrMap;
++
++ /* Main context buffer. */
++ gcsCMDBUFFER_PTR header;
++ gctUINT32_PTR buffer;
++
++ /* Completion signal. */
++ gctHANDLE process;
++ gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsVGCONTEXT;
++
++/* User space task header. */
++typedef struct _gcsTASK * gcsTASK_PTR;
++typedef struct _gcsTASK
++{
++ /* Pointer to the next task for the same interrupt in user space. */
++ gcsTASK_PTR next;
++
++ /* Size of the task data that immediately follows the structure. */
++ gctUINT size;
++
++ /* Task data starts here. */
++ /* ... */
++}
++gcsTASK;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_ENTRY * gcsTASK_MASTER_ENTRY_PTR;
++typedef struct _gcsTASK_MASTER_ENTRY
++{
++ /* Pointers to the head and to the tail of the task chain. */
++ gcsTASK_PTR head;
++ gcsTASK_PTR tail;
++}
++gcsTASK_MASTER_ENTRY;
++
++/* User space task master table. */
++typedef struct _gcsTASK_MASTER_TABLE
++{
++ /* Table with one entry per block. */
++ gcsTASK_MASTER_ENTRY table[gcvBLOCK_COUNT];
++
++    /* The total number of tasks scheduled. */
++ gctUINT count;
++
++ /* The total size of event data in bytes. */
++ gctUINT size;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsTASK_MASTER_TABLE;
++
++/******************************************************************************\
++***************************** gckVGINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVGINTERRUPT * gckVGINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckVGKERNEL Kernel
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ );
++
++#ifndef __QNXNTO__
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++#else
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ );
++
++#endif
++
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++typedef struct _gckVGCOMMAND * gckVGCOMMAND;
++
++/* Construct a new gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ );
++
++/* Destroy a gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ IN gckVGCOMMAND Command
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/* Allocate a command queue. */
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ );
++
++/* Release memory held by the command queue. */
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Schedule the command queue for execution. */
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ );
++
++/******************************************************************************\
++********************************* gckVGMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckVGMMU * gckVGMMU;
++
++/* Construct a new gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckVGMMU * Mmu
++ );
++
++/* Destroy a gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++/* Flush MMU */
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ );
++
++#endif /* gcdENABLE_VG */
++
++#ifdef __cplusplus
++} /* extern "C" */
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,795 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++#include <linux/init.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++#ifdef MODVERSIONS
++#include <linux/modversions.h>
++#endif
++#include <linux/stddef.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/mutex.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <linux/completion.h>
++#include "gc_hal_kernel_linux.h"
++
++/*
++    Prerequisite:
++
++    1) The debugfs feature must be enabled in the kernel.
++        1.a) When building the uImage, run "make menuconfig" and enable debugfs
++        under the "Kernel hacking" section of the menu.
++
++    HOW TO USE:
++    1) Insert the driver with the logFileSize option, Ex: insmod galcore.ko ...... logFileSize=10240
++       This gives a circular buffer of 10 MB.
++
++    2) After inserting the driver, the debug file system is usually mounted under /sys/kernel/debug/
++
++        2.a) If debugfs is not mounted, run "mount -t debugfs none /sys/kernel/debug"
++
++    3) To read what is being printed to the debugfs file:
++        Ex: cat /sys/kernel/debug/gpu/galcore_trace
++
++    4) To write into the debugfs file from user space:
++        Ex: echo "hello" > /sys/kernel/debug/gpu/galcore_trace
++
++    5) To write into debugfs from the kernel side, use the function gckDebugFileSystemPrint.
++
++
++ USECASE Kernel Dump:
++
++ 1) Go to /hal/inc/gc_hal_options.h, and enable the following flags:
++ - # define gcdDUMP 1
++ - # define gcdDUMP_IN_KERNEL 1
++ - # define gcdDUMP_COMMAND 1
++
++ 2) Go to /hal/kernel/gc_hal_kernel_command.c and disable the following flag
++ -#define gcdSIMPLE_COMMAND_DUMP 0
++
++ 3) Compile the driver
++ 4) insmod it with the logFileSize option
++ 5) Run an application
++ 6) You can get the dump by cat /sys/kernel/debug/gpu/galcore_trace
++
++ */
++
++/**/
++typedef va_list gctDBGARGS ;
++#define gcmkARGS_START(argument, pointer) va_start(argument, pointer)
++#define gcmkARGS_END(argument) va_end(argument)
++
++#define gcmkDBGFSPRINT(ArgumentSize, Message) \
++ { \
++ gctDBGARGS __arguments__; \
++ gcmkARGS_START(__arguments__, Message); \
++ _DebugFSPrint(ArgumentSize, Message, __arguments__);\
++ gcmkARGS_END(__arguments__); \
++ }
++
++/*Debug File System Node Struct*/
++struct _gcsDebugFileSystemNode
++{
++ /*wait queues for read and write operations*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ wait_queue_head_t read_q , write_q ;
++#else
++ struct wait_queue *read_q , *write_q ;
++#endif
++ struct dentry *parent ; /*parent directory*/
++ struct dentry *filen ; /*filename*/
++ struct semaphore sem ; /* mutual exclusion semaphore */
++ char *data ; /* The circular buffer data */
++ int size ; /* Size of the buffer pointed to by 'data' */
++ int refcount ; /* Files that have this buffer open */
++ int read_point ; /* Offset in circ. buffer of oldest data */
++ int write_point ; /* Offset in circ. buffer of newest data */
++ int offset ; /* Byte number of read_point in the stream */
++ struct _gcsDebugFileSystemNode *next ;
++} ;
++
++/* amount of data in the queue */
++#define gcmkNODE_QLEN(node) ( (node)->write_point >= (node)->read_point ? \
++ (node)->write_point - (node)->read_point : \
++ (node)->size - (node)->read_point + (node)->write_point)
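++
++/* Example (illustrative only): with size = 16, read_point = 12 and
++   write_point = 4, the queue has wrapped, so gcmkNODE_QLEN = 16 - 12 + 4 = 8
++   bytes of data. */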
++
++/* byte number of the last byte in the queue */
++#define gcmkNODE_FIRST_EMPTY_BYTE(node) ((node)->offset + gcmkNODE_QLEN(node))
++
++/*Synchronization primitives*/
++#define gcmkNODE_READQ(node) (&((node)->read_q))
++#define gcmkNODE_WRITEQ(node) (&((node)->write_q))
++#define gcmkNODE_SEM(node) (&((node)->sem))
++
++/*Utilities*/
++#define gcmkMIN(x, y) ((x) < (y) ? (x) : (y))
++
++/*Debug File System Struct*/
++typedef struct _gcsDebugFileSystem
++{
++ gcsDebugFileSystemNode* linkedlist ;
++ gcsDebugFileSystemNode* currentNode ;
++ int isInited ;
++} gcsDebugFileSystem ;
++
++
++/*debug file system*/
++static gcsDebugFileSystem gc_dbgfs ;
++
++
++
++/*******************************************************************************
++ **
++ ** READ & WRITE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _ReadFromNode
++ **
++ **    1) Reads bytes out of a circular buffer with wraparound.
++ **    2) Returns a caddr_t pointer to the data read, which the caller must free.
++ **    3) Length is (a pointer to) the number of bytes to be read; this function
++ **       sets it to the number of bytes actually returned.
++ **
++ *******************************************************************************/
++static caddr_t
++_ReadFromNode (
++ gcsDebugFileSystemNode* Node ,
++ size_t *Length ,
++ loff_t *Offset
++ )
++{
++ caddr_t retval ;
++ int bytes_copied = 0 , n , start_point , remaining ;
++
++ /* is the user trying to read data that has already scrolled off? */
++ if ( *Offset < Node->offset )
++ {
++ *Offset = Node->offset ;
++ }
++
++ /* is the user trying to read past EOF? */
++ if ( *Offset >= gcmkNODE_FIRST_EMPTY_BYTE ( Node ) )
++ {
++ return NULL ;
++ }
++
++ /* find the smaller of the total bytes we have available and what
++ * the user is asking for */
++
++ *Length = gcmkMIN ( *Length , gcmkNODE_FIRST_EMPTY_BYTE ( Node ) - *Offset ) ;
++
++ remaining = * Length ;
++
++ /* figure out where to start based on user's Offset */
++ start_point = Node->read_point + ( *Offset - Node->offset ) ;
++
++ start_point = start_point % Node->size ;
++
++ /* allocate memory to return */
++ if ( ( retval = kmalloc ( sizeof (char ) * remaining , GFP_KERNEL ) ) == NULL )
++ return NULL ;
++
++ /* copy the (possibly noncontiguous) data to our buffer */
++ while ( remaining )
++ {
++ n = gcmkMIN ( remaining , Node->size - start_point ) ;
++ memcpy ( retval + bytes_copied , Node->data + start_point , n ) ;
++ bytes_copied += n ;
++ remaining -= n ;
++ start_point = ( start_point + n ) % Node->size ;
++ }
++
++ /* advance user's file pointer */
++ *Offset += * Length ;
++
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ ** _WriteToNode
++ **
++ **     1) Writes to a circular buffer with wraparound.
++ **     2) In case of an overflow, it overwrites the oldest unread data.
++ **
++ *********************************************************************************/
++static void
++_WriteToNode (
++ gcsDebugFileSystemNode* Node ,
++ caddr_t Buf ,
++ int Length
++ )
++{
++ int bytes_copied = 0 ;
++ int overflow = 0 ;
++ int n ;
++
++ if ( Length + gcmkNODE_QLEN ( Node ) >= ( Node->size - 1 ) )
++ {
++ overflow = 1 ;
++
++        /* In case of overflow, figure out where the new buffer will
++         * begin. We start by figuring out where the current buffer ENDS:
++         * Node->offset + gcmkNODE_QLEN(Node). We then advance the end-offset
++         * by the Length of the current write, and work backwards by the
++         * buffer size to find the oldest data that will remain
++         * unoverwritten. */
++ Node->offset = Node->offset + gcmkNODE_QLEN ( Node ) + Length
++ - Node->size + 1 ;
++ }
++
++ while ( Length )
++ {
++ /* how many contiguous bytes are available from the write point to
++ * the end of the circular buffer? */
++ n = gcmkMIN ( Length , Node->size - Node->write_point ) ;
++ memcpy ( Node->data + Node->write_point , Buf + bytes_copied , n ) ;
++ bytes_copied += n ;
++ Length -= n ;
++ Node->write_point = ( Node->write_point + n ) % Node->size ;
++ }
++
++ /* if there is an overflow, reset the read point to read whatever is
++ * the oldest data that we have, that has not yet been
++ * overwritten. */
++ if ( overflow )
++ {
++ Node->read_point = ( Node->write_point + 1 ) % Node->size ;
++ }
++}
++
++
++/*******************************************************************************
++ **
++ ** PRINTING UTILITY (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _GetArgumentSize
++ **
++ **
++ *******************************************************************************/
++static gctINT
++_GetArgumentSize (
++ IN gctCONST_STRING Message
++ )
++{
++ gctINT i , count ;
++
++ for ( i = 0 , count = 0 ; Message[i] ; i += 1 )
++ {
++ if ( Message[i] == '%' )
++ {
++ count += 1 ;
++ }
++ }
++ return count * sizeof (unsigned int ) ;
++}
++
++/*******************************************************************************
++ **
++ ** _AppendString
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_AppendString (
++ IN gcsDebugFileSystemNode* Node ,
++ IN gctCONST_STRING String ,
++ IN int Length
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( Length , Node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ return - ENOMEM ;
++
++ /* copy into our temp buffer */
++ memcpy ( message , String , n ) ;
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( Node , message , n ) ;
++ kfree ( message ) ;
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSPrint
++ **
++ **
++ *******************************************************************************/
++static void
++_DebugFSPrint (
++ IN unsigned int ArgumentSize ,
++ IN const char* Message ,
++ IN gctDBGARGS Arguments
++
++ )
++{
++ char buffer[MAX_LINE_SIZE] ;
++ int len ;
++ down ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ len = vsnprintf ( buffer , sizeof (buffer ) , Message , *( va_list * ) & Arguments ) ;
++ buffer[len] = '\0' ;
++
++ /* Add end-of-line if missing. */
++ if ( buffer[len - 1] != '\n' )
++ {
++ buffer[len ++] = '\n' ;
++ buffer[len] = '\0' ;
++ }
++ _AppendString ( gc_dbgfs.currentNode , buffer , len ) ;
++ up ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ wake_up_interruptible ( gcmkNODE_READQ ( gc_dbgfs.currentNode ) ) ; /* blocked in read*/
++}
++
++/*******************************************************************************
++ **
++ ** LINUX SYSTEM FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** find the vivlog structure associated with an inode.
++ ** returns a pointer to the structure if found, NULL if not found
++ **
++ *******************************************************************************/
++static gcsDebugFileSystemNode*
++_GetNodeInfo (
++ IN struct inode *Inode
++ )
++{
++ gcsDebugFileSystemNode* node ;
++
++ if ( Inode == NULL )
++ return NULL ;
++
++ for ( node = gc_dbgfs.linkedlist ; node != NULL ; node = node->next )
++ if ( node->filen->d_inode->i_ino == Inode->i_ino )
++ return node ;
++
++ return NULL ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSRead
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSRead (
++ struct file *file ,
++ char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ int retval ;
++ caddr_t data_to_return ;
++ gcsDebugFileSystemNode* node ;
++    /* get the metadata about this log */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ printk ( "debugfs_read: record not found\n" ) ;
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* wait until there's data available (unless we do nonblocking reads) */
++ while ( *offset >= gcmkNODE_FIRST_EMPTY_BYTE ( node ) )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ if ( file->f_flags & O_NONBLOCK )
++ {
++ return - EAGAIN ;
++ }
++ if ( wait_event_interruptible ( ( *( gcmkNODE_READQ ( node ) ) ) , ( *offset < gcmkNODE_FIRST_EMPTY_BYTE ( node ) ) ) )
++ {
++ return - ERESTARTSYS ; /* signal: tell the fs layer to handle it */
++ }
++ /* otherwise loop, but first reacquire the lock */
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ }
++ data_to_return = _ReadFromNode ( node , &length , offset ) ;
++ if ( data_to_return == NULL )
++ {
++ retval = 0 ;
++ goto unlock ;
++ }
++ if ( copy_to_user ( buffer , data_to_return , length ) > 0 )
++ {
++ retval = - EFAULT ;
++ }
++ else
++ {
++ retval = length ;
++ }
++ kfree ( data_to_return ) ;
++unlock:
++ up ( gcmkNODE_SEM ( node ) ) ;
++ wake_up_interruptible ( gcmkNODE_WRITEQ ( node ) ) ;
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ **_DebugFSWrite
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSWrite (
++ struct file *file ,
++ const char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++ gcsDebugFileSystemNode*node ;
++
++ /* get the metadata about this log */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( length , node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ return - ENOMEM ;
++ }
++
++ /* copy into our temp buffer */
++ if ( copy_from_user ( message , buffer , n ) > 0 )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ kfree ( message ) ;
++ return - EFAULT ;
++ }
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( node , message , n ) ;
++
++ kfree ( message ) ;
++ up ( gcmkNODE_SEM ( node ) ) ;
++
++ /* wake up any readers that might be waiting for the data. we call
++ * schedule in the vague hope that a reader will run before the
++ * writer's next write, to avoid losing data. */
++ wake_up_interruptible ( gcmkNODE_READQ ( node ) ) ;
++
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** File Operations Table
++ **
++ *******************************************************************************/
++static const struct file_operations debugfs_operations = {
++ .owner = THIS_MODULE ,
++ .read = _DebugFSRead ,
++ .write = _DebugFSWrite ,
++} ;
++
++/*******************************************************************************
++ **
++ ** INTERFACE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemIsEnabled
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++
++gctINT
++gckDebugFileSystemIsEnabled ( void )
++{
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemInitialize
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDebugFileSystemInitialize ( void )
++{
++ if ( ! gc_dbgfs.isInited )
++ {
++ gc_dbgfs.linkedlist = gcvNULL ;
++ gc_dbgfs.currentNode = gcvNULL ;
++ gc_dbgfs.isInited = 1 ;
++ }
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemTerminate
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDebugFileSystemTerminate ( void )
++{
++ gcsDebugFileSystemNode * next = gcvNULL ;
++ gcsDebugFileSystemNode * temp = gcvNULL ;
++ if ( gc_dbgfs.isInited )
++ {
++ temp = gc_dbgfs.linkedlist ;
++ while ( temp != gcvNULL )
++ {
++ next = temp->next ;
++ gckDebugFileSystemFreeNode ( temp ) ;
++ kfree ( temp ) ;
++ temp = next ;
++ }
++ gc_dbgfs.isInited = 0 ;
++ }
++ return 0 ;
++}
++
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemCreateNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ **      Node
++ **          Pointer to a variable receiving the gcsDebugFileSystemNode object
++ **          pointer on success.
++ *********************************************************************************/
++
++gctINT
++gckDebugFileSystemCreateNode (
++ IN gctINT SizeInKB ,
++ IN gctCONST_STRING ParentName ,
++ IN gctCONST_STRING NodeName ,
++ OUT gcsDebugFileSystemNode **Node
++ )
++{
++ gcsDebugFileSystemNode*node ;
++ /* allocate space for our metadata and initialize it */
++ if ( ( node = kmalloc ( sizeof (gcsDebugFileSystemNode ) , GFP_KERNEL ) ) == NULL )
++ goto struct_malloc_failed ;
++
++ /*Zero it out*/
++ memset ( node , 0 , sizeof (gcsDebugFileSystemNode ) ) ;
++
++ /*Init the sync primitives*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_READQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_READQ ( node ) ) ;
++#endif
++
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_WRITEQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_WRITEQ ( node ) ) ;
++#endif
++ sema_init ( gcmkNODE_SEM ( node ) , 1 ) ;
++ /*End the sync primitives*/
++
++
++ /* figure out how much of a buffer this should be and allocate the buffer */
++ node->size = 1024 * SizeInKB ;
++ if ( ( node->data = ( char * ) vmalloc ( sizeof (char ) * node->size ) ) == NULL )
++ goto data_malloc_failed ;
++
++ /*creating the debug file system*/
++ node->parent = debugfs_create_dir ( ParentName , NULL ) ;
++
++ /*creating the file*/
++ node->filen = debugfs_create_file ( NodeName , S_IRUGO | S_IWUSR , node->parent , NULL ,
++ &debugfs_operations ) ;
++
++ /* add it to our linked list */
++ node->next = gc_dbgfs.linkedlist ;
++ gc_dbgfs.linkedlist = node ;
++
++ /* pass the struct back */
++ *Node = node ;
++ return 0 ;
++
++ vfree ( node->data ) ;
++data_malloc_failed:
++ kfree ( node ) ;
++struct_malloc_failed:
++ return - ENOMEM ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemFreeNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemFreeNode (
++ IN gcsDebugFileSystemNode * Node
++ )
++{
++
++ gcsDebugFileSystemNode **ptr ;
++
++ if ( Node == NULL )
++ {
++ printk ( "null passed to free_vinfo\n" ) ;
++ return ;
++ }
++
++ down ( gcmkNODE_SEM ( Node ) ) ;
++ /*free data*/
++ vfree ( Node->data ) ;
++
++ /*Close Debug fs*/
++ if ( Node->filen )
++ {
++ debugfs_remove ( Node->filen ) ;
++ }
++ if ( Node->parent )
++ {
++ debugfs_remove ( Node->parent ) ;
++ }
++
++ /* now delete the node from the linked list */
++ ptr = & ( gc_dbgfs.linkedlist ) ;
++ while ( *ptr != Node )
++ {
++ if ( ! *ptr )
++ {
++ printk ( "corrupt info list!\n" ) ;
++ break ;
++ }
++ else
++ ptr = & ( ( **ptr ).next ) ;
++ }
++ *ptr = Node->next ;
++ up ( gcmkNODE_SEM ( Node ) ) ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemSetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemSetCurrentNode (
++ IN gcsDebugFileSystemNode * Node
++ )
++{
++ gc_dbgfs.currentNode = Node ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemGetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemGetCurrentNode (
++ OUT gcsDebugFileSystemNode ** Node
++ )
++{
++ *Node = gc_dbgfs.currentNode ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemPrint
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemPrint (
++ IN gctCONST_STRING Message ,
++ ...
++ )
++{
++ gcmkDBGFSPRINT ( _GetArgumentSize ( Message ) , Message ) ;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,84 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <stdarg.h>
++
++#ifndef __gc_hal_kernel_debugfs_h_
++#define __gc_hal_kernel_debugfs_h_
++
++ #define MAX_LINE_SIZE 768 /* Max bytes for a line of debug info */
++
++
++ typedef struct _gcsDebugFileSystemNode gcsDebugFileSystemNode ;
++
++
++/*******************************************************************************
++ **
++ ** System Related
++ **
++ *******************************************************************************/
++
++gctINT gckDebugFileSystemIsEnabled(void);
++
++gctINT gckDebugFileSystemInitialize(void);
++
++gctINT gckDebugFileSystemTerminate(void);
++
++
++/*******************************************************************************
++ **
++ ** Node Related
++ **
++ *******************************************************************************/
++
++gctINT gckDebugFileSystemCreateNode(
++ IN gctINT SizeInKB,
++ IN gctCONST_STRING ParentName ,
++ IN gctCONST_STRING NodeName,
++ OUT gcsDebugFileSystemNode **Node
++ );
++
++
++void gckDebugFileSystemFreeNode(
++ IN gcsDebugFileSystemNode * Node
++ );
++
++
++
++void gckDebugFileSystemSetCurrentNode(
++ IN gcsDebugFileSystemNode * Node
++ );
++
++
++
++void gckDebugFileSystemGetCurrentNode(
++ OUT gcsDebugFileSystemNode ** Node
++ );
++
++
++void gckDebugFileSystemPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#endif
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_debug.h 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,102 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_debug_h_
++#define __gc_hal_kernel_debug_h_
++
++#include <gc_hal_kernel_linux.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <stdarg.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** OS-dependent Macros *****************************
++\******************************************************************************/
++
++typedef va_list gctARGUMENTS;
++
++#define gcmkARGUMENTS_START(Arguments, Pointer) \
++ va_start(Arguments, Pointer)
++
++#define gcmkARGUMENTS_END(Arguments) \
++ va_end(Arguments)
++
++#define gcmkDECLARE_LOCK(__spinLock__) \
++ static DEFINE_SPINLOCK(__spinLock__);
++
++#define gcmkLOCKSECTION(__spinLock__) \
++ spin_lock(&__spinLock__)
++
++#define gcmkUNLOCKSECTION(__spinLock__) \
++ spin_unlock(&__spinLock__)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETPROCESSID() \
++ task_tgid_vnr(current)
++#else
++# define gcmkGETPROCESSID() \
++ current->tgid
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETTHREADID() \
++ task_pid_vnr(current)
++#else
++# define gcmkGETTHREADID() \
++ current->pid
++#endif
++
++#define gcmkOUTPUT_STRING(String) \
++ if(gckDebugFileSystemIsEnabled()) \
++ gckDebugFileSystemPrint(String);\
++ else\
++ printk(String); \
++ touch_softlockup_watchdog()
++
++
++#define gcmkSPRINTF(Destination, Size, Message, Value) \
++ snprintf(Destination, Size, Message, Value)
++
++#define gcmkSPRINTF2(Destination, Size, Message, Value1, Value2) \
++ snprintf(Destination, Size, Message, Value1, Value2)
++
++#define gcmkSPRINTF3(Destination, Size, Message, Value1, Value2, Value3) \
++ snprintf(Destination, Size, Message, Value1, Value2, Value3)
++
++#define gcmkVSPRINTF(Destination, Size, Message, Arguments) \
++ vsnprintf(Destination, Size, Message, *(va_list *) &Arguments)
++
++#define gcmkSTRCAT(Destination, Size, String) \
++ strncat(Destination, String, Size)
++
++/* If not zero, forces data alignment in the variable argument list
++ by its individual size. */
++#define gcdALIGNBYSIZE 1
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_debug_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.c 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,1676 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/slab.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/pm_runtime.h>
++
++#define _GC_OBJ_ZONE gcvZONE_DEVICE
++
++#define DEBUG_FILE "galcore_trace"
++#define PARENT_FILE "gpu"
++
++
++#ifdef FLAREON
++ static struct dove_gpio_irq_handler gc500_handle;
++#endif
++
++#define gcmIS_CORE_PRESENT(Device, Core) (Device->irqLines[Core] > 0)
++
++/******************************************************************************\
++*************************** Memory Allocation Wrappers *************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocateMemory(
++ IN gckGALDEVICE Device,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER *Logical,
++ OUT gctPHYS_ADDR *Physical,
++ OUT gctUINT32 *PhysAddr
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++ gcmkVERIFY_ARGUMENT(Physical != NULL);
++ gcmkVERIFY_ARGUMENT(PhysAddr != NULL);
++
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Device->os, gcvFALSE, &Bytes, Physical, Logical
++ ));
++
++ *PhysAddr = ((PLINUX_MDL)*Physical)->dmaHandle - Device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG(
++ "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x",
++ *Logical, *Physical, *PhysAddr
++ );
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_FreeMemory(
++ IN gckGALDEVICE Device,
++ IN gctPOINTER Logical,
++ IN gctPHYS_ADDR Physical)
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Logical=0x%x Physical=0x%x",
++ Device, Logical, Physical);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ status = gckOS_FreeContiguous(
++ Device->os, Physical, Logical,
++ ((PLINUX_MDL) Physical)->numPages * PAGE_SIZE
++ );
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++
++/******************************************************************************\
++******************************* Interrupt Handler ******************************
++\******************************************************************************/
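++/* Split interrupt handling: each isrRoutine* below only notifies the kernel
++   object and signals the per-core semaphore from interrupt context, while the
++   matching threadRoutine* kernel thread waits on that semaphore and completes
++   the notification outside of the ISR. */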
++static irqreturn_t isrRoutine(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->dataReadys[gcvCORE_MAJOR] = gcvTRUE;
++
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++ if (down); /* To make gcc 4.6 happy */
++ device->dataReadys[gcvCORE_MAJOR] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutine2D(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_2D], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->dataReadys[gcvCORE_2D] = gcvTRUE;
++
++ up(&device->semas[gcvCORE_2D]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine2D(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_2D]);
++ if (down); /* To make gcc 4.6 happy */
++ device->dataReadys[gcvCORE_2D] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_2D], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutineVG(int irq, void *ctxt)
++{
++#if gcdENABLE_VG
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Serve the interrupt. */
++ status = gckVGINTERRUPT_Enque(device->kernels[gcvCORE_VG]->vg->interrupt);
++
++ /* Determine the return value. */
++ return (status == gcvSTATUS_NOT_OUR_INTERRUPT)
++ ? IRQ_RETVAL(0)
++ : IRQ_RETVAL(1);
++#else
++ return IRQ_NONE;
++#endif
++}
++
++static int threadRoutineVG(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_VG]);
++ if (down); /* To make gcc 4.6 happy */
++ device->dataReadys[gcvCORE_VG] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_VG], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++/******************************************************************************\
++******************************* gckGALDEVICE Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Construct
++**
++** Constructor.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gckGALDEVICE * Device
++** Pointer to a variable receiving the gckGALDEVICE object pointer on
++** success.
++*/
++gceSTATUS
++gckGALDEVICE_Construct(
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN struct device *pdev,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ OUT gckGALDEVICE *Device
++ )
++{
++ gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
++ gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
++ gctUINT32 horizontalTileSize, verticalTileSize;
++ struct resource* mem_region;
++ gctUINT32 physAddr;
++ gctUINT32 physical;
++ gckGALDEVICE device;
++ gceSTATUS status;
++ gctINT32 i;
++ gceHARDWARE_TYPE type;
++ gckDB sharedDB = gcvNULL;
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine, RegisterMemBase, RegisterMemSize,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++
++ /* Allocate device structure. */
++ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL);
++
++ if (!device)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ memset(device, 0, sizeof(struct _gckGALDEVICE));
++
++ device->dbgnode = gcvNULL;
++ if(LogFileSize != 0)
++ {
++ if(gckDebugFileSystemCreateNode(LogFileSize,PARENT_FILE,DEBUG_FILE,&(device->dbgnode)) != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the debug file system %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE
++ );
++ }
++ else
++ {
++ /*Everything is OK*/
++ gckDebugFileSystemSetCurrentNode(device->dbgnode);
++ }
++ }
++#ifdef CONFIG_PM
++ /*Init runtime pm for gpu*/
++ pm_runtime_enable(pdev);
++ device->pmdev = pdev;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ /*get gpu regulator*/
++ device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ device->gpu_regulator = devm_regulator_get(pdev, "pu");
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if (IS_ERR(device->gpu_regulator)) {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to get gpu regulator %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE);
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++#endif
++ /*Initialize the clock structure*/
++ if (IrqLine != -1) {
++ device->clk_3d_core = clk_get(pdev, "gpu3d_clk");
++ if (!IS_ERR(device->clk_3d_core)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (cpu_is_mx6q()) {
++ device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(device->clk_3d_shader)) {
++ IrqLine = -1;
++ clk_put(device->clk_3d_core);
++ device->clk_3d_core = NULL;
++ device->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++ }
++#else
++ device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
++ device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(device->clk_3d_shader)) {
++ IrqLine = -1;
++ clk_put(device->clk_3d_core);
++ device->clk_3d_core = NULL;
++ device->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++#endif
++ } else {
++ IrqLine = -1;
++ device->clk_3d_core = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
++ }
++ }
++ if ((IrqLine2D != -1) || (IrqLineVG != -1)) {
++ device->clk_2d_core = clk_get(pdev, "gpu2d_clk");
++ if (IS_ERR(device->clk_2d_core)) {
++ IrqLine2D = -1;
++ IrqLineVG = -1;
++ device->clk_2d_core = NULL;
++ gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
++ } else {
++ if (IrqLine2D != -1) {
++ device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
++ if (IS_ERR(device->clk_2d_axi)) {
++ device->clk_2d_axi = NULL;
++ IrqLine2D = -1;
++ gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
++ }
++ }
++ if (IrqLineVG != -1) {
++ device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
++ if (IS_ERR(device->clk_vg_axi)) {
++ IrqLineVG = -1;
++ device->clk_vg_axi = NULL;
++ gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
++ }
++ }
++ }
++ }
++
++ if (IrqLine != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize;
++ }
++
++ if (IrqLine2D != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D;
++ device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D;
++ }
++
++ if (IrqLineVG != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG;
++ device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG;
++ }
++
++ device->requestedContiguousBase = 0;
++ device->requestedContiguousSize = 0;
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ physical = device->requestedRegisterMemBases[i];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(
++ physical, device->requestedRegisterMemSizes[i], "galcore register region"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBases[i] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSizes[i]);
++
++ if (device->registerBases[i] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSizes[i];
++ }
++ else
++ {
++ device->registerBases[i] = gcvNULL;
++ }
++ }
++
++ /* Set the base address */
++ device->baseAddress = PhysBaseAddr;
++
++ /* Construct the gckOS object. */
++ gcmkONERROR(gckOS_Construct(device, &device->os));
++
++ if (IrqLine != -1)
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_MAJOR, device,
++ gcvNULL, &device->kernels[gcvCORE_MAJOR]));
++
++ sharedDB = device->kernels[gcvCORE_MAJOR]->db;
++
++ /* Initialize core mapping */
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_MAJOR;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_MAJOR]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Enable_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Disable_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetGpuProfiler(
++ device->kernels[gcvCORE_MAJOR]->hardware, GpuProfiler
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_MAJOR] = gcvNULL;
++ }
++
++ if (IrqLine2D != -1)
++ {
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_2D, device,
++ sharedDB, &device->kernels[gcvCORE_2D]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;
++
++ /* Verify the hardware type */
++ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));
++
++ if (type != gcvHARDWARE_2D)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unexpected hardware type: %d\n",
++ __FUNCTION__, __LINE__,
++ type
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_2D;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_2D]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Enable_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Disable_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, PowerManagement
++ ));
++
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_2D] = gcvNULL;
++ }
++
++ if (IrqLineVG != -1)
++ {
++#if gcdENABLE_VG
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_VG, device,
++ sharedDB, &device->kernels[gcvCORE_VG]));
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++ && device->kernels[gcvCORE_2D] == gcvNULL
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_VG;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
++ }
++
++
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ PowerManagement
++ ));
++
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_VG] = gcvNULL;
++ }
++
++ /* Initialize the ISR. */
++ device->irqLines[gcvCORE_MAJOR] = IrqLine;
++ device->irqLines[gcvCORE_2D] = IrqLine2D;
++ device->irqLines[gcvCORE_VG] = IrqLineVG;
++
++ /* Initialize the kernel thread semaphores. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
++ }
++
++ device->signal = Signal;
++
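++ /* Find the first constructed core; if none was created the configuration
++    is invalid, otherwise use it below to query the memory layout. */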
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL) break;
++ }
++
++ if (i == gcdMAX_GPU_COUNT)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
++ device->kernels[i]->vg->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++ /* query the amount of video memory */
++ gcmkONERROR(gckVGHARDWARE_QueryMemory(
++ device->kernels[i]->vg->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++ else
++#endif
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckHARDWARE_QuerySystemMemory(
++ device->kernels[i]->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++
++ /* query the amount of video memory */
++ gcmkONERROR(gckHARDWARE_QueryMemory(
++ device->kernels[i]->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++
++
++ /* Grab the first available kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++
++ /* Set up the internal memory region. */
++ if (device->internalSize > 0)
++ {
++ status = gckVIDMEM_Construct(
++ device->os,
++ internalBaseAddress, device->internalSize, internalAlignment,
++ 0, &device->internalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->internalSize = 0;
++ }
++ else
++ {
++ /* Map internal memory. */
++ device->internalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->internalSize);
++
++ if (device->internalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
++ physical += device->internalSize;
++ }
++ }
++
++ if (device->externalSize > 0)
++ {
++ /* create the external memory heap */
++ status = gckVIDMEM_Construct(
++ device->os,
++ externalBaseAddress, device->externalSize, externalAlignment,
++ 0, &device->externalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable external heap. */
++ device->externalSize = 0;
++ }
++ else
++ {
++ /* Map external memory. */
++ device->externalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->externalSize);
++
++ if (device->externalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
++ physical += device->externalSize;
++ }
++ }
++
++ /* set up the contiguous memory */
++ device->contiguousSize = ContiguousSize;
++
++ if (ContiguousSize > 0)
++ {
++ if (ContiguousBase == 0)
++ {
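++ /* No fixed contiguous base was supplied: allocate the pool ourselves,
++    shrinking the request by 4 MB after every failure until both the
++    allocation and the heap construction succeed, or nothing is left. */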
++ while (device->contiguousSize > 0)
++ {
++ /* Allocate contiguous memory. */
++ status = _AllocateMemory(
++ device,
++ device->contiguousSize,
++ &device->contiguousBase,
++ &device->contiguousPhysical,
++ &physAddr
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
++ status = gckVIDMEM_Construct(
++ device->os,
++ physAddr | device->systemMemoryBaseAddress,
++ device->contiguousSize,
++ 64,
++ BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++
++ gcmkONERROR(_FreeMemory(
++ device,
++ device->contiguousBase,
++ device->contiguousPhysical
++ ));
++
++ gcmRELEASE_NAME(device->contiguousPhysicalName);
++ device->contiguousBase = gcvNULL;
++ device->contiguousPhysical = gcvNULL;
++ }
++
++ if (device->contiguousSize <= (4 << 20))
++ {
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ device->contiguousSize -= (4 << 20);
++ }
++ }
++ }
++ else
++ {
++ /* Create the contiguous memory heap. */
++ status = gckVIDMEM_Construct(
++ device->os,
++ ContiguousBase | device->systemMemoryBaseAddress,
++ ContiguousSize,
++ 64, BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable contiguous memory pool. */
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++ }
++ else
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++ mem_region = request_mem_region(
++ ContiguousBase, ContiguousSize, "galcore managed memory"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ ContiguousSize, ContiguousBase
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif
++
++ device->requestedContiguousBase = ContiguousBase;
++ device->requestedContiguousSize = ContiguousSize;
++
++#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
++ {
++ device->contiguousBase
++#if gcdPAGED_MEMORY_CACHEABLE
++ = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
++#else
++ = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
++#endif
++ if (device->contiguousBase == gcvNULL)
++ {
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++#endif
++
++ device->contiguousPhysical = gcvNULL;
++ device->contiguousPhysicalName = 0;
++ device->contiguousSize = ContiguousSize;
++ device->contiguousMapped = gcvTRUE;
++ }
++ }
++ }
++
++ /* Return pointer to the device. */
++ * Device = device;
++
++ gcmkFOOTER_ARG("*Device=0x%x", * Device);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Destroy
++**
++** Class destructor.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Destroy(
++ gckGALDEVICE Device)
++{
++ gctINT i;
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ if (Device != gcvNULL)
++ {
++ /* Grab the first available kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->irqLines[i] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ if (Device->internalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->internalPhysicalName);
++ Device->internalPhysicalName = 0;
++ }
++ if (Device->externalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->externalPhysicalName);
++ Device->externalPhysicalName = 0;
++ }
++ if (Device->contiguousPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->contiguousPhysicalName);
++ Device->contiguousPhysicalName = 0;
++ }
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->kernels[i] != gcvNULL)
++ {
++ /* Destroy the gckKERNEL object. */
++ gcmkVERIFY_OK(gckKERNEL_Destroy(Device->kernels[i]));
++ Device->kernels[i] = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->internalLogical != gcvNULL)
++ {
++ /* Unmap the internal memory. */
++ iounmap(Device->internalLogical);
++ Device->internalLogical = gcvNULL;
++ }
++
++ if (Device->internalVidMem != gcvNULL)
++ {
++ /* Destroy the internal heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->internalVidMem));
++ Device->internalVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->externalLogical != gcvNULL)
++ {
++ /* Unmap the external memory. */
++ iounmap(Device->externalLogical);
++ Device->externalLogical = gcvNULL;
++ }
++
++ if (Device->externalVidMem != gcvNULL)
++ {
++ /* destroy the external heap */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->externalVidMem));
++ Device->externalVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->contiguousBase != gcvNULL)
++ {
++ if (Device->contiguousMapped)
++ {
++#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (Device->contiguousBase)
++ {
++ /* Unmap the contiguous memory. */
++ iounmap(Device->contiguousBase);
++ }
++#endif
++ }
++ else
++ {
++ gcmkONERROR(_FreeMemory(
++ Device,
++ Device->contiguousBase,
++ Device->contiguousPhysical
++ ));
++ }
++
++ Device->contiguousBase = gcvNULL;
++ Device->contiguousPhysical = gcvNULL;
++ }
++
++ if (Device->requestedContiguousBase != 0)
++ {
++ release_mem_region(Device->requestedContiguousBase, Device->requestedContiguousSize);
++ Device->requestedContiguousBase = 0;
++ Device->requestedContiguousSize = 0;
++ }
++
++ if (Device->contiguousVidMem != gcvNULL)
++ {
++ /* Destroy the contiguous heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->contiguousVidMem));
++ Device->contiguousVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if(gckDebugFileSystemIsEnabled())
++ {
++ gckDebugFileSystemFreeNode(Device->dbgnode);
++ kfree(Device->dbgnode);
++ Device->dbgnode = gcvNULL;
++ }
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->registerBases[i] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBases[i]);
++ if (Device->requestedRegisterMemBases[i] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBases[i], Device->requestedRegisterMemSizes[i]);
++ }
++
++ Device->registerBases[i] = gcvNULL;
++ Device->requestedRegisterMemBases[i] = 0;
++ Device->requestedRegisterMemSizes[i] = 0;
++ }
++ }
++
++ /*Disable clock*/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (Device->clk_3d_axi) {
++ clk_put(Device->clk_3d_axi);
++ Device->clk_3d_axi = NULL;
++ }
++#endif
++ if (Device->clk_3d_core) {
++ clk_put(Device->clk_3d_core);
++ Device->clk_3d_core = NULL;
++ }
++ if (Device->clk_3d_shader) {
++ clk_put(Device->clk_3d_shader);
++ Device->clk_3d_shader = NULL;
++ }
++ if (Device->clk_2d_core) {
++ clk_put(Device->clk_2d_core);
++ Device->clk_2d_core = NULL;
++ }
++ if (Device->clk_2d_axi) {
++ clk_put(Device->clk_2d_axi);
++ Device->clk_2d_axi = NULL;
++ }
++ if (Device->clk_vg_axi) {
++ clk_put(Device->clk_vg_axi);
++ Device->clk_vg_axi = NULL;
++ }
++
++#ifdef CONFIG_PM
++ if(Device->pmdev)
++ pm_runtime_disable(Device->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Device->gpu_regulator) {
++ regulator_put(Device->gpu_regulator);
++ Device->gpu_regulator = NULL;
++ }
++#endif
++
++ /* Destroy the gckOS object. */
++ if (Device->os != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Destroy(Device->os));
++ Device->os = gcvNULL;
++ }
++
++ /* Free the device. */
++ kfree(Device);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Setup_ISR
++**
++** Start the ISR routine.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Setup successfully.
++** gcvSTATUS_GENERIC_IO
++** Setup failed.
++*/
++gceSTATUS
++gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status;
++ gctINT ret = -1;
++
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[Core] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ gc500_handle.handler = isrRoutine;
++ break;
++ case gcvCORE_2D:
++ gc500_handle.handler = isrRoutine2D;
++ break;
++ case gcvCORE_VG:
++ gc500_handle.handler = isrRoutineVG;
++ break;
++ default:
++ break;
++ }
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutine, IRQF_DISABLED,
++ "galcore interrupt service", Device
++ );
++ break;
++ case gcvCORE_2D:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutine2D, IRQF_DISABLED,
++ "galcore 2D interrupt service", Device
++ );
++ break;
++ case gcvCORE_VG:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutineVG, IRQF_DISABLED,
++ "galcore VG interrupt service", Device
++ );
++ break;
++ default:
++ break;
++ }
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[Core], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->isrEnabled[Core] = 1;
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[Core] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Enable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[Core] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ spin_lock(&Device->kernels[Core]->irq_lock);
++ if (Device->isrEnabled[Core] == 0)
++ {
++ enable_irq(Device->irqLines[Core]);
++ /* Mark ISR as enabled. */
++ Device->isrEnabled[Core] = gcvTRUE;
++ }
++ Device->isrEnabled[Core]++;
++ spin_unlock(&Device->kernels[Core]->irq_lock);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Release_ISR
++**
++** Release the irq line.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[Core])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[Core], Device);
++#endif
++
++ Device->isrInitializeds[Core] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Disable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* disable the irq */
++ spin_lock(&Device->kernels[Core]->irq_lock);
++ if (Device->isrEnabled[Core] > 0)
++ {
++ Device->isrEnabled[Core]--;
++ if (Device->isrEnabled[Core] == 0)
++ disable_irq(Device->irqLines[Core]);
++ }
++ spin_unlock(&Device->kernels[Core]->irq_lock);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start_Threads
++**
++** Start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++** gcvSTATUS_GENERIC_IO
++** Start failed.
++*/
++gceSTATUS
++gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ struct task_struct * task;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine, Device, "galcore daemon thread");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine2D, Device, "galcore daemon thread for 2D");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_2D] = task;
++ Device->threadInitializeds[gcvCORE_2D] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutineVG, Device, "galcore daemon thread for VG");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_VG] = task;
++ Device->threadInitializeds[gcvCORE_VG] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop_Threads
++**
++** Stop the daemon threads for all cores.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitializeds[i])
++ {
++ Device->killThread = gcvTRUE;
++ up(&Device->semas[i]);
++
++ kthread_stop(Device->threadCtxts[i]);
++ Device->threadCtxts[i] = gcvNULL;
++ Device->threadInitializeds[i] = gcvFALSE;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start
++**
++** Start the gal device, including the following actions: setup the isr routine
++** and start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++*/
++gceSTATUS
++gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ /* Start the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Start_Threads(Device));
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_MAJOR));
++
++ /* Switch to the OFF (broadcast) power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_2D));
++
++ /* Switch to the OFF (broadcast) power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_VG));
++
++ /* Switch to the OFF (broadcast) power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF
++ ));
++
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_MAJOR));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_2D));
++
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_VG));
++
++#if gcdENABLE_VG
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF
++ ));
++#endif
++ }
++
++ /* Stop the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Stop_Threads(Device));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_device.h 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,192 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_device_h_
++#define __gc_hal_kernel_device_h_
++
++/******************************************************************************\
++******************************* gckGALDEVICE Structure *******************************
++\******************************************************************************/
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++struct contiguous_mem_pool {
++ struct dma_attrs attrs;
++ dma_addr_t phys;
++ void *virt;
++ size_t size;
++};
++#endif
++
++typedef struct _gckGALDEVICE
++{
++ /* Objects. */
++ gckOS os;
++ gckKERNEL kernels[gcdMAX_GPU_COUNT];
++
++ /* Attributes. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctUINT32 internalPhysicalName;
++ gctPOINTER internalLogical;
++ gckVIDMEM internalVidMem;
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctUINT32 externalPhysicalName;
++ gctPOINTER externalLogical;
++ gckVIDMEM externalVidMem;
++ gckVIDMEM contiguousVidMem;
++ gctPOINTER contiguousBase;
++ gctPHYS_ADDR contiguousPhysical;
++ gctUINT32 contiguousPhysicalName;
++ gctSIZE_T contiguousSize;
++ gctBOOL contiguousMapped;
++ gctPOINTER contiguousMappedUser;
++ gctSIZE_T systemMemorySize;
++ gctUINT32 systemMemoryBaseAddress;
++ gctPOINTER registerBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T registerSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 baseAddress;
++ gctUINT32 requestedRegisterMemBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T requestedRegisterMemSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 requestedContiguousBase;
++ gctSIZE_T requestedContiguousSize;
++
++ /* IRQ management. */
++ gctINT irqLines[gcdMAX_GPU_COUNT];
++ gctBOOL isrInitializeds[gcdMAX_GPU_COUNT];
++ gctINT isrEnabled[gcdMAX_GPU_COUNT];
++ gctBOOL dataReadys[gcdMAX_GPU_COUNT];
++
++ /* Thread management. */
++ struct task_struct *threadCtxts[gcdMAX_GPU_COUNT];
++ struct semaphore semas[gcdMAX_GPU_COUNT];
++ gctBOOL threadInitializeds[gcdMAX_GPU_COUNT];
++ gctBOOL killThread;
++
++ /* Signal management. */
++ gctINT signal;
++
++ /* Core mapping */
++ gceCORE coreMapping[8];
++
++ /* States before suspend. */
++ gceCHIPPOWERSTATE statesStored[gcdMAX_GPU_COUNT];
++
++ /*Device Debug File System Entry in Kernel*/
++ struct _gcsDebugFileSystemNode * dbgnode;
++
++ /* Clock management.*/
++ struct clk *clk_3d_core;
++ struct clk *clk_3d_shader;
++ struct clk *clk_3d_axi;
++ struct clk *clk_2d_core;
++ struct clk *clk_2d_axi;
++ struct clk *clk_vg_axi;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ /*Power management.*/
++ struct regulator *gpu_regulator;
++#endif
++ /*Run time pm*/
++ struct device *pmdev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc[gcdMAX_GPU_COUNT];
++#endif
++}
++* gckGALDEVICE;
++
++typedef struct _gcsHAL_PRIVATE_DATA
++{
++ gckGALDEVICE device;
++ gctPOINTER mappedMemory;
++ gctPOINTER contiguousLogical;
++ /* The process opening the device may not be the same as the one that closes it. */
++ gctUINT32 pidOpen;
++}
++gcsHAL_PRIVATE_DATA, * gcsHAL_PRIVATE_DATA_PTR;
++
++gceSTATUS gckGALDEVICE_Enable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Disable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Construct(
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN struct device *pdev,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ OUT gckGALDEVICE *Device
++ );
++
++gceSTATUS gckGALDEVICE_Destroy(
++ IN gckGALDEVICE Device
++ );
++
++#endif /* __gc_hal_kernel_device_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_driver.c 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,1476 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/notifier.h>
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_driver.h"
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#ifdef CONFIG_PXA_DVFM
++# include <mach/dvfm.h>
++# include <mach/pxa3xx_dvfm.h>
++#endif
++
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++# include <linux/resmem_account.h>
++# include <linux/kernel.h>
++# include <linux/mm.h>
++# include <linux/oom.h>
++# include <linux/sched.h>
++# include <linux/notifier.h>
++
++struct task_struct *lowmem_deathpending;
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data);
++
++static struct notifier_block task_nb = {
++ .notifier_call = task_notify_func,
++};
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct task_struct *task = data;
++
++ if (task == lowmem_deathpending)
++ lowmem_deathpending = NULL;
++
++ return NOTIFY_OK;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#include <mach/viv_gpu.h>
++#else
++#include <linux/pm_runtime.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <mach/busfreq.h>
++#else
++#include <linux/busfreq-imx6.h>
++#include <linux/reset.h>
++#endif
++#endif
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_DRIVER
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/device_cooling.h>
++#define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a);
++#else
++extern int register_thermal_notifier(struct notifier_block *nb);
++extern int unregister_thermal_notifier(struct notifier_block *nb);
++#define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a);
++#endif
++#endif
++
++MODULE_DESCRIPTION("Vivante Graphics Driver");
++MODULE_LICENSE("GPL");
++
++static struct class* gpuClass;
++
++static gckGALDEVICE galDevice;
++
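++/* Module parameters: the defaults below can be overridden on the insmod
++   command line (and, in the platform-driver build, the probe code may replace
++   them with probed resources). An illustrative, non board-specific example:
++       insmod galcore.ko contiguousSize=0x8000000 powerManagement=1 showArgs=1
++*/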
++static uint major = 199;
++module_param(major, uint, 0644);
++
++static int irqLine = -1;
++module_param(irqLine, int, 0644);
++
++static ulong registerMemBase = 0x80000000;
++module_param(registerMemBase, ulong, 0644);
++
++static ulong registerMemSize = 2 << 10;
++module_param(registerMemSize, ulong, 0644);
++
++static int irqLine2D = -1;
++module_param(irqLine2D, int, 0644);
++
++static ulong registerMemBase2D = 0x00000000;
++module_param(registerMemBase2D, ulong, 0644);
++
++static ulong registerMemSize2D = 2 << 10;
++module_param(registerMemSize2D, ulong, 0644);
++
++static int irqLineVG = -1;
++module_param(irqLineVG, int, 0644);
++
++static ulong registerMemBaseVG = 0x00000000;
++module_param(registerMemBaseVG, ulong, 0644);
++
++static ulong registerMemSizeVG = 2 << 10;
++module_param(registerMemSizeVG, ulong, 0644);
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static ulong contiguousSize = 128 << 20;
++#else
++static ulong contiguousSize = 4 << 20;
++#endif
++module_param(contiguousSize, ulong, 0644);
++
++static ulong contiguousBase = 0;
++module_param(contiguousBase, ulong, 0644);
++
++static ulong bankSize = 0;
++module_param(bankSize, ulong, 0644);
++
++static int fastClear = -1;
++module_param(fastClear, int, 0644);
++
++static int compression = -1;
++module_param(compression, int, 0644);
++
++static int powerManagement = 1;
++module_param(powerManagement, int, 0644);
++
++static int gpuProfiler = 0;
++module_param(gpuProfiler, int, 0644);
++
++static int signal = 48;
++module_param(signal, int, 0644);
++
++static ulong baseAddress = 0;
++module_param(baseAddress, ulong, 0644);
++
++static ulong physSize = 0;
++module_param(physSize, ulong, 0644);
++
++static uint logFileSize=0;
++module_param(logFileSize,uint, 0644);
++
++static int showArgs = 0;
++module_param(showArgs, int, 0644);
++
++int gpu3DMinClock = 0;
++module_param(gpu3DMinClock, int, 0644);
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER
++ unsigned long coreClock = 156000000;
++ module_param(coreClock, ulong, 0644);
++#endif
++
++static int drv_open(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static int drv_release(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ );
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ );
++
++static struct file_operations driver_fops =
++{
++ .owner = THIS_MODULE,
++ .open = drv_open,
++ .release = drv_release,
++ .unlocked_ioctl = drv_ioctl,
++#ifdef HAVE_COMPAT_IOCTL
++ .compat_ioctl = drv_ioctl,
++#endif
++ .mmap = drv_mmap,
++};
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++static size_t viv_gpu_resmem_query(struct task_struct *p, struct reserved_memory_account *m);
++static struct reserved_memory_account viv_gpu_resmem_handler = {
++ .name = "viv_gpu",
++ .get_page_used_by_process = viv_gpu_resmem_query,
++};
++
++size_t viv_gpu_resmem_query(struct task_struct *p, struct reserved_memory_account *m)
++{
++ gcuDATABASE_INFO info;
++ unsigned int processid = p->pid;
++ gckKERNEL gpukernel = m->data;
++
++ /* ignore error happens in this api. */
++ if (gckKERNEL_QueryProcessDB(gpukernel, processid, false, gcvDB_VIDEO_MEMORY, &info) != gcvSTATUS_OK)
++ return 0;
++
++ /* we return pages. */
++ if (info.counters.bytes > 0)
++ return info.counters.bytes / PAGE_SIZE;
++ return 0;
++}
++#endif
++
++int drv_open(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gctBOOL attached = gcvFALSE;
++ gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL);
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ data->device = galDevice;
++ data->mappedMemory = gcvNULL;
++ data->contiguousLogical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));
++
++ /* Attached the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
++ }
++ }
++ attached = gcvTRUE;
++
++ if (!galDevice->contiguousMapped)
++ {
++ gcmkONERROR(gckOS_MapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ &data->contiguousLogical
++ ));
++ }
++
++ filp->private_data = data;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ if (data != gcvNULL)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical
++ ));
++ }
++
++ kfree(data);
++ }
++
++ if (attached)
++ {
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
++ }
++ }
++ }
++
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++int drv_release(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (!device->contiguousMapped)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_UnmapMemoryEx(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical,
++ data->pidOpen
++ ));
++
++ data->contiguousLogical = gcvNULL;
++ }
++ }
++
++ /* A process gets detached. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
++ }
++ }
++
++ kfree(data);
++ filp->private_data = NULL;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gctUINT32 copyLen;
++ DRIVER_ARGS drvArgs;
++ gckGALDEVICE device;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gctINT32 i, count;
++
++ gcmkHEADER_ARG(
++ "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
++ filp, ioctlCode, arg
++ );
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
++ && (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown command %d\n",
++ __FUNCTION__, __LINE__,
++ ioctlCode
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get the drvArgs. */
++ copyLen = copy_from_user(
++ &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of the input arguments.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Now bring in the gcsHAL_INTERFACE structure. */
++ if ((drvArgs.InputBufferSize != sizeof(gcsHAL_INTERFACE))
++ || (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): input or/and output structures are invalid.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ copyLen = copy_from_user(
++ &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of input HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
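++ /* gcvHAL_CHIP_INFO is answered directly from the constructed kernels;
++    every other command is dispatched to the core selected by the caller's
++    hardwareType through the coreMapping table. */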
++ if (iface.command == gcvHAL_CHIP_INFO)
++ {
++ count = 0;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
++ &iface.u.ChipInfo.types[count]));
++ }
++ count++;
++ }
++ }
++
++ iface.u.ChipInfo.count = count;
++ iface.status = status = gcvSTATUS_OK;
++ }
++ else
++ {
++ if (iface.hardwareType < 0 || iface.hardwareType > 7)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown hardwareType %d\n",
++ __FUNCTION__, __LINE__,
++ iface.hardwareType
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
++ {
++ status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ else
++#endif
++ {
++ status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ }
++
++ /* Redo system call after pending signal is handled. */
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkFOOTER();
++ return -ERESTARTSYS;
++ }
++
++ if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
++ {
++ gcuVIDMEM_NODE_PTR node = gcmUINT64_TO_PTR(iface.u.LockVideoMemory.node);
++ /* Special case for mapped memory. */
++ if ((data->mappedMemory != gcvNULL)
++ && (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ )
++ {
++ /* Compute offset into mapped memory. */
++ gctUINT32 offset
++ = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
++ - (gctUINT8 *) device->contiguousBase;
++
++ /* Compute offset into user-mapped region. */
++ iface.u.LockVideoMemory.memory =
++ gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
++ }
++ }
++
++ /* Copy data back to the user. */
++ copyLen = copy_to_user(
++ gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++            "%s(%d): error copying the output HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
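++
++/* A minimal sketch of the user-space side of the dispatch above, assuming a
++** client that has opened the galcore character device (fd): the caller wraps
++** one gcsHAL_INTERFACE in a DRIVER_ARGS record, the handler copies the
++** interface in, dispatches it, and copies the (possibly updated) interface
++** back out.
++**
++**     gcsHAL_INTERFACE iface = { .command = gcvHAL_CHIP_INFO };
++**     DRIVER_ARGS args;
++**
++**     args.InputBuffer      = gcmPTR_TO_UINT64(&iface);
++**     args.InputBufferSize  = sizeof(iface);
++**     args.OutputBuffer     = gcmPTR_TO_UINT64(&iface);
++**     args.OutputBufferSize = sizeof(iface);
++**
++**     ioctl(fd, IOCTL_GCHAL_INTERFACE, &args);
++*/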
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if !gcdPAGED_MEMORY_CACHEABLE
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_flags |= gcdVM_FLAGS;
++#endif
++ vma->vm_pgoff = 0;
++
++ if (device->contiguousMapped)
++ {
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret = 0;
++
++ if (size > device->contiguousSize)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Invalid mapping size.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ ret = io_remap_pfn_range(
++ vma,
++ vma->vm_start,
++ device->requestedContiguousBase >> PAGE_SHIFT,
++ size,
++ vma->vm_page_prot
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): io_remap_pfn_range failed %d\n",
++ __FUNCTION__, __LINE__,
++ ret
++ );
++
++ data->mappedMemory = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ data->mappedMemory = (gctPOINTER) vma->vm_start;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++ }
++
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
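++/* Usage sketch for the mapping above, assuming the same galcore file
++** descriptor and a pool size known to the caller: user space maps the
++** contiguous pool once, and the gcvHAL_LOCK_VIDEO_MEMORY path in drv_ioctl()
++** then rebases returned logical addresses onto this mapping.
++**
++**     void *contiguous = mmap(NULL, poolSize,
++**                             PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
++*/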
++
++#if !USE_PLATFORM_DRIVER
++static int __init drv_init(void)
++#else
++static int drv_init(struct device *pdev)
++#endif
++{
++ int ret;
++ int result = -EINVAL;
++ gceSTATUS status;
++ gckGALDEVICE device = gcvNULL;
++ struct class* device_class = gcvNULL;
++
++ gcmkHEADER();
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
++ {
++# if 0
++ struct clk * clk;
++
++ clk = clk_get(NULL, "GCCLK");
++
++ if (IS_ERR(clk))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): clk get error: %d\n",
++ __FUNCTION__, __LINE__,
++ PTR_ERR(clk)
++ );
++
++ result = -ENODEV;
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /*
++ * APMU_GC_156M, APMU_GC_312M, APMU_GC_PLL2, APMU_GC_PLL2_DIV2 currently.
++ * Use the 2X clock.
++ */
++ if (clk_set_rate(clk, coreClock * 2))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to set core clock.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ result = -EAGAIN;
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ clk_enable(clk);
++
++#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
++ gc_pwr(1);
++# endif
++# endif
++ }
++#endif
++
++ printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++    /* When the GPU profiler is enabled, GPU power management must be turned off. */
++    if (gpuProfiler)
++        powerManagement = 0;
++ if (showArgs)
++ {
++ printk("galcore options:\n");
++ printk(" irqLine = %d\n", irqLine);
++ printk(" registerMemBase = 0x%08lX\n", registerMemBase);
++ printk(" registerMemSize = 0x%08lX\n", registerMemSize);
++
++ if (irqLine2D != -1)
++ {
++ printk(" irqLine2D = %d\n", irqLine2D);
++ printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
++ printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
++ }
++
++ if (irqLineVG != -1)
++ {
++ printk(" irqLineVG = %d\n", irqLineVG);
++ printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
++ printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
++ }
++
++ printk(" contiguousSize = %ld\n", contiguousSize);
++ printk(" contiguousBase = 0x%08lX\n", contiguousBase);
++ printk(" bankSize = 0x%08lX\n", bankSize);
++ printk(" fastClear = %d\n", fastClear);
++ printk(" compression = %d\n", compression);
++ printk(" signal = %d\n", signal);
++ printk(" baseAddress = 0x%08lX\n", baseAddress);
++ printk(" physSize = 0x%08lX\n", physSize);
++ printk(" logFileSize = %d KB \n", logFileSize);
++ printk(" powerManagement = %d\n", powerManagement);
++ printk(" gpuProfiler = %d\n", gpuProfiler);
++#if ENABLE_GPU_CLOCK_BY_DRIVER
++ printk(" coreClock = %lu\n", coreClock);
++#endif
++ }
++
++ if(logFileSize != 0)
++ {
++ gckDebugFileSystemInitialize();
++ }
++
++ /* Create the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Construct(
++ irqLine,
++ registerMemBase, registerMemSize,
++ irqLine2D,
++ registerMemBase2D, registerMemSize2D,
++ irqLineVG,
++ registerMemBaseVG, registerMemSizeVG,
++ contiguousBase, contiguousSize,
++ bankSize, fastClear, compression, baseAddress, physSize, signal,
++ logFileSize,
++ pdev,
++ powerManagement,
++ gpuProfiler,
++ &device
++ ));
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ device->pool = dev_get_drvdata(pdev);
++#endif
++
++ /* Start the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Start(device));
++
++ if ((physSize != 0)
++ && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
++ {
++ status = gckMMU_Enable(device->kernels[gcvCORE_MAJOR]->mmu, baseAddress, physSize);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Enable new MMU: status=%d\n", status);
++
++ if ((device->kernels[gcvCORE_2D] != gcvNULL)
++ && (device->kernels[gcvCORE_2D]->hardware->mmuVersion != 0))
++ {
++ status = gckMMU_Enable(device->kernels[gcvCORE_2D]->mmu, baseAddress, physSize);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Enable new MMU for 2D: status=%d\n", status);
++ }
++
++ /* Reset the base address */
++ device->baseAddress = 0;
++ }
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ task_free_register(&task_nb);
++ viv_gpu_resmem_handler.data = device->kernels[gcvCORE_MAJOR];
++ register_reserved_memory_account(&viv_gpu_resmem_handler);
++#endif
++
++
++ /* Register the character device. */
++ ret = register_chrdev(major, DRV_NAME, &driver_fops);
++
++ if (ret < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not allocate major number for mmap.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (major == 0)
++ {
++ major = ret;
++ }
++
++ /* Create the device class. */
++ device_class = class_create(THIS_MODULE, "graphics_class");
++
++ if (IS_ERR(device_class))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the class.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++ device_create(device_class, NULL, MKDEV(major, 0), NULL, "galcore");
++#else
++ device_create(device_class, NULL, MKDEV(major, 0), "galcore");
++#endif
++
++ galDevice = device;
++ gpuClass = device_class;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine, contiguousSize, registerMemBase
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ /* Roll back. */
++ if (device_class != gcvNULL)
++ {
++ device_destroy(device_class, MKDEV(major, 0));
++ class_destroy(device_class);
++ }
++
++ if (device != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ }
++
++ gcmkFOOTER();
++ return result;
++}
++
++#if !USE_PLATFORM_DRIVER
++static void __exit drv_exit(void)
++#else
++static void drv_exit(void)
++#endif
++{
++ gcmkHEADER();
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ task_free_unregister(&task_nb);
++ unregister_reserved_memory_account(&viv_gpu_resmem_handler);
++#endif
++
++ gcmkASSERT(gpuClass != gcvNULL);
++ device_destroy(gpuClass, MKDEV(major, 0));
++ class_destroy(gpuClass);
++
++ unregister_chrdev(major, DRV_NAME);
++
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));
++
++ if(gckDebugFileSystemIsEnabled())
++ {
++ gckDebugFileSystemTerminate();
++ }
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++ {
++# if 0
++ struct clk * clk = NULL;
++
++#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
++ gc_pwr(0);
++#endif
++ clk = clk_get(NULL, "GCCLK");
++ clk_disable(clk);
++# endif
++ }
++#endif
++
++ gcmkFOOTER_NO();
++}
++
++#if !USE_PLATFORM_DRIVER
++ module_init(drv_init);
++ module_exit(drv_exit);
++#else
++
++#ifdef CONFIG_DOVE_GPU
++# define DEVICE_NAME "dove_gpu"
++#else
++# define DEVICE_NAME "galcore"
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
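++/* Thermal throttling policy (a summary of the notifier below): on a severe
++** event (> 4) the 3D core is clamped to its minimum frequency scaling value,
++** on a moderate event (> 1) the scaling value is stepped down from the
++** maximum by 8/64 per event level, and once the alarm clears the original
++** scaling value is restored.
++*/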
++static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ static gctUINT orgFscale, minFscale, maxFscale;
++ static gctBOOL critical;
++ gckHARDWARE hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware;
++
++ if (event > 4) {
++ critical = gcvTRUE;
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, minFscale);
++        gckOS_Print("System is too hot. GPU3D scaling to %d/64 clock.\n", minFscale);
++ } else if (event > 1) {
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, maxFscale - (8 * event));
++ } else if (orgFscale) {
++ gckHARDWARE_SetFscaleValue(hardware, orgFscale);
++ if (critical) {
++ gckOS_Print("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale);
++ critical = gcvFALSE;
++ }
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block thermal_hot_pm_notifier = {
++ .notifier_call = thermal_hot_pm_notify,
++ };
++#endif
++
++
++
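++/* gpu_probe() below gathers the per-core IRQ and register resources by name
++** (irq_3d/iobase_3d, irq_2d/iobase_2d, irq_vg/iobase_vg), obtains the
++** contiguous pool (dma_alloc_attrs() on 3.10+, device tree properties or
++** platform data on older kernels), and then hands everything to drv_init().
++*/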
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_probe(struct platform_device *pdev)
++#else
++static int __devinit gpu_probe(struct platform_device *pdev)
++#endif
++{
++ int ret = -ENODEV;
++ struct resource* res;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++    struct device_node *dn = pdev->dev.of_node;
++ const u32 *prop;
++#else
++ struct viv_gpu_platform_data *pdata;
++#endif
++ gcmkHEADER();
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
++ if (res)
++ baseAddress = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
++ if (res)
++ irqLine = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
++ if (res)
++ {
++ registerMemBase = res->start;
++ registerMemSize = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
++ if (res)
++ irqLine2D = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
++ if (res)
++ {
++ registerMemBase2D = res->start;
++ registerMemSize2D = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
++ if (res)
++ irqLineVG = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
++ if (res)
++ {
++ registerMemBaseVG = res->start;
++ registerMemSizeVG = res->end - res->start + 1;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
++ if (!pool)
++ return -ENOMEM;
++ pool->size = contiguousSize;
++ init_dma_attrs(&pool->attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs);
++ pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys,
++ GFP_KERNEL, &pool->attrs);
++ if (!pool->virt) {
++ dev_err(&pdev->dev, "Failed to allocate contiguous memory\n");
++ return -ENOMEM;
++ }
++ contiguousBase = pool->phys;
++ dev_set_drvdata(&pdev->dev, pool);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ prop = of_get_property(dn, "contiguousbase", NULL);
++ if(prop)
++ contiguousBase = *prop;
++ of_property_read_u32(dn,"contiguoussize", (u32 *)&contiguousSize);
++#else
++ pdata = pdev->dev.platform_data;
++ if (pdata) {
++ contiguousBase = pdata->reserved_mem_base;
++ contiguousSize = pdata->reserved_mem_size;
++ }
++#endif
++ if (contiguousSize == 0)
++        gckOS_Print("Warning: No contiguous memory is reserved for the GPU!\n");
++ ret = drv_init(&pdev->dev);
++
++ if (!ret)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ rstc = devm_reset_control_get(&pdev->dev, "gpu3d");
++ galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
++
++ rstc = devm_reset_control_get(&pdev->dev, "gpu2d");
++ galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
++
++ rstc = devm_reset_control_get(&pdev->dev, "gpuvg");
++ galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
++#endif
++ platform_set_drvdata(pdev, galDevice);
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (galDevice->kernels[gcvCORE_MAJOR])
++ REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++ gcmkFOOTER_NO();
++ return ret;
++ }
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
++ &pool->attrs);
++#endif
++ gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
++ return ret;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_remove(struct platform_device *pdev)
++#else
++static int __devexit gpu_remove(struct platform_device *pdev)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ gckGALDEVICE device = platform_get_drvdata(pdev);
++ struct contiguous_mem_pool *pool = device->pool;
++#endif
++ gcmkHEADER();
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++ drv_exit();
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
++ &pool->attrs);
++#endif
++ gcmkFOOTER_NO();
++ return 0;
++}
++
++static int gpu_suspend(struct platform_device *dev, pm_message_t state)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++
++ device = platform_get_drvdata(dev);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++ /* Store states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_QueryPowerManagementState(device->kernels[i]->vg->hardware, &device->statesStored[i]);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_OFF);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
++ }
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ }
++ }
++
++ return 0;
++}
++
++static int gpu_resume(struct platform_device *dev)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++ gceCHIPPOWERSTATE statesStored;
++
++ device = platform_get_drvdata(dev);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_ON);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++            /* Convert the global state to the corresponding internal state. */
++            switch (device->statesStored[i])
++ {
++ case gcvPOWER_OFF:
++ statesStored = gcvPOWER_OFF_BROADCAST;
++ break;
++ case gcvPOWER_IDLE:
++ statesStored = gcvPOWER_IDLE_BROADCAST;
++ break;
++ case gcvPOWER_SUSPEND:
++ statesStored = gcvPOWER_SUSPEND_BROADCAST;
++ break;
++ case gcvPOWER_ON:
++ statesStored = gcvPOWER_ON_AUTO;
++ break;
++ default:
++ statesStored = device->statesStored[i];
++ break;
++ }
++
++ /* Restore states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, statesStored);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static const struct of_device_id mxs_gpu_dt_ids[] = {
++ { .compatible = "fsl,imx6q-gpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
++
++#ifdef CONFIG_PM
++static int gpu_runtime_suspend(struct device *dev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 7)
++ release_bus_freq(BUS_FREQ_HIGH);
++#endif
++ return 0;
++}
++
++static int gpu_runtime_resume(struct device *dev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 7)
++ request_bus_freq(BUS_FREQ_HIGH);
++#endif
++ return 0;
++}
++
++static int gpu_system_suspend(struct device *dev)
++{
++ pm_message_t state={0};
++ return gpu_suspend(to_platform_device(dev), state);
++}
++
++static int gpu_system_resume(struct device *dev)
++{
++ return gpu_resume(to_platform_device(dev));
++}
++
++static const struct dev_pm_ops gpu_pm_ops = {
++ SET_RUNTIME_PM_OPS(gpu_runtime_suspend, gpu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
++};
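++/* Under CONFIG_PM the dev_pm_ops above wires the runtime callbacks (bus
++** frequency request/release) and routes system sleep through gpu_suspend()
++** and gpu_resume(); the legacy .suspend/.resume entries in the
++** platform_driver below remain for configurations where .pm is not set.
++*/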
++#endif
++#endif
++
++static struct platform_driver gpu_driver = {
++ .probe = gpu_probe,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ .remove = gpu_remove,
++#else
++ .remove = __devexit_p(gpu_remove),
++#endif
++
++ .suspend = gpu_suspend,
++ .resume = gpu_resume,
++
++ .driver = {
++ .name = DEVICE_NAME,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ .of_match_table = mxs_gpu_dt_ids,
++#ifdef CONFIG_PM
++ .pm = &gpu_pm_ops,
++#endif
++#endif
++ }
++};
++
++#if 0 /*CONFIG_DOVE_GPU*/
++static struct resource gpu_resources[] = {
++ {
++ .name = "gpu_irq",
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .name = "gpu_base",
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .name = "gpu_mem",
++ .flags = IORESOURCE_MEM,
++ },
++};
++
++static struct platform_device * gpu_device;
++#endif
++
++static int __init gpu_init(void)
++{
++ int ret = 0;
++
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ gpu_resources[0].start = gpu_resources[0].end = irqLine;
++
++ gpu_resources[1].start = registerMemBase;
++ gpu_resources[1].end = registerMemBase + registerMemSize - 1;
++
++ gpu_resources[2].start = contiguousBase;
++ gpu_resources[2].end = contiguousBase + contiguousSize - 1;
++
++ /* Allocate device */
++ gpu_device = platform_device_alloc(DEVICE_NAME, -1);
++ if (!gpu_device)
++ {
++ printk(KERN_ERR "galcore: platform_device_alloc failed.\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* Insert resource */
++ ret = platform_device_add_resources(gpu_device, gpu_resources, 3);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add_resources failed.\n");
++ goto put_dev;
++ }
++
++ /* Add device */
++ ret = platform_device_add(gpu_device);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add failed.\n");
++ goto put_dev;
++ }
++#endif
++
++ ret = platform_driver_register(&gpu_driver);
++ if (!ret)
++ {
++ goto out;
++ }
++
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ platform_device_del(gpu_device);
++put_dev:
++ platform_device_put(gpu_device);
++#endif
++
++out:
++ return ret;
++}
++
++static void __exit gpu_exit(void)
++{
++ platform_driver_unregister(&gpu_driver);
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ platform_device_unregister(gpu_device);
++#endif
++}
++
++module_init(gpu_init);
++module_exit(gpu_exit);
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.c 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,481 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryVideoMemory
++**
++** Query the amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++**          Pointer to a gcsHAL_INTERFACE structure that will be filled in with
++** the memory information.
++*/
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Get internal memory size and physical address. */
++ Interface->u.QueryVideoMemory.internalSize = device->internalSize;
++ Interface->u.QueryVideoMemory.internalPhysical = device->internalPhysicalName;
++
++ /* Get external memory size and physical address. */
++ Interface->u.QueryVideoMemory.externalSize = device->externalSize;
++ Interface->u.QueryVideoMemory.externalPhysical = device->externalPhysicalName;
++
++ /* Get contiguous memory size and physical address. */
++ Interface->u.QueryVideoMemory.contiguousSize = device->contiguousSize;
++ Interface->u.QueryVideoMemory.contiguousPhysical = device->contiguousPhysicalName;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_GetVideoMemoryPool
++**
++** Get the gckVIDMEM object belonging to the specified pool.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gcePOOL Pool
++** Pool to query gckVIDMEM object for.
++**
++** OUTPUT:
++**
++** gckVIDMEM * VideoMemory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object belonging to the requested pool.
++*/
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ )
++{
++ gckGALDEVICE device;
++ gckVIDMEM videoMemory;
++
++ gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(VideoMemory != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Dispatch on pool. */
++ switch (Pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ videoMemory = device->internalVidMem;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ videoMemory = device->externalVidMem;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ videoMemory = device->contiguousVidMem;
++ break;
++
++ default:
++ /* Unknown pool. */
++ videoMemory = NULL;
++ }
++
++ /* Return pointer to the gckVIDMEM object. */
++ *VideoMemory = videoMemory;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*VideoMemory=%p", *VideoMemory);
++ return (videoMemory == NULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapMemory
++**
++** Map video memory into the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the base address of the mapped
++** memory region.
++*/
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_MapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnmapMemory
++**
++** Unmap video memory from the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++**          Physical address of video memory to unmap.
++**
++**      gctSIZE_T Bytes
++**          Number of bytes to unmap.
++**
++** gctPOINTER Logical
++** Base address of the mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_UnmapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++**  gckKERNEL_MapVideoMemoryEx
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++**      gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++**      gceCORE Core
++**          GPU core that the hardware address belongs to.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckGALDEVICE device;
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gcePOOL pool;
++ gctUINT32 offset, base;
++ gceSTATUS status;
++ gctPOINTER logical;
++
++ gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
++ Kernel, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware, Address, &pool, &offset));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckHARDWARE_SplitMemory(Kernel->hardware, Address, &pool, &offset));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ if (device->contiguousMapped)
++ {
++ logical = device->contiguousBase;
++ }
++ else
++ {
++ gctINT processID;
++ gckOS_GetProcessID(&processID);
++
++ mdl = (PLINUX_MDL) device->contiguousPhysical;
++
++ mdlMap = FindMdlMap(mdl, processID);
++ gcmkASSERT(mdlMap);
++
++ logical = (gctPOINTER) mdlMap->vmaAddr;
++ }
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
++ device->contiguousVidMem->baseAddress,
++ &pool,
++ &base));
++ }
++ else
++#endif
++ {
++ gctUINT32 baseAddress = 0;
++
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ }
++
++ gcmkVERIFY_OK(
++ gckHARDWARE_SplitMemory(Kernel->hardware,
++ device->contiguousVidMem->baseAddress - baseAddress,
++ &pool,
++ &base));
++ }
++ offset -= base;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Build logical address of specified address. */
++ *Logical = (gctPOINTER) ((gctUINT8_PTR) logical + offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=%p", *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++    /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ return gckKERNEL_MapVideoMemoryEx(Kernel, gcvCORE_MAJOR, InUserSpace, Address, Logical);
++}
++/*******************************************************************************
++**
++** gckKERNEL_Notify
++**
++**  This function is called by clients to notify the gckKERNEL object of an event.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gceNOTIFY Notification
++** Notification event.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=%p Notification=%d Data=%d",
++ Kernel, Notification, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++    /* Dispatch on notification. */
++ switch (Notification)
++ {
++ case gcvNOTIFY_INTERRUPT:
++ /* Process the interrupt. */
++#if COMMAND_PROCESSOR_VERSION > 1
++ status = gckINTERRUPT_Notify(Kernel->interrupt, Data);
++#else
++ status = gckHARDWARE_Interrupt(Kernel->hardware, Data);
++#endif
++ break;
++
++ default:
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Settings != gcvNULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Fill in signal. */
++ Settings->signal = device->signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("Settings->signal=%d", Settings->signal);
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_linux.h 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,94 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_linux_h_
++#define __gc_hal_kernel_linux_h_
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#ifdef FLAREON
++# include <asm/arch-realview/dove_gpio_irq.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++
++#ifdef MODVERSIONS
++# include <linux/modversions.h>
++#endif
++#include <asm/io.h>
++#include <asm/uaccess.h>
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++#include <linux/clk.h>
++#include <linux/regulator/consumer.h>
++#endif
++
++#define NTSTRSAFE_NO_CCH_FUNCTIONS
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_kernel_os.h"
++#include "gc_hal_kernel_debugfs.h"
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
++#define FIND_TASK_BY_PID(x) pid_task(find_vpid(x), PIDTYPE_PID)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#define FIND_TASK_BY_PID(x) find_task_by_vpid(x)
++#else
++#define FIND_TASK_BY_PID(x) find_task_by_pid(x)
++#endif
++
++#define _WIDE(string) L##string
++#define WIDE(string) _WIDE(string)
++
++#define countof(a) (sizeof(a) / sizeof(a[0]))
++
++#define DRV_NAME "galcore"
++
++#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_CACHE_MASK)) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION (3,7,0)
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)
++#else
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
++#endif
++
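++/* GetOrder() returns the smallest page-allocation order whose block covers
++** numPages pages, i.e. the ceiling of log2(numPages); for example
++** GetOrder(1) == 0, GetOrder(3) == 2 and GetOrder(4) == 2, matching what
++** order-based page allocators expect.
++*/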
++static inline gctINT
++GetOrder(
++ IN gctINT numPages
++ )
++{
++ gctINT order = 0;
++
++ while ((1 << order) < numPages) order++;
++
++ return order;
++}
++
++#endif /* __gc_hal_kernel_linux_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_math.c 2015-05-01 14:57:59.547427001 -0500
+@@ -0,0 +1,32 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
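++/* gckMATH_ModuloInt() below is a division-safe modulo helper: it returns
++** X % Y for a non-zero divisor and falls back to 0 when Y is 0, so callers
++** never trigger a divide-by-zero; for example gckMATH_ModuloInt(7, 3) == 1
++** and gckMATH_ModuloInt(7, 0) == 0.
++*/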
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ )
++{
++    if (Y == 0) {return 0;}
++    else {return X % Y;}
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.c 2015-05-01 14:57:59.551427001 -0500
+@@ -0,0 +1,9019 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/sched.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/idr.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/workqueue.h>
++#include <linux/idr.h>
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++#include <linux/math64.h>
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/reset.h>
++static inline void imx_gpc_power_up_pu(bool flag) {}
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++#include <mach/common.h>
++#endif
++#include <linux/delay.h>
++#include <linux/pm_runtime.h>
++
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++#include <linux/file.h>
++#include "gc_hal_kernel_sync.h"
++#endif
++
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#ifdef ANDROID
++const char * _PLATFORM = "\n\0$PLATFORM$Android$\n";
++#else
++const char * _PLATFORM = "\n\0$PLATFORM$Linux$\n";
++#endif
++
++#define USER_SIGNAL_TABLE_LEN_INIT 64
++#define gcdSUPPRESS_OOM_MESSAGE 1
++
++#define MEMORY_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryLock, \
++ gcvINFINITE))
++
++#define MEMORY_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryLock))
++
++#define MEMORY_MAP_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryMapLock, \
++ gcvINFINITE))
++
++#define MEMORY_MAP_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryMapLock))
++
++/* Protection bits used when mapping memory to user space. */
++#define gcmkPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE
++#define gcmkIOREMAP ioremap_wc
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++#elif !gcdNONPAGED_MEMORY_CACHEABLE
++#define gcmkIOREMAP ioremap_nocache
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_noncached(x)
++#endif
++
++#if gcdSUPPRESS_OOM_MESSAGE
++#define gcdNOWARN __GFP_NOWARN
++#else
++#define gcdNOWARN 0
++#endif
++
++#define gcdINFINITE_TIMEOUT (60 * 1000)
++#define gcdDETECT_TIMEOUT 0
++#define gcdDETECT_DMA_ADDRESS 1
++#define gcdDETECT_DMA_STATE 1
++
++#define gcdUSE_NON_PAGED_MEMORY_CACHE 10
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++typedef struct _gcsNonPagedMemoryCache
++{
++#ifndef NO_DMA_COHERENT
++ gctINT size;
++ gctSTRING addr;
++ dma_addr_t dmaHandle;
++#else
++ long order;
++ struct page * page;
++#endif
++
++ struct _gcsNonPagedMemoryCache * prev;
++ struct _gcsNonPagedMemoryCache * next;
++}
++gcsNonPagedMemoryCache;
++#endif /* gcdUSE_NON_PAGED_MEMORY_CACHE */
++
++typedef struct _gcsUSER_MAPPING * gcsUSER_MAPPING_PTR;
++typedef struct _gcsUSER_MAPPING
++{
++ /* Pointer to next mapping structure. */
++ gcsUSER_MAPPING_PTR next;
++
++ /* Physical address of this mapping. */
++ gctUINT32 physical;
++
++ /* Logical address of this mapping. */
++ gctPOINTER logical;
++
++ /* Number of bytes of this mapping. */
++ gctSIZE_T bytes;
++
++ /* Starting address of this mapping. */
++ gctINT8_PTR start;
++
++ /* Ending address of this mapping. */
++ gctINT8_PTR end;
++}
++gcsUSER_MAPPING;
++
++typedef struct _gcsINTEGER_DB * gcsINTEGER_DB_PTR;
++typedef struct _gcsINTEGER_DB
++{
++ struct idr idr;
++ spinlock_t lock;
++ gctINT curr;
++}
++gcsINTEGER_DB;
++
++struct _gckOS
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Heap. */
++ gckHEAP heap;
++
++ /* Pointer to device */
++ gckGALDEVICE device;
++
++ /* Memory management */
++ gctPOINTER memoryLock;
++ gctPOINTER memoryMapLock;
++
++ struct _LINUX_MDL *mdlHead;
++ struct _LINUX_MDL *mdlTail;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* Signal management. */
++
++ /* Lock. */
++ gctPOINTER signalMutex;
++
++ /* signal id database. */
++ gcsINTEGER_DB signalDB;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /* Lock. */
++ gctPOINTER syncPointMutex;
++
++ /* sync point id database. */
++ gcsINTEGER_DB syncPointDB;
++#endif
++
++ gcsUSER_MAPPING_PTR userMap;
++ gctPOINTER debugLock;
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ gctUINT cacheSize;
++ gcsNonPagedMemoryCache * cacheHead;
++ gcsNonPagedMemoryCache * cacheTail;
++#endif
++
++ /* workqueue for os timer. */
++ struct workqueue_struct * workqueue;
++};
++
++typedef struct _gcsSIGNAL * gcsSIGNAL_PTR;
++typedef struct _gcsSIGNAL
++{
++ /* Kernel sync primitive. */
++ struct completion obj;
++
++ /* Manual reset flag. */
++ gctBOOL manualReset;
++
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* The owner of the signal. */
++ gctHANDLE process;
++
++ gckHARDWARE hardware;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSIGNAL;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++typedef struct _gcsSYNC_POINT * gcsSYNC_POINT_PTR;
++typedef struct _gcsSYNC_POINT
++{
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* State. */
++ atomic_t state;
++
++ /* timeline. */
++ struct sync_timeline * timeline;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSYNC_POINT;
++#endif
++
++typedef struct _gcsPageInfo * gcsPageInfo_PTR;
++typedef struct _gcsPageInfo
++{
++ struct page **pages;
++ gctUINT32_PTR pageTable;
++}
++gcsPageInfo;
++
++typedef struct _gcsOSTIMER * gcsOSTIMER_PTR;
++typedef struct _gcsOSTIMER
++{
++ struct delayed_work work;
++ gctTIMERFUNCTION function;
++ gctPOINTER data;
++} gcsOSTIMER;
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++static gctINT
++_GetProcessID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_tgid_vnr(current);
++#else
++ return current->tgid;
++#endif
++}
++
++static gctINT
++_GetThreadID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_pid_vnr(current);
++#else
++ return current->pid;
++#endif
++}
++
++static PLINUX_MDL
++_CreateMdl(
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("ProcessID=%d", ProcessID);
++
++ mdl = (PLINUX_MDL)kzalloc(sizeof(struct _LINUX_MDL), GFP_KERNEL | gcdNOWARN);
++ if (mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdl->pid = ProcessID;
++ mdl->maps = gcvNULL;
++ mdl->prev = gcvNULL;
++ mdl->next = gcvNULL;
++
++ gcmkFOOTER_ARG("0x%X", mdl);
++ return mdl;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ );
++
++static gceSTATUS
++_DestroyMdl(
++ IN PLINUX_MDL Mdl
++ )
++{
++ PLINUX_MDL_MAP mdlMap, next;
++
++ gcmkHEADER_ARG("Mdl=0x%X", Mdl);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Mdl != gcvNULL);
++
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ next = mdlMap->next;
++
++ gcmkVERIFY_OK(_DestroyMdlMap(Mdl, mdlMap));
++
++ mdlMap = next;
++ }
++
++ kfree(Mdl);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static PLINUX_MDL_MAP
++_CreateMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++
++ mdlMap = (PLINUX_MDL_MAP)kmalloc(sizeof(struct _LINUX_MDL_MAP), GFP_KERNEL | gcdNOWARN);
++ if (mdlMap == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdlMap->pid = ProcessID;
++ mdlMap->vmaAddr = gcvNULL;
++ mdlMap->vma = gcvNULL;
++ mdlMap->count = 0;
++
++ mdlMap->next = Mdl->maps;
++ Mdl->maps = mdlMap;
++
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ )
++{
++ PLINUX_MDL_MAP prevMdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X MdlMap=0x%X", Mdl, MdlMap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(MdlMap != gcvNULL);
++ gcmkASSERT(Mdl->maps != gcvNULL);
++
++ if (Mdl->maps == MdlMap)
++ {
++ Mdl->maps = MdlMap->next;
++ }
++ else
++ {
++ prevMdlMap = Mdl->maps;
++
++ while (prevMdlMap->next != MdlMap)
++ {
++ prevMdlMap = prevMdlMap->next;
++
++ gcmkASSERT(prevMdlMap != gcvNULL);
++ }
++
++ prevMdlMap->next = MdlMap->next;
++ }
++
++ kfree(MdlMap);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++ if(Mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->pid == ProcessID)
++ {
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvNULL;
++}
++
++void
++OnProcessExit(
++ IN gckOS Os,
++ IN gckKERNEL Kernel
++ )
++{
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++static inline int
++is_vmalloc_addr(
++ void *Addr
++ )
++{
++ unsigned long addr = (unsigned long)Addr;
++
++ return addr >= VMALLOC_START && addr < VMALLOC_END;
++}
++#endif
++
++static void
++_NonContiguousFree(
++ IN struct page ** Pages,
++ IN gctUINT32 NumPages
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Pages=0x%X, NumPages=%d", Pages, NumPages);
++
++ gcmkASSERT(Pages != gcvNULL);
++
++ for (i = 0; i < NumPages; i++)
++ {
++ __free_page(Pages[i]);
++ }
++
++ if (is_vmalloc_addr(Pages))
++ {
++ vfree(Pages);
++ }
++ else
++ {
++ kfree(Pages);
++ }
++
++ gcmkFOOTER_NO();
++}
++
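++/* _NonContiguousAlloc() builds the page-pointer array with kmalloc() and
++** falls back to vmalloc() when the array itself is too large for a
++** physically contiguous allocation; _NonContiguousFree() above mirrors this
++** by checking is_vmalloc_addr() before choosing vfree() or kfree().
++*/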
++static struct page **
++_NonContiguousAlloc(
++ IN gctUINT32 NumPages
++ )
++{
++ struct page ** pages;
++ struct page *p;
++ gctINT i, size;
++
++ gcmkHEADER_ARG("NumPages=%lu", NumPages);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
++ if (NumPages > totalram_pages)
++#else
++ if (NumPages > num_physpages)
++#endif
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ size = NumPages * sizeof(struct page *);
++
++ pages = kmalloc(size, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ pages = vmalloc(size);
++
++ if (!pages)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ }
++
++ for (i = 0; i < NumPages; i++)
++ {
++ p = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++
++ if (!p)
++ {
++ _NonContiguousFree(pages, i);
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ pages[i] = p;
++ }
++
++ gcmkFOOTER_ARG("pages=0x%X", pages);
++ return pages;
++}
++
++static inline struct page *
++_NonContiguousToPage(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return Pages[Index];
++}
++
++static inline unsigned long
++_NonContiguousToPfn(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_pfn(_NonContiguousToPage(Pages, Index));
++}
++
++static inline unsigned long
++_NonContiguousToPhys(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_phys(_NonContiguousToPage(Pages, Index));
++}
++
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++
++static gctBOOL
++_AddNonPagedMemoryCache(
++ gckOS Os,
++#ifndef NO_DMA_COHERENT
++ gctINT Size,
++ gctSTRING Addr,
++ dma_addr_t DmaHandle
++#else
++ long Order,
++ struct page * Page
++#endif
++ )
++{
++ gcsNonPagedMemoryCache *cache;
++
++ if (Os->cacheSize >= gcdUSE_NON_PAGED_MEMORY_CACHE)
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate the cache record */
++ cache = (gcsNonPagedMemoryCache *)kmalloc(sizeof(gcsNonPagedMemoryCache), GFP_ATOMIC);
++
++ if (cache == gcvNULL) return gcvFALSE;
++
++#ifndef NO_DMA_COHERENT
++ cache->size = Size;
++ cache->addr = Addr;
++ cache->dmaHandle = DmaHandle;
++#else
++ cache->order = Order;
++ cache->page = Page;
++#endif
++
++ /* Add to list */
++ if (Os->cacheHead == gcvNULL)
++ {
++ cache->prev = gcvNULL;
++ cache->next = gcvNULL;
++ Os->cacheHead =
++ Os->cacheTail = cache;
++ }
++ else
++ {
++ /* Add to the tail. */
++ cache->prev = Os->cacheTail;
++ cache->next = gcvNULL;
++ Os->cacheTail->next = cache;
++ Os->cacheTail = cache;
++ }
++
++ Os->cacheSize++;
++
++ return gcvTRUE;
++}
++
++#ifndef NO_DMA_COHERENT
++static gctSTRING
++_GetNonPagedMemoryCache(
++ gckOS Os,
++ gctINT Size,
++ dma_addr_t * DmaHandle
++ )
++#else
++static struct page *
++_GetNonPagedMemoryCache(
++ gckOS Os,
++ long Order
++ )
++#endif
++{
++ gcsNonPagedMemoryCache *cache;
++#ifndef NO_DMA_COHERENT
++ gctSTRING addr;
++#else
++ struct page * page;
++#endif
++
++ if (Os->cacheHead == gcvNULL) return gcvNULL;
++
++ /* Find the right cache */
++ cache = Os->cacheHead;
++
++ while (cache != gcvNULL)
++ {
++#ifndef NO_DMA_COHERENT
++ if (cache->size == Size) break;
++#else
++ if (cache->order == Order) break;
++#endif
++
++ cache = cache->next;
++ }
++
++ if (cache == gcvNULL) return gcvNULL;
++
++ /* Remove the cache from list */
++ if (cache == Os->cacheHead)
++ {
++ Os->cacheHead = cache->next;
++
++ if (Os->cacheHead == gcvNULL)
++ {
++ Os->cacheTail = gcvNULL;
++ }
++ }
++ else
++ {
++ cache->prev->next = cache->next;
++
++ if (cache == Os->cacheTail)
++ {
++ Os->cacheTail = cache->prev;
++ }
++ else
++ {
++ cache->next->prev = cache->prev;
++ }
++ }
++
++ /* Destroy cache */
++#ifndef NO_DMA_COHERENT
++ addr = cache->addr;
++ *DmaHandle = cache->dmaHandle;
++#else
++ page = cache->page;
++#endif
++
++ kfree(cache);
++
++ Os->cacheSize--;
++
++#ifndef NO_DMA_COHERENT
++ return addr;
++#else
++ return page;
++#endif
++}
++
++static void
++_FreeAllNonPagedMemoryCache(
++ gckOS Os
++ )
++{
++ gcsNonPagedMemoryCache *cache, *nextCache;
++
++ MEMORY_LOCK(Os);
++
++ cache = Os->cacheHead;
++
++ while (cache != gcvNULL)
++ {
++ if (cache != Os->cacheTail)
++ {
++ nextCache = cache->next;
++ }
++ else
++ {
++ nextCache = gcvNULL;
++ }
++
++ /* Remove the cache from list */
++ if (cache == Os->cacheHead)
++ {
++ Os->cacheHead = cache->next;
++
++ if (Os->cacheHead == gcvNULL)
++ {
++ Os->cacheTail = gcvNULL;
++ }
++ }
++ else
++ {
++ cache->prev->next = cache->next;
++
++ if (cache == Os->cacheTail)
++ {
++ Os->cacheTail = cache->prev;
++ }
++ else
++ {
++ cache->next->prev = cache->prev;
++ }
++ }
++
++#ifndef NO_DMA_COHERENT
++ dma_free_coherent(gcvNULL,
++ cache->size,
++ cache->addr,
++ cache->dmaHandle);
++#else
++ free_pages((unsigned long)page_address(cache->page), cache->order);
++#endif
++
++ kfree(cache);
++
++ cache = nextCache;
++ }
++
++ MEMORY_UNLOCK(Os);
++}
++
++#endif /* gcdUSE_NON_PAGED_MEMORY_CACHE */
++
++/*******************************************************************************
++** Integer Id Management.
++*/
++gceSTATUS
++_AllocateIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctPOINTER KernelPointer,
++ OUT gctUINT32 *Id
++ )
++{
++ int result;
++ gctINT next;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ idr_preload(GFP_KERNEL | gcdNOWARN);
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++ result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC);
++
++    /* idr_alloc returns the allocated id (>= 0) on success. */
++    if (result >= 0)
++    {
++        Database->curr = result;
++    }
++
++ spin_unlock(&Database->lock);
++
++ idr_preload_end();
++
++ if (result < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ *Id = result;
++#else
++again:
++ if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0)
++ {
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++ /* Try to get a id greater than current id. */
++ result = idr_get_new_above(&Database->idr, KernelPointer, next, Id);
++
++ if (!result)
++ {
++ Database->curr = *Id;
++ }
++
++ spin_unlock(&Database->lock);
++
++ if (result == -EAGAIN)
++ {
++ goto again;
++ }
++
++ if (result != 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_QueryIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER pointer;
++
++ spin_lock(&Database->lock);
++
++ pointer = idr_find(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ if(pointer)
++ {
++ *KernelPointer = pointer;
++ return gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_OS,
++ "%s(%d) Id = %d is not found",
++ __FUNCTION__, __LINE__, Id);
++
++ return gcvSTATUS_NOT_FOUND;
++ }
++}
++
++gceSTATUS
++_DestroyIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id
++ )
++{
++ spin_lock(&Database->lock);
++
++ idr_remove(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ return gcvSTATUS_OK;
++}
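++
++/* Typical lifetime of an entry in one of these databases (a sketch, assuming
++** a gcsINTEGER_DB that has had idr_init()/spin_lock_init() run on it, as
++** gckOS_Construct() does for the signal and sync point databases):
++**
++**     gctUINT32 id;
++**     gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &id));
++**     ...
++**     gcmkONERROR(_QueryIntegerId(&Os->signalDB, id, (gctPOINTER *)&signal));
++**     ...
++**     gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, id));
++*/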
++
++static void
++_UnmapUserLogical(
++ IN gctINT Pid,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++)
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++gceSTATUS
++_QueryProcessPageTable(
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ spinlock_t *lock;
++ gctUINTPTR_T logical = (gctUINTPTR_T)Logical;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (!current->mm)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
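++    /*
++     * Walk the current process' page tables (pgd -> pud -> pmd -> pte) to
++     * translate the user virtual address into a physical one.  The pte is
++     * read with its page-table lock held so the mapping cannot change
++     * underneath us.
++     */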
++ pgd = pgd_offset(current->mm, logical);
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pud = pud_offset(pgd, logical);
++ if (pud_none(*pud) || pud_bad(*pud))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pmd = pmd_offset(pud, logical);
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &lock);
++ if (!pte)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ if (!pte_present(*pte))
++ {
++ pte_unmap_unlock(pte, lock);
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ *Address = (pte_pfn(*pte) << PAGE_SHIFT) | (logical & ~PAGE_MASK);
++ pte_unmap_unlock(pte, lock);
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Construct
++**
++** Construct a new gckOS object.
++**
++** INPUT:
++**
++** gctPOINTER Context
++** Pointer to the gckGALDEVICE class.
++**
++** OUTPUT:
++**
++** gckOS * Os
++** Pointer to a variable that will hold the pointer to the gckOS object.
++*/
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Os != gcvNULL);
++
++ /* Allocate the gckOS object. */
++ os = (gckOS) kmalloc(gcmSIZEOF(struct _gckOS), GFP_KERNEL | gcdNOWARN);
++
++ if (os == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ /* Zero the memory. */
++ gckOS_ZeroMemory(os, gcmSIZEOF(struct _gckOS));
++
++ /* Initialize the gckOS object. */
++ os->object.type = gcvOBJ_OS;
++
++    /* Set the device. */
++ os->device = Context;
++
++ /* IMPORTANT! No heap yet. */
++ os->heap = gcvNULL;
++
++ /* Initialize the memory lock. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryLock));
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryMapLock));
++
++ /* Create debug lock mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->debugLock));
++
++
++ os->mdlHead = os->mdlTail = gcvNULL;
++
++ /* Get the kernel process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&os->kernelProcessID));
++
++ /*
++ * Initialize the signal manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->signalMutex));
++
++ /* Initialize signal id database lock. */
++ spin_lock_init(&os->signalDB.lock);
++
++ /* Initialize signal id database. */
++ idr_init(&os->signalDB.idr);
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Initialize the sync point manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->syncPointMutex));
++
++ /* Initialize sync point id database lock. */
++ spin_lock_init(&os->syncPointDB.lock);
++
++ /* Initialize sync point id database. */
++ idr_init(&os->syncPointDB.idr);
++#endif
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ os->cacheSize = 0;
++ os->cacheHead = gcvNULL;
++ os->cacheTail = gcvNULL;
++#endif
++
++ /* Create a workqueue for os timer. */
++ os->workqueue = create_singlethread_workqueue("galcore workqueue");
++
++ if (os->workqueue == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return pointer to the gckOS object. */
++ *Os = os;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Os=0x%X", *Os);
++ return gcvSTATUS_OK;
++
++OnError:
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (os->syncPointMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->syncPointMutex));
++ }
++#endif
++
++ if (os->signalMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->signalMutex));
++ }
++
++ if (os->heap != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckHEAP_Destroy(os->heap));
++ }
++
++ if (os->memoryMapLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryMapLock));
++ }
++
++ if (os->memoryLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryLock));
++ }
++
++ if (os->debugLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->debugLock));
++ }
++
++ if (os->workqueue != gcvNULL)
++ {
++ destroy_workqueue(os->workqueue);
++ }
++
++ kfree(os);
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Destroy
++**
++** Destroy an gckOS object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ )
++{
++ gckHEAP heap;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ _FreeAllNonPagedMemoryCache(Os);
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Destroy the sync point manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->syncPointMutex));
++#endif
++
++ /*
++ * Destroy the signal manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->signalMutex));
++
++ if (Os->heap != gcvNULL)
++ {
++ /* Mark gckHEAP as gone. */
++ heap = Os->heap;
++ Os->heap = gcvNULL;
++
++ /* Destroy the gckHEAP object. */
++ gcmkVERIFY_OK(gckHEAP_Destroy(heap));
++ }
++
++ /* Destroy the memory lock. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryMapLock));
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryLock));
++
++ /* Destroy debug lock mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->debugLock));
++
++    /* Wait for all queued work to finish. */
++ flush_workqueue(Os->workqueue);
++
++    /* Destroy the work queue. */
++ destroy_workqueue(Os->workqueue);
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++
++ /* Mark the gckOS object as unknown. */
++ Os->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckOS object. */
++ kfree(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ )
++{
++ gctSTRING addr = 0;
++ gctINT numPages = Mdl->numPages;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ if (Mdl->contiguous)
++ {
++ addr = page_address(Mdl->u.contiguousPages);
++ }
++ else
++ {
++ addr = vmap(Mdl->u.nonContiguousPages,
++ numPages,
++ 0,
++ PAGE_KERNEL);
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++ }
++#else
++ struct page ** pages;
++ gctBOOL free = gcvFALSE;
++ gctINT i;
++
++ if (Mdl->contiguous)
++ {
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ return gcvNULL;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(Mdl->u.contiguousPages, i);
++ }
++
++ free = gcvTRUE;
++ }
++ else
++ {
++ pages = Mdl->u.nonContiguousPages;
++ }
++
++ /* ioremap() can't work on system memory since 2.6.38. */
++ addr = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++
++ if (free)
++ {
++ kfree(pages);
++ }
++
++#endif
++
++ return addr;
++}
++
++static void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ )
++{
++#if !gcdNONPAGED_MEMORY_CACHEABLE
++ vunmap(Addr);
++#endif
++}
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gctPHYS_ADDR Physical,
++ OUT gctSIZE_T * PageCount,
++ OUT gctPOINTER * Logical
++ )
++{
++ *PageCount = ((PLINUX_MDL)Physical)->numPages;
++ *Logical = _CreateKernelVirtualMapping((PLINUX_MDL)Physical);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gctPOINTER Logical
++ )
++{
++ _DestoryKernelVirtualMapping((gctSTRING)Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Allocate
++**
++** Allocate memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Do we have a heap? */
++ if (Os->heap != gcvNULL)
++ {
++ /* Allocate from the heap. */
++ gcmkONERROR(gckHEAP_Allocate(Os->heap, Bytes, Memory));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AllocateMemory(Os, Bytes, Memory));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Free
++**
++** Free allocated memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Memory=0x%X", Os, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Do we have a heap? */
++ if (Os->heap != gcvNULL)
++ {
++ /* Free from the heap. */
++ gcmkONERROR(gckHEAP_Free(Os->heap, Memory));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_FreeMemory(Os, Memory));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateMemory
++**
++** Allocate memory wrapper.
++**
++** INPUT:
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctPOINTER memory;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
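++    /*
++     * Allocations larger than one page come from vmalloc() so they do not
++     * require physically contiguous pages; smaller ones use kmalloc().
++     * gckOS_FreeMemory() uses is_vmalloc_addr() to pick the matching free
++     * routine.
++     */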
++ if (Bytes > PAGE_SIZE)
++ {
++ memory = (gctPOINTER) vmalloc(Bytes);
++ }
++ else
++ {
++ memory = (gctPOINTER) kmalloc(Bytes, GFP_KERNEL | gcdNOWARN);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return pointer to the memory allocation. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeMemory
++**
++** Free allocated memory wrapper.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Free the memory from the OS pool. */
++ if (is_vmalloc_addr(Memory))
++ {
++ vfree(Memory);
++ }
++ else
++ {
++ kfree(Memory);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapMemory
++**
++** Map physical memory into the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the logical address of the
++** mapped memory.
++*/
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
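++        /*
++         * First mapping of this MDL into the calling process: reserve an
++         * anonymous user VMA, then back it with the MDL's pages via
++         * dma_mmap_coherent() (or remap_pfn_range() when DMA-coherent
++         * allocations are not used).
++         */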
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (char *)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (char *)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): mdl->numPages: %d mdl->vmaAddr: 0x%X",
++ __FUNCTION__, __LINE__,
++ mdl->numPages,
++ mdlMap->vmaAddr
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (!mdlMap->vma)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): find_vma error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ up_write(&current->mm->mmap_sem);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): dma_mmap_coherent error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++#if !gcdPAGED_MEMORY_CACHEABLE
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++# endif
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages*PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): remap_pfn_range error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ *Logical = mdlMap->vmaAddr;
++
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemory
++**
++** Unmap physical memory out of the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemoryEx(Os, Physical, Bytes, Logical, _GetProcessID());
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemoryEx
++**
++** Unmap physical memory in the specified process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** gctUINT32 PID
++** Pid of the process that opened the device and mapped this memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X PID=%d",
++ Os, Physical, Bytes, Logical, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PID != 0);
++
++ MEMORY_LOCK(Os);
++
++ if (Logical)
++ {
++ mdlMap = FindMdlMap(mdl, PID);
++
++ if (mdlMap == gcvNULL || mdlMap->vmaAddr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ _UnmapUserLogical(PID, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserLogical
++**
++** Unmap user logical memory out of physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Memory
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemory(Os, Physical, Bytes, Logical);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateNonPagedMemory
++**
++** Allocate a number of pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that holds the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++**          Pointer to a variable that holds the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that will hold the physical address of the
++** allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** allocation.
++*/
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctSIZE_T bytes;
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gctSTRING addr;
++#ifdef NO_DMA_COHERENT
++ struct page * page;
++ long size, order;
++ gctPOINTER vaddr;
++#endif
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Align number of bytes to page size. */
++ bytes = gcmALIGN(*Bytes, PAGE_SIZE);
++
++    /* Get the total number of pages. */
++ numPages = GetPageCount(bytes, 0);
++
++ /* Allocate mdl+vector structure */
++ mdl = _CreateMdl(_GetProcessID());
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->pagedMem = 0;
++ mdl->numPages = numPages;
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
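++    /*
++     * Two allocation back ends: with DMA-coherent support the memory comes
++     * from dma_alloc_coherent() (optionally recycled from the non-paged
++     * cache); otherwise a physically contiguous block is taken from
++     * alloc_pages() and mapped into the kernel with vmap().
++     */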
++#ifndef NO_DMA_COHERENT
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ addr = _GetNonPagedMemoryCache(Os,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle);
++
++ if (addr == gcvNULL)
++#endif
++ {
++ addr = dma_alloc_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++ }
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++    if (addr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++ locked = gcvFALSE;
++        /* Free the whole non-paged cache and try again. */
++ _FreeAllNonPagedMemoryCache(Os);
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++ addr = dma_alloc_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++ }
++#endif
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ order = get_order(size);
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ page = _GetNonPagedMemoryCache(Os, order);
++
++ if (page == gcvNULL)
++#endif
++ {
++ page = alloc_pages(GFP_KERNEL | gcdNOWARN, order);
++ }
++
++ if (page == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ vaddr = (gctPOINTER)page_address(page);
++ mdl->contiguous = gcvTRUE;
++ mdl->u.contiguousPages = page;
++ addr = _CreateKernelVirtualMapping(mdl);
++ mdl->dmaHandle = virt_to_phys(vaddr);
++ mdl->kaddr = vaddr;
++ mdl->u.contiguousPages = page;
++
++#if !defined(CONFIG_PPC)
++ /* Cache invalidate. */
++ dma_sync_single_for_device(
++ gcvNULL,
++ page_to_phys(page),
++ bytes,
++ DMA_FROM_DEVICE);
++#endif
++
++ while (size > 0)
++ {
++ SetPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++#endif
++
++ if (addr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->addr = addr;
++
++ /* Return allocated memory. */
++ *Bytes = bytes;
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ if (InUserSpace)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++        /* This address only becomes valid after the mmap below. */
++
++ /* We need to map this to user space. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING) vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING) do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_coherent error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#else
++ mdlMap->vma->vm_page_prot = gcmkNONPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages * PAGE_SIZE,
++ mdlMap->vma->vm_page_prot))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): remap_pfn_range error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif /* NO_DMA_COHERENT */
++
++ up_write(&current->mm->mmap_sem);
++
++ *Logical = mdlMap->vmaAddr;
++ }
++ else
++ {
++ *Logical = (gctPOINTER)mdl->addr;
++ }
++
++    /*
++     * Add this MDL to the global list.  It is consulted by the
++     * get-physical-address and map-user-pointer functions.
++     */
++
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to the tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdlMap != gcvNULL)
++ {
++ /* Free LINUX_MDL_MAP. */
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ if (mdl != gcvNULL)
++ {
++ /* Free LINUX_MDL. */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++ }
++
++ if (locked)
++ {
++ /* Unlock memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeNonPagedMemory
++**
++** Free previously allocated and mapped pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes allocated.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocated memory.
++**
++** gctPOINTER Logical
++** Logical address of the allocated memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++#ifdef NO_DMA_COHERENT
++ unsigned size;
++ gctPOINTER vaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu Physical=0x%X Logical=0x%X",
++ Os, Bytes, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert physical address into a pointer to a MDL. */
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++#ifndef NO_DMA_COHERENT
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if (!_AddNonPagedMemoryCache(Os,
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle))
++#endif
++ {
++ dma_free_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle);
++ }
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ vaddr = mdl->kaddr;
++
++ while (size > 0)
++ {
++ ClearPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if (!_AddNonPagedMemoryCache(Os,
++ get_order(mdl->numPages * PAGE_SIZE),
++ virt_to_page(mdl->kaddr)))
++#endif
++ {
++ free_pages((unsigned long)mdl->kaddr, get_order(mdl->numPages * PAGE_SIZE));
++ }
++
++ _DestoryKernelVirtualMapping(mdl->addr);
++#endif /* NO_DMA_COHERENT */
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->vmaAddr != gcvNULL)
++ {
++            /* No mapping should still exist when non-paged memory is freed. */
++ gcmkASSERT(0);
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++    /* Remove the node from the global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReadRegister
++**
++** Read data from a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** OUTPUT:
++**
++** gctUINT32 * Data
++** Pointer to a variable that receives the data read from the register.
++*/
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ return gckOS_ReadRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X", Os, Core, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteRegister
++**
++** Write data to a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ return gckOS_WriteRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X Data=0x%08x", Os, Core, Address, Data);
++
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++
++ writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPageSize
++**
++** Get the system's page size.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PageSize
++** Pointer to a variable that will receive the system's page size.
++*/
++gceSTATUS gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(PageSize != gcvNULL);
++
++ /* Return the page size. */
++ *PageSize = (gctSIZE_T) PAGE_SIZE;
++
++ /* Success. */
++    gcmkFOOTER_ARG("*PageSize=%lu", *PageSize);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddress
++**
++** Get the physical system address of a corresponding virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X", Os, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Query page table of current process first. */
++ status = _QueryProcessPageTable(Logical, Address);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Get current process ID. */
++ processID = _GetProcessID();
++
++ /* Route through other function. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddressProcess(Os, Logical, processID, Address));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++gckOS_AddMapping(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcsUSER_MAPPING),
++ (gctPOINTER *) &map));
++
++ map->next = Os->userMap;
++ map->physical = Physical - Os->device->baseAddress;
++ map->logical = Logical;
++ map->bytes = Bytes;
++ map->start = (gctINT8_PTR) Logical;
++ map->end = map->start + Bytes;
++
++ Os->userMap = map;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckOS_RemoveMapping(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map, prev;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ for (map = Os->userMap, prev = gcvNULL; map != gcvNULL; map = map->next)
++ {
++ if ((map->logical == Logical)
++ && (map->bytes == Bytes)
++ )
++ {
++ break;
++ }
++
++ prev = map;
++ }
++
++ if (map == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++
++ if (prev == gcvNULL)
++ {
++ Os->userMap = map->next;
++ }
++ else
++ {
++ prev->next = map->next;
++ }
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, map));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gctINT8_PTR base, vBase;
++ gctUINT32 offset;
++ PLINUX_MDL_MAP map;
++ gcsUSER_MAPPING_PTR userMap;
++
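++    /*
++     * Resolve the logical address against, in order: the MDL's kernel
++     * mapping, the user mapping list, and finally the per-process map
++     * recorded for this MDL.
++     */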
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->addr;
++
++ /* Check for the logical address match. */
++ if ((base != gcvNULL)
++ && ((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - base;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ /* paged memory is not mapped to kernel space. */
++ return gcvSTATUS_INVALID_ADDRESS;
++ }
++ else
++ {
++ *Physical = gcmPTR2INT(virt_to_phys(base)) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++
++ /* Walk user maps. */
++ for (userMap = Os->userMap; userMap != gcvNULL; userMap = userMap->next)
++ {
++ if (((gctINT8_PTR) Logical >= userMap->start)
++ && ((gctINT8_PTR) Logical < userMap->end)
++ )
++ {
++ *Physical = userMap->physical
++ + (gctUINT32) ((gctINT8_PTR) Logical - userMap->start);
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ if (ProcessID != Os->kernelProcessID)
++ {
++ map = FindMdlMap(Mdl, (gctINT) ProcessID);
++ vBase = (map == gcvNULL) ? gcvNULL : (gctINT8_PTR) map->vmaAddr;
++
++ /* Is the given address within that range. */
++ if ((vBase != gcvNULL)
++ && ((gctINT8_PTR) Logical >= vBase)
++ && ((gctINT8_PTR) Logical < vBase + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - vBase;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, offset/PAGE_SIZE);
++ }
++ else
++ {
++ *Physical = page_to_phys(Mdl->u.contiguousPages) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ /* Address not yet found. */
++ return gcvSTATUS_INVALID_ADDRESS;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddressProcess
++**
++** Get the physical system address of a corresponding virtual address for a
++** given process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** gctUINT32 ProcessID
++** Process ID.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ )
++{
++ PLINUX_MDL mdl;
++ gctINT8_PTR base;
++ gceSTATUS status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X ProcessID=%d", Os, Logical, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* First try the contiguous memory pool. */
++ if (Os->device->contiguousMapped)
++ {
++ base = (gctINT8_PTR) Os->device->contiguousBase;
++
++ if (((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Os->device->contiguousSize)
++ )
++ {
++ /* Convert logical address into physical. */
++ *Address = Os->device->contiguousVidMem->baseAddress
++ + (gctINT8_PTR) Logical - base;
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Try the contiguous memory pool. */
++ mdl = (PLINUX_MDL) Os->device->contiguousPhysical;
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Walk all MDLs. */
++ for (mdl = Os->mdlHead; mdl != gcvNULL; mdl = mdl->next)
++ {
++ /* Try this MDL. */
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkONERROR(status);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPhysical
++**
++** Map a physical address into kernel space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Physical
++** Physical address of the memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the base address of the mapped
++** memory.
++*/
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctPOINTER logical;
++ PLINUX_MDL mdl;
++ gctUINT32 physical = Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* Go through our mapping to see if we know this physical address already. */
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->dmaHandle != 0)
++ {
++ if ((physical >= mdl->dmaHandle)
++ && (physical < mdl->dmaHandle + mdl->numPages * PAGE_SIZE)
++ )
++ {
++ *Logical = mdl->addr + (physical - mdl->dmaHandle);
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
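++        /*
++         * The address is not backed by any of our MDLs.  On 3.10+ kernels
++         * only addresses inside the reserved contiguous pool are expected;
++         * older kernels fall back to ioremap().
++         */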
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool = Os->device->pool;
++
++ if (Physical >= pool->phys && Physical < pool->phys + pool->size)
++ logical = (gctPOINTER)(Physical - pool->phys + pool->virt);
++ else
++ logical = gcvNULL;
++#else
++        /* Unknown to the driver; map the region as uncached I/O memory. */
++ request_mem_region(physical, Bytes, "MapRegion");
++ logical = (gctPOINTER) ioremap_nocache(physical, Bytes);
++#endif
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to map physical address 0x%08x",
++ __FUNCTION__, __LINE__, Physical
++ );
++
++ MEMORY_UNLOCK(Os);
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Return pointer to mapped memory. */
++ *Logical = logical;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapPhysical
++**
++** Unmap a previously mapped memory region from kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Pointer to the base address of the memory to unmap.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->addr != gcvNULL)
++ {
++ if (Logical >= (gctPOINTER)mdl->addr
++ && Logical < (gctPOINTER)((gctSTRING)mdl->addr + mdl->numPages * PAGE_SIZE))
++ {
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
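++    /*
++     * Only regions that were ioremap()'ed by gckOS_MapPhysical() (i.e. not
++     * covered by any MDL) need an explicit iounmap() here.
++     */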
++ if (mdl == gcvNULL)
++ {
++ /* Unmap the memory. */
++ iounmap(Logical);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateMutex
++**
++** Create a new mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Mutex
++** Pointer to a variable that will hold a pointer to the mutex.
++*/
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Allocate the mutex structure. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct mutex), Mutex));
++
++ /* Initialize the mutex. */
++ mutex_init(*Mutex);
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Mutex=0x%X", *Mutex);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DeleteMutex
++**
++** Delete a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++**          Pointer to the mutex to be deleted.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%X", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Destroy the mutex. */
++ mutex_destroy(Mutex);
++
++ /* Free the mutex structure. */
++ gcmkONERROR(gckOS_Free(Os, Mutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireMutex
++**
++** Acquire a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be acquired.
++**
++** gctUINT32 Timeout
++** Timeout value specified in milliseconds.
++** Specify the value of gcvINFINITE to keep the thread suspended
++** until the mutex has been acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ )
++{
++#if gcdDETECT_TIMEOUT
++ gctUINT32 timeout;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x Timeout=%u", Os, Mutex, Timeout);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++#if gcdDETECT_TIMEOUT
++ timeout = 0;
++
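++    /*
++     * Poll the mutex with mutex_trylock() once per millisecond.  For an
++     * infinite wait, dump the GPU state after gcdINFINITE_TIMEOUT
++     * milliseconds without progress, since that usually indicates a hang.
++     */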
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Advance the timeout. */
++ timeout += 1;
++
++ if (Timeout == gcvINFINITE)
++ {
++ if (timeout == gcdINFINITE_TIMEOUT)
++ {
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkVERIFY_OK(_VerifyDMA(
++ Os, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++#if gcdDETECT_DMA_ADDRESS
++ /* Dump only if DMA appears stuck. */
++ if (
++ (dmaAddress1 == dmaAddress2)
++#if gcdDETECT_DMA_STATE
++ && (dmaState1 == dmaState2)
++# endif
++ )
++# endif
++ {
++ gcmkVERIFY_OK(_DumpGPUState(Os, gcvCORE_MAJOR));
++
++ gcmkPRINT(
++ "%s(%d): mutex 0x%X; forced message flush.",
++ __FUNCTION__, __LINE__, Mutex
++ );
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(dmaAddress2);
++ }
++
++ timeout = 0;
++ }
++ }
++ else
++ {
++            /* Timed out? */
++ if (timeout >= Timeout)
++ {
++ break;
++ }
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++#else
++ if (Timeout == gcvINFINITE)
++ {
++ /* Lock the mutex. */
++ mutex_lock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ if (Timeout-- == 0)
++ {
++ break;
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++#endif
++
++ /* Timeout. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_TIMEOUT);
++ return gcvSTATUS_TIMEOUT;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseMutex
++**
++** Release an acquired mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Release the mutex. */
++ mutex_unlock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchange
++**
++** Atomically exchange a pair of 32-bit values.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++**      IN OUT gctUINT32_PTR Target
++**          Pointer to the 32-bit value to exchange.
++**
++**      IN gctUINT32 NewValue
++**          Specifies a new value for the 32-bit value pointed to by Target.
++**
++**      OUT gctUINT32_PTR OldValue
++**          The old value of the 32-bit value pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=%u", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Exchange the pair of 32-bit values. */
++ *OldValue = (gctUINT32) atomic_xchg((atomic_t *) Target, (int) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=%u", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchangePtr
++**
++** Atomically exchange a pair of pointers.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctPOINTER * Target
++** Pointer to the 32-bit value to exchange.
++**          Pointer to the pointer to exchange.
++** IN gctPOINTER NewValue
++** Specifies a new value for the pointer pointed to by Target.
++**
++** OUT gctPOINTER * OldValue
++** The old value of the pointer pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=0x%X", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Exchange the pair of pointers. */
++ *OldValue = (gctPOINTER)(gctUINTPTR_T) atomic_xchg((atomic_t *) Target, (int)(gctUINTPTR_T) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=0x%X", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++#if gcdSMP
++/*******************************************************************************
++**
++**  gckOS_AtomSetMask
++**
++**  Atomically set the given mask bits in an atom.
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to set.
++**
++** IN gctUINT32 Mask
++** Mask to set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
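++    /*
++     * Compare-and-swap retry loop: re-read the value and retry until
++     * atomic_cmpxchg() confirms nobody modified it in between.
++     */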
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval | Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomClearMask
++**
++**  Atomically clear the given mask bits from an atom.
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to clear.
++**
++** IN gctUINT32 Mask
++** Mask to clear.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval & ~Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Allocate the atom. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(atomic_t), Atom));
++
++ /* Initialize the atom. */
++ atomic_set((atomic_t *) *Atom, 0);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Atom=0x%X", *Atom);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Free the atom. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, Atom));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++**          Pointer to a variable that receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Return the current value of atom. */
++ *Value = atomic_read((atomic_t *) Atom);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ )
++{
++    gcmkHEADER_ARG("Os=0x%X Atom=0x%0x Value=%d", Os, Atom, Value);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Set the current value of atom. */
++ atomic_set((atomic_t *) Atom, Value);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Increment the atom. */
++ *Value = atomic_inc_return((atomic_t *) Atom) - 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Decrement the atom. */
++ *Value = atomic_dec_return((atomic_t *) Atom) + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Delay
++**
++** Delay execution of the current thread for a number of milliseconds.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Delay
++** Delay to sleep, specified in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Delay=%u", Os, Delay);
++
++ if (Delay > 0)
++ {
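++        /*
++         * On kernels with high-resolution timers the delay is issued as an
++         * hrtimer sleep, which avoids the rounding up to whole jiffies that
++         * msleep() performs for short delays.
++         */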
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ ktime_t delay = ktime_set(Delay/1000, (Delay%1000) * NSEC_PER_MSEC);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
++#else
++ msleep(Delay);
++#endif
++
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTicks
++**
++** Get the number of milliseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT32_PTR Time
++**          Pointer to a variable that receives the time.
++**
++*/
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ )
++{
++ gcmkHEADER();
++
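++    /*
++     * jiffies_to_msecs(jiffies) wraps together with the jiffies counter;
++     * callers are expected to compare tick values with gckOS_TicksAfter(),
++     * which handles the wrap through time_after().
++     */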
++ *Time = jiffies_to_msecs(jiffies);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_TicksAfter
++**
++**  Compare two time values obtained from gckOS_GetTicks.
++**
++** INPUT:
++** gctUINT32 Time1
++** First time value to be compared.
++**
++** gctUINT32 Time2
++** Second time value to be compared.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR IsAfter
++**          Pointer to a variable that receives the comparison result.
++**
++*/
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ )
++{
++ gcmkHEADER();
++
++ *IsAfter = time_after((unsigned long)Time1, (unsigned long)Time2);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTime
++**
++** Get the number of microseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT64_PTR Time
++**          Pointer to a variable that receives the time.
++**
++*/
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ )
++{
++ gcmkHEADER();
++
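++    /* The microsecond clock is left unimplemented in this port; zero is reported. */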
++ *Time = 0;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MemoryBarrier
++**
++** Make sure the CPU has executed everything up to this point and the data got
++** written to the specified pointer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++**          Address of the memory covered by the barrier.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X", Os, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
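++    /*
++     * With bufferable non-paged memory on older ARM kernels only a dsb()
++     * is issued to drain the CPU write buffer; every other configuration
++     * falls back to the generic mb() barrier.
++     */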
++#if gcdNONPAGED_MEMORY_BUFFERABLE \
++ && defined (CONFIG_ARM) \
++ && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* drain write buffer */
++ dsb();
++
++ /* drain outer cache's write buffer? */
++#else
++ mb();
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemory
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ /* Allocate the memory. */
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvFALSE, Bytes, Physical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemoryEx
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL Contiguous
++** Need contiguous memory or not.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gctINT numPages;
++ gctINT i;
++ PLINUX_MDL mdl = gcvNULL;
++ gctSIZE_T bytes;
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctPOINTER addr = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Contiguous=%d Bytes=%lu", Os, Contiguous, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ bytes = gcmALIGN(Bytes, PAGE_SIZE);
++
++ numPages = GetPageCount(bytes, 0);
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++ mdl = _CreateMdl(_GetProcessID());
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (Contiguous)
++ {
++ gctUINT32 order = get_order(bytes);
++
++ if (order >= MAX_ORDER)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
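++        /*
++         * First try a lowmem allocation (exact pages on kernels >= 2.6.27)
++         * with __GFP_NORETRY so a large request fails quickly; on failure,
++         * fall back to an allocation that may come from highmem.
++         */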
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ addr =
++ alloc_pages_exact(numPages * PAGE_SIZE, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
++
++ mdl->u.contiguousPages = addr
++ ? virt_to_page(addr)
++ : gcvNULL;
++
++ mdl->exact = gcvTRUE;
++#else
++ mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
++#endif
++ if (mdl->u.contiguousPages == gcvNULL)
++ {
++ mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ mdl->exact = gcvFALSE;
++#endif
++ }
++ }
++ else
++ {
++ mdl->u.nonContiguousPages = _NonContiguousAlloc(numPages);
++ }
++
++ if (mdl->u.contiguousPages == gcvNULL && mdl->u.nonContiguousPages == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->dmaHandle = 0;
++ mdl->addr = 0;
++ mdl->numPages = numPages;
++ mdl->pagedMem = 1;
++ mdl->contiguous = Contiguous;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ struct page *page;
++
++ if (mdl->contiguous)
++ {
++ page = nth_page(mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(mdl->u.nonContiguousPages, i);
++ }
++
++ SetPageReserved(page);
++
++ if (!PageHighMem(page) && page_to_phys(page))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ (gctPOINTER)(gctUINTPTR_T)page_to_phys(page),
++ page_address(page),
++ PAGE_SIZE));
++ }
++ }
++
++ /* Return physical address. */
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /*
++ * Add this to a global list.
++    * It will be used by the get-physical-address and
++    * map-user-pointer functions.
++ */
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdl != gcvNULL)
++ {
++ /* Free the memory. */
++ _DestroyMdl(mdl);
++ }
++
++ if (locked)
++ {
++ /* Unlock the memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreePagedMemory
++**
++** Free memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL) Physical;
++ gctINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /*addr = mdl->addr;*/
++
++ MEMORY_LOCK(Os);
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ if (mdl->contiguous)
++ {
++ ClearPageReserved(nth_page(mdl->u.contiguousPages, i));
++ }
++ else
++ {
++ ClearPageReserved(_NonContiguousToPage(mdl->u.nonContiguousPages, i));
++ }
++ }
++
++ if (mdl->contiguous)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ if (mdl->exact == gcvTRUE)
++ {
++ free_pages_exact(page_address(mdl->u.contiguousPages), mdl->numPages * PAGE_SIZE);
++ }
++ else
++#endif
++ {
++ __free_pages(mdl->u.contiguousPages, GetOrder(mdl->numPages));
++ }
++ }
++ else
++ {
++ _NonContiguousFree(mdl->u.nonContiguousPages, mdl->numPages);
++ }
++
++ /* Remove the node from global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Free the structure... */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_LockPages
++**
++** Lock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctBOOL Cacheable
++** Cache mode of mapping.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the address of the mapped
++** memory.
++**
++** gctSIZE_T * PageCount
++** Pointer to a variable that receives the number of pages required for
++** the page table according to the GPU page size.
++*/
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gctSTRING addr;
++ unsigned long start;
++ unsigned long pfn;
++ gctINT i;
++
++    gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount != gcvNULL);
++
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++
++ if (Cacheable == gcvFALSE)
++ {
++ /* Make this mapping non-cached. */
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ }
++
++ addr = mdl->addr;
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++            /* Map kernel memory to user space. */
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ page_to_pfn(mdl->u.contiguousPages),
++ mdlMap->vma->vm_end - mdlMap->vma->vm_start,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): unable to mmap ret",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ start = mdlMap->vma->vm_start;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ pfn = _NonContiguousToPfn(mdl->u.nonContiguousPages, i);
++
++ if (remap_pfn_range(mdlMap->vma,
++ start,
++ pfn,
++ PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): gctPHYS_ADDR->0x%X Logical->0x%X Unable to map addr->0x%X to start->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)*Logical,
++ (gctUINT32)(gctUINTPTR_T)addr,
++ (gctUINT32)(gctUINTPTR_T)start
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ start += PAGE_SIZE;
++ addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ mdlMap->count++;
++
++    /* Return the user space mapping address. */
++ *Logical = mdlMap->vmaAddr;
++
++ /* Return the page number according to the GPU page size. */
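++    /*
++     * The GPU MMU works with 4 KB pages; when the CPU PAGE_SIZE is larger,
++     * each CPU page accounts for PAGE_SIZE / 4096 GPU page-table entries.
++     */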
++ gcmkASSERT((PAGE_SIZE % 4096) == 0);
++ gcmkASSERT((PAGE_SIZE / 4096) >= 1);
++
++ *PageCount = mdl->numPages * (PAGE_SIZE / 4096);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(gckOS_CacheFlush(
++ Os,
++ _GetProcessID(),
++ Physical,
++ gcvNULL,
++ (gctPOINTER)mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE
++ ));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X *PageCount=%lu", *Logical, *PageCount);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPages
++**
++** Map paged memory into a page table.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T PageCount
++** Number of pages required for the physical address.
++**
++** gctPOINTER PageTable
++** Pointer to the page table to fill in.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ return gckOS_MapPagesEx(Os,
++ gcvCORE_MAJOR,
++ Physical,
++ PageCount,
++ PageTable);
++}
++
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gckMMU mmu;
++ PLINUX_MDL mmuMdl;
++ gctUINT32 bytes;
++ gctPHYS_ADDR pageTablePhysical;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Physical=0x%X PageCount=%u PageTable=0x%X",
++ Os, Core, Physical, PageCount, PageTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Physical->0x%X PageCount->0x%X PagedMemory->?%d",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)PageCount,
++ mdl->pagedMem
++ );
++
++ MEMORY_LOCK(Os);
++
++ table = (gctUINT32 *)PageTable;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ mmu = Os->device->kernels[Core]->mmu;
++ bytes = PageCount * sizeof(*table);
++ mmuMdl = (PLINUX_MDL)mmu->pageTablePhysical;
++#endif
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
++
++ if (mdl->pagedMem)
++ {
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ if (mdl->contiguous)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ _NonContiguousToPhys(mdl->u.nonContiguousPages, offset),
++ table));
++ }
++ }
++ else
++#endif
++ {
++ if (mdl->contiguous)
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ _NonContiguousToPhys(mdl->u.nonContiguousPages, offset),
++ table));
++ }
++ }
++
++ table++;
++ offset += 1;
++ }
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++
++ while (PageCount-- > 0)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ table++;
++ offset += 1;
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Get physical address of pageTable */
++ pageTablePhysical = (gctPHYS_ADDR)(mmuMdl->dmaHandle +
++ ((gctUINT32 *)PageTable - mmu->pageTableLogical));
++
++ /* Flush the mmu page table cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Os,
++ _GetProcessID(),
++ gcvNULL,
++ pageTablePhysical,
++ PageTable,
++ bytes
++ ));
++#endif
++
++OnError:
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnlockPages
++**
++** Unlock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctPOINTER Logical
++** Address of the mapped memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%u Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Make sure there is already a mapping...*/
++ gcmkVERIFY_ARGUMENT(mdl->u.nonContiguousPages != gcvNULL
++ || mdl->u.contiguousPages != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if ((mdlMap->vmaAddr != gcvNULL) && (_GetProcessID() == mdlMap->pid))
++ {
++ if (--mdlMap->count == 0)
++ {
++ _UnmapUserLogical(mdlMap->pid, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++ mdlMap->vmaAddr = gcvNULL;
++ }
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_AllocateContiguous
++**
++** Allocate memory from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that receives the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the logical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(Os,
++ InUserSpace,
++ Bytes,
++ Physical,
++ Logical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeContiguous
++**
++** Free memory allocated from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctPOINTER Logical
++**          Logical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++    /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_FreeNonPagedMemory(Os, Bytes, Physical, Logical));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++/******************************************************************************
++**
++** gckOS_GetKernelLogical
++**
++**  Return the kernel logical pointer that corresponds to the specified
++** hardware address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Hardware physical address.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the pointer in kernel address space.
++*/
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ return gckOS_GetKernelLogicalEx(Os, gcvCORE_MAJOR, Address, KernelPointer);
++}
++
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%08x", Os, Core, Address);
++
++ do
++ {
++ gckGALDEVICE device;
++ gckKERNEL kernel;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctPOINTER logical;
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Os->device;
++
++ /* Kernel shortcut. */
++ kernel = device->kernels[Core];
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(
++ kernel->vg->hardware, Address, &pool, &offset
++ ));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkERR_BREAK(gckHARDWARE_SplitMemory(
++ kernel->hardware, Address, &pool, &offset
++ ));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ logical = device->contiguousBase;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkFOOTER();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Build logical address of specified address. */
++ * KernelPointer = ((gctUINT8_PTR) logical) + offset;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_MapUserPointer
++**
++** Map a pointer from the user process into the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be mapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be mapped.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the mapped pointer in kernel address
++** space.
++*/
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER buf = gcvNULL;
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu", Os, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ buf = kmalloc(Size, GFP_KERNEL | gcdNOWARN);
++ if (buf == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to allocate memory.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ len = copy_from_user(buf, Pointer, Size);
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data from user.",
++ __FUNCTION__, __LINE__
++ );
++
++ if (buf != gcvNULL)
++ {
++ kfree(buf);
++ }
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ *KernelPointer = buf;
++
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserPointer
++**
++** Unmap a user process pointer from the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be unmapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be unmapped.
++**
++** gctPOINTER KernelPointer
++** Pointer in kernel address space that needs to be unmapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ )
++{
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu KernelPointer=0x%X",
++ Os, Pointer, Size, KernelPointer);
++
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ len = copy_to_user(Pointer, KernelPointer, Size);
++
++ kfree(KernelPointer);
++
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data to user.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly or it has to be
++** copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++**          gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d", Os, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(NeedCopy != gcvNULL);
++
++ /* We need to copy data. */
++ *NeedCopy = gcvTRUE;
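++    /*
++     * Always answering gcvTRUE steers callers to the copy path (for
++     * example gckOS_MapUserPointer) rather than mapping user memory
++     * into the kernel directly.
++     */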
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedCopy=%d", *NeedCopy);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data from user. */
++ if (copy_from_user(KernelPointer, Pointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data to user. */
++ if (copy_to_user(Pointer, KernelPointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteMemory
++**
++** Write data to a memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++** Address of the memory to write to.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Write memory. */
++ if (access_ok(VERIFY_WRITE, Address, 4))
++ {
++ /* User address. */
++ if(put_user(Data, (gctUINT32*)Address))
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++ }
++ else
++ {
++ /* Kernel address. */
++ *(gctUINT32 *)Address = Data;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapUserMemory
++**
++**  Lock down a user buffer and return a DMA'able address to be used by the
++** hardware to access it.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to lock down.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to lock down.
++**
++** OUTPUT:
++**
++** gctPOINTER * Info
++** Pointer to variable receiving the information record required by
++** gckOS_UnmapUserMemory.
++**
++** gctUINT32_PTR Address
++** Pointer to a variable that will receive the address DMA'able by the
++** hardware.
++*/
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x Core=%d Memory=0x%x Size=%lu", Os, Core, Memory, Size);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_AddMapping(Os, *Address, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctSIZE_T pageCount, i, j;
++ gctUINT32_PTR pageTable;
++ gctUINT32 address = 0, physical = ~0U;
++ gctUINTPTR_T start, end, memory;
++ gctUINT32 offset;
++ gctINT result = 0;
++
++ gcsPageInfo_PTR info = gcvNULL;
++ struct page **pages = gcvNULL;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL || Physical != ~0U);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ memory = (gctUINTPTR_T) Memory;
++
++ /* Get the number of required pages. */
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
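++        /*
++         * end is rounded up and start rounded down to page boundaries, so
++         * pageCount covers every page touched by [memory, memory + Size),
++         * even for buffers that are not page aligned.
++         */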
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageCount: %d.",
++ __FUNCTION__, __LINE__,
++ pageCount
++ );
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ MEMORY_MAP_LOCK(Os);
++
++ /* Allocate the Info struct. */
++ info = (gcsPageInfo_PTR)kmalloc(sizeof(gcsPageInfo), GFP_KERNEL | gcdNOWARN);
++
++ if (info == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ /* Allocate the array of page addresses. */
++ pages = (struct page **)kmalloc(pageCount * sizeof(struct page *), GFP_KERNEL | gcdNOWARN);
++
++ if (pages == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ if (Physical != ~0U)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ pages[i] = pfn_to_page((Physical >> PAGE_SHIFT) + i);
++ get_page(pages[i]);
++ }
++ }
++ else
++ {
++ /* Get the user pages. */
++ down_read(&current->mm->mmap_sem);
++
++ result = get_user_pages(current,
++ current->mm,
++ memory & PAGE_MASK,
++ pageCount,
++ 1,
++ 0,
++ pages,
++ gcvNULL
++ );
++
++ up_read(&current->mm->mmap_sem);
++
++            if (result <= 0 || result < pageCount)
++ {
++ struct vm_area_struct *vma;
++
++ /* Release the pages if any. */
++ if (result > 0)
++ {
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++
++ page_cache_release(pages[i]);
++ pages[i] = gcvNULL;
++ }
++
++ result = 0;
++ }
++
++ vma = find_vma(current->mm, memory);
++
++ if (vma && (vma->vm_flags & VM_PFNMAP))
++ {
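++                    /*
++                     * get_user_pages() does not handle VM_PFNMAP mappings
++                     * (for example memory another driver remapped into user
++                     * space), so the page tables are walked by hand to
++                     * recover the backing pages.
++                     */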
++ pte_t * pte;
++ spinlock_t * ptl;
++ gctUINTPTR_T logical = memory;
++
++ for (i = 0; i < pageCount; i++)
++ {
++ pgd_t * pgd = pgd_offset(current->mm, logical);
++ pud_t * pud = pud_offset(pgd, logical);
++
++ if (pud)
++ {
++ pmd_t * pmd = pmd_offset(pud, logical);
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
++ if (!pte)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ pages[i] = pte_page(*pte);
++ pte_unmap_unlock(pte, ptl);
++
++ /* Advance to next. */
++ logical += PAGE_SIZE;
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Check if this memory is contiguous for old mmu. */
++ if (Os->device->kernels[Core]->hardware->mmuVersion == 0)
++ {
++ for (i = 1; i < pageCount; i++)
++ {
++ if (pages[i] != nth_page(pages[0], i))
++ {
++ /* Non-contiguous. */
++ break;
++ }
++ }
++
++ if (i == pageCount)
++ {
++ /* Contiguous memory. */
++ physical = page_to_phys(pages[0]) | (memory & ~PAGE_MASK);
++
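++                        /*
++                         * The old MMU addresses memory through a single
++                         * 2 GB window above the GPU base address, so the
++                         * buffer is handed to the hardware directly only
++                         * when it falls inside that window.
++                         */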
++ if (!((physical - Os->device->baseAddress) & 0x80000000))
++ {
++ kfree(pages);
++ pages = gcvNULL;
++
++ info->pages = gcvNULL;
++ info->pageTable = gcvNULL;
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ *Address = physical - Os->device->baseAddress;
++ *Info = info;
++
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
++ *Info, *Address);
++
++ return gcvSTATUS_OK;
++ }
++ }
++ }
++
++ /* Reference pages. */
++ for (i = 0; i < pageCount; i++)
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++
++ for (i = 0; i < pageCount; i++)
++ {
++#ifdef CONFIG_ARM
++ gctUINT32 data;
++ get_user(data, (gctUINT32*)((memory & PAGE_MASK) + i * PAGE_SIZE));
++#endif
++
++ /* Flush(clean) the data cache. */
++ gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ (gctPOINTER)(gctUINTPTR_T)page_to_phys(pages[i]),
++ (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
++ PAGE_SIZE));
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckVGMMU_AllocatePages(Os->device->kernels[Core]->vg->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(Os->device->kernels[Core]->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ gctUINT32_PTR tab = pageTable + i * (PAGE_SIZE/4096);
++
++ phys = page_to_phys(pages[i]);
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys,
++ tab));
++ }
++ else
++#endif
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys,
++ tab));
++ }
++
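++            /*
++             * When the CPU page is larger than the 4 KB GPU page, fill in
++             * the remaining GPU page-table entries that cover this CPU page.
++             */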
++ for (j = 1; j < (PAGE_SIZE/4096); j++)
++ {
++ pageTable[i * (PAGE_SIZE/4096) + j] = pageTable[i * (PAGE_SIZE/4096)] + 4096 * j;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageTable[%d]: 0x%X 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, phys, pageTable[i]);
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Os->device->kernels[Core]->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Os->device->kernels[Core]->mmu));
++ }
++
++ /* Save pointer to page table. */
++ info->pageTable = pageTable;
++ info->pages = pages;
++
++ *Info = (gctPOINTER) info;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info->pages: 0x%X, info->pageTable: 0x%X, info: 0x%X.",
++ __FUNCTION__, __LINE__,
++ info->pages,
++ info->pageTable,
++ info
++ );
++
++ offset = (Physical != ~0U)
++ ? (Physical & ~PAGE_MASK)
++ : (memory & ~PAGE_MASK);
++
++ /* Return address. */
++ *Address = address + offset;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Address: 0x%X.",
++ __FUNCTION__, __LINE__,
++ *Address
++ );
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++OnError:
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++            "%s(%d): error occurred: %d.",
++ __FUNCTION__, __LINE__,
++ status
++ );
++
++ /* Release page array. */
++ if (result > 0 && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: page table is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++ page_cache_release(pages[i]);
++ }
++ }
++
++        if (info != gcvNULL && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: pages is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page table. */
++ kfree(pages);
++ info->pages = gcvNULL;
++ }
++
++ /* Release page info struct. */
++ if (info != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: info is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page info struct. */
++ kfree(info);
++ *Info = gcvNULL;
++ }
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x", *Info, *Address);
++ }
++ else
++ {
++ gcmkFOOTER();
++ }
++
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserMemory
++**
++**  Unlock a user buffer that was previously locked down by
++** gckOS_MapUserMemory.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to unlock.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to unlock.
++**
++** gctPOINTER Info
++** Information record returned by gckOS_MapUserMemory.
++**
++** gctUINT32_PTR Address
++** The address returned by gckOS_MapUserMemory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ )
++{
++ gceSTATUS status;
++
++    gcmkHEADER_ARG("Os=0x%X Core=%d Memory=0x%X Size=%lu Info=0x%X Address=0x%08x",
++ Os, Core, Memory, Size, Info, Address);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_RemoveMapping(Os, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctUINTPTR_T memory, start, end;
++ gcsPageInfo_PTR info;
++ gctSIZE_T pageCount, i;
++ struct page **pages;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ do
++ {
++ info = (gcsPageInfo_PTR) Info;
++
++ pages = info->pages;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info=0x%X, pages=0x%X.",
++ __FUNCTION__, __LINE__,
++ info, pages
++ );
++
++ /* Invalid page array. */
++ if (pages == gcvNULL && info->pageTable == gcvNULL)
++ {
++ kfree(info);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ memory = (gctUINTPTR_T)Memory;
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): memory: 0x%X, pageCount: %d, pageTable: 0x%X.",
++ __FUNCTION__, __LINE__,
++ memory, pageCount, info->pageTable
++ );
++
++ MEMORY_MAP_LOCK(Os);
++
++ gcmkASSERT(info->pageTable != gcvNULL);
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckVGMMU_FreePages(Os->device->kernels[Core]->vg->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckMMU_FreePages(Os->device->kernels[Core]->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++
++ /* Release the page cache. */
++ if (pages)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pages[%d]: 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, pages[i]
++ );
++
++ if (!PageReserved(pages[i]))
++ {
++ SetPageDirty(pages[i]);
++ }
++
++ page_cache_release(pages[i]);
++ }
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ if (info != gcvNULL)
++ {
++ /* Free the page array. */
++ if (info->pages != gcvNULL)
++ {
++ kfree(info->pages);
++ }
++
++ kfree(info);
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_GetBaseAddress
++**
++** Get the base address for the physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR BaseAddress
++** Pointer to a variable that will receive the base address.
++*/
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Return base address. */
++ *BaseAddress = Os->device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ disable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ enable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Destination=0x%X Source=0x%X Bytes=%lu",
++ Destination, Source, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Destination != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Source != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memcpy(Destination, Source, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X Bytes=%lu", Memory, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memset(Memory, 0, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Cache Control ********************************
++*******************************************************************************/
++
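++/*
++ * Rough guide to the three maintenance operations below, mirroring the
++ * usual DMA API directions:
++ *
++ *   Clean      - write dirty CPU cache lines back before the GPU reads
++ *                the memory (DMA_TO_DEVICE).
++ *   Invalidate - discard CPU cache lines before the CPU reads data the
++ *                GPU has written (DMA_FROM_DEVICE).
++ *   Flush      - clean and invalidate (DMA_BIDIRECTIONAL).
++ */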
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE)
++static inline gceSTATUS
++outer_func(
++ gceCACHEOPERATION Type,
++ unsigned long Start,
++ unsigned long End
++ )
++{
++ switch (Type)
++ {
++ case gcvCACHE_CLEAN:
++ outer_clean_range(Start, End);
++ break;
++ case gcvCACHE_INVALIDATE:
++ outer_inv_range(Start, End);
++ break;
++ case gcvCACHE_FLUSH:
++ outer_flush_range(Start, End);
++ break;
++ default:
++ return gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_OUTER_CACHE_PATCH
++/*******************************************************************************
++** _HandleOuterCache
++**
++** Handle the outer cache for the specified addresses.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++**          Process ID that Logical belongs to.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++**
++**      gceCACHEOPERATION Type
++**          Operation to be executed.
++*/
++static gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i, pageNum;
++ unsigned long paddr;
++ gctPOINTER vaddr;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ if (Physical != gcvNULL)
++ {
++ /* Non paged memory or gcvPOOL_USER surface */
++ paddr = (unsigned long) Physical;
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else if ((Handle == gcvNULL)
++ || (Handle != gcvNULL && ((PLINUX_MDL)Handle)->contiguous)
++ )
++ {
++ /* Video Memory or contiguous virtual memory */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Os, Logical, (gctUINT32*)&paddr));
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else
++ {
++ /* Non contiguous virtual memory */
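++        /*
++         * The outer (L2) cache is maintained by physical address, so a
++         * virtually contiguous but physically scattered buffer has to be
++         * translated and handled one page at a time.
++         */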
++ vaddr = (gctPOINTER)gcmALIGN_BASE((gctUINTPTR_T)Logical, PAGE_SIZE);
++ pageNum = GetPageCount(Bytes, 0);
++
++ for (i = 0; i < pageNum; i += 1)
++ {
++ gcmkONERROR(_ConvertLogical2Physical(
++ Os,
++ vaddr + PAGE_SIZE * i,
++ ProcessID,
++ (PLINUX_MDL)Handle,
++ (gctUINT32*)&paddr
++ ));
++
++ gcmkONERROR(outer_func(Type, paddr, paddr + PAGE_SIZE));
++ }
++ }
++
++ mb();
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++#endif
++
++/*******************************************************************************
++** gckOS_CacheClean
++**
++** Clean the cache for the specified addresses. The GPU is going to need the
++** data. If the system is allocating memory as non-cachable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++**          Process ID that Logical belongs to.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheClean(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_TO_DEVICE);
++# else
++ dmac_clean_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_CLEAN);
++#else
++ outer_clean_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++
++ dma_cache_wback((unsigned long) Logical, Bytes);
++
++#elif defined(CONFIG_PPC)
++
++ /* TODO */
++
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_TO_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheInvalidate
++**
++**  Invalidate the cache for the specified addresses. The GPU is going to need
++**  the data. If the system is allocating memory as non-cachable, this function
++**  can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++**          Process ID that Logical belongs to.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++**      gctPOINTER Physical
++**          Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheInvalidate(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_FROM_DEVICE);
++# else
++ dmac_inv_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_INVALIDATE);
++#else
++ outer_inv_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_FROM_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheFlush
++**
++** Clean the cache for the specified addresses and invalidate the lines as
++** well. The GPU is going to need and modify the data. If the system is
++** allocating memory as non-cachable, this function can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++**          Process ID that Logical belongs to.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++**      gctPOINTER Physical
++**          Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheFlush(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++ /* Inner cache. */
++ dmac_flush_range(Logical, Logical + Bytes);
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_FLUSH);
++#else
++ outer_flush_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_wback_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_BIDIRECTIONAL);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
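++
++/* Illustrative sketch only: a hypothetical helper showing how the cache
++** operations above are typically paired around GPU access to a cached buffer.
++** The buffer parameters are assumed to come from an earlier allocation and
++** mapping; error handling is reduced to gcmkVERIFY_OK for brevity. Kept out
++** of the build with #if 0.
++*/
++#if 0
++static gceSTATUS
++_SampleCacheUsage(
++    IN gckOS Os,
++    IN gctUINT32 ProcessID,
++    IN gctPHYS_ADDR Handle,
++    IN gctPOINTER Physical,
++    IN gctPOINTER Logical,
++    IN gctSIZE_T Bytes
++    )
++{
++    /* CPU has written the buffer; clean and invalidate before the GPU
++    ** reads and modifies it. */
++    gcmkVERIFY_OK(gckOS_CacheFlush(Os, ProcessID, Handle, Physical, Logical, Bytes));
++
++    /* ... submit GPU work that consumes and updates the buffer ... */
++
++    /* GPU has written results; drop stale CPU cache lines before reading. */
++    gcmkVERIFY_OK(gckOS_CacheInvalidate(Os, ProcessID, Handle, Physical, Logical, Bytes));
++
++    return gcvSTATUS_OK;
++}
++#endif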
++
++/*******************************************************************************
++********************************* Broadcasting *********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Broadcast
++**
++** System hook for broadcast events from the kernel driver.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gceBROADCAST Reason
++** Reason for the broadcast. Can be one of the following values:
++**
++** gcvBROADCAST_GPU_IDLE
++** Broadcasted when the kernel driver thinks the GPU might be
++** idle. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_COMMIT
++** Broadcasted when any client process commits a command
++** buffer. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_STUCK
++** Broadcasted when the kernel driver hits the timeout waiting
++** for the GPU.
++**
++** gcvBROADCAST_FIRST_PROCESS
++** First process is trying to connect to the kernel.
++**
++** gcvBROADCAST_LAST_PROCESS
++** Last process has detached from the kernel.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Hardware=0x%X Reason=%d", Os, Hardware, Reason);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ switch (Reason)
++ {
++ case gcvBROADCAST_FIRST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "First process has attached");
++ break;
++
++ case gcvBROADCAST_LAST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "Last process has detached");
++
++ /* Put GPU OFF. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++ gcvPOWER_OFF_BROADCAST));
++ break;
++
++ case gcvBROADCAST_GPU_IDLE:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "GPU idle.");
++
++ /* Put GPU IDLE. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++#if gcdPOWER_SUSNPEND_WHEN_IDLE
++ gcvPOWER_SUSPEND_BROADCAST));
++#else
++ gcvPOWER_IDLE_BROADCAST));
++#endif
++
++ /* Add idle process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 1,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++ break;
++
++ case gcvBROADCAST_GPU_COMMIT:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "COMMIT has arrived.");
++
++ /* Add busy process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 0,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++
++ /* Put GPU ON. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware, gcvPOWER_ON_AUTO));
++ break;
++
++ case gcvBROADCAST_GPU_STUCK:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_GPU_STUCK\n");
++#if !gcdENABLE_RECOVERY
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++#endif
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_AXI_BUS_ERROR:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_AXI_BUS_ERROR\n");
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
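++
++/* Illustrative sketch only: how a hypothetical caller would report a GPU
++** state change through the broadcast hook above so that power management can
++** react. Kept out of the build with #if 0.
++*/
++#if 0
++static void
++_SampleReportIdle(
++    IN gckOS Os,
++    IN gckHARDWARE Hardware
++    )
++{
++    /* The kernel driver believes the GPU might be idle. */
++    gcmkVERIFY_OK(gckOS_Broadcast(Os, Hardware, gcvBROADCAST_GPU_IDLE));
++}
++#endif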
++
++/*******************************************************************************
++**
++** gckOS_BroadcastHurry
++**
++** The GPU is running too slow.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Urgency
++** The higher the number, the higher the urgency to speed up the GPU.
++** The maximum value is defined by gcdDYNAMIC_EVENT_THRESHOLD.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Urgency=%u", Os, Hardware, Urgency);
++
++ /* Do whatever you need to do to speed up the GPU now. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastCalibrateSpeed
++**
++** Calibrate the speed of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Idle, Time
++** Idle/Time will give the percentage the GPU is idle, so you can use
++** this to calibrate the working point of the GPU.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Idle=%u Time=%u",
++ Os, Hardware, Idle, Time);
++
++ /* Do whatever you need to do to calibrate the GPU speed. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************** Semaphores **********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_CreateSemaphore
++**
++** Create a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Semaphore
++** Pointer to the variable that will receive the created semaphore.
++*/
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore *sem = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Allocate the semaphore structure. */
++ sem = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (sem == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(sem, 1);
++
++ /* Return to caller. */
++ *Semaphore = (gctPOINTER) sem;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireSemaphore
++**
++** Acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%08X Semaphore=0x%08X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_interruptible((struct semaphore *) Semaphore))
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_TryAcquireSemaphore
++**
++** Try to acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_trylock((struct semaphore *) Semaphore))
++ {
++ /* Timeout. */
++ status = gcvSTATUS_TIMEOUT;
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseSemaphore
++**
++** Release a previously acquired semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Release the semaphore. */
++ up((struct semaphore *) Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySemaphore
++**
++** Destroy a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++** Pointer to the semaphore that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Free the semaphore structure. */
++ kfree(Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
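++
++/* Illustrative sketch only: the usual life cycle of a semaphore created with
++** the API above, protecting a hypothetical critical section. Kept out of the
++** build with #if 0.
++*/
++#if 0
++static gceSTATUS
++_SampleSemaphoreUsage(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctPOINTER semaphore = gcvNULL;
++
++    /* Create a binary semaphore (initial count of 1). */
++    gcmkONERROR(gckOS_CreateSemaphore(Os, &semaphore));
++
++    /* Enter the critical section. */
++    gcmkONERROR(gckOS_AcquireSemaphore(Os, semaphore));
++
++    /* ... touch the protected state ... */
++
++    /* Leave the critical section. */
++    gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Os, semaphore));
++
++    /* Free the semaphore once it is no longer needed. */
++    gcmkVERIFY_OK(gckOS_DestroySemaphore(Os, semaphore));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    if (semaphore != gcvNULL)
++    {
++        gcmkVERIFY_OK(gckOS_DestroySemaphore(Os, semaphore));
++    }
++
++    return status;
++}
++#endif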
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ )
++{
++ /* Get process ID. */
++ if (ProcessID != gcvNULL)
++ {
++ *ProcessID = _GetProcessID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ )
++{
++ /* Get thread ID. */
++ if (ThreadID != gcvNULL)
++ {
++ *ThreadID = _GetThreadID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ )
++{
++ struct clk *clk_3dcore = Os->device->clk_3d_core;
++ struct clk *clk_3dshader = Os->device->clk_3d_shader;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct clk *clk_3d_axi = Os->device->clk_3d_axi;
++#endif
++ struct clk *clk_2dcore = Os->device->clk_2d_core;
++ struct clk *clk_2d_axi = Os->device->clk_2d_axi;
++ struct clk *clk_vg_axi = Os->device->clk_vg_axi;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ int ret;
++#endif
++
++ gctBOOL oldClockState = gcvFALSE;
++ gctBOOL oldPowerState = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Clock=%d Power=%d", Os, Core, Clock, Power);
++
++ if (Os->device->kernels[Core] != NULL)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ oldClockState = Os->device->kernels[Core]->vg->hardware->clockState;
++ oldPowerState = Os->device->kernels[Core]->vg->hardware->powerState;
++ }
++ else
++ {
++#endif
++ oldClockState = Os->device->kernels[Core]->hardware->clockState;
++ oldPowerState = Os->device->kernels[Core]->hardware->powerState;
++#if gcdENABLE_VG
++ }
++#endif
++ }
++ if((Power == gcvTRUE) && (oldPowerState == gcvFALSE))
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(Os->device->gpu_regulator)) {
++ ret = regulator_enable(Os->device->gpu_regulator);
++ if (ret != 0)
++ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
++ __FUNCTION__, __LINE__, ret);
++ }
++#else
++ imx_gpc_power_up_pu(true);
++#endif
++
++#ifdef CONFIG_PM
++ pm_runtime_get_sync(Os->device->pmdev);
++#endif
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Clock == gcvTRUE) {
++ if (oldClockState == gcvFALSE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_enable(clk_3dcore);
++ if (cpu_is_mx6q())
++ clk_enable(clk_3dshader);
++ break;
++ case gcvCORE_2D:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++ } else {
++ if (oldClockState == gcvTRUE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ if (cpu_is_mx6q())
++ clk_disable(clk_3dshader);
++ clk_disable(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++ }
++#else
++ if (Clock == gcvTRUE) {
++ if (oldClockState == gcvFALSE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_prepare_enable(clk_3dcore);
++ clk_prepare_enable(clk_3dshader);
++ clk_prepare_enable(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_prepare_enable(clk_2dcore);
++ clk_prepare_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_prepare_enable(clk_2dcore);
++ clk_prepare_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++ } else {
++ if (oldClockState == gcvTRUE) {
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_disable_unprepare(clk_3d_axi);
++ clk_disable_unprepare(clk_3dshader);
++ clk_disable_unprepare(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable_unprepare(clk_2d_axi);
++ clk_disable_unprepare(clk_2dcore);
++ break;
++ case gcvCORE_VG:
++ clk_disable_unprepare(clk_vg_axi);
++ clk_disable_unprepare(clk_2dcore);
++ break;
++ default:
++ break;
++ }
++ }
++ }
++#endif
++ if((Power == gcvFALSE) && (oldPowerState == gcvTRUE))
++ {
++#ifdef CONFIG_PM
++ pm_runtime_put_sync(Os->device->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(Os->device->gpu_regulator))
++ regulator_disable(Os->device->gpu_regulator);
++#else
++ imx_gpc_power_up_pu(false);
++#endif
++
++ }
++ /* TODO: Put your code here. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ResetGPU
++**
++** Reset the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU to reset.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#define SRC_SCR_OFFSET 0
++#define BP_SRC_SCR_GPU3D_RST 1
++#define BP_SRC_SCR_GPU2D_RST 4
++ void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
++ gctUINT32 bit_offset,val;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ if(Core == gcvCORE_MAJOR) {
++ bit_offset = BP_SRC_SCR_GPU3D_RST;
++ } else if((Core == gcvCORE_VG)
++ ||(Core == gcvCORE_2D)) {
++ bit_offset = BP_SRC_SCR_GPU2D_RST;
++ } else {
++ return gcvSTATUS_INVALID_CONFIG;
++ }
++ val = __raw_readl(src_base + SRC_SCR_OFFSET);
++ val &= ~(1 << (bit_offset));
++ val |= (1 << (bit_offset));
++ __raw_writel(val, src_base + SRC_SCR_OFFSET);
++
++ while ((__raw_readl(src_base + SRC_SCR_OFFSET) &
++ (1 << (bit_offset))) != 0) {
++ }
++
++ gcmkFOOTER_NO();
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct reset_control *rstc = Os->device->rstc[Core];
++ if (rstc)
++ reset_control_reset(rstc);
++#else
++ imx_src_reset_gpu((int)Core);
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_PrepareGPUFrequency
++**
++** Prepare to set GPU frequency and voltage.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose frequency and voltage will be set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_FinishGPUFrequency
++**
++** Finish GPU frequency setting.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose frequency and voltage are set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryGPUFrequency
++**
++** Query the current frequency of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose frequency is queried.
++**
++** gctUINT32 * Frequency
++** Pointer to a gctUINT32 to obtain current frequency, in MHz.
++**
++** gctUINT8 * Scale
++** Pointer to a gctUINT8 to obtain the current scale (1 - 64).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUFrequency
++**
++** Set frequency and voltage of the GPU.
++**
++** 1. The DVFS manager gives the target scale of the full frequency; the BSP
++** must find a real frequency according to this scale and the board's
++** configuration.
++**
++** 2. The BSP should find a suitable voltage for this frequency.
++**
++** 3. The BSP must make sure the setting takes effect before this function
++** returns.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose frequency is set.
++**
++** gctUINT8 Scale
++** Target scale of full frequency, range is [1, 64]. 1 means 1/64 of
++** full frequency and 64 means 64/64 of full frequency.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
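++
++/* Illustrative sketch only: one way a BSP might turn the 1..64 Scale argument
++** into a clock rate. The stub above intentionally leaves this platform
++** specific; the GpuClock handle here is hypothetical. Kept out of the build
++** with #if 0.
++*/
++#if 0
++static unsigned long
++_SampleScaleToRate(
++    IN struct clk * GpuClock,
++    IN gctUINT8 Scale
++    )
++{
++    unsigned long maxRate = clk_get_rate(GpuClock);
++
++    /* Scale is a fraction of the full frequency in 1/64 steps. */
++    return (maxRate / 64) * Scale;
++}
++#endif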
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ )
++{
++ struct timespec time;
++
++ ktime_get_ts(&time);
++
++ *Tick = time.tv_nsec + time.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ )
++{
++ struct timespec res;
++
++ hrtimer_get_res(CLOCK_MONOTONIC, &res);
++
++ *TickRate = res.tv_nsec + res.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ )
++{
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++ return div_u64(Ticks, 1000000);
++#else
++ gctUINT64 rem = Ticks;
++ gctUINT64 b = 1000000;
++ gctUINT64 res, d = 1;
++ gctUINT32 high = rem >> 32;
++
++ /* Reduce the thing a bit first */
++ res = 0;
++ if (high >= 1000000)
++ {
++ high /= 1000000;
++ res = (gctUINT64) high << 32;
++ rem -= (gctUINT64) (high * 1000000) << 32;
++ }
++
++ while (((gctINT64) b > 0) && (b < rem))
++ {
++ b <<= 1;
++ d <<= 1;
++ }
++
++ do
++ {
++ if (rem >= b)
++ {
++ rem -= b;
++ res += d;
++ }
++
++ b >>= 1;
++ d >>= 1;
++ }
++ while (d);
++
++ return (gctUINT32) res;
++#endif
++}
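++
++/* Illustrative sketch only: measuring a duration with the profile helpers
++** above. The tick value is in nanoseconds, so gckOS_ProfileToMS divides by
++** 1000000 to obtain milliseconds. Kept out of the build with #if 0.
++*/
++#if 0
++static gctUINT32
++_SampleMeasureMS(
++    void
++    )
++{
++    gctUINT64 start, end;
++
++    gcmkVERIFY_OK(gckOS_GetProfileTick(&start));
++
++    /* ... work being measured ... */
++
++    gcmkVERIFY_OK(gckOS_GetProfileTick(&end));
++
++    /* Convert the elapsed ticks (nanoseconds) to milliseconds. */
++    return gckOS_ProfileToMS(end - start);
++}
++#endif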
++
++/******************************************************************************\
++******************************* Signal Management ******************************
++\******************************************************************************/
++
++#undef _GC_OBJ_ZONE
++#define _GC_OBJ_ZONE gcvZONE_SIGNAL
++
++/*******************************************************************************
++**
++** gckOS_CreateSignal
++**
++** Create a new signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctSIGNAL * Signal
++** Pointer to a variable receiving the created gctSIGNAL.
++*/
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X ManualReset=%d", Os, ManualReset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Create an event structure. */
++ signal = (gcsSIGNAL_PTR) kmalloc(sizeof(gcsSIGNAL), GFP_KERNEL | gcdNOWARN);
++
++ if (signal == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Save the process ID. */
++ signal->process = (gctHANDLE)(gctUINTPTR_T) _GetProcessID();
++ signal->manualReset = ManualReset;
++ signal->hardware = gcvNULL;
++ init_completion(&signal->obj);
++ atomic_set(&signal->ref, 1);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &signal->id));
++
++ *Signal = (gctSIGNAL)(gctUINTPTR_T)signal->id;
++
++ gcmkFOOTER_ARG("*Signal=0x%X", *Signal);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ kfree(signal);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ *Hardware = signal->hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ signal->hardware = Hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySignal
++**
++** Destroy a signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X", Os, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (atomic_dec_and_test(&signal->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id));
++
++ /* Free the signal. */
++ kfree(signal);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Signal
++**
++** Set a state of the specified signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X State=%d", Os, Signal, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (State)
++ {
++ /* Unbind the signal from the hardware. */
++ signal->hardware = gcvNULL;
++
++ /* Set the event to a signaled state. */
++ complete(&signal->obj);
++ }
++ else
++ {
++ /* Set the event to an unsignaled state. */
++ reinit_completion(&signal->obj);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_SetSignalVG(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ printk("gckOS_SetSignalVG:0x%x\n", result);
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_UserSignal
++**
++** Set the specified signal which is owned by a process to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ )
++{
++ gceSTATUS status;
++ gctSIGNAL signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=%d",
++ Os, Signal, (gctINT32)(gctUINTPTR_T)Process);
++
++ /* Map the signal into kernel space. */
++ gcmkONERROR(gckOS_MapSignal(Os, Signal, Process, &signal));
++
++ /* Signal. */
++ status = gckOS_Signal(Os, signal, gcvTRUE);
++
++ /* Unmap the signal */
++ gcmkVERIFY_OK(gckOS_UnmapSignal(Os, Signal));
++
++ gcmkFOOTER();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitSignal
++**
++** Wait for a signal to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Wait=0x%08X", Os, Signal, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ might_sleep();
++
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ else if (Wait == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ }
++ else
++ {
++ /* Convert the wait from milliseconds to jiffies. */
++#if gcdDETECT_TIMEOUT
++ gctINT timeout = (Wait == gcvINFINITE)
++ ? gcdINFINITE_TIMEOUT * HZ / 1000
++ : Wait * HZ / 1000;
++
++ gctUINT complained = 0;
++#else
++ gctINT timeout = (Wait == gcvINFINITE)
++ ? MAX_SCHEDULE_TIMEOUT
++ : Wait * HZ / 1000;
++#endif
++
++ DECLARE_WAITQUEUE(wait, current);
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&signal->obj.wait, &wait);
++
++ while (gcvTRUE)
++ {
++ if (signal_pending(current))
++ {
++ /* Interrupt received. */
++ status = gcvSTATUS_INTERRUPTED;
++ break;
++ }
++
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&signal->obj.wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++#if gcdDETECT_TIMEOUT
++ if ((Wait == gcvINFINITE) && (timeout == 0))
++ {
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkVERIFY_OK(_VerifyDMA(
++ Os, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++#if gcdDETECT_DMA_ADDRESS
++ /* Dump only if DMA appears stuck. */
++ if (
++ (dmaAddress1 == dmaAddress2)
++#if gcdDETECT_DMA_STATE
++ && (dmaState1 == dmaState2)
++#endif
++ )
++#endif
++ {
++ /* Increment complain count. */
++ complained += 1;
++
++ gcmkVERIFY_OK(_DumpGPUState(Os, gcvCORE_MAJOR));
++
++ gcmkPRINT(
++ "%s(%d): signal 0x%X; forced message flush (%d).",
++ __FUNCTION__, __LINE__, Signal, complained
++ );
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(dmaAddress2);
++ }
++
++ /* Reset timeout. */
++ timeout = gcdINFINITE_TIMEOUT * HZ / 1000;
++ }
++#endif
++
++ if (timeout == 0)
++ {
++
++ status = gcvSTATUS_TIMEOUT;
++ break;
++ }
++ }
++
++ __remove_wait_queue(&signal->obj.wait, &wait);
++
++#if gcdDETECT_TIMEOUT
++ if (complained)
++ {
++ gcmkPRINT(
++ "%s(%d): signal=0x%X; waiting done; status=%d",
++ __FUNCTION__, __LINE__, Signal, status
++ );
++ }
++#endif
++ }
++
++ spin_unlock_irq(&signal->obj.wait.lock);
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER_ARG("Signal=0x%X status=%d", Signal, status);
++ return status;
++}
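++
++/* Illustrative sketch only: a typical producer/consumer hand-shake built on
++** the signal API above. The consumer below waits with a hypothetical 1000 ms
++** timeout; a producer elsewhere would call gckOS_Signal(Os, signal, gcvTRUE).
++** Kept out of the build with #if 0.
++*/
++#if 0
++static gceSTATUS
++_SampleSignalWait(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctSIGNAL signal = gcvNULL;
++
++    /* Auto-reset signal: gckOS_WaitSignal clears it again after a wait. */
++    gcmkONERROR(gckOS_CreateSignal(Os, gcvFALSE, &signal));
++
++    /* ... hand the signal to whatever will complete the work ... */
++
++    /* Block for up to one second waiting for the signaled state. */
++    gcmkONERROR(gckOS_WaitSignal(Os, signal, 1000));
++
++    gcmkVERIFY_OK(gckOS_DestroySignal(Os, signal));
++    return gcvSTATUS_OK;
++
++OnError:
++    if (signal != gcvNULL)
++    {
++        gcmkVERIFY_OK(gckOS_DestroySignal(Os, signal));
++    }
++
++    return status;
++}
++#endif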
++
++/*******************************************************************************
++**
++** gckOS_MapSignal
++**
++** Map a signal into the current process space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL to map.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** gctSIGNAL * MappedSignal
++** Pointer to a variable receiving the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=0x%X", Os, Signal, Process);
++
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(MappedSignal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ if(atomic_inc_return(&signal->ref) <= 1)
++ {
++ /* The previous value is 0, it has been deleted. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ *MappedSignal = (gctSIGNAL) Signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*MappedSignal=0x%X", *MappedSignal);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapSignal
++**
++** Unmap a signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ return gckOS_DestroySignal(Os, Signal);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateUserSignal
++**
++** Create a new signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctINT * SignalID
++** Pointer to a variable receiving the created signal's ID.
++*/
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T signal;
++
++ /* Create a new signal. */
++ status = gckOS_CreateSignal(Os, ManualReset, (gctSIGNAL *) &signal);
++ *SignalID = (gctINT) signal;
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyUserSignal
++**
++** Destroy a signal to be used in the user space.
++**
++** INPUT:
++**
++** Pointer to a gckOS object.
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** The signal's ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ )
++{
++ return gckOS_DestroySignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID);
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitUserSignal
++**
++** Wait for a signal used in the user mode to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctINT SignalID
++** Signal ID.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ )
++{
++ return gckOS_WaitSignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, Wait);
++}
++
++/*******************************************************************************
++**
++** gckOS_SignalUserSignal
++**
++** Set a state of the specified signal to be used in the user space.
++**
++** INPUT:
++**
++** Pointer to a gckOS object.
++** Pointer to an gckOS object.
++**
++** gctINT SignalID
++** SignalID.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ )
++{
++ return gckOS_Signal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, State);
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctSEMAPHORE * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore * newSemaphore;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Allocate the semaphore structure. */
++ newSemaphore = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (newSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(newSemaphore, 0);
++
++ /* Set the handle. */
++ * Semaphore = (gctSEMAPHORE) newSemaphore;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Increment the semaphore's count. */
++ up((struct semaphore *) Semaphore);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Decrement the semaphore's count. If the count is zero, wait
++ until it gets incremented. */
++ result = down_interruptible((struct semaphore *) Semaphore);
++
++ /* Signal received? */
++ if (result != 0)
++ {
++ status = gcvSTATUS_TERMINATE;
++ break;
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetSignal
++**
++** Set the specified signal to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++******************************** Thread Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ )
++{
++ gceSTATUS status;
++ struct task_struct * thread;
++
++ gcmkHEADER_ARG("Os=0x%X ", Os);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(ThreadFunction != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ do
++ {
++ /* Create the thread. */
++ thread = kthread_create(
++ ThreadFunction,
++ ThreadParameter,
++ "Vivante Kernel Thread"
++ );
++
++ /* Failed? */
++ if (IS_ERR(thread))
++ {
++ status = gcvSTATUS_GENERIC_IO;
++ break;
++ }
++
++ /* Start the thread. */
++ wake_up_process(thread);
++
++ /* Set the thread handle. */
++ * Thread = (gctTHREAD) thread;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ /* The thread routine must be able to terminate; kthread_stop() waits for it to exit. */
++ kthread_stop((struct task_struct *) Thread);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
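++
++/* Illustrative sketch only: a minimal thread routine for gckOS_StartThread,
++** assuming gctTHREADFUNC matches the kthread int (*)(void *) prototype.
++** kthread_stop() (used by gckOS_StopThread above) only returns once the
++** routine exits, so the routine polls kthread_should_stop(). Kept out of the
++** build with #if 0.
++*/
++#if 0
++static int
++_SampleThreadRoutine(
++    void * Data
++    )
++{
++    while (!kthread_should_stop())
++    {
++        /* ... periodic work ... */
++        msleep(10);
++    }
++
++    return 0;
++}
++#endif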
++
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++/******************************************************************************\
++******************************** Software Timer ********************************
++\******************************************************************************/
++
++void
++_TimerFunction(
++ struct work_struct * work
++ )
++{
++ gcsOSTIMER_PTR timer = (gcsOSTIMER_PTR)work;
++
++ gctTIMERFUNCTION function = timer->function;
++
++ function(timer->data);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateTimer
++**
++** Create a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctTIMERFUNCTION Function
++** Pointer to a callback function which will be called when the timer
++** expires.
++**
++** gctPOINTER Data
++** Private data which will be passed to the callback function.
++**
++** OUTPUT:
++**
++** gctPOINTER * Timer
++** Pointer to a variable receiving the created timer.
++*/
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ )
++{
++ gceSTATUS status;
++ gcsOSTIMER_PTR pointer;
++ gcmkHEADER_ARG("Os=0x%X Function=0x%X Data=0x%X", Os, Function, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(Os, sizeof(gcsOSTIMER), (gctPOINTER)&pointer));
++
++ pointer->function = Function;
++ pointer->data = Data;
++
++ INIT_DELAYED_WORK(&pointer->work, _TimerFunction);
++
++ *Timer = pointer;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyTimer
++**
++** Destroy a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, Timer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StartTimer
++**
++** Schedule a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be scheduled.
++**
++** gctUINT32 Delay
++** Delay in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ )
++{
++ gcsOSTIMER_PTR timer;
++
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X Delay=%u", Os, Timer, Delay);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Delay != 0);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++ if (unlikely(!cancel_delayed_work(&timer->work)))
++ {
++ cancel_work_sync(&timer->work.work);
++
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++ gckOS_Print("gckOS_StartTimer error, the pending worker cannot complete!!!! \n");
++
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++ }
++ }
++
++ queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StopTimer
++**
++** Cancel a scheduled timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be canceled.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ cancel_delayed_work(&timer->work);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
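++
++/* Illustrative sketch only: driving the software timer above. The callback
++** and the 500 ms delay are hypothetical, and the callback signature assumes
++** gctTIMERFUNCTION is a void function taking a gctPOINTER. The callback runs
++** from the driver workqueue, not from interrupt context. Kept out of the
++** build with #if 0.
++*/
++#if 0
++static void
++_SampleTimerCallback(
++    gctPOINTER Data
++    )
++{
++    /* ... react to the timer expiring ... */
++}
++
++static gceSTATUS
++_SampleTimerUsage(
++    IN gckOS Os
++    )
++{
++    gctPOINTER timer = gcvNULL;
++
++    gcmkVERIFY_OK(gckOS_CreateTimer(Os, _SampleTimerCallback, gcvNULL, &timer));
++
++    /* Fire once, roughly 500 milliseconds from now. */
++    gcmkVERIFY_OK(gckOS_StartTimer(Os, timer, 500));
++
++    /* ... later: cancel if still pending, then free the timer ... */
++    gcmkVERIFY_OK(gckOS_StopTimer(Os, timer));
++    gcmkVERIFY_OK(gckOS_DestroyTimer(Os, timer));
++
++    return gcvSTATUS_OK;
++}
++#endif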
++
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ dump_stack();
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ )
++{
++ struct task_struct *task;
++
++ /* Get the task_struct of the task with pid. */
++ rcu_read_lock();
++
++ task = FIND_TASK_BY_PID(Pid);
++
++ if (task == gcvNULL)
++ {
++ rcu_read_unlock();
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ /* Get name of process. */
++ strncpy(String, task->comm, Length);
++
++ rcu_read_unlock();
++
++ return gcvSTATUS_OK;
++}
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Create an sync point structure. */
++ syncPoint = (gcsSYNC_POINT_PTR) kmalloc(
++ sizeof(gcsSYNC_POINT), GFP_KERNEL | gcdNOWARN);
++
++ if (syncPoint == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the sync point. */
++ atomic_set(&syncPoint->ref, 1);
++ atomic_set(&syncPoint->state, 0);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->syncPointDB, syncPoint, &syncPoint->id));
++
++ *SyncPoint = (gctSYNC_POINT)(gctUINTPTR_T)syncPoint->id;
++
++ gcmkFOOTER_ARG("*SyncPoint=%d", syncPoint->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (syncPoint != gcvNULL)
++ {
++ kfree(syncPoint);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Initialize the sync point. */
++ atomic_inc(&syncPoint->ref);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ if (atomic_dec_and_test(&syncPoint->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->syncPointDB, syncPoint->id));
++
++ /* Free the sync point. */
++ syncPoint->timeline = gcvNULL;
++ kfree(syncPoint);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ atomic_set(&syncPoint->state, gcvTRUE);
++
++ /* Signal timeline. */
++ if (syncPoint->timeline)
++ {
++ sync_timeline_signal(syncPoint->timeline);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ *State = atomic_read(&syncPoint->state);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++
++ /* Create viv sync timeline. */
++ timeline = viv_sync_timeline_create("viv timeline", Os);
++
++ if (timeline == gcvNULL)
++ {
++ /* Out of memory. */
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ *Timeline = (gctHANDLE) timeline;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++ gcmkASSERT(Timeline != gcvNULL);
++
++ /* Destroy timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++ sync_timeline_destroy(&timeline->obj);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ )
++{
++ int fd = -1;
++ struct viv_sync_timeline *timeline;
++ struct sync_pt * pt = gcvNULL;
++ struct sync_fence * fence;
++ char name[32];
++ gcsSYNC_POINT_PTR syncPoint;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Timeline=0x%X SyncPoint=%d",
++ Os, Timeline, (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Cast timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++
++ fd = get_unused_fd();
++
++ if (fd < 0)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create viv_sync_pt. */
++ pt = viv_sync_pt_create(timeline, SyncPoint);
++
++ if (pt == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Reference sync_timeline. */
++ syncPoint->timeline = &timeline->obj;
++
++ /* Build fence name. */
++ snprintf(name, 32, "viv sync_fence-%u", (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ /* Create sync_fence. */
++ fence = sync_fence_create(name, pt);
++
++ if (fence == NULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Install fence to fd. */
++ sync_fence_install(fence, fd);
++
++ *FenceFD = fd;
++ gcmkFOOTER_ARG("*FenceFD=%d", fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (pt)
++ {
++ sync_pt_free(pt);
++ }
++
++ if (fd > 0)
++ {
++ put_unused_fd(fd);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
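++
++/* Illustrative sketch only: the usual ordering for the Android native fence
++** support above. A sync point is created, exported as a fence fd for user
++** space, and later signaled when the GPU work completes; the Timeline handle
++** is assumed to come from gckOS_CreateSyncTimeline. Kept out of the build
++** with #if 0.
++*/
++#if 0
++static gceSTATUS
++_SampleNativeFence(
++    IN gckOS Os,
++    IN gctHANDLE Timeline
++    )
++{
++    gceSTATUS status;
++    gctSYNC_POINT syncPoint = gcvNULL;
++    gctINT fenceFD = -1;
++
++    gcmkONERROR(gckOS_CreateSyncPoint(Os, &syncPoint));
++
++    /* Wrap the sync point in a fence fd that can be handed to user space. */
++    gcmkONERROR(gckOS_CreateNativeFence(Os, Timeline, syncPoint, &fenceFD));
++
++    /* ... return fenceFD to the client; when the GPU work finishes ... */
++    gcmkONERROR(gckOS_SignalSyncPoint(Os, syncPoint));
++
++    gcmkVERIFY_OK(gckOS_DestroySyncPoint(Os, syncPoint));
++    return gcvSTATUS_OK;
++
++OnError:
++    if (syncPoint != gcvNULL)
++    {
++        gcmkVERIFY_OK(gckOS_DestroySyncPoint(Os, syncPoint));
++    }
++
++    return status;
++}
++#endif
++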
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_os.h 2015-05-01 14:57:59.551427001 -0500
+@@ -0,0 +1,83 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_os_h_
++#define __gc_hal_kernel_os_h_
++
++typedef struct _LINUX_MDL_MAP
++{
++ gctINT pid;
++ gctPOINTER vmaAddr;
++ gctUINT32 count;
++ struct vm_area_struct * vma;
++ struct _LINUX_MDL_MAP * next;
++}
++LINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL_MAP * PLINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL
++{
++ gctINT pid;
++ char * addr;
++
++ union _pages
++ {
++ /* Pointer to an array of pages. */
++ struct page * contiguousPages;
++ /* Pointer to an array of pointers to pages. */
++ struct page ** nonContiguousPages;
++ }
++ u;
++
++#ifdef NO_DMA_COHERENT
++ gctPOINTER kaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gctINT numPages;
++ gctINT pagedMem;
++ gctBOOL contiguous;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctBOOL exact;
++#endif
++ dma_addr_t dmaHandle;
++ PLINUX_MDL_MAP maps;
++ struct _LINUX_MDL * prev;
++ struct _LINUX_MDL * next;
++}
++LINUX_MDL, *PLINUX_MDL;
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT PID
++ );
++
++typedef struct _DRIVER_ARGS
++{
++ gctUINT64 InputBuffer;
++ gctUINT64 InputBufferSize;
++ gctUINT64 OutputBuffer;
++ gctUINT64 OutputBufferSize;
++}
++DRIVER_ARGS;
++
++#endif /* __gc_hal_kernel_os_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.c 2015-05-01 14:57:59.551427001 -0500
+@@ -0,0 +1,174 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <linux/kernel.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/uaccess.h>
++
++#include "gc_hal_kernel_sync.h"
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++static struct sync_pt *
++viv_sync_pt_dup(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt *pt;
++ struct viv_sync_pt *src;
++ struct viv_sync_timeline *obj;
++
++ src = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ /* Create the new sync_pt. */
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = src->stamp;
++ pt->sync = src->sync;
++
++ /* Reference sync point. */
++ status = gckOS_ReferenceSyncPoint(obj->os, pt->sync);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *)pt;
++}
++
++static int
++viv_sync_pt_has_signaled(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ gctBOOL state;
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *)sync_pt;
++ obj = (struct viv_sync_timeline *)sync_pt->parent;
++
++ status = gckOS_QuerySyncPoint(obj->os, pt->sync, &state);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error. */
++ return -1;
++ }
++
++ return state;
++}
++
++static int
++viv_sync_pt_compare(
++ struct sync_pt * a,
++ struct sync_pt * b
++ )
++{
++ int ret;
++ struct viv_sync_pt * pt1 = (struct viv_sync_pt *) a;
++ struct viv_sync_pt * pt2 = (struct viv_sync_pt *) b;
++
++ ret = (pt1->stamp < pt2->stamp) ? -1
++ : (pt1->stamp == pt2->stamp) ? 0
++ : 1;
++
++ return ret;
++}
++
++static void
++viv_sync_pt_free(
++ struct sync_pt * sync_pt
++ )
++{
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ gckOS_DestroySyncPoint(obj->os, pt->sync);
++}
++
++static struct sync_timeline_ops viv_timeline_ops =
++{
++ .driver_name = "viv_sync",
++ .dup = viv_sync_pt_dup,
++ .has_signaled = viv_sync_pt_has_signaled,
++ .compare = viv_sync_pt_compare,
++ .free_pt = viv_sync_pt_free,
++};
++
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * name,
++ gckOS os
++ )
++{
++ struct viv_sync_timeline * obj;
++
++ obj = (struct viv_sync_timeline *)
++ sync_timeline_create(&viv_timeline_ops, sizeof(struct viv_sync_timeline), name);
++
++ obj->os = os;
++ obj->stamp = 0;
++
++ return obj;
++}
++
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * obj,
++ gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt * pt;
++
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = obj->stamp++;
++ pt->sync = SyncPoint;
++
++ /* Dup signal. */
++ status = gckOS_ReferenceSyncPoint(obj->os, SyncPoint);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *) pt;
++}
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/hal/os/linux/kernel/gc_hal_kernel_sync.h 2015-05-01 14:57:59.551427001 -0500
+@@ -0,0 +1,71 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_sync_h_
++#define __gc_hal_kernel_sync_h_
++
++#include <linux/types.h>
++
++#include <linux/sync.h>
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++struct viv_sync_timeline
++{
++ /* Parent object. */
++ struct sync_timeline obj;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++
++ /* Pointer to os struct. */
++ gckOS os;
++};
++
++
++struct viv_sync_pt
++{
++ /* Parent object. */
++ struct sync_pt pt;
++
++ /* Referenced sync point. */
++ gctSYNC_POINT sync;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++};
++
++/* Create viv_sync_timeline object. */
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * Name,
++ gckOS Os
++ );
++
++/* Create viv_sync_pt object. */
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * Obj,
++ gctSYNC_POINT SyncPoint
++ );
++
++#endif /* __gc_hal_kernel_sync_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/Kbuild linux-3.14.40/drivers/mxc/gpu-viv/v4/Kbuild
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v4/Kbuild 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v4/Kbuild 2015-05-01 14:57:59.555427001 -0500
+@@ -0,0 +1,236 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Linux build file for kernel HAL driver.
++#
++
++AQROOT := $(srctree)/drivers/mxc/gpu-viv/v4
++AQARCH := $(AQROOT)/arch/XAQ2
++AQVGARCH := $(AQROOT)/arch/GC350
++
++include $(AQROOT)/config
++
++KERNEL_DIR ?= $(TOOL_DIR)/kernel
++
++OS_KERNEL_DIR := hal/os/linux/kernel
++ARCH_KERNEL_DIR := arch/$(notdir $(AQARCH))/hal/kernel
++ARCH_VG_KERNEL_DIR := arch/$(notdir $(AQVGARCH))/hal/kernel
++HAL_KERNEL_DIR := hal/kernel
++
++# EXTRA_CFLAGS += -Werror
++
++OBJS := $(OS_KERNEL_DIR)/gc_hal_kernel_device.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_driver.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_linux.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_math.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_os.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_debugfs.o
++
++OBJS += $(HAL_KERNEL_DIR)/gc_hal_kernel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_db.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_debug.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_event.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_heap.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_video_memory.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_power.o
++
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_context.o \
++ $(ARCH_KERNEL_DIR)/gc_hal_kernel_hardware.o
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++OBJS +=\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_interrupt_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_command_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_vg.o
++endif
++
++ifneq ($(CONFIG_SYNC),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o
++endif
++
++ifeq ($(KERNELRELEASE), )
++
++.PHONY: all clean install
++
++# Define targets.
++all:
++ @make V=$(V) ARCH=$(ARCH_TYPE) -C $(KERNEL_DIR) SUBDIRS=`pwd` modules
++
++clean:
++ @rm -rf $(OBJS)
++ @rm -rf modules.order Module.symvers
++ @find $(AQROOT) -name ".gc_*.cmd" | xargs rm -f
++
++install: all
++ @mkdir -p $(SDK_DIR)/drivers
++
++else
++
++
++EXTRA_CFLAGS += -DLINUX -DDRIVER
++
++ifeq ($(ENUM_WORKAROUND), 1)
++EXTRA_CFLAGS += -DENUM_WORKAROUND=1
++else
++EXTRA_CFLAGS += -DENUM_WORKAROUND=0
++endif
++
++ifeq ($(FLAREON),1)
++EXTRA_CFLAGS += -DFLAREON
++endif
++
++ifeq ($(DEBUG), 1)
++EXTRA_CFLAGS += -DDBG=1 -DDEBUG -D_DEBUG
++else
++EXTRA_CFLAGS += -DDBG=0
++endif
++
++ifeq ($(NO_DMA_COHERENT), 1)
++EXTRA_CFLAGS += -DNO_DMA_COHERENT
++endif
++
++ifeq ($(CONFIG_DOVE_GPU), 1)
++EXTRA_CFLAGS += -DCONFIG_DOVE_GPU=1
++endif
++
++ifneq ($(USE_PLATFORM_DRIVER), 0)
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=1
++else
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=0
++endif
++
++
++EXTRA_CFLAGS += -DVIVANTE_PROFILER=1
++EXTRA_CFLAGS += -DVIVANTE_PROFILER_CONTEXT=1
++
++
++ifeq ($(ANDROID), 1)
++EXTRA_CFLAGS += -DANDROID=1
++endif
++
++ifeq ($(ENABLE_GPU_CLOCK_BY_DRIVER), 1)
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=1
++else
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=0
++endif
++
++ifeq ($(USE_NEW_LINUX_SIGNAL), 1)
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=1
++else
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=0
++endif
++
++ifeq ($(NO_USER_DIRECT_ACCESS_FROM_KERNEL), 1)
++EXTRA_CFLAGS += -DNO_USER_DIRECT_ACCESS_FROM_KERNEL=1
++else
++EXTRA_CFLAGS += -DNO_USER_DIRECT_ACCESS_FROM_KERNEL=0
++endif
++
++ifeq ($(FORCE_ALL_VIDEO_MEMORY_CACHED), 1)
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_CACHEABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_BUFFERABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=0
++endif
++
++ifeq ($(CACHE_FUNCTION_UNIMPLEMENTED), 1)
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=1
++else
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=0
++endif
++
++ifeq ($(SUPPORT_SWAP_RECTANGLE), 1)
++EXTRA_CFLAGS += -DgcdSUPPORT_SWAP_RECTANGLE=1
++else
++EXTRA_CFLAGS += -DgcdSUPPORT_SWAP_RECTANGLE=0
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -DgcdENABLE_VG=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_VG=0
++endif
++
++ifeq ($(CONFIG_SMP), y)
++EXTRA_CFLAGS += -DgcdSMP=1
++else
++EXTRA_CFLAGS += -DgcdSMP=0
++endif
++
++ifeq ($(VIVANTE_NO_3D),1)
++EXTRA_CFLAGS += -DVIVANTE_NO_3D
++endif
++
++ifeq ($(ENABLE_OUTER_CACHE_PATCH), 1)
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=0
++endif
++
++ifeq ($(USE_BANK_ALIGNMENT), 1)
++ EXTRA_CFLAGS += -DgcdENABLE_BANK_ALIGNMENT=1
++ ifneq ($(BANK_BIT_START), 0)
++ ifneq ($(BANK_BIT_END), 0)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_START=$(BANK_BIT_START)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_END=$(BANK_BIT_END)
++ endif
++ endif
++
++ ifneq ($(BANK_CHANNEL_BIT), 0)
++ EXTRA_CFLAGS += -DgcdBANK_CHANNEL_BIT=$(BANK_CHANNEL_BIT)
++ endif
++endif
++
++ifneq ($(CONFIG_SYNC),)
++EXTRA_CFLAGS += -DgcdANDROID_NATIVE_FENCE_SYNC=1
++endif
++
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel
++EXTRA_CFLAGS += -I$(AQARCH)/hal/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/hal/os/linux/kernel
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -I$(AQVGARCH)/hal/kernel
++endif
++
++obj-$(CONFIG_MXC_GPU_VIV) += galcore.o
++
++galcore-objs := $(OBJS)
++
++endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/config linux-3.14.40/drivers/mxc/gpu-viv/v5/config
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/config 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/config 2015-05-01 14:57:59.555427001 -0500
+@@ -0,0 +1,36 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2014 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++ARCH_TYPE ?= arm
++SDK_DIR ?= $(AQROOT)/build/sdk
++VIVANTE_ENABLE_3D ?= 1
++VIVANTE_ENABLE_2D ?= 1
++VIVANTE_ENABLE_VG ?= 1
++FORCE_ALL_VIDEO_MEMORY_CACHED ?= 0
++NONPAGED_MEMORY_CACHEABLE ?= 0
++NONPAGED_MEMORY_BUFFERABLE ?= 1
++CACHE_FUNCTION_UNIMPLEMENTED ?= 0
++ENABLE_OUTER_CACHE_PATCH ?= 1
++USE_BANK_ALIGNMENT ?= 1
++BANK_BIT_START ?= 13
++BANK_BIT_END ?= 15
++BANK_CHANNEL_BIT ?= 12
++PLATFORM ?= freescale/gc_hal_kernel_platform_imx6q14
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.c 2015-05-01 14:57:59.555427001 -0500
+@@ -0,0 +1,2317 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++#include "gc_hal_kernel_buffer.h"
++
++/******************************************************************************\
++******************************** Debugging Macro *******************************
++\******************************************************************************/
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++
++/******************************************************************************\
++************************** Context State Buffer Helpers ************************
++\******************************************************************************/
++
++#define _STATE(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT(reg, count) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT_OFFSET(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_MIRROR_COUNT(reg, mirror, count) \
++ _StateMirror(\
++ Context, \
++ reg ## _Address >> 2, \
++ count, \
++ mirror ## _Address >> 2 \
++ )
++
++#define _STATE_HINT(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_HINT_BLOCK(reg, block, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + (block << reg ## _BLK), \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_COUNT_OFFSET_HINT(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_X(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvTRUE, gcvFALSE \
++ )
++
++#define _STATE_INIT_VALUE(reg, value) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ value, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _CLOSE_RANGE() \
++ _TerminateStateBlock(Context, index)
++
++#define _ENABLE(reg, field) \
++ do \
++ { \
++ if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \
++ { \
++ enable |= gcmFIELDMASK(reg, field); \
++ } \
++ } \
++ while (gcvFALSE)
++
++#define _BLOCK_COUNT(reg) \
++ ((reg ## _Count) >> (reg ## _BLK))
++
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++#define gcdSTATE_MASK \
++ (((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 | 0xC0FFEE & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))))
++
++#if gcdENABLE_3D
++static gctUINT32
++_TerminateStateBlock(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 align;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ /* Flush the current state block; make sure no pairing with the states
++ to follow happens. */
++ if (align && (buffer != gcvNULL))
++ {
++ buffer[Index] = 0xDEADDEAD;
++ }
++
++ /* Reset last address. */
++ Context->lastAddress = ~0U;
++
++ /* Return alignment requirement. */
++ return align;
++}
++#endif
++
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gctUINT32
++_FlushPipe(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ gctBOOL fcFlushStall;
++ gctUINT32 flushSlots;
++ gctBOOL iCacheInvalidate;
++
++ fcFlushStall
++ = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_FC_FLUSH_STALL);
++
++ iCacheInvalidate
++ = ((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ flushSlots = 6;
++
++ if (fcFlushStall)
++ {
++ /* Flush tile status cache. */
++ flushSlots += 6;
++ }
++
++ if (iCacheInvalidate)
++ {
++ flushSlots += 12;
++ }
++
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Flush the current pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ if (fcFlushStall)
++ {
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ if (iCacheInvalidate)
++ {
++ /* Invalidate I$ after pipe is stalled */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ }
++
++ /* Number of slots taken by flushing pipe. */
++ return flushSlots;
++}
++#endif
++
++#if gcdENABLE_3D
++static gctUINT32
++_SemaphoreStall(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Semaphore/stall takes 4 slots. */
++ return 4;
++}
++#endif
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gctUINT32
++_SwitchPipe(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ gctUINT32 slots = 6;
++
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ Context->pipeSelectBytes = slots * gcmSIZEOF(gctUINT32);
++
++ return slots;
++}
++#endif
++
++#if gcdENABLE_3D
++static gctUINT32
++_State(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gctUINT32 Address,
++ IN gctUINT32 Value,
++ IN gctUINT32 Size,
++ IN gctBOOL FixedPoint,
++ IN gctBOOL Hinted
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 align;
++ gctUINT32 i;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ if ((buffer == gcvNULL) && (Address + Size > Context->stateCount))
++ {
++ /* Determine maximum state. */
++ Context->stateCount = Address + Size;
++ }
++
++ /* Do we need a new entry? */
++ if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed))
++ {
++ if (buffer != gcvNULL)
++ {
++ if (align)
++ {
++ /* Add filler. */
++ buffer[Index++] = 0xDEADDEAD;
++ }
++
++ /* LoadState(Address, Count). */
++ gcmkASSERT((Index & 1) == 0);
++
++ if (FixedPoint)
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++ else
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ /* Walk all the states. */
++ for (i = 0; i < (gctUINT32)Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + 1 + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = (gctUINT)Index + 1 + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Save information for this LoadState. */
++ Context->lastIndex = (gctUINT)Index;
++ Context->lastAddress = Address + (gctUINT32)Size;
++ Context->lastSize = Size;
++ Context->lastFixed = FixedPoint;
++
++ /* Return size for load state. */
++ return align + 1 + Size;
++ }
++
++ /* Append this state to the previous one. */
++ if (buffer != gcvNULL)
++ {
++ /* Update last load state. */
++ buffer[Context->lastIndex] =
++ ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Walk all the states. */
++ for (i = 0; i < (gctUINT32)Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = (gctUINT)Index + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Update last address and size. */
++ Context->lastAddress += (gctUINT32)Size;
++ Context->lastSize += Size;
++
++ /* Return number of slots required. */
++ return Size;
++}
++
++static gctUINT32
++_StateMirror(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Address,
++ IN gctUINT32 Size,
++ IN gctUINT32 AddressMirror
++ )
++{
++ gctUINT32 i;
++
++ /* Process when buffer is set. */
++ if (Context->buffer != gcvNULL)
++ {
++ /* Walk all states. */
++ for (i = 0; i < Size; i++)
++ {
++ /* Copy the mapping address. */
++ Context->map[Address + i].index =
++ Context->map[AddressMirror + i].index;
++ }
++ }
++
++ /* Return the number of required maps. */
++ return Size;
++}
++#endif
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gceSTATUS
++_InitializeContextBuffer(
++ IN gckCONTEXT Context
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 index;
++
++#if gcdENABLE_3D
++ gctBOOL halti0, halti1, halti2, halti3;
++ gctUINT i;
++ gctUINT vertexUniforms, fragmentUniforms, vsConstBase, psConstBase, constMax;
++ gctBOOL unifiedUniform;
++ gctUINT fe2vsCount;
++#endif
++
++ /* Reset the buffer index. */
++ index = 0;
++
++ /* Reset the last state address. */
++ Context->lastAddress = ~0U;
++
++ /* Get the buffer pointer. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++
++ /**************************************************************************/
++ /* Build 2D states. *******************************************************/
++
++
++#if gcdENABLE_3D
++ /**************************************************************************/
++ /* Build 3D states. *******************************************************/
++
++ halti0 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) );
++ halti1 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) );
++ halti2 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) );
++ halti3 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures5)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) );
++
++ /* Query how many uniforms can be supported in non-unified uniform mode. */
++ {if (Context->hardware->identity.numConstants > 256){ unifiedUniform = gcvTRUE; vsConstBase = 0xC000; psConstBase = 0xC000; constMax = Context->hardware->identity.numConstants; vertexUniforms = 256; fragmentUniforms = constMax - vertexUniforms;}else if (Context->hardware->identity.numConstants == 256){ if (Context->hardware->identity.chipModel == gcv2000 && Context->hardware->identity.chipRevision == 0x5118) { unifiedUniform = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vertexUniforms = 256; fragmentUniforms = 64; constMax = 320; } else { unifiedUniform = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vertexUniforms = 256; fragmentUniforms = 256; constMax = 512; }}else{ unifiedUniform = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vertexUniforms = 168; fragmentUniforms = 64; constMax = 232;}};
++
++#if !gcdENABLE_UNIFIED_CONSTANT
++ if (Context->hardware->identity.numConstants > 256)
++ {
++ unifiedUniform = gcvTRUE;
++ }
++ else
++ {
++ unifiedUniform = gcvFALSE;
++ }
++#endif
++
++ /* Store the 3D entry index. */
++ Context->entryOffset3D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Current context pointer. */
++#if gcdDEBUG
++ index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ index += _FlushPipe(Context, index, gcvPIPE_3D);
++
++ /* Global states. */
++ index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03854 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Front End states. */
++ fe2vsCount = 12;
++ if (halti0)
++ {
++ fe2vsCount = 16;
++ }
++ index += _State(Context, index, 0x00600 >> 2, 0x00000000, fe2vsCount, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE);
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x14600 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14640 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14680 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ }
++
++ /* This register is programmed by all chips; all DECODE_SELECT fields are
++ ** programmed as VS except SAMPLER_DECODE_SELECT.
++ */
++ index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ /* I-Cache states. */
++ index += _State(Context, index, 0x00868 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0086C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0304C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01028 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x00890 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0104C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++ }
++ }
++
++ /* Vertex Shader states. */
++ index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00808 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++
++ /* Primitive Assembly states. */
++ index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A40 >> 2, 0x00000000, Context->hardware->identity.varyingsCount, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A88 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++#if gcdMULTI_GPU
++ index += _State(Context, index, 0x03A00 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03A04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++ /* Setup states. */
++ index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++
++ /* Raster states. */
++ index += _State(Context, index, 0x00E00 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E08 >> 2, 0x00000031, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E24 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E20 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x00E0C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ /* Pixel Shader states. */
++ index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++
++ /* Texture states. */
++ index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 22:22)) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) ))
++ {
++ /*
++ * Linear stride LODn will overwrite LOD0 on GC880/GC2000,
++ * and only LOD0 is valid for this register.
++ */
++ gctUINT count = halti1 ? 14 : 1;
++
++ for (i = 0; i < 12; i += 1)
++ {
++ index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, count, gcvFALSE, gcvFALSE);
++ }
++ }
++
++ if (halti1)
++ {
++ gctUINT texBlockCount;
++ gctUINT gcregTXLogSizeResetValue;
++
++ /* Enable the integer filter pipe for all texture samplers
++ so that the floating point filter clock will shut off until
++ we start using the floating point filter.
++ */
++ gcregTXLogSizeResetValue = ((((gctUINT32) (0x00000000)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 29:29) - (0 ? 29:29) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 29:29) - (0 ? 29:29) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29)));
++
++ /* New texture block. */
++ index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10100 >> 2, gcregTXLogSizeResetValue, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 15:15)) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ }
++
++ texBlockCount = ((512) >> (4));
++
++ for (i = 0; i < texBlockCount; i += 1)
++ {
++ index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x10700 >> 2, 0x00000F00, 32, gcvFALSE, gcvFALSE);
++ }
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x10780 >> 2, 0x00030000, 32, gcvFALSE, gcvFALSE);
++ }
++
++ /* ASTC */
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 13:13)) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x10500 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10580 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10600 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10680 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ }
++
++ /* YUV. */
++ index += _State(Context, index, 0x01678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0167C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01680 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01684 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01688 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0168C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01690 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01694 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01698 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0169C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Thread walker states. */
++ index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ index += _State(Context, index, 0x00940 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00944 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00948 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0094C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00950 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00954 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++
++ if (!halti3)
++ {
++ if (Context->hardware->identity.instructionCount > 1024)
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ else if (Context->hardware->identity.instructionCount > 256)
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* VX instruction memory. */
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2 , 0x0C000 >> 2);
++ }
++ else /* if (Context->hardware->identity.instructionCount <= 256) */
++ {
++ /* old shader instruction PC registers */
++ index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ /* The I-cache uses the new instruction PC registers. */
++ else
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ if (unifiedUniform)
++ {
++ gctINT numConstants = Context->hardware->identity.numConstants;
++
++ index += _State(Context, index, 0x01024 >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00864 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ numConstants > 0;
++ i += 256 << 2,
++ numConstants -= 256
++ )
++ {
++ if (numConstants >= 256)
++ {
++ index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, numConstants << 2, gcvFALSE, gcvFALSE);
++ }
++ index += _CLOSE_RANGE();
++ }
++ }
++#if gcdENABLE_UNIFIED_CONSTANT
++ else
++#endif
++ {
++ index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE);
++ }
++
++ /* Store the index of the "XD" entry. */
++ Context->entryOffsetXDFrom3D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++
++ /* Pixel Engine states. */
++ index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0145C >> 2, 0x00000010, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Composition states. */
++ index += _State(Context, index, 0x03008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (Context->hardware->identity.pixelPipes == 1)
++ {
++ index += _State(Context, index, 0x01460 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ if (Context->hardware->identity.pixelPipes > 1 || halti0)
++ {
++ index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ for (i = 0; i < 3; i++)
++ {
++ index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ if (halti2)
++ {
++ for (i = 0; i < 7; i++)
++ {
++ index += _State(Context, index, (0x14800 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++ index += _State(Context, index, 0x14900 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ }
++
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x014BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ /* Resolve states. */
++ index += _State(Context, index, 0x01604 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01608 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0160C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01610 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01614 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01620 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01630 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01640 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0163C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if ((Context->hardware->identity.pixelPipes > 1) || halti1)
++ {
++ index += _State(Context, index, (0x016C0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, (0x016E0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01700 >> 2, 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvFALSE);
++ }
++
++#if gcd3DBLIT
++ index += _State(Context, index, (0x14000 >> 2) + (0 << 1), 0x00000000, 2, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1400C >> 2, 0x0001C800, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14010 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14014 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x14018 >> 2) + (0 << 1), 0x00000000, 2, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14020 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14024 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14028 >> 2, 0x0001C800, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1402C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14034 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14038 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1403C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14040 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14044 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14048 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1404C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14050 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14058 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1405C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14054 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14100 >> 2, 0x00000000, 64, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14200 >> 2, 0x00000000, 64, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14064 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14068 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _State(Context, index, 0x1406C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14070 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14074 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14078 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1407C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14080 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14084 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14088 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1408C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14090 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _State(Context, index, 0x14094 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14098 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ /* Tile status. */
++ index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x01780 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x017A0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x017C0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x017E0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x01A00 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x01A20 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x01A40 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++
++ if(((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 25:25) & ((gctUINT32) ((((1 ? 25:25) - (0 ? 25:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:25) - (0 ? 25:25) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 25:25) - (0 ? 25:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:25) - (0 ? 25:25) + 1))))))))
++ {
++ index += _State(Context, index, 0x03860 >> 2, 0x6, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x01A80 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++ }
++
++ /* Semaphore/stall. */
++ index += _SemaphoreStall(Context, index);
++#endif
++
++ /**************************************************************************/
++ /* Link to another address. ***********************************************/
++
++ Context->linkIndex3D = (gctUINT)index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++ /* Store the end of the context buffer. */
++ Context->bufferSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /**************************************************************************/
++ /* Pipe switch for the case where neither 2D nor 3D are used. *************/
++
++ /* Store the 3D entry index. */
++ Context->entryOffsetXDFrom2D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Store the location of the link. */
++ Context->linkIndexXD = (gctUINT)index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++
++ /**************************************************************************/
++ /* Save size for buffer. **************************************************/
++
++ Context->totalSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
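++
++/* _InitializeContextBuffer is called twice from gckCONTEXT_Construct: once
++** before any context buffer exists (Context->buffer == gcvNULL), purely to
++** compute the entry offsets and the total buffer size, and once more after
++** the buffers have been allocated, to fill the current buffer with the
++** default state values built above. */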
++
++static gceSTATUS
++_DestroyContext(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ if (Context != gcvNULL)
++ {
++ gcsCONTEXT_PTR bufferHead;
++
++ /* Free context buffers. */
++ for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
++ {
++ /* Get a shortcut to the current buffer. */
++ gcsCONTEXT_PTR buffer = Context->buffer;
++
++ /* Get the next buffer. */
++ gcsCONTEXT_PTR next = buffer->next;
++
++ /* Last item? */
++ if (next == bufferHead)
++ {
++ next = gcvNULL;
++ }
++
++ /* Destroy the signal. */
++ if (buffer->signal != gcvNULL)
++ {
++ gcmkONERROR(gckOS_DestroySignal(
++ Context->os, buffer->signal
++ ));
++
++ buffer->signal = gcvNULL;
++ }
++
++ /* Free state delta map. */
++ if (buffer->logical != gcvNULL)
++ {
++ if (Context->hardware->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckEVENT_FreeContiguousMemory(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++ }
++
++ buffer->logical = gcvNULL;
++ }
++
++ /* Free context buffer. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));
++
++ /* Remove from the list. */
++ Context->buffer = next;
++ }
++
++#if gcdSECURE_USER
++ /* Free the hint array. */
++ if (Context->hint != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
++ }
++#endif
++ /* Free record array copy. */
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (Context->recordArrayMap != gcvNULL)
++ {
++ gcsRECORD_ARRAY_MAP_PTR map = Context->recordArrayMap;
++
++ do
++ {
++ /* Free record array. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, map->kData));
++ map = map->next;
++ }
++ while (map != Context->recordArrayMap);
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArrayMap));
++ }
++#else
++ if (Context->recordArray != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArray));
++ }
++#endif
++
++ /* Free the state mapping. */
++ if (Context->map != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->map));
++ }
++
++ /* Mark the gckCONTEXT object as unknown. */
++ Context->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCONTEXT object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
++ }
++
++OnError:
++ return status;
++}
++
++
++/******************************************************************************\
++**************************** Context Management API ****************************
++\******************************************************************************/
++
++/******************************************************************************\
++**
++** gckCONTEXT_Construct
++**
++** Construct a new gckCONTEXT object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gckHARDWARE Hardware
++** Pointer to gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive the gckCONTEXT object
++** pointer.
++*/
++#if (gcdENABLE_3D || gcdENABLE_2D)
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gctUINT32 allocationSize;
++ gctUINT i;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++
++
++ /**************************************************************************/
++ /* Allocate and initialize basic fields of gckCONTEXT. ********************/
++
++ /* The context object size. */
++ allocationSize = gcmSIZEOF(struct _gckCONTEXT);
++
++ /* Allocate the object. */
++ gcmkONERROR(gckOS_Allocate(
++ Os, allocationSize, &pointer
++ ));
++
++ context = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));
++
++ /* Initialize the gckCONTEXT object. */
++ context->object.type = gcvOBJ_CONTEXT;
++ context->os = Os;
++ context->hardware = Hardware;
++
++
++#if !gcdENABLE_3D
++ context->entryPipe = gcvPIPE_2D;
++ context->exitPipe = gcvPIPE_2D;
++#elif gcdCMD_NO_2D_CONTEXT
++ context->entryPipe = gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#else
++ context->entryPipe
++ = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
++ ? gcvPIPE_2D
++ : gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#endif
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Hardware,
++ &context->alignment,
++ &context->reservedHead,
++ &context->reservedTail
++ ));
++
++ /* Mark the context as dirty to force loading of the entire state table
++ the first time. */
++ context->dirty = gcvTRUE;
++
++
++ /**************************************************************************/
++ /* Get the size of the context buffer. ************************************/
++
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++
++ /**************************************************************************/
++ /* Compute the size of the record array. **********************************/
++
++ context->recordArraySize
++ = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * (gctUINT)context->stateCount;
++
++
++ if (context->stateCount > 0)
++ {
++ /**************************************************************************/
++ /* Allocate and reset the state mapping table. ****************************/
++
++ /* Allocate the state mapping table. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsSTATE_MAP) * context->stateCount,
++ &pointer
++ ));
++
++ context->map = pointer;
++
++ /* Zero the state mapping table. */
++ gcmkONERROR(gckOS_ZeroMemory(
++ context->map, gcmSIZEOF(gcsSTATE_MAP) * context->stateCount
++ ));
++
++
++ /**************************************************************************/
++ /* Allocate the hint array. ***********************************************/
++
++#if gcdSECURE_USER
++ /* Allocate hints. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gctBOOL) * context->stateCount,
++ &pointer
++ ));
++
++ context->hint = pointer;
++#endif
++ }
++
++ /**************************************************************************/
++ /* Allocate the context and state delta buffers. **************************/
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
++ {
++ /* Allocate a context buffer. */
++ gcsCONTEXT_PTR buffer;
++
++ gctSIZE_T totalSize = context->totalSize;
++
++ /* Allocate the context buffer structure. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsCONTEXT),
++ &pointer
++ ));
++
++ buffer = pointer;
++
++ /* Reset the context buffer structure. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ buffer, gcmSIZEOF(gcsCONTEXT)
++ ));
++
++ /* Append to the list. */
++ if (context->buffer == gcvNULL)
++ {
++ buffer->next = buffer;
++ context->buffer = buffer;
++ }
++ else
++ {
++ buffer->next = context->buffer->next;
++ context->buffer->next = buffer;
++ }
++
++ /* Set the number of the delta in order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ buffer->num = i;
++#endif
++
++ /* Create the busy signal. */
++ gcmkONERROR(gckOS_CreateSignal(
++ Os, gcvFALSE, &buffer->signal
++ ));
++
++ /* Set the signal, buffer is currently not busy. */
++ gcmkONERROR(gckOS_Signal(
++ Os, buffer->signal, gcvTRUE
++ ));
++
++ /* Create a new physical context buffer. */
++ if (context->hardware->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++ context->hardware->kernel,
++ gcvFALSE,
++ &totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++ gcmkONERROR(gckKERNEL_GetGPUAddress(
++ context->hardware->kernel,
++ pointer,
++ gcvFALSE,
++ &address
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ context->hardware,
++ pointer,
++ gcvFALSE,
++ &address
++ ));
++ }
++
++ buffer->logical = pointer;
++ buffer->address = address;
++
++ /* Set gckEVENT object pointer. */
++ buffer->eventObj = Hardware->kernel->eventObj;
++
++ /* Set the pointers to the LINK commands. */
++ if (context->linkIndex2D != 0)
++ {
++ buffer->link2D = &buffer->logical[context->linkIndex2D];
++ }
++
++ if (context->linkIndex3D != 0)
++ {
++ buffer->link3D = &buffer->logical[context->linkIndex3D];
++ }
++
++ if (context->linkIndexXD != 0)
++ {
++ gctPOINTER xdLink;
++ gctUINT32 xdEntryAddress;
++ gctUINT32 xdEntrySize;
++ gctUINT32 linkBytes;
++
++ /* Determine LINK parameters. */
++ xdLink
++ = &buffer->logical[context->linkIndexXD];
++
++ xdEntryAddress
++ = buffer->address
++ + context->entryOffsetXDFrom3D;
++
++ xdEntrySize
++ = context->bufferSize
++ - context->entryOffsetXDFrom3D;
++
++ /* Query LINK size. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware, gcvNULL, 0, 0, &linkBytes
++ ));
++
++ /* Generate a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware,
++ xdLink,
++ xdEntryAddress,
++ xdEntrySize,
++ &linkBytes
++ ));
++ }
++ }
++
++
++ /**************************************************************************/
++ /* Initialize the context buffers. ****************************************/
++
++ /* Initialize the current context buffer. */
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++ /* Make all created contexts equal. */
++ {
++ gcsCONTEXT_PTR currContext, tempContext;
++
++ /* Set the current context buffer. */
++ currContext = context->buffer;
++
++ /* Get the next context buffer. */
++ tempContext = currContext->next;
++
++ /* Loop through all buffers. */
++ while (tempContext != currContext)
++ {
++ if (tempContext == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ /* Copy the current context. */
++ gckOS_MemCopy(
++ tempContext->logical,
++ currContext->logical,
++ context->totalSize
++ );
++
++ /* Get the next context buffer. */
++ tempContext = tempContext->next;
++ }
++ }
++
++ /* Return pointer to the gckCONTEXT object. */
++ *Context = context;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%08X", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back on error. */
++ gcmkVERIFY_OK(_DestroyContext(context));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
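++
++/* Illustrative lifecycle of the context management API (a sketch only; the
++** surrounding kernel objects, variable names and error handling are assumed):
++**
++**     gckCONTEXT context = gcvNULL;
++**
++**     // Build the per-process context object and its buffers.
++**     gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, ProcessID, &context));
++**
++**     // On each commit, merge the pending user state deltas into the
++**     // current context buffer.
++**     gcmkONERROR(gckCONTEXT_Update(context, ProcessID, stateDelta));
++**
++**     // Tear the context down when it is no longer needed.
++**     gcmkVERIFY_OK(gckCONTEXT_Destroy(context));
++*/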
++
++/******************************************************************************\
++**
++** gckCONTEXT_Destroy
++**
++** Destroy a gckCONTEXT object.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%08X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Destroy the context and all related objects. */
++ status = _DestroyContext(Context);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Update
++**
++** Merge all pending state delta buffers into the current context buffer.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ )
++{
++#if gcdENABLE_3D
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSTATE_DELTA _stateDelta;
++ gckKERNEL kernel;
++ gcsCONTEXT_PTR buffer;
++ gcsSTATE_MAP_PTR map;
++ gctBOOL needCopy = gcvFALSE;
++ gcsSTATE_DELTA_PTR nDelta;
++ gcsSTATE_DELTA_PTR uDelta = gcvNULL;
++ gcsSTATE_DELTA_PTR kDelta = gcvNULL;
++ gcsSTATE_DELTA_RECORD_PTR record;
++ gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL;
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ gcsRECORD_ARRAY_MAP_PTR recordArrayMap = gcvNULL;
++#endif
++ gctUINT elementCount;
++ gctUINT address;
++ gctUINT32 mask;
++ gctUINT32 data;
++ gctUINT index;
++ gctUINT i, j;
++
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++
++ gcmkHEADER_ARG(
++ "Context=0x%08X ProcessID=%d StateDelta=0x%08X",
++ Context, ProcessID, StateDelta
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Get a shortcut to the kernel object. */
++ kernel = Context->hardware->kernel;
++
++ /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy));
++
++ /* Allocate the copy buffer for the user record array. */
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy && (Context->recordArrayMap == gcvNULL))
++ {
++ /* Allocate enough maps. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ gcmSIZEOF(gcsRECORD_ARRAY_MAP_PTR) * gcdCONTEXT_BUFFER_COUNT,
++ (gctPOINTER *) &Context->recordArrayMap
++ ));
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++)
++ {
++ /* Next mapping id. */
++ gctUINT n = (i + 1) % gcdCONTEXT_BUFFER_COUNT;
++
++ recordArrayMap = &Context->recordArrayMap[i];
++
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &recordArrayMap->kData
++ ));
++
++ /* Initialize fields. */
++ recordArrayMap->key = 0;
++ recordArrayMap->next = &Context->recordArrayMap[n];
++ }
++ }
++#else
++ if (needCopy && (Context->recordArray == gcvNULL))
++ {
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &Context->recordArray
++ ));
++ }
++#endif
++
++ /* Get the current context buffer. */
++ buffer = Context->buffer;
++
++ /* Wait until the context buffer becomes available; this will
++ also reset the signal and mark the buffer as busy. */
++ gcmkONERROR(gckOS_WaitSignal(
++ Context->os, buffer->signal, gcvINFINITE
++ ));
++
++#if gcdSECURE_USER
++ /* Get the cache from the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && gcdENABLE_3D
++ /* Update current context token. */
++ buffer->logical[Context->map[0x0E14].index]
++ = (gctUINT32)gcmPTR2INT32(Context);
++#endif
++
++ /* Are there any pending deltas? */
++ if (buffer->deltaCount != 0)
++ {
++ /* Get the state map. */
++ map = Context->map;
++
++ /* Get the first delta item. */
++ uDelta = buffer->delta;
++
++ /* Reset the vertex stream count. */
++ elementCount = 0;
++
++ /* Merge all pending deltas. */
++ for (i = 0; i < buffer->deltaCount; i += 1)
++ {
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy)
++ {
++ recordArray = gcvNULL;
++ recordArrayMap = Context->recordArrayMap;
++
++ do
++ {
++ /* Check if recordArray is already opened. */
++ if (recordArrayMap->key == kDelta->recordArray)
++ {
++ /* Found. */
++ recordArray = recordArrayMap->kData;
++ break;
++ }
++
++ recordArrayMap = recordArrayMap->next;
++ }
++ while (recordArrayMap != Context->recordArrayMap);
++
++ if (recordArray == gcvNULL)
++ {
++ while (recordArrayMap->key != 0)
++ {
++ /* Found an empty slot. */
++ recordArrayMap = recordArrayMap->next;
++ }
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ kernel->os,
++ recordArrayMap->kData,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize
++ ));
++
++ /* Save user pointer as key. */
++ recordArrayMap->key = kDelta->recordArray;
++ recordArray = recordArrayMap->kData;
++ }
++ }
++ else
++ {
++ /* Get access to the state records. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ kernel->os,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++#else
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ Context->recordArray,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++#endif
++
++ /* Merge all pending states. */
++ for (j = 0; j < kDelta->recordCount; j += 1)
++ {
++ if (j >= Context->stateCount)
++ {
++ break;
++ }
++
++ /* Get the current state record. */
++ record = &recordArray[j];
++
++ /* Get the state address. */
++ address = record->address;
++
++ /* Make sure the state is a part of the mapping table. */
++ if (address >= Context->stateCount)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++
++ continue;
++ }
++
++ /* Get the state index. */
++ index = map[address].index;
++
++ /* Skip the state if not mapped. */
++ if (index == 0)
++ {
++ continue;
++ }
++
++ /* Get the data mask. */
++ mask = record->mask;
++
++ /* Masked states that are being completely reset, or regular states. */
++ if ((mask == 0) || (mask == ~0U))
++ {
++ /* Get the new data value. */
++ data = record->data;
++
++ /* Process special states. */
++ if (address == 0x0595)
++ {
++ /* Force auto-disable to be disabled. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13)));
++ }
++
++#if gcdSECURE_USER
++ /* Do we need to convert the logical address? */
++ if (Context->hint[address])
++ {
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) &data
++ ));
++ }
++#endif
++
++ /* Set new data. */
++ buffer->logical[index] = data;
++ }
++
++ /* Masked states that are being set partially. */
++ else
++ {
++ buffer->logical[index]
++ = (~mask & buffer->logical[index])
++ | (mask & record->data);
++ }
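++
++ /* Illustrative example of the partial merge above (values are
++ hypothetical): with buffer->logical[index] = 0x000000F0,
++ mask = 0x0000000F and record->data = 0x0000000A, the resulting
++ word is (~mask & 0xF0) | (mask & 0x0A) = 0x000000FA. */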
++ }
++
++ /* Get the element count. */
++ if (kDelta->elementCount != 0)
++ {
++ elementCount = kDelta->elementCount;
++ }
++
++ /* Dereference delta. */
++ kDelta->refCount -= 1;
++ gcmkASSERT(kDelta->refCount >= 0);
++
++ /* Get the next state delta. */
++ nDelta = gcmUINT64_TO_PTR(kDelta->next);
++
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy)
++ {
++ if (kDelta->refCount == 0)
++ {
++ /* No other reference, reset the mapping. */
++ recordArrayMap->key = 0;
++ }
++ }
++ else
++ {
++ /* Close access to the state records. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ kernel->os,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize,
++ (gctPOINTER *) recordArray
++ ));
++
++ recordArray = gcvNULL;
++ }
++#else
++ /* Close access to the state records. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++#endif
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Update the user delta pointer. */
++ uDelta = nDelta;
++ }
++
++ /* Hardware disables all input streams when stream 0 is programmed; it
++ then re-enables those streams that were explicitly programmed by the
++ software. Because of this we cannot program the entire array of
++ values, otherwise we would get all streams re-enabled; instead we
++ program only those that are actually needed by the software. */
++ if (elementCount != 0)
++ {
++ gctUINT base;
++ gctUINT nopCount;
++ gctUINT32_PTR nop;
++ gctUINT fe2vsCount = 12;
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++
++ /* Determine the base index of the vertex stream array. */
++ base = map[0x0180].index;
++
++ /* Set the proper state count. */
++ buffer->logical[base - 1]
++ = ((((gctUINT32) (buffer->logical[base - 1])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (elementCount ) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine the number of NOP commands. */
++ nopCount
++ = (fe2vsCount / 2)
++ - (elementCount / 2);
++
++ /* Determine the location of the first NOP. */
++ nop = &buffer->logical[base + (elementCount | 1)];
++
++ /* Fill the unused space with NOPs. */
++ for (i = 0; i < nopCount; i += 1)
++ {
++ if (nop >= buffer->logical + Context->totalSize)
++ {
++ break;
++ }
++
++ /* Generate a NOP command. */
++ *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ /* Advance. */
++ nop += 2;
++ }
++ }
++
++ /* Reset pending deltas. */
++ buffer->deltaCount = 0;
++ buffer->delta = gcvNULL;
++ }
++
++ /* Set state delta user pointer. */
++ uDelta = StateDelta;
++
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* State delta cannot be attached to anything yet. */
++ if (kDelta->refCount != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): kDelta->refCount = %d (has to be 0).\n",
++ __FUNCTION__, __LINE__,
++ kDelta->refCount
++ );
++ }
++
++ /* Attach to all contexts. */
++ buffer = Context->buffer;
++
++ do
++ {
++ /* Attach to the context if nothing is attached yet. If a delta
++ is already attached, all we need to do is increment
++ the number of deltas in the context. */
++ if (buffer->delta == gcvNULL)
++ {
++ buffer->delta = uDelta;
++ }
++
++ /* Update reference count. */
++ kDelta->refCount += 1;
++
++ /* Update counters. */
++ buffer->deltaCount += 1;
++
++ /* Get the next context buffer. */
++ buffer = buffer->next;
++
++ if (buffer == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++ }
++ while (Context->buffer != buffer);
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Schedule an event to mark the context buffer as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL
++ ));
++
++ /* Advance to the next context buffer. */
++ Context->buffer = buffer->next;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Close access to the state records. */
++ if (kDelta != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++
++ /* Close access to the current state delta. */
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++gceSTATUS
++gckCONTEXT_MapBuffer(
++ IN gckCONTEXT Context,
++ OUT gctUINT32 *Physicals,
++ OUT gctUINT64 *Logicals,
++ OUT gctUINT32 *Bytes
++ )
++{
++ gceSTATUS status;
++ int i = 0;
++ gctSIZE_T pageCount;
++ gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer;
++ gckKERNEL kernel = Context->hardware->kernel;
++ gctPOINTER logical;
++ gctPHYS_ADDR physical;
++
++ gcsCONTEXT_PTR buffer;
++
++ gcmkHEADER();
++
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ buffer = Context->buffer;
++
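++ /* Map each of the gcdCONTEXT_BUFFER_COUNT context buffers into the
++ calling process: virtual command buffers get a user virtual mapping,
++ otherwise the buffer memory is mapped directly. A name for the physical
++ allocation and the user logical address are returned for every buffer. */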
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++)
++ {
++ if (kernel->virtualCommandBuffer)
++ {
++ commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)buffer->physical;
++ physical = commandBuffer->physical;
++
++ gcmkONERROR(gckOS_CreateUserVirtualMapping(
++ kernel->os,
++ physical,
++ Context->totalSize,
++ &logical,
++ &pageCount));
++ }
++ else
++ {
++ physical = buffer->physical;
++
++ gcmkONERROR(gckOS_MapMemory(
++ kernel->os,
++ physical,
++ Context->totalSize,
++ &logical));
++ }
++
++ Physicals[i] = gcmPTR_TO_NAME(physical);
++
++ Logicals[i] = gcmPTR_TO_UINT64(logical);
++
++ buffer = buffer->next;
++ }
++
++ *Bytes = (gctUINT)Context->totalSize;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_context.h 2015-05-01 14:57:59.555427001 -0500
+@@ -0,0 +1,183 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_context_h_
++#define __gc_hal_kernel_context_h_
++
++#include "gc_hal_kernel_buffer.h"
++
++/* Experimental optimization. */
++#define REMOVE_DUPLICATED_COPY_FROM_USER 1
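++/* When enabled, record arrays copied from user space are cached in the
++   gcsRECORD_ARRAY_MAP list declared below, keyed by the user pointer, so a
++   delta that reuses the same array can presumably skip another copy. */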
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Maps state locations within the context buffer. */
++typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR;
++typedef struct _gcsSTATE_MAP
++{
++ /* Index of the state in the context buffer. */
++ gctUINT index;
++
++ /* State mask. */
++ gctUINT32 mask;
++}
++gcsSTATE_MAP;
++
++/* Context buffer. */
++typedef struct _gcsCONTEXT * gcsCONTEXT_PTR;
++typedef struct _gcsCONTEXT
++{
++ /* For debugging: the number of the context buffer in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Context busy signal. */
++ gctSIGNAL signal;
++
++ /* Physical address of the context buffer. */
++ gctPHYS_ADDR physical;
++
++ /* Logical address of the context buffer. */
++ gctUINT32_PTR logical;
++
++ /* Hardware address of the context buffer. */
++ gctUINT32 address;
++
++ /* Pointer to the LINK commands. */
++ gctPOINTER link2D;
++ gctPOINTER link3D;
++
++ /* The number of pending state deltas. */
++ gctUINT deltaCount;
++
++ /* Pointer to the first delta to be applied. */
++ gcsSTATE_DELTA_PTR delta;
++
++ /* Next context buffer. */
++ gcsCONTEXT_PTR next;
++}
++gcsCONTEXT;
++
++typedef struct _gcsRECORD_ARRAY_MAP * gcsRECORD_ARRAY_MAP_PTR;
++struct _gcsRECORD_ARRAY_MAP
++{
++ /* User pointer key. */
++ gctUINT64 key;
++
++ /* Kernel memory buffer. */
++ gcsSTATE_DELTA_RECORD_PTR kData;
++
++ /* Next map. */
++ gcsRECORD_ARRAY_MAP_PTR next;
++
++};
++
++/* gckCONTEXT structure that holds the current context. */
++struct _gckCONTEXT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Command buffer alignment. */
++ gctUINT32 alignment;
++ gctUINT32 reservedHead;
++ gctUINT32 reservedTail;
++
++ /* Context buffer metrics. */
++ gctSIZE_T stateCount;
++ gctUINT32 totalSize;
++ gctUINT32 bufferSize;
++ gctUINT32 linkIndex2D;
++ gctUINT32 linkIndex3D;
++ gctUINT32 linkIndexXD;
++ gctUINT32 entryOffset3D;
++ gctUINT32 entryOffsetXDFrom2D;
++ gctUINT32 entryOffsetXDFrom3D;
++
++ /* Dirty flags. */
++ gctBOOL dirty;
++ gctBOOL dirty2D;
++ gctBOOL dirty3D;
++ gcsCONTEXT_PTR dirtyBuffer;
++
++ /* State mapping. */
++ gcsSTATE_MAP_PTR map;
++
++ /* List of context buffers. */
++ gcsCONTEXT_PTR buffer;
++
++ /* A copy of the user record array. */
++ gctUINT recordArraySize;
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ gcsRECORD_ARRAY_MAP_PTR recordArrayMap;
++#else
++ gcsSTATE_DELTA_RECORD_PTR recordArray;
++#endif
++
++ /* Requested pipe select for context. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Variables used for building state buffer. */
++ gctUINT32 lastAddress;
++ gctSIZE_T lastSize;
++ gctUINT32 lastIndex;
++ gctBOOL lastFixed;
++
++ gctUINT32 pipeSelectBytes;
++
++ /* Hint array. */
++#if gcdSECURE_USER
++ gctBOOL_PTR hint;
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gcsPROFILER_COUNTERS latestProfiler;
++ gcsPROFILER_COUNTERS histroyProfiler;
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++#endif
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_context_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.c 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,8036 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#if VIVANTE_PROFILER_CONTEXT
++#include "gc_hal_kernel_context.h"
++#endif
++
++#define gcdDISABLE_FE_L2 1
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++#define gcmSEMAPHORESTALL(buffer) \
++ do \
++ { \
++ /* Arm the PE-FE Semaphore. */ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, 0x0E02); \
++ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END) \
++ | gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE);\
++ \
++ /* STALL FE until PE is done flushing. */ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, STALL_STALL, SOURCE, FRONT_END) \
++ | gcmSETFIELDVALUE(0, STALL_STALL, DESTINATION, PIXEL_ENGINE); \
++ } while(0)
++
++typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR;
++typedef struct _gcsiDEBUG_REGISTERS
++{
++ gctSTRING module;
++ gctUINT index;
++ gctUINT shift;
++ gctUINT data;
++ gctUINT count;
++ gctUINT32 signature;
++}
++gcsiDEBUG_REGISTERS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gctBOOL
++_IsHardwareMatch(
++ IN gckHARDWARE Hardware,
++ IN gctINT32 ChipModel,
++ IN gctUINT32 ChipRevision
++ )
++{
++ return ((Hardware->identity.chipModel == ChipModel) &&
++ (Hardware->identity.chipRevision == ChipRevision));
++}
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gceSTATUS status;
++
++ gctUINT32 chipIdentity;
++
++ gctUINT32 streamCount = 0;
++ gctUINT32 registerMax = 0;
++ gctUINT32 threadCount = 0;
++ gctUINT32 shaderCoreCount = 0;
++ gctUINT32 vertexCacheSize = 0;
++ gctUINT32 vertexOutputBufferSize = 0;
++ gctUINT32 pixelPipes = 0;
++ gctUINT32 instructionCount = 0;
++ gctUINT32 numConstants = 0;
++ gctUINT32 bufferSize = 0;
++ gctUINT32 varyingsCount = 0;
++#if gcdMULTI_GPU
++ gctUINT32 gpuCoreCount = 0;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /***************************************************************************
++ ** Get chip ID and revision.
++ */
++
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00018,
++ &chipIdentity));
++
++ /* Special case for older graphics cores. */
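++ /* A value of 0x01 in bits 31:24 of the identity register marks these
++ cores; they are reported as gcv500 with the revision taken from
++ bits 15:12. */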
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ Identity->chipModel = gcv500;
++ Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00020,
++ (gctUINT32_PTR) &Identity->chipModel));
++
++ if (((Identity->chipModel & 0xFF00) == 0x0400)
++ && (Identity->chipModel != 0x0420)
++ && (Identity->chipModel != 0x0428))
++ {
++ Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400);
++ }
++
++ /* Read CHIP_REV register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00024,
++ &Identity->chipRevision));
++
++ if ((Identity->chipModel == gcv300)
++ && (Identity->chipRevision == 0x2201)
++ )
++ {
++ gctUINT32 chipDate;
++ gctUINT32 chipTime;
++
++ /* Read date and time registers. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00028,
++ &chipDate));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0002C,
++ &chipTime));
++
++ if ((chipDate == 0x20080814) && (chipTime == 0x12051100))
++ {
++ /* This IP has an ECO; put the correct revision in it. */
++ Identity->chipRevision = 0x1051;
++ }
++ }
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x000A8,
++ &Identity->productID));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipModel=%X",
++ Identity->chipModel);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipRevision=%X",
++ Identity->chipRevision);
++
++
++ /***************************************************************************
++ ** Get chip features.
++ */
++
++ /* Read chip feature register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0001C,
++ &Identity->chipFeatures));
++
++#if gcdENABLE_3D
++ /* Disable fast clear on GC700. */
++ if (Identity->chipModel == gcv700)
++ {
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++#endif
++
++ if (((Identity->chipModel == gcv500) && (Identity->chipRevision < 2))
++ || ((Identity->chipModel == gcv300) && (Identity->chipRevision < 0x2000))
++ )
++ {
++ /* GC500 rev 1.x and GC300 rev < 2.0 don't have these registers. */
++ Identity->chipMinorFeatures = 0;
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ Identity->chipMinorFeatures5 = 0;
++ }
++ else
++ {
++ /* Read chip minor feature register #0. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00034,
++ &Identity->chipMinorFeatures));
++
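++ /* Bit 21 of chipMinorFeatures signals that the additional minor-feature
++ registers (#1 through #5) are present; the same bit also gates the chip
++ specs reads further below. */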
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))))
++ )
++ {
++ /* Read chip minor features register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00074,
++ &Identity->chipMinorFeatures1));
++
++ /* Read chip minor features register #2. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00084,
++ &Identity->chipMinorFeatures2));
++
++ /*Identity->chipMinorFeatures2 &= ~(0x1 << 3);*/
++
++ /* Read chip minor features register #3. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00088,
++ &Identity->chipMinorFeatures3));
++
++
++ /* Read chip minor features register #4. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00094,
++ &Identity->chipMinorFeatures4));
++
++ /* Read chip minor features register #5. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x000A0,
++ &Identity->chipMinorFeatures5));
++ }
++ else
++ {
++ /* Chip doesn't have minor features registers #1 through #5. */
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ Identity->chipMinorFeatures5 = 0;
++ }
++ }
++
++ /* Get the Supertile layout in the hardware. */
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))))
++ {
++ Identity->superTileMode = 2;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))))
++ {
++ Identity->superTileMode = 1;
++ }
++ else
++ {
++ Identity->superTileMode = 0;
++ }
++
++ /* Exceptions: the GC1000, GC800, GC600, GC860 and GC400 revisions listed below use supertile mode 1. */
++ if (((Identity->chipModel == gcv1000) && ((Identity->chipRevision == 0x5035)
++ || (Identity->chipRevision == 0x5036)
++ || (Identity->chipRevision == 0x5037)
++ || (Identity->chipRevision == 0x5039)
++ || (Identity->chipRevision >= 0x5040)))
++ || ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4612))
++ || ((Identity->chipModel == gcv600) && (Identity->chipRevision >= 0x4650))
++ || ((Identity->chipModel == gcv860) && (Identity->chipRevision == 0x4647))
++ || ((Identity->chipModel == gcv400) && (Identity->chipRevision >= 0x4633)))
++ {
++ Identity->superTileMode = 1;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipFeatures=0x%08X",
++ Identity->chipFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures=0x%08X",
++ Identity->chipMinorFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures1=0x%08X",
++ Identity->chipMinorFeatures1);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures2=0x%08X",
++ Identity->chipMinorFeatures2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures3=0x%08X",
++ Identity->chipMinorFeatures3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures4=0x%08X",
++ Identity->chipMinorFeatures4);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures5=0x%08X",
++ Identity->chipMinorFeatures5);
++
++ /***************************************************************************
++ ** Get chip specs.
++ */
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ gctUINT32 specs, specs2, specs3, specs4;
++
++ /* Read gcChipSpecs register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00048,
++ &specs));
++
++ /* Extract the fields. */
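++ /* (gcChipSpecs bit fields: registerMax 7:4, threadCount 11:8,
++ vertexCacheSize 16:12, shaderCoreCount 24:20, pixelPipes 27:25,
++ vertexOutputBufferSize 31:28; the legacy stream count sits in 3:0.) */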
++ registerMax = (((((gctUINT32) (specs)) >> (0 ? 7:4)) & ((gctUINT32) ((((1 ? 7:4) - (0 ? 7:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:4) - (0 ? 7:4) + 1)))))) );
++ threadCount = (((((gctUINT32) (specs)) >> (0 ? 11:8)) & ((gctUINT32) ((((1 ? 11:8) - (0 ? 11:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:8) - (0 ? 11:8) + 1)))))) );
++ shaderCoreCount = (((((gctUINT32) (specs)) >> (0 ? 24:20)) & ((gctUINT32) ((((1 ? 24:20) - (0 ? 24:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:20) - (0 ? 24:20) + 1)))))) );
++ vertexCacheSize = (((((gctUINT32) (specs)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ vertexOutputBufferSize = (((((gctUINT32) (specs)) >> (0 ? 31:28)) & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1)))))) );
++ pixelPipes = (((((gctUINT32) (specs)) >> (0 ? 27:25)) & ((gctUINT32) ((((1 ? 27:25) - (0 ? 27:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:25) - (0 ? 27:25) + 1)))))) );
++
++ /* Read gcChipSpecs2 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00080,
++ &specs2));
++
++ instructionCount = (((((gctUINT32) (specs2)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ numConstants = (((((gctUINT32) (specs2)) >> (0 ? 31:16)) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1)))))) );
++ bufferSize = (((((gctUINT32) (specs2)) >> (0 ? 7:0)) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1)))))) );
++
++ /* Read gcChipSpecs3 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0008C,
++ &specs3));
++
++ varyingsCount = (((((gctUINT32) (specs3)) >> (0 ? 8:4)) & ((gctUINT32) ((((1 ? 8:4) - (0 ? 8:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:4) - (0 ? 8:4) + 1)))))) );
++#if gcdMULTI_GPU
++ gpuCoreCount = (((((gctUINT32) (specs3)) >> (0 ? 2:0)) & ((gctUINT32) ((((1 ? 2:0) - (0 ? 2:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:0) - (0 ? 2:0) + 1)))))) );
++#endif
++
++ /* Read gcChipSpecs4 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0009C,
++ &specs4));
++
++
++ streamCount = (((((gctUINT32) (specs4)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ if (streamCount == 0)
++ {
++ /* Extract stream count from older register. */
++ streamCount = (((((gctUINT32) (specs)) >> (0 ? 3:0)) & ((gctUINT32) ((((1 ? 3:0) - (0 ? 3:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:0) - (0 ? 3:0) + 1)))))) );
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs1=0x%08X",
++ specs);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs2=0x%08X",
++ specs2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs3=0x%08X",
++ specs3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs4=0x%08X",
++ specs4);
++ }
++
++ /* Get the number of pixel pipes. */
++ Identity->pixelPipes = gcmMAX(pixelPipes, 1);
++
++ /* Get the stream count. */
++ Identity->streamCount = (streamCount != 0)
++ ? streamCount
++ : (Identity->chipModel >= gcv1000) ? 4 : 1;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: streamCount=%u%s",
++ Identity->streamCount,
++ (streamCount == 0) ? " (default)" : "");
++
++ /* Get the vertex output buffer size. */
++ Identity->vertexOutputBufferSize = (vertexOutputBufferSize != 0)
++ ? 1 << vertexOutputBufferSize
++ : (Identity->chipModel == gcv400)
++ ? (Identity->chipRevision < 0x4000) ? 512
++ : (Identity->chipRevision < 0x4200) ? 256
++ : 128
++ : (Identity->chipModel == gcv530)
++ ? (Identity->chipRevision < 0x4200) ? 512
++ : 128
++ : 512;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexOutputBufferSize=%u%s",
++ Identity->vertexOutputBufferSize,
++ (vertexOutputBufferSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of threads. */
++ Identity->threadCount = (threadCount != 0)
++ ? 1 << threadCount
++ : (Identity->chipModel == gcv400) ? 64
++ : (Identity->chipModel == gcv500) ? 128
++ : (Identity->chipModel == gcv530) ? 128
++ : 256;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: threadCount=%u%s",
++ Identity->threadCount,
++ (threadCount == 0) ? " (default)" : "");
++
++ /* Get the number of shader cores. */
++ Identity->shaderCoreCount = (shaderCoreCount != 0)
++ ? shaderCoreCount
++ : (Identity->chipModel >= gcv1000) ? 2
++ : 1;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: shaderCoreCount=%u%s",
++ Identity->shaderCoreCount,
++ (shaderCoreCount == 0) ? " (default)" : "");
++
++ /* Get the vertex cache size. */
++ Identity->vertexCacheSize = (vertexCacheSize != 0)
++ ? vertexCacheSize
++ : 8;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexCacheSize=%u%s",
++ Identity->vertexCacheSize,
++ (vertexCacheSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of temporary registers. */
++ Identity->registerMax = (registerMax != 0)
++ /* A maximum of registerMax/4 registers is accessible to one shader. */
++ ? 1 << registerMax
++ : (Identity->chipModel == gcv400) ? 32
++ : 64;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: registerMax=%u%s",
++ Identity->registerMax,
++ (registerMax == 0) ? " (default)" : "");
++
++ /* Get the instruction count. */
++ Identity->instructionCount = (instructionCount == 0) ? 256
++ : (instructionCount == 1) ? 1024
++ : (instructionCount == 2) ? 2048
++ : (instructionCount == 0xFF) ? 512
++ : 256;
++
++ if (Identity->instructionCount == 256)
++ {
++ if ((Identity->chipModel == gcv2000 && Identity->chipRevision == 0x5108)
++ || Identity->chipModel == gcv880)
++ {
++ Identity->instructionCount = 512;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ Identity->instructionCount = 512;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: instructionCount=%u%s",
++ Identity->instructionCount,
++ (instructionCount == 0) ? " (default)" : "");
++
++ /* Get the number of constants. */
++ Identity->numConstants = (numConstants == 0) ? 168 : numConstants;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: numConstants=%u%s",
++ Identity->numConstants,
++ (numConstants == 0) ? " (default)" : "");
++
++ /* Get the buffer size. */
++ Identity->bufferSize = bufferSize;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: bufferSize=%u%s",
++ Identity->bufferSize,
++ (bufferSize == 0) ? " (default)" : "");
++
++
++ if (varyingsCount != 0)
++ {
++ Identity->varyingsCount = varyingsCount;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures1)) >> (0 ? 23:23) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))))
++ {
++ Identity->varyingsCount = 12;
++ }
++ else
++ {
++ Identity->varyingsCount = 8;
++ }
++
++ /* For some cores, position consumes two varyings, so the maximum number of varying vectors must be reduced by one. */
++ if ((Identity->chipModel == gcv5000 && Identity->chipRevision == 0x5434) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5222) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5208) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5245) ||
++ (Identity->chipModel == gcv3000 && Identity->chipRevision == 0x5435) ||
++ (Identity->chipModel == gcv2200 && Identity->chipRevision == 0x5244) ||
++ (Identity->chipModel == gcv1500 && Identity->chipRevision == 0x5246) ||
++ ((Identity->chipModel == gcv2100 || Identity->chipModel == gcv2000) && Identity->chipRevision == 0x5108) ||
++ (Identity->chipModel == gcv880 && (Identity->chipRevision == 0x5107 || Identity->chipRevision == 0x5106)))
++ {
++ Identity->varyingsCount -= 1;
++ }
++
++ Identity->chip2DControl = 0;
++ if (Identity->chipModel == gcv320)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x0002C,
++ &data));
++
++ if ((data != 33956864) &&
++ ((Identity->chipRevision == 0x5007) ||
++ (Identity->chipRevision == 0x5220)))
++ {
++ Identity->chip2DControl |= 0xFF &
++ (Identity->chipRevision == 0x5220 ? 8 :
++ (Identity->chipRevision == 0x5007 ? 12 : 0));
++ }
++
++ if (Identity->chipRevision == 0x5007)
++ {
++ /* Disable splitting rectangle. */
++ Identity->chip2DControl |= 0x100;
++
++ /* Enable 2D Flush. */
++ Identity->chip2DControl |= 0x200;
++ }
++ }
++
++#if gcdMULTI_GPU
++#if gcdMULTI_GPU > 1
++ Identity->gpuCoreCount = gpuCoreCount + 1;
++#else
++ Identity->gpuCoreCount = 1;
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#define gcdDEBUG_MODULE_CLOCK_GATING 0
++#define gcdDISABLE_MODULE_CLOCK_GATING 0
++#define gcdDISABLE_FE_CLOCK_GATING 0
++#define gcdDISABLE_PE_CLOCK_GATING 0
++#define gcdDISABLE_SH_CLOCK_GATING 0
++#define gcdDISABLE_PA_CLOCK_GATING 0
++#define gcdDISABLE_SE_CLOCK_GATING 0
++#define gcdDISABLE_RA_CLOCK_GATING 0
++#define gcdDISABLE_RA_EZ_CLOCK_GATING 0
++#define gcdDISABLE_RA_HZ_CLOCK_GATING 0
++#define gcdDISABLE_TX_CLOCK_GATING 0
++
++#if gcdDEBUG_MODULE_CLOCK_GATING
++gceSTATUS
++_ConfigureModuleLevelClockGating(
++ gckHARDWARE Hardware
++ )
++{
++ gctUINT32 data;
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++#if gcdDISABLE_FE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++#endif
++
++#if gcdDISABLE_PE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++#endif
++
++#if gcdDISABLE_SH_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++#endif
++
++#if gcdDISABLE_PA_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++#endif
++
++#if gcdDISABLE_SE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++#endif
++
++#if gcdDISABLE_RA_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++#endif
++
++#if gcdDISABLE_TX_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++#endif
++
++#if gcdDISABLE_RA_EZ_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++#endif
++
++#if gcdDISABLE_RA_HZ_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++#endif
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++
++#if gcdDISABLE_MODULE_CLOCK_GATING
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++#endif
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_PowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckHARDWARE hardware = (gckHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++static gceSTATUS
++_VerifyDMA(
++ IN gckOS Os,
++ IN gceCORE Core,
++ gctUINT32_PTR Address1,
++ gctUINT32_PTR Address2,
++ gctUINT32_PTR State1,
++ gctUINT32_PTR State2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++
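++ /* Capture the current DMA state (register 0x660) and address (0x664),
++ then re-read them up to 500 times; the loop exits early as soon as
++ either value changes, so unchanged values on return suggest the DMA
++ engine is stuck at the captured address. */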
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State1));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address1));
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State2));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address2));
++
++ if (*Address1 != *Address2)
++ {
++ break;
++ }
++
++ if (*State1 != *State2)
++ {
++ break;
++ }
++ }
++
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_DumpDebugRegisters(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gcsiDEBUG_REGISTERS_PTR Descriptor
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 select;
++ gctUINT32 data = 0;
++ gctUINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);
++
++ gcmkPRINT_N(4, " %s debug registers:\n", Descriptor->module);
++
++ for (i = 0; i < Descriptor->count; i += 1)
++ {
++ select = i << Descriptor->shift;
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ gcmkPRINT_N(12, " [0x%02X] 0x%08X\n", i, data);
++ }
++
++ select = 0xF << Descriptor->shift;
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ if (data == Descriptor->signature)
++ {
++ break;
++ }
++ }
++
++ if (i == 500)
++ {
++ gcmkPRINT_N(4, " failed to obtain the signature (read 0x%08X).\n", data);
++ }
++ else
++ {
++ gcmkPRINT_N(8, " signature = 0x%08X (%d read attempt(s))\n", data, i + 1);
++ }
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IsGPUPresent(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ control));
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Hardware->os,
++ Hardware->core,
++ &identity));
++
++ /* Check if these are the same values as saved before. */
++ if ((Hardware->identity.chipModel != identity.chipModel)
++ || (Hardware->identity.chipRevision != identity.chipRevision)
++ || (Hardware->identity.chipFeatures != identity.chipFeatures)
++ || (Hardware->identity.chipMinorFeatures != identity.chipMinorFeatures)
++ || (Hardware->identity.chipMinorFeatures1 != identity.chipMinorFeatures1)
++ || (Hardware->identity.chipMinorFeatures2 != identity.chipMinorFeatures2)
++ )
++ {
++ gcmkPRINT("[galcore]: GPU is not present.");
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_FlushCache(
++ gckHARDWARE Hardware,
++ gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 bytes, requested;
++ gctPOINTER buffer;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(Command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(Command, requested));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++gctBOOL
++_IsGPUIdle(
++ IN gctUINT32 Idle
++ )
++{
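++ /* The GPU counts as idle only when idle-status bits 0 through 7 of the
++ idle register are all set. */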
++ return (((((gctUINT32) (Idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) )
++ ;
++}
++
++/******************************************************************************\
++****************************** gckHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHARDWARE_Construct
++**
++** Construct a new gckHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** OUTPUT:
++**
++** gckHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckHARDWARE
++** object.
++*/
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware = gcvNULL;
++ gctUINT16 data = 0xff00;
++ gctPOINTER pointer = gcvNULL;
++#if gcdMULTI_GPU_AFFINITY
++ gctUINT32 control;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ /* Enable the GPU. */
++ gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE));
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Allocate the gckHARDWARE object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckHARDWARE),
++ &pointer));
++
++ hardware = (gckHARDWARE) pointer;
++
++ /* Initialize the gckHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++ hardware->core = Core;
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Os, Core, &hardware->identity));
++
++ /* Determine the hardware type */
++ switch (hardware->identity.chipModel)
++ {
++ case gcv350:
++ case gcv355:
++ hardware->type = gcvHARDWARE_VG;
++ break;
++
++ case gcv200:
++ case gcv300:
++ case gcv320:
++ case gcv328:
++ case gcv420:
++ case gcv428:
++ hardware->type = gcvHARDWARE_2D;
++ break;
++
++ default:
++#if gcdMULTI_GPU_AFFINITY
++ hardware->type = (Core == gcvCORE_MAJOR) ? gcvHARDWARE_3D : gcvHARDWARE_OCL;
++#else
++ hardware->type = gcvHARDWARE_3D;
++#endif
++
++ if(hardware->identity.chipModel == gcv880 && hardware->identity.chipRevision == 0x5107)
++ {
++ /* Set outstanding limit. */
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x00010;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ }
++
++
++ if ((((((gctUINT32) (hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ))
++ {
++ hardware->type = (gceHARDWARE_TYPE) (hardware->type | gcvHARDWARE_2D);
++ }
++ }
++
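++ /* GC300 revisions below 0x2000 expose their power/clock-gating control
++ registers at offset 0x0100 instead of 0x0000. */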
++ hardware->powerBaseAddress
++ = ((hardware->identity.chipModel == gcv300)
++ && (hardware->identity.chipRevision < 0x2000))
++ ? 0x0100
++ : 0x0000;
++
++ /* _ResetGPU needs powerBaseAddress. */
++ status = _ResetGPU(hardware, Os, Core);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0055C,
++#if gcdDISABLE_FE_L2
++ 0x00FFFFFF));
++#else
++ 0x00FFFF05));
++#endif
++
++#elif gcdMULTI_GPU_AFFINITY
++ control = ((((gctUINT32) (0x00FF0A05)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0055C,
++ control));
++#endif
++
++ hardware->powerMutex = gcvNULL;
++
++ hardware->mmuVersion
++ = (((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 28:28)) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) );
++
++ /* Determine whether bug fixes #1 are present. */
++ hardware->extraEventStates = ((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ /* Check if big endian */
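++ /* (data was initialized to 0xff00; on a big-endian CPU its first byte
++ reads back as 0xff.) */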
++ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff);
++
++ /* Initialize the fast clear. */
++ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1));
++
++#if !gcdENABLE_128B_MERGE
++
++ if (((((gctUINT32) (hardware->identity.chipMinorFeatures2)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ /* 128B merge is turned on by default. Disable it. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0));
++ }
++
++#endif
++
++ /* Set power state to ON. */
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++ hardware->lastWaitLink = ~0U;
++ hardware->lastEnd = ~0U;
++ hardware->globalSemaphore = gcvNULL;
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ hardware->powerOnFscaleVal = 64;
++#endif
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex));
++ gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore));
++ hardware->startIsr = gcvNULL;
++ hardware->stopIsr = gcvNULL;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _PowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pendingEvent));
++
++#if gcdLINK_QUEUE_SIZE
++ hardware->linkQueue.front = 0;
++ hardware->linkQueue.rear = 0;
++ hardware->linkQueue.count = 0;
++#endif
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Disable profiler by default */
++ hardware->gpuProfiler = gcvFALSE;
++
++#if defined(LINUX) || defined(__QNXNTO__) || defined(UNDERCE)
++ if (hardware->mmuVersion)
++ {
++ hardware->endAfterFlushMmuCache = gcvTRUE;
++ }
++ else
++#endif
++ {
++ hardware->endAfterFlushMmuCache = gcvFALSE;
++ }
++
++ gcmkONERROR(gckOS_QueryOption(Os, "mmu", (gctUINT32_PTR)&hardware->enableMMU));
++
++ hardware->minFscaleValue = 1;
++
++ /* Return pointer to the gckHARDWARE object. */
++ *Hardware = hardware;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE));
++
++ if (hardware->globalSemaphore != gcvNULL)
++ {
++ /* Destroy the global semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Os,
++ hardware->globalSemaphore));
++ }
++
++ if (hardware->powerMutex != gcvNULL)
++ {
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware->pendingEvent != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pendingEvent));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Destroy
++**
++** Destroy a gckHARDWARE object.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Destroy the power semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex));
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pendingEvent));
++
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Hardware->os,
++ Hardware->functionBytes,
++ Hardware->functionPhysical,
++ Hardware->functionLogical
++ ));
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_GetType
++**
++** Get the hardware type.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gceHARDWARE_TYPE * Type
++** Pointer to a variable that receives the type of hardware object.
++*/
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkVERIFY_ARGUMENT(Type != gcvNULL);
++
++ *Type = Hardware->type;
++
++ gcmkFOOTER_ARG("*Type=%d", *Type);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_InitializeHardware
++**
++** Initialize the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++ gctUINT32 chipRev;
++ gctUINT32 control;
++ gctUINT32 data;
++ gctUINT32 regPMC = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the chip revision register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00024,
++ &chipRev));
++
++ if (chipRev != Hardware->identity.chipRevision)
++ {
++ /* Chip is not there! */
++ gcmkONERROR(gcvSTATUS_CONTEXT_LOSSED);
++ }
++
++ /* Disable isolate GPU bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)))));
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ /* Reset memory counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ ~0U));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++
++ /* Get the system's physical base address. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Program the base addresses. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0041C,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00418,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00428,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00420,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00424,
++ baseAddress));
++
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ /* Enable clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ if ((Hardware->identity.chipRevision == 0x4301)
++ || (Hardware->identity.chipRevision == 0x4302)
++ )
++ {
++ /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2
++ ** revisions. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++
++#if gcdENABLE_3D
++ /* Disable PE clock gating on revs < 5.0 when HZ is present without a
++ ** bug fix. */
++ if ((Hardware->identity.chipRevision < 0x5000)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HZ)
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))))
++ )
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable PE clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ }
++
++#endif
++ }
++
++ if (Hardware->identity.chipModel == gcv4000 &&
++ ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)))));
++ }
++
++ if (Hardware->identity.chipModel == gcv1000 &&
++ (Hardware->identity.chipRevision == 0x5039 ||
++ Hardware->identity.chipRevision == 0x5040))
++ {
++ gctUINT32 pulseEater;
++
++ pulseEater = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (pulseEater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)))));
++ }
++
++ if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvSTATUS_FALSE)
++ || (Hardware->identity.chipRevision < 0x5422)
++ )
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv2000, 0x5108))
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ &data));
++
++ /* Set FE bus to one, TX bus to zero */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ data));
++ }
++
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Hardware,
++ Hardware->kernel->mmu->pageTableLogical));
++
++ if (Hardware->identity.chipModel >= gcv400
++ && Hardware->identity.chipModel != gcv420)
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable PA clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++
++    /* Limit 2D outstanding requests. */
++ if (_IsHardwareMatch(Hardware, gcv880, 0x5107))
++ {
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x00010;
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00414, axi_ot));
++ }
++
++ if (Hardware->identity.chip2DControl & 0xFF)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->identity.chip2DControl & 0xFF) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv1000, 0x5035))
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ /* Disable HZ-L2. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv4000, 0x5222))
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable TX clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv880, 0x5106))
++ {
++ Hardware->kernel->timeOut = 140 * 1000;
++ }
++
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable RA HZ clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable RA EZ clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++
++ if (regPMC != 0)
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ regPMC));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv2000, 0x5108)
++ || _IsHardwareMatch(Hardware, gcv320, 0x5007)
++ || _IsHardwareMatch(Hardware, gcv880, 0x5106)
++ || _IsHardwareMatch(Hardware, gcv400, 0x4645)
++ )
++ {
++        /* Update GPU AXI cache attribute. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00008,
++ 0x00002200));
++ }
++
++
++ if ((Hardware->identity.chipRevision > 0x5420)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D))
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ /* Disable internal DFS. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++ }
++
++#if gcdDEBUG_MODULE_CLOCK_GATING
++ _ConfigureModuleLevelClockGating(Hardware);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
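++
++/* Illustrative note (reader aid; the reduction below is an informal
++** interpretation of the expanded field macros used throughout this file):
++** each long register expression above is a pre-expanded read-modify-write of
++** a single bit field.  For a field occupying bits end:start of a 32-bit
++** register it reduces to:
++**
++**     width = end - start + 1;
++**     mask  = (width == 32) ? ~0 : ~(~0 << width);
++**     data  = (data & ~(mask << start)) | ((value & mask) << start);
++**
++** For example, the 17:17 expression above writes 1 into bit 17 of regPMC,
++** which the accompanying comment describes as disabling RA HZ clock gating.
++*/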
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++**          Pointer to a variable that will hold the alignment requirement of
++**          the internal video memory.  This pointer cannot be gcvNULL if
++**          'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++**          Pointer to a variable that will hold the alignment requirement of
++**          the external video memory.  This pointer cannot be gcvNULL if
++**          'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++**          gcvNULL, the number of horizontal pixels per tile will not be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++**          gcvNULL, the number of vertical pixels per tile will not be returned.
++*/
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x "
++ "*InternalAlignment=0x%08x *ExternalSize=%lu "
++                   "*ExternalBaseAddress=0x%08x *ExternalAlignment=0x%08x "
++ "*HorizontalTileSize=%u *VerticalTileSize=%u",
++ gcmOPT_VALUE(InternalSize),
++ gcmOPT_VALUE(InternalBaseAddress),
++ gcmOPT_VALUE(InternalAlignment),
++ gcmOPT_VALUE(ExternalSize),
++ gcmOPT_VALUE(ExternalBaseAddress),
++ gcmOPT_VALUE(ExternalAlignment),
++ gcmOPT_VALUE(HorizontalTileSize),
++ gcmOPT_VALUE(VerticalTileSize));
++ return gcvSTATUS_OK;
++}
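++
++/* Usage sketch (illustrative only; 'hardware' is an assumed, already
++** constructed gckHARDWARE object): a caller that only needs the tile
++** dimensions can pass gcvNULL for the other outputs, e.g.:
++**
++**     gctUINT32 tileX, tileY;
++**     gcmkONERROR(gckHARDWARE_QueryMemory(hardware,
++**                                         gcvNULL, gcvNULL, gcvNULL,
++**                                         gcvNULL, gcvNULL, gcvNULL,
++**                                         &tileX, &tileY));
++*/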
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++** Pointer to the identity structure.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gctUINT32 features;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Identity != gcvNULL);
++
++ /* Return chip model and revision. */
++ Identity->chipModel = Hardware->identity.chipModel;
++ Identity->chipRevision = Hardware->identity.chipRevision;
++
++ /* Return feature set. */
++ features = Hardware->identity.chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Override fast clear by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ if ((((((gctUINT32) (features)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ))
++ {
++ /* Override compression by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (Hardware->allowCompression) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 through GC500.2 and GC300,
++ ** since they did not have this bit. */
++ if (((Hardware->identity.chipModel == gcv500) && (Hardware->identity.chipRevision <= 2))
++ || (Hardware->identity.chipModel == gcv300)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ Identity->chipFeatures = features;
++
++ /* Return minor features. */
++ Identity->chipMinorFeatures = Hardware->identity.chipMinorFeatures;
++ Identity->chipMinorFeatures1 = Hardware->identity.chipMinorFeatures1;
++ Identity->chipMinorFeatures2 = Hardware->identity.chipMinorFeatures2;
++ Identity->chipMinorFeatures3 = Hardware->identity.chipMinorFeatures3;
++ Identity->chipMinorFeatures4 = Hardware->identity.chipMinorFeatures4;
++ Identity->chipMinorFeatures5 = Hardware->identity.chipMinorFeatures5;
++
++ /* Return chip specs. */
++ Identity->streamCount = Hardware->identity.streamCount;
++ Identity->registerMax = Hardware->identity.registerMax;
++ Identity->threadCount = Hardware->identity.threadCount;
++ Identity->shaderCoreCount = Hardware->identity.shaderCoreCount;
++ Identity->vertexCacheSize = Hardware->identity.vertexCacheSize;
++ Identity->vertexOutputBufferSize = Hardware->identity.vertexOutputBufferSize;
++ Identity->pixelPipes = Hardware->identity.pixelPipes;
++ Identity->instructionCount = Hardware->identity.instructionCount;
++ Identity->numConstants = Hardware->identity.numConstants;
++ Identity->bufferSize = Hardware->identity.bufferSize;
++ Identity->varyingsCount = Hardware->identity.varyingsCount;
++ Identity->superTileMode = Hardware->identity.superTileMode;
++#if gcdMULTI_GPU
++ Identity->gpuCoreCount = Hardware->identity.gpuCoreCount;
++#endif
++ Identity->chip2DControl = Hardware->identity.chip2DControl;
++
++ Identity->productID = Hardware->identity.productID;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++    gcmkHEADER_ARG("Hardware=0x%x Address=0x%08x", Hardware, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x1:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) );
++ }
++ else
++ {
++ *Pool = gcvPOOL_SYSTEM;
++ *Offset = Address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset);
++ return gcvSTATUS_OK;
++}
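++
++/* Worked example (illustrative, old-MMU path only): bit 31 of the hardware
++** address selects the pool and bits 30:0 carry the offset, so 0x80001000
++** splits into *Pool = gcvPOOL_VIRTUAL, *Offset = 0x00001000, while
++** 0x00001000 splits into *Pool = gcvPOOL_SYSTEM, *Offset = 0x00001000.
++*/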
++
++/*******************************************************************************
++**
++** gckHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Hardware address of command buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes for the prefetch unit (until after the first LINK).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Bytes=%lu",
++ Hardware, Address, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Enable all events. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U));
++
++ /* Write address register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, Address));
++
++ /* Build control register. */
++ control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ /* Set big endian */
++ if (Hardware->bigEndian)
++ {
++ control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20)));
++ }
++
++ /* Write control register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Started command buffer @ 0x%08x",
++ Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
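++
++/* Usage sketch (illustrative only; 'hardware', 'address' and 'size' are
++** assumed to describe an already initialized command buffer): the front end
++** is kicked off by handing it the buffer address and prefetch size in bytes:
++**
++**     gcmkONERROR(gckHARDWARE_Execute(hardware, address, size));
++**
++** Internally the byte count is rounded up to 64-bit units, (Bytes + 7) >> 3,
++** before being written to the control register.
++*/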
++
++/*******************************************************************************
++**
++** gckHARDWARE_WaitLink
++**
++** Append a WAIT/LINK command sequence at the specified location in the command
++** queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT/LINK command sequence at or gcvNULL just to query the size of the
++** WAIT/LINK command sequence.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT/LINK command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitOffset
++** Pointer to a variable that will receive the offset of the WAIT command
++**          from the specified logical pointer.
++** If 'WaitOffset' is gcvNULL nothing will be returned.
++**
++** gctSIZE_T * WaitSize
++** Pointer to a variable that will receive the number of bytes used by
++**          the WAIT command.  If 'WaitSize' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctUINT32 * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctUINT32 * WaitSize
++ )
++{
++ static const gctUINT waitCount = 200;
++
++ gceSTATUS status;
++ gctUINT32 address;
++ gctUINT32_PTR logical;
++ gctUINT32 bytes;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu",
++ Hardware, Logical, Offset, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL));
++
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ bytes = gcmALIGN(Offset + 40, 8) - Offset;
++#else
++ /* Compute number of bytes required. */
++ bytes = gcmALIGN(Offset + 16, 8) - Offset;
++#endif
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (logical != gcvNULL)
++ {
++ /* Not enough space? */
++ if (*Bytes < bytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, gcvFALSE, &address));
++
++ /* Store the WAIT/LINK address. */
++ Hardware->lastWaitLink = address;
++
++ /* Append WAIT(count). */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (waitCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | gcvCORE_3D_0_MASK;
++
++ logical[3] = 0;
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[5] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ logical[6] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | gcvCORE_3D_ALL_MASK;
++
++ logical[7] = 0;
++
++ /* Append LINK(2, address). */
++ logical[8] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[9] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", address + 8, logical[3]);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 16, address, bytes
++ );
++#else
++
++ /* Append LINK(2, address). */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 8, address, bytes
++ );
++#endif
++ if (WaitOffset != gcvNULL)
++ {
++ /* Return the offset pointer to WAIT command. */
++ *WaitOffset = 0;
++ }
++
++ if (WaitSize != gcvNULL)
++ {
++ /* Return number of bytes used by the WAIT command. */
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ *WaitSize = 32;
++#else
++ *WaitSize = 8;
++#endif
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT/LINK command
++ ** sequence. */
++ *Bytes = bytes;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu",
++ gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset),
++ gcmOPT_VALUE(WaitSize));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
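++
++/* Usage sketch (illustrative only; 'hardware', 'logical' and 'offset' are
++** assumed): like the other command emitters below, this function supports a
++** two-pass pattern -- query the required size with a gcvNULL logical pointer,
++** then emit into a buffer of at least that size:
++**
++**     gctUINT32 bytes, waitOffset, waitSize;
++**     gcmkONERROR(gckHARDWARE_WaitLink(hardware, gcvNULL, offset,
++**                                      &bytes, gcvNULL, gcvNULL));
++**     gcmkONERROR(gckHARDWARE_WaitLink(hardware, logical, offset,
++**                                      &bytes, &waitOffset, &waitSize));
++*/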
++
++/*******************************************************************************
++**
++** gckHARDWARE_End
++**
++** Append an END command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** END command at or gcvNULL just to query the size of the END command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append END. */
++ logical[0] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical);
++
++ /* Make sure the CPU writes out the data to memory. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, gcvFALSE, &address));
++
++ Hardware->lastEnd = address;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckHARDWARE_ChipEnable(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gceCORE_3D_MASK ChipEnable,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x ChipEnable=0x%x *Bytes=%lu",
++ Hardware, Logical, ChipEnable, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append CHIPENABLE. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ChipEnable;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: CHIPENABLE 0x%x", Logical, ChipEnable);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CHIPENABLE command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_Nop
++**
++** Append a NOP command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** NOP command at or gcvNULL just to query the size of the NOP command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the NOP command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append NOP. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the NOP command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Event
++**
++** Append an EVENT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the EVENT command at or gcvNULL just to query the size of the EVENT
++** command.
++**
++** gctUINT8 Event
++** Event ID to program.
++**
++** gceKERNEL_WHERE FromWhere
++** Location of the pipe to send the event.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT size;
++ gctUINT32 destination = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu",
++ Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++ gcmkVERIFY_ARGUMENT(Event < 32);
++
++#if gcdMULTI_GPU
++ if (FromWhere == gcvKERNEL_COMMAND) FromWhere = gcvKERNEL_PIXEL;
++#endif
++
++ /* Determine the size of the command. */
++
++ size = (Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL))
++ ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */
++ : 8;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < size)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ switch (FromWhere)
++ {
++ case gcvKERNEL_COMMAND:
++ /* From command processor. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ break;
++
++ case gcvKERNEL_PIXEL:
++ /* From pixel engine. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++        /* Append EVENT(Event, destination). */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ /* Make sure the event ID gets written out before GPU can access it. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 phys;
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: EVENT %d", phys, Event);
++ }
++#endif
++
++ /* Append the extra states. These are needed for the chips that do not
++ ** support back-to-back events due to the async interface. The extra
++ ** states add the necessary delay to ensure that event IDs do not
++ ** collide. */
++ if (size > 8)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++ logical[3] = 0;
++ logical[4] = 0;
++ logical[5] = 0;
++ logical[6] = 0;
++ logical[7] = 0;
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ if (Event < gcmCOUNTOF(Hardware->kernel->eventObj->queues))
++ {
++ gckOS_AtomSetMask(Hardware->pendingEvent, 1 << Event);
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = size;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
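++
++/* Usage sketch (illustrative only; 'hardware', 'logical' and 'available' are
++** assumed, and 29 is just an example event ID below 32): schedule an event
++** from the pixel engine:
++**
++**     gctUINT32 bytes = available;
++**     gcmkONERROR(gckHARDWARE_Event(hardware, logical, 29,
++**                                   gcvKERNEL_PIXEL, &bytes));
++*/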
++
++/*******************************************************************************
++**
++** gckHARDWARE_PipeSelect
++**
++** Append a PIPESELECT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the PIPESELECT command at or gcvNULL just to query the size of the
++** PIPESELECT command.
++**
++** gcePIPE_SELECT Pipe
++** Pipe value to select.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the PIPESELECT command.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu",
++ Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Append a PipeSelect. */
++ if (Logical != gcvNULL)
++ {
++ gctUINT32 flush, stall;
++
++ if (*Bytes < 32)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ flush = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++
++ stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1]
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ /* LoadState(AQSempahore, 1), stall. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: SEMAPHORE 0x%x", logical + 2, stall);
++
++ /* Stall, stall. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ logical[5] = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: STALL 0x%x", logical + 4, stall);
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ logical[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[7] = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: PIPE %d", logical + 6, Pipe);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the PIPESELECT command. */
++ *Bytes = 32;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Link
++**
++** Append a LINK command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the LINK command at or gcvNULL just to query the size of the LINK
++** command.
++**
++** gctUINT32 FetchAddress
++** Hardware address of destination of LINK.
++**
++** gctSIZE_T FetchSize
++** Number of bytes in destination of LINK.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the LINK command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT32 FetchSize,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctUINT32 link;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu "
++ "*Bytes=%lu",
++ Hardware, Logical, FetchAddress, FetchSize,
++ gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical + 1, FetchAddress));
++
++ /* Make sure the address got written before the LINK command. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++ /* Compute number of 64-byte aligned bytes to fetch. */
++ bytes = gcmALIGN(FetchAddress + FetchSize, 64) - FetchAddress;
++
++ /* Append LINK(bytes / 8), FetchAddress. */
++ link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical, link));
++
++ /* Memory barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical));
++
++#if gcdLINK_QUEUE_SIZE && !gcdPROCESS_ADDRESS_SPACE
++ if ((Hardware->kernel->virtualCommandBuffer)
++ && (Hardware->kernel->stuckDump > 2)
++ )
++ {
++ gctBOOL in;
++
++ gcmkVERIFY_OK(gckCOMMAND_AddressInKernelCommandBuffer(
++ Hardware->kernel->command, FetchAddress, &in));
++
++ if (in == gcvFALSE)
++ {
++ /* Record user command buffer and context buffer link
++ ** information for stuck dump.
++ **/
++ gckLINKQUEUE_Enqueue(
++ &Hardware->linkQueue, FetchAddress, FetchAddress + (gctUINT)bytes);
++ }
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
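++
++/* Usage sketch (illustrative only; 'hardware', 'logical', 'fetchAddress' and
++** 'fetchSize' are assumed): patch a LINK into an existing location so the
++** front end jumps to another buffer.  Note the prefetch size is rounded up to
++** a 64-byte boundary relative to FetchAddress before being encoded:
++**
++**     gctUINT32 bytes = 8;
++**     gcmkONERROR(gckHARDWARE_Link(hardware, logical,
++**                                  fetchAddress, fetchSize, &bytes));
++*/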
++
++/*******************************************************************************
++**
++** gckHARDWARE_UpdateQueueTail
++**
++** Update the tail of the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the start of the command queue.
++**
++** gctUINT32 Offset
++** Offset into the command queue of the tail (last command).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x",
++ Hardware, Logical, Offset);
++
++ /* Verify the hardware. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Force a barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ /* Notify gckKERNEL object of change. */
++#if gcdMULTI_GPU
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ 0,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++#else
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ gcmkONERROR(gcvSTATUS_DEVICE);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctBOOL InUserSpace
++**          gcvTRUE if the memory is in user space.
++**
++**  OUTPUT:
++**
++**      gctUINT32 * Address
++**          Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d",
++ Hardware, Logical, InUserSpace);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Convert logical address into a physical address. */
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_UserLogicalToPhysical(Hardware->os, Logical, &address));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++ }
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
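++
++/* Usage sketch (illustrative only; 'hardware' and 'logical' are assumed to
++** refer to kernel-space command buffer memory): convert a CPU pointer into
++** the address the front end expects.  On MMU v0 hardware the OS base address
++** is subtracted and bit 31 is cleared to mark the result as a system-pool
++** address:
++**
++**     gctUINT32 gpuAddress;
++**     gcmkONERROR(gckHARDWARE_ConvertLogical(hardware, logical,
++**                                            gcvFALSE, &gpuAddress));
++*/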
++
++/*******************************************************************************
++**
++** gckHARDWARE_Interrupt
++**
++** Process an interrupt.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctBOOL InterruptValid
++**          If gcvTRUE, this function will read the interrupt acknowledge
++**          register, store the data, and return whether or not the interrupt
++**          is ours.  If gcvFALSE, this function will read the interrupt
++**          acknowledge register and combine it with any stored value to handle
++**          the event notifications.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctBOOL InterruptValid
++ )
++{
++ gckEVENT eventObj;
++ gctUINT32 data = 0;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x InterruptValid=%d", Hardware, InterruptValid);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Extract gckEVENT object. */
++ eventObj = Hardware->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ if (InterruptValid)
++ {
++ /* Read AQIntrAcknowledge register. */
++#if gcdMULTI_GPU
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ CoreId,
++ 0x00010,
++ &data));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++ }
++#else
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++#endif
++
++ if (data == 0)
++ {
++ /* Not our interrupt. */
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ }
++ else
++ {
++
++#if gcdINTERRUPT_STATISTIC
++ gckOS_AtomClearMask(Hardware->pendingEvent, data);
++#endif
++
++ /* Inform gckEVENT of the interrupt. */
++ status = gckEVENT_Interrupt(eventObj,
++#if gcdMULTI_GPU
++ CoreId,
++#endif
++ data);
++ }
++ }
++ else
++ {
++ /* Handle events. */
++ status = gckEVENT_Notify(eventObj, 0);
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
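++
++/* Usage sketch (illustrative only, single-GPU build assumed): the interrupt
++** service routine calls this with InterruptValid = gcvTRUE to acknowledge and
++** latch the triggered event bits; the deferred handler then calls it again
++** with gcvFALSE to dispatch the queued notifications:
++**
++**     status = gckHARDWARE_Interrupt(hardware, gcvTRUE);
++**     if (status == gcvSTATUS_OK)
++**     {
++**         wake the deferred handler, which later runs:
++**             gckHARDWARE_Interrupt(hardware, gcvFALSE);
++**     }
++*/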
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryCommandBuffer
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++**      gctUINT32 * Alignment
++**          Pointer to a variable receiving the alignment for each command.
++**
++**      gctUINT32 * ReservedHead
++**          Pointer to a variable receiving the number of reserved bytes at the
++**          head of each command buffer.
++**
++**      gctUINT32 * ReservedTail
++**          Pointer to a variable receiving the number of bytes reserved at the
++**          tail of each command buffer.
++*/
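++/* Callers may pass gcvNULL for any output they do not need; a minimal sketch
++** (variable names are illustrative only):
++**
++**     gctUINT32 alignment;
++**     gcmkONERROR(gckHARDWARE_QueryCommandBuffer(hardware, &alignment, gcvNULL, gcvNULL));
++*/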
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Alignment,
++ OUT gctUINT32 * ReservedHead,
++ OUT gctUINT32 * ReservedTail
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Alignment != gcvNULL)
++ {
++ /* Align every 8 bytes. */
++ *Alignment = 8;
++ }
++
++ if (ReservedHead != gcvNULL)
++ {
++ /* Reserve space for SelectPipe(). */
++ *ReservedHead = 32;
++ }
++
++ if (ReservedTail != gcvNULL)
++ {
++ /* Reserve space for Link(). */
++ *ReservedTail = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu",
++ gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead),
++ gcmOPT_VALUE(ReservedTail));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QuerySystemMemory
++**
++**  Query the size and base address of the system memory.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++**          Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = 1U << 31;
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
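++        /* Note: decoded from the shifts and masks in the expression, the
++           expanded macro below only forces bit 31 to zero and sets no other
++           bits, so it evaluates to 0; the system memory base address is
++           reported as 0. */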
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu",
++ gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress));
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_3D
++/*******************************************************************************
++**
++** gckHARDWARE_QueryShaderCaps
++**
++** Query the shader capabilities.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctUINT * VertexUniforms
++** Pointer to a variable receiving the number of uniforms in the vertex
++** shader.
++**
++** gctUINT * FragmentUniforms
++** Pointer to a variable receiving the number of uniforms in the
++** fragment shader.
++**
++** gctBOOL * UnifiedUnforms
++**          Pointer to a variable receiving whether the uniforms are unified.
++*/
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctBOOL * UnifiedUnforms
++ )
++{
++ gctBOOL unifiedConst;
++ gctUINT32 vsConstMax;
++ gctUINT32 psConstMax;
++ gctUINT32 vsConstBase;
++ gctUINT32 psConstBase;
++ gctUINT32 ConstMax;
++
++ gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x "
++ "FragmentUniforms=0x%x UnifiedUnforms=0x%x",
++ Hardware, VertexUniforms,
++ FragmentUniforms, UnifiedUnforms);
++
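++    /* Derive the constant (uniform) register split between the vertex and
++       fragment shaders from the probed chip identity. */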
++    {
++        if (Hardware->identity.numConstants > 256)
++        {
++            unifiedConst = gcvTRUE;
++            vsConstBase  = 0xC000;
++            psConstBase  = 0xC000;
++            ConstMax     = Hardware->identity.numConstants;
++            vsConstMax   = 256;
++            psConstMax   = ConstMax - vsConstMax;
++        }
++        else if (Hardware->identity.numConstants == 256)
++        {
++            if (Hardware->identity.chipModel == gcv2000 && Hardware->identity.chipRevision == 0x5118)
++            {
++                unifiedConst = gcvFALSE;
++                vsConstBase  = 0x1400;
++                psConstBase  = 0x1C00;
++                vsConstMax   = 256;
++                psConstMax   = 64;
++                ConstMax     = 320;
++            }
++            else
++            {
++                unifiedConst = gcvFALSE;
++                vsConstBase  = 0x1400;
++                psConstBase  = 0x1C00;
++                vsConstMax   = 256;
++                psConstMax   = 256;
++                ConstMax     = 512;
++            }
++        }
++        else
++        {
++            unifiedConst = gcvFALSE;
++            vsConstBase  = 0x1400;
++            psConstBase  = 0x1C00;
++            vsConstMax   = 168;
++            psConstMax   = 64;
++            ConstMax     = 232;
++        }
++    };
++
++ if (VertexUniforms != gcvNULL)
++ {
++ /* Return the vs shader const count. */
++ *VertexUniforms = vsConstMax;
++ }
++
++ if (FragmentUniforms != gcvNULL)
++ {
++ /* Return the ps shader const count. */
++ *FragmentUniforms = psConstMax;
++ }
++
++ if (UnifiedUnforms != gcvNULL)
++ {
++        /* Return whether the uniforms are unified. */
++ *UnifiedUnforms = unifiedConst;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++ gctUINT32 idle;
++ gctUINT32 timer = 0, delay = 1;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert the logical address into physical address. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setting page table to 0x%08X",
++ address);
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00400,
++ address));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00410,
++ address));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00404,
++ address));
++
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00408,
++ address));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0040C,
++ address));
++ }
++ else if (Hardware->enableMMU == gcvTRUE)
++ {
++ /* Execute prepared command sequence. */
++ gcmkONERROR(gckHARDWARE_Execute(
++ Hardware,
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].address,
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].bytes
++ ));
++
++ /* Wait until MMU configure finishes. */
++ do
++ {
++ gckOS_Delay(Hardware->os, delay);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Hardware->os,
++ Hardware->core,
++ 0x00004,
++ &idle));
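++            /* The while() condition below tests bit 0 of register 0x00004
++               (decoded from the expanded mask); the loop exits once that bit
++               reads 1, i.e. once the MMU configuration sequence has finished. */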
++
++ timer += delay;
++ delay *= 2;
++
++#if gcdGPU_TIMEOUT
++ if (timer >= Hardware->kernel->timeOut)
++ {
++                /* Even if the hardware did not finish in time, let software
++                ** continue to avoid getting stuck. Software will time out
++                ** again and try to recover the GPU at the next timeout.
++                */
++ gcmkONERROR(gcvSTATUS_DEVICE);
++ }
++#endif
++ }
++ while (!(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ));
++
++ /* Enable MMU. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os,
++ Hardware->core,
++ 0x0018C,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (gcvTRUE) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctUINT32 bufferSize;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 flushSize;
++ gctUINT32 count;
++ gctUINT32 physical;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Flush the memory controller. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, 8, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
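++        /* As decoded from the bit positions in the expanded macros below:
++           buffer[0] is a LoadState style FE command (opcode 0x01 in bits
++           [31:27], state address 0x0E04 in bits [15:0], count 1 in bits
++           [25:16]) and buffer[1] is its payload with flush bits 0..4 set. */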
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(gckCOMMAND_Execute(command, 8));
++ }
++ else
++ {
++ flushSize = 16 * 4;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, flushSize, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ count = ((gctUINT)bufferSize - flushSize + 7) >> 3;
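++        /* 'count' is the number of 64-bit slots left in the reserved area
++           after this 16-word flush sequence; it is used below as the prefetch
++           size of the final LINK (buffer[14]/buffer[15]), which appears to
++           jump to the slot right after the flush sequence
++           (physical + flushSize). */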
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(command->os, buffer, &physical));
++
++ /* Flush cache. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[7]
++ = physical + 8 * gcmSIZEOF(gctUINT32);
++
++ /* Flush MMU cache. */
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9]
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[11]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[12]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[13]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[14]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[15]
++ = physical + flushSize;
++
++ gcmkONERROR(gckCOMMAND_Execute(command, flushSize));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetMMUStates(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 config, address;
++ gctUINT32_PTR buffer;
++ gctBOOL ace;
++ gctUINT32 reserveBytes = 16 + 4 * 4;
++
++ gctBOOL config2D;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Hardware->mmuVersion != 0);
++
++ ace = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_ACE);
++
++ if (ace)
++ {
++ reserveBytes += 8;
++ }
++
++ config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D);
++
++ if (config2D)
++ {
++ reserveBytes +=
++ /* Pipe Select. */
++ 4 * 4
++ /* Configure MMU States. */
++ + 4 * 4
++ /* Semaphore stall */
++ + 4 * 8;
++ }
++
++ /* Convert logical address into physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &config));
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &address));
++
++ if (address & 0x3F)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ switch (Mode)
++ {
++ case gcvMMU_MODE_1K:
++ if (config & 0x3FF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ case gcvMMU_MODE_4K:
++ if (config & 0xFFF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (Logical != gcvNULL)
++ {
++ buffer = Logical;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = config;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = address;
++
++ if (ace)
++ {
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0068) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0;
++ }
++
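++        /* The single-line do { ... } while(0) block below is an expanded
++           semaphore/stall macro. Decoded from its bit fields it emits four
++           words: a LoadState of state address 0x0E02 (count 1), a value with
++           0x01 in bits [4:0] and 0x07 in bits [12:8], a command with opcode
++           0x09 (stall) in bits [31:27], and the same 0x01/0x07 value again.
++           This matches the "arm the PE-FE semaphore, then STALL FE until PE
++           is done" pattern written out word by word in gckHARDWARE_FlushMMU
++           above; the same expansion recurs twice further down in this
++           function. */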
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++
++ if (config2D)
++ {
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0x1;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = config;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = address;
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0x0;
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++ }
++
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ *Bytes = reserveBytes;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++/*******************************************************************************
++**
++** gckHARDWARE_ConfigMMU
++**
++**  Append an MMU configuration command sequence at the specified location in
++**  the command queue. That command sequence consists of the MMU configuration,
++**  a LINK and a WAIT/LINK. The LINK is fetched and parsed with the new MMU
++**  configuration.
++**
++**  If the MMU configuration has not changed between commits, change the last
++**  WAIT/LINK to link to ENTRY.
++**
++** -+-----------+-----------+-----------------------------------------
++** | WAIT/LINK | WAIT/LINK |
++** -+-----------+-----------+-----------------------------------------
++** | /|\
++** \|/ |
++** +--------------------+
++** | ENTRY | ... | LINK |
++** +--------------------+
++**
++**  If the MMU configuration has changed between commits, change the last
++**  WAIT/LINK to link to the MMU CONFIGURATION command sequence. That sequence
++**  ends with an EVENT and an END; when the interrupt handler receives the
++**  event, it restarts the FE at ENTRY to continue command buffer execution.
++**
++** -+-----------+-------------------+---------+---------+-----------+--
++** | WAIT/LINK | MMU CONFIGURATION | EVENT | END | WAIT/LINK |
++** -+-----------+-------------------+---------+---------+-----------+--
++** | /|\ /|\
++** +-------------+ |
++** +--------------------+
++** | ENTRY | ... | LINK |
++** +--------------------+
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** command sequence at or gcvNULL just to query the size of the
++** command sequence.
++**
++** gctPOINTER MtlbLogical
++** Pointer to the current Master TLB.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++**      gctSIZE_T * WaitLinkOffset
++**          Pointer to a variable that will receive the offset of the WAIT/LINK
++**          command from the specified logical pointer.
++**          If 'WaitLinkOffset' is gcvNULL, nothing will be returned.
++**
++** gctSIZE_T * WaitLinkBytes
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT command.
++** If 'WaitLinkBytes' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_ConfigMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER MtlbLogical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctSIZE_T * WaitLinkOffset,
++ OUT gctSIZE_T * WaitLinkBytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes, bytesAligned;
++ gctUINT32 config;
++ gctUINT32_PTR buffer = (gctUINT32_PTR) Logical;
++ gctUINT32 physical;
++ gctUINT32 event;
++
++ gcmkHEADER_ARG("Hardware=0x%08X Logical=0x%08x MtlbLogical=0x%08X",
++ Hardware, Logical, MtlbLogical);
++
++ bytes
++ /* Flush cache states. */
++ = 18 * 4
++ /* MMU configuration states. */
++ + 6 * 4
++ /* EVENT. */
++ + 2 * 4
++ /* END. */
++ + 2 * 4
++ /* WAIT/LINK. */
++ + 4 * 4;
++
++ /* Compute number of bytes required. */
++ bytesAligned = gcmALIGN(Offset + bytes, 8) - Offset;
++
++ if (buffer != gcvNULL)
++ {
++ if (MtlbLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get physical address of this command buffer segment. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, buffer, &physical));
++
++ /* Get physical address of Master TLB. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, MtlbLogical, &config));
++
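++        /* The expanded expression below forces bit 0 of the MTLB physical
++           address to 0 and sets bit 4 to 1. Bit 0 is the same field that
++           gckHARDWARE_SetMMUStates uses to select the page mode (0 = 4K,
++           1 = 1K); the meaning of bit 4 is not spelled out here. */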
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ /* Flush cache. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Flush tile status cache. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = physical + 10 * gcmSIZEOF(gctUINT32);
++
++ /* Configure MMU. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = physical + 18 * 4;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = config;
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Event 29. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ event = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ event = ((((gctUINT32) (event)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (29) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ *buffer++
++ = event;
++
++ /* Append END. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ *Bytes = bytesAligned;
++ }
++
++ if (WaitLinkOffset != gcvNULL)
++ {
++ *WaitLinkOffset = bytes - 4 * 4;
++ }
++
++ if (WaitLinkBytes != gcvNULL)
++ {
++#if gcdMULTI_GPU
++ *WaitLinkBytes = 40;
++#else
++ *WaitLinkBytes = 4 * 4;
++#endif
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)));
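++
++ /* Note on the encoding above: bit 31 is set to tag the address as
++ ** virtual, and bits 30:0 hold (Index << 12) | Offset, i.e. the page
++ ** index (apparently 4 KB pages) combined with the byte offset. */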
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle = 0;
++ gctINT retry, poll, pollCount;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++
++ /* If we have to wait, try 100 polls per millisecond. */
++ pollCount = Wait ? 100 : 1;
++
++ /* At most, try for 1 second. */
++ for (retry = 0; retry < 1000; ++retry)
++ {
++ /* If we have to wait, try 100 polls per millisecond. */
++ for (poll = pollCount; poll > 0; --poll)
++ {
++ /* Read register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++
++ /* See if we have to wait for FE idle. */
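++ /* (The FE is considered idle only when the idle register reports idle
++ ** and the FE address sits just past the last END, i.e. lastEnd + 8,
++ ** presumably one 64-bit command slot beyond it.) */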
++ if (_IsGPUIdle(idle)
++ && (address == Hardware->lastEnd + 8)
++ )
++ {
++ /* FE is idle. */
++ break;
++ }
++ }
++
++ /* Check if we need to wait for FE and FE is busy. */
++ if (Wait && !_IsGPUIdle(idle))
++ {
++ /* Wait a little. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "%s: Waiting for idle: 0x%08X",
++ __FUNCTION__, idle);
++
++ gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ /* Return idle to caller. */
++ *Data = idle;
++
++#if defined(EMULATOR)
++ /* Wait a little while until CModel FE gets END.
++ * END is supposed to be appended by caller.
++ */
++ gckOS_Delay(gcvNULL, 100);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32 pipe;
++ gctUINT32 flush = 0;
++ gctBOOL flushTileStatus;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++ gctUINT32 reserveBytes
++ /* Semaphore/Stall */
++ = 4 * gcmSIZEOF(gctUINT32);
++
++ gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Flush, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get current pipe. */
++ pipe = Hardware->kernel->command->pipeSelect;
++
++ /* Flush tile status cache. */
++ flushTileStatus = Flush & gcvFLUSH_TILE_STATUS;
++
++ /* Flush 3D color cache. */
++ if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ /* Flush 3D depth cache. */
++ if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Flush 3D texture cache. */
++ if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ }
++
++ /* Flush 2D cache. */
++ if ((Flush & gcvFLUSH_2D) && (pipe == 0x1))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ }
++
++#if gcdMULTI_GPU
++ /* Flush L2 cache. */
++ if ((Flush & gcvFLUSH_L2) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++#endif
++
++ /* Determine reserve bytes. */
++ if (flush)
++ {
++ reserveBytes += 2 * gcmSIZEOF(gctUINT32);
++ }
++
++ if (flushTileStatus)
++ {
++ reserveBytes += 2 * gcmSIZEOF(gctUINT32);
++ }
++
++ /* See if there is a valid flush. */
++ if ((flush == 0) && (flushTileStatus == gcvFALSE))
++ {
++ if (Bytes != gcvNULL)
++ {
++ /* No bytes required. */
++ *Bytes = 0;
++ }
++ }
++
++ else
++ {
++ /* Copy to command queue. */
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < reserveBytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ if (flush)
++ {
++ /* Append LOAD_STATE to AQFlush. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *logical++
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical - 1, flush);
++ }
++
++ if (flushTileStatus)
++ {
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH TILE STATUS 0x%x", logical - 1, logical[-1]);
++ }
++
++ /* Semaphore. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x05 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
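++
++ /* The SEMAPHORE/STALL pair above makes the FE wait for the PE, so the
++ ** flush emitted in this batch completes before any following commands
++ ** are executed. */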
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* bytes required. */
++ *Bytes = reserveBytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ )
++{
++#if gcdENABLE_3D
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d",
++ Hardware, Enable, Compression);
++
++ /* Only process if fast clear is available. */
++ if ((((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ if (Enable == -1)
++ {
++ /* Determine automatic value for fast clear. */
++ Enable = ((Hardware->identity.chipModel != gcv500)
++ || (Hardware->identity.chipRevision >= 3)
++ ) ? 1 : 0;
++ }
++
++ if (Compression == -1)
++ {
++ /* Determine automatic value for compression. */
++ Compression = Enable
++ & (((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) );
++ }
++
++ /* Read AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug));
++
++ /* Set fast clear bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++ if (
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) ||
++ (Hardware->identity.chipModel >= gcv4000))
++ {
++ /* Set compression bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))) | (((gctUINT32) ((gctUINT32) (Compression == 0) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21)));
++ }
++
++ /* Write back AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ debug));
++
++ /* Store fast clear and compression flags. */
++ Hardware->allowFastClear = Enable;
++ Hardware->allowCompression = Compression;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "FastClear=%d Compression=%d", Enable, Compression);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++}
++gcePOWER_FLAGS;
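++
++/* Each flag above names one discrete step of a power transition; the
++** flags[][] table in gckHARDWARE_SetPowerManagementState below composes
++** them for every (current state, requested state) pair, indexed in the
++** gcvPOWER_ON/OFF/IDLE/SUSPEND order indicated by its row comments. */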
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++static gctCONST_STRING
++_PowerEnum(gceCHIPPOWERSTATE State)
++{
++ const gctCONST_STRING states[] =
++ {
++ gcmSTRING(gcvPOWER_ON),
++ gcmSTRING(gcvPOWER_OFF),
++ gcmSTRING(gcvPOWER_IDLE),
++ gcmSTRING(gcvPOWER_SUSPEND),
++ gcmSTRING(gcvPOWER_SUSPEND_ATPOWERON),
++ gcmSTRING(gcvPOWER_OFF_ATPOWERON),
++ gcmSTRING(gcvPOWER_IDLE_BROADCAST),
++ gcmSTRING(gcvPOWER_SUSPEND_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_RECOVERY),
++ gcmSTRING(gcvPOWER_OFF_TIMEOUT),
++ gcmSTRING(gcvPOWER_ON_AUTO)
++ };
++
++ if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO))
++ {
++ return states[State - gcvPOWER_ON];
++ }
++
++ return "unknown";
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag, clock;
++ gctPOINTER buffer;
++ gctUINT32 bytes, requested;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL broadcast = gcvFALSE;
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++ gctUINT32 process, thread;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL commandStarted = gcvFALSE;
++ gctBOOL isrStarted = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++ gctBOOL global = gcvFALSE;
++ gctBOOL globalAcquired = gcvFALSE;
++ gctBOOL configMmu = gcvFALSE;
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_RELEASE,
++ /* OFF */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
++
++ /* Clocks. */
++ static const gctUINT clocks[4] =
++ {
++ /* gcvPOWER_ON */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_OFF */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_IDLE */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_SUSPEND */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++ };
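++
++ /* The packed values above are written to the clock control register at
++ ** offset 0x00000 later in this function: bits 8:2 carry the frequency
++ ** scale value, bit 9 strobes the "load frequency scaler" operation, and
++ ** the low two bits appear to gate the clocks (they are set only in the
++ ** OFF and SUSPEND entries). */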
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d(%s)",
++ State, _PowerEnum(State));
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ if (Hardware->powerManagement == gcvFALSE
++ && State != gcvPOWER_ON
++ )
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State != gcvPOWER_ON)
++ {
++ /* Called from IST,
++ ** so waiting here will cause deadlock,
++ ** if lock holder call gckCOMMAND_Stall() */
++ status = gcvSTATUS_INVALID_REQUEST;
++ goto OnError;
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ clock = clocks[State];
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (State == gcvPOWER_ON)
++ {
++ clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
++ }
++#endif
++
++ if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
++ {
++#if gcdPOWER_SUSPEND_WHEN_IDLE
++ /* Do nothing */
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++#else
++ /* Clock should be on when switching power from off to suspend. */
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
++#endif
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE or SUSPEND. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Power Off GPU[%d] at %u [supposed to be at %u]",
++ Hardware->core, currentTime, Hardware->powerOffTime);
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* If this is an internal power management request, we have to check whether
++ ** we can grab the global power semaphore. If we cannot, we have to wait until
++ ** the external world changes power management. */
++ if (!global)
++ {
++ /* Try to acquire the global semaphore. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from thread routine which should NEVER sleep.*/
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Release the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Releasing the power mutex.");
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Wait for the semaphore. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Waiting for global semaphore.");
++ gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvTRUE;
++
++ /* Acquire the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Reacquiring the power mutex.");
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ mutexAcquired = gcvTRUE;
++
++ /* chipPowerState may have been changed by the external world while we
++ ** released powerMutex, so the flag must be updated now. */
++ flag = flags[Hardware->chipPowerState][State];
++
++ if (flag == 0)
++ {
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Error. */
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore again. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ else
++ {
++ if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
++ {
++ /* Acquire the global semaphore if it has not been acquired. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_OK)
++ {
++ globalAcquired = gcvTRUE;
++ }
++ else if (status != gcvSTATUS_TIMEOUT)
++ {
++ /* Other errors. */
++ gcmkONERROR(status);
++ }
++ /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
++ ** gcvSTATUS_TIMEOUT means global semaphore has already
++ ** been acquired before this operation, so even if we fail,
++ ** we should not release it in our error handling. It should be
++ ** released by the next successful global gcvPOWER_ON. */
++ }
++
++ /* Global power management can't be aborted, so sync with the
++ ** preceding commit. */
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ for (;;)
++ {
++ /* Check if GPU is present and awake. */
++ status = _IsGPUPresent(Hardware);
++
++ /* Check if the GPU is not responding. */
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Turn off the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));
++
++ Hardware->clockState = gcvFALSE;
++ Hardware->powerState = gcvFALSE;
++
++ /* Wait a little. */
++ gckOS_Delay(os, 1);
++
++ /* Turn on the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ /* We need to initialize the hardware and start the command
++ * processor. */
++ flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
++ }
++ else
++ {
++ /* Test for error. */
++ gcmkONERROR(status);
++
++ /* Break out of loop. */
++ break;
++ }
++ }
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ gctBOOL idle;
++ gctINT32 atomValue;
++
++ /* For global operation, all pending commits have already been
++ ** blocked by globalSemaphore or powerSemaphore.*/
++ if (!global)
++ {
++ /* Check commit atom. */
++ gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));
++
++ if (atomValue > 0)
++ {
++ /* Commits are pending - abort power management. */
++ status = broadcast ? gcvSTATUS_CHIP_NOT_READY
++ : gcvSTATUS_MORE_DATA;
++ goto OnError;
++ }
++ }
++
++ if (broadcast)
++ {
++ /* Check for idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));
++
++ if (!idle)
++ {
++ status = gcvSTATUS_CHIP_NOT_READY;
++ goto OnError;
++ }
++ }
++
++ else
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvTRUE));
++ commitEntered = gcvTRUE;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ commitEntered = gcvFALSE;
++
++ /* Wait to finish all commands. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++#endif
++ }
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ /* Stop the Isr. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext));
++ }
++ }
++
++ /* Flush Cache before Power Off. */
++ if (flag & gcvPOWER_FLAG_POWER_OFF)
++ {
++ if (Hardware->clockState == gcvFALSE)
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ gcvTRUE,
++ gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++
++ if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
++ {
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clocks[0]));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++ }
++
++ gcmkONERROR(gckCOMMAND_Start(command));
++
++ gcmkONERROR(_FlushCache(Hardware, command));
++
++ gckOS_Delay(gcvNULL, 1);
++
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ flag |= gcvPOWER_FLAG_CLOCK_OFF;
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState
++ /* Don't touch clock control if dynamic frequency scaling is available. */
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
++ )
++ {
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ if (Hardware->identity.chipModel == gcv4000
++ && ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
++ {
++ clock &= ~2U;
++ }
++ }
++
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
++ Hardware->allowFastClear,
++ Hardware->allowCompression));
++
++ /* Force the command queue to reload the next context. */
++ command->currContext = gcvNULL;
++
++ /* Need to config mmu after command start. */
++ configMmu = gcvTRUE;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ /* Start the command processor. */
++ gcmkONERROR(gckCOMMAND_Start(command));
++ commandStarted = gcvTRUE;
++
++ if (Hardware->startIsr)
++ {
++ /* Start the Isr. */
++ gcmkONERROR(Hardware->startIsr(Hardware->isrContext));
++ isrStarted = gcvTRUE;
++ }
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++
++ if (global)
++ {
++ /* Verify global semaphore has been acquired already before
++ ** we release it.
++ ** If it was acquired, gckOS_TryAcquireSemaphore will return
++ ** gcvSTATUS_TIMEOUT and we release it. Otherwise, global
++ ** semaphore will be acquired now, but it is still released
++ ** immediately. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status != gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++#if gcdDVFS
++ if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
++ {
++ gckDVFS_Start(Hardware->kernel->dvfs);
++ }
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOffTimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commandStarted)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Stop(command, gcvFALSE));
++ }
++
++ if (isrStarted)
++ {
++ gcmkVERIFY_OK(Hardware->stopIsr(Hardware->isrContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ }
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (globalAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++ /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power Management State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ if (!Hardware->powerManagementLock)
++ {
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++
++ Hardware->powerManagement = PowerManagement;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementLock
++**
++** Disable dynamic GPU power management switch.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL Lock
++** Power Management Lock State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementLock(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Lock
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagementLock = Lock;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++/*******************************************************************************
++**
++** gckHARDWARE_SetGpuProfiler
++**
++** Configure GPU profiler function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL GpuProfiler
++** GPU Profiler State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (GpuProfiler == gcvTRUE)
++ {
++ gctUINT32 data = 0;
++
++ /* Need to disable clock gating when doing profiling. */
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++ }
++
++ Hardware->gpuProfiler = GpuProfiler;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ )
++{
++ gceSTATUS status;
++ gctUINT32 clock;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue);
++
++ gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64);
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ Hardware->powerOnFscaleVal = FscaleValue;
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
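++        /* Build the clock control value written to register offset 0x00000
++           below: bits 0 and 1 are cleared, bits 8:2 carry FscaleValue and
++           bit 9 is set as a load strobe, which the following write clears
++           again once the frequency scaler has latched the new value.
++           (Field names are inferred from the "frequency scaler" comment
++           below.) */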
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ /* Restore all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++    OUT gctUINT * FscaleValue,
++    OUT gctUINT * MinFscaleValue,
++    OUT gctUINT * MaxFscaleValue
++ )
++{
++ *FscaleValue = Hardware->powerOnFscaleVal;
++ *MinFscaleValue = Hardware->minFscaleValue;
++ *MaxFscaleValue = 64;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetMinFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT MinFscaleValue
++ )
++{
++ if (MinFscaleValue >= 1 && MinFscaleValue <= 64)
++ {
++ Hardware->minFscaleValue = MinFscaleValue;
++ }
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle, address;
++ gctBOOL isIdle;
++#if gcdMULTI_GPU > 1
++ gctUINT32 idle3D1 = 0;
++ gctUINT32 address3D1;
++ gctBOOL isIdle3D1 = gcvFALSE;
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 pendingInterrupt;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ isIdle = gcvTRUE;
++#if gcdMULTI_GPU > 1
++ isIdle3D1 = gcvTRUE;
++#endif
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ gcvCORE_3D_1_ID,
++ 0x00004,
++ &idle3D1));
++ }
++#endif
++
++ /* Pipe must be idle. */
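++        /* Each term below isolates a single bit (bits 1..7) of the idle
++           status word read from offset 0x00004 and requires it to be 1; if
++           any of these module-idle bits reads 0, the pipe is treated as
++           busy. */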
++ if (((((((gctUINT32) (idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ isIdle = gcvFALSE;
++ }
++
++ else
++ {
++#if gcdSECURITY
++ isIdle = gcvTRUE;
++ address = 0;
++#else
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
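++            /* The window is the size of the WAIT/LINK command pair left at
++               the end of the command queue: 16 bytes in the single-GPU
++               build, 40 bytes when gcdMULTI_GPU wraps it in chip-enable
++               commands.  (The command layout itself is an assumption; the
++               sizes come from the constants below.) */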
++ if ((address >= Hardware->lastWaitLink)
++#if gcdMULTI_GPU
++ && (address <= Hardware->lastWaitLink + 40)
++#else
++ && (address <= Hardware->lastWaitLink + 16)
++#endif
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ isIdle = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ isIdle = gcvFALSE;
++ }
++#endif
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle3D1)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ isIdle3D1 = gcvFALSE;
++ }
++
++ else
++ {
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ gcvCORE_3D_1_ID,
++ 0x00664,
++ &address3D1));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
++ if ((address3D1 >= Hardware->lastWaitLink)
++ && (address3D1 <= Hardware->lastWaitLink + 40)
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ isIdle3D1 = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ isIdle3D1 = gcvFALSE;
++ }
++ }
++ }
++#endif
++
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkONERROR(gckOS_AtomGet(
++ Hardware->os,
++ Hardware->kernel->eventObj->interruptCount,
++ &pendingInterrupt
++ ));
++
++ if (pendingInterrupt)
++ {
++ isIdle = gcvFALSE;
++ }
++#endif
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ *IsIdle = (isIdle & isIdle3D1);
++ }
++ else
++#endif
++ {
++ *IsIdle = isIdle;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** Handy macros that will help in reading those debug registers.
++*/
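++
++/*
++** The debug counters are read with a select/read pattern: an index is
++** written into a field of one of the debug control registers (0x00470,
++** 0x00474 or 0x00478) and the counter value is then read back from the
++** matching debug signals register.  Writing index 15 followed by 0 into the
++** same field resets that block's counters; the open-coded register accesses
++** later in this file follow exactly this pattern with the field macros
++** fully expanded.
++*/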
++
++#define gcmkREAD_DEBUG_REGISTER(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &profiler->data))
++
++#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &data))
++
++#define gcmkRESET_DEBUG_REGISTER(control, block) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 15))); \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 0)))
++
++/*******************************************************************************
++**
++** gckHARDWARE_ProfileEngine2D
++**
++**  Read the profile registers available in the 2D engine and set them in the
++**  profile. The function also resets the pixelsRendered counter on every call.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OPTIONAL gcs2D_PROFILE_PTR Profile
++**          Pointer to a gcs2D_PROFILE structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ )
++{
++ gceSTATUS status;
++ gcs2D_PROFILE_PTR profiler = Profile;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Profile != gcvNULL)
++ {
++ /* Read the cycle count. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &Profile->cycleCount));
++
++ /* Read pixels rendered by 2D engine. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &profiler->pixelsRendered));
++
++ /* Reset counter. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
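++        /* Bits 23:20 of the clock control register select which pixel pipe
++           the debug counters read below report on. */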
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
++#if VIVANTE_PROFILER_CONTEXT
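++/* gcmkUPDATE_PROFILE_DATA accumulates each freshly read counter into the
++   per-context history; gckHARDWARE_QueryContextProfile copies that history
++   out to the caller and then zeroes it, so it holds everything recorded
++   since the previous query. */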
++#define gcmkUPDATE_PROFILE_DATA(data) \
++ profilerHistroy->data += profiler->data
++
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = Hardware->kernel->command;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++    /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os, command->mutexContextSeq, gcvINFINITE
++ ));
++
++ /* Read the counters. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ profiler, &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ /* Reset counters. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os, command->mutexContextSeq
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
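++/* The hardware counters are 32 bits wide and may wrap between two reads;
++   CalcDelta returns (new - old) modulo 2^32 so a single wrap-around does not
++   produce a bogus delta. */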
++static gctUINT32
++CalcDelta(
++ IN gctUINT32 new,
++ IN gctUINT32 old
++ )
++{
++ if (new >= old)
++ {
++ return new - old;
++ }
++ else
++ {
++ return (gctUINT32)((gctUINT64)new + 0x100000000ll - old);
++ }
++}
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = &Context->latestProfiler;
++ gcsPROFILER_COUNTERS * profilerHistroy = &Context->histroyProfiler;
++ gctUINT i, clock;
++ gctUINT32 colorKilled = 0, colorDrawn = 0, depthKilled = 0, depthDrawn = 0;
++ gctUINT32 totalRead, totalWrite;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 temp;
++ gctBOOL needResetShader = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ chipModel = Hardware->identity.chipModel;
++ chipRevision = Hardware->identity.chipRevision;
++ if (chipModel == gcv2000 || (chipModel == gcv2100 && chipRevision == 0x5118))
++ {
++ needResetShader = gcvTRUE;
++ }
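++    /* For these parts the raw shader (SH) counter values are kept in the
++       context and CalcDelta is used further down to report per-call deltas,
++       which suggests the 15/0 reset sequence does not clear the SH counters
++       on these revisions. */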
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuTotalCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuIdleCyclesCounter);
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++ gcmkUPDATE_PROFILE_DATA(gpuTotalRead64BytesPerFrame);
++ gcmkUPDATE_PROFILE_DATA(gpuTotalWrite64BytesPerFrame);
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_depth_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_depth_pipe);
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->ps_inst_counter;
++ profiler->ps_inst_counter = CalcDelta(temp, Context->prevPSInstCount);
++ Context->prevPSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(ps_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_pixel_counter;
++ profiler->rendered_pixel_counter = CalcDelta(temp, Context->prevPSPixelCount);
++ Context->prevPSPixelCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_pixel_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vs_inst_counter;
++ profiler->vs_inst_counter = CalcDelta(temp, Context->prevVSInstCount);
++ Context->prevVSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vs_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_vertice_counter;
++ profiler->rendered_vertice_counter = CalcDelta(temp, Context->prevVSVertexCount);
++ Context->prevVSVertexCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_vertice_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_branch_inst_counter;
++ profiler->vtx_branch_inst_counter = CalcDelta(temp, Context->prevVSBranchInstCount);
++ Context->prevVSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_texld_inst_counter;
++ profiler->vtx_texld_inst_counter = CalcDelta(temp, Context->prevVSTexInstCount);
++ Context->prevVSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_branch_inst_counter;
++ profiler->pxl_branch_inst_counter = CalcDelta(temp, Context->prevPSBranchInstCount);
++ Context->prevPSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_texld_inst_counter;
++ profiler->pxl_texld_inst_counter = CalcDelta(temp, Context->prevPSTexInstCount);
++ Context->prevPSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
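++ /* Note on the pattern used for every block below (and for the SH block
++ above): a counter index is written into a byte-wide select field of a
++ debug-control register (0x00470, 0x00474 or 0x00478), the counter value
++ is read back from that module's debug-data register (0x0045C, 0x00460,
++ 0x00464, 0x00448, 0x0044C, 0x00468 or 0x0046C), and finally index 15
++ followed by 0 is selected, which appears to reset the module's counters
++ (the same index-15 convention is used under gcdFRAME_DB_RESET in
++ gckHARDWARE_GetFrameInfo below). For a constant value the expanded field
++ macros reduce to a plain shift and mask; an illustrative helper (not part
++ of the driver, the name is hypothetical) would be:
++
++ static gctUINT32 set_byte_field(gctUINT32 reg, int lsb, gctUINT32 v)
++ {
++ return (reg & ~((gctUINT32) 0xFF << lsb)) | ((v & 0xFF) << lsb);
++ }
++
++ so selecting SH counter 7 above simply writes set_byte_field(0, 24, 7),
++ i.e. 0x07000000, to register 0x00470. */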
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_vtx_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_output_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_depth_clipped_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_trivial_rejected_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_culled_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_triangle_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_lines_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_pixel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_quad_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_quad_count_after_early_z);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_primitive_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_pipe_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_prefetch_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_bilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_trilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_discarded_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_in_8B_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_hit_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_IP);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_write_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_read_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_data_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
++#if VIVANTE_PROFILER_NEW
++gceSTATUS
++gckHARDWARE_InitProfiler(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
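++ /* (Bit 11 of the clock-control register 0x00000 presumably gates the
++ debug registers; the write above clears it to enable them.) */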
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ for (;;)
++ {
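++ /* One reset attempt, in outline: disable clock gating and the
++ pulse-eater, force the clocks on, isolate the GPU, pulse the
++ soft-reset bit, drop isolation again, then poll the idle register
++ and two status bits of the clock-control register; if either check
++ fails the whole sequence is retried. */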
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ Hardware->powerBaseAddress +
++ 0x00104,
++ 0x00000000));
++
++ control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable pulse-eater. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Wait for the clock to stabilize. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Core == gcvCORE_MAJOR)
++ {
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Os,
++ Core,
++ gcvCORE_3D_1_ID,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++ }
++#endif
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Core == gcvCORE_MAJOR)
++ {
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Os,
++ Core,
++ gcvCORE_3D_1_ID,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++ }
++#endif
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++
++ /* Hardware reset. */
++ status = gckOS_ResetGPU(Hardware->os, Hardware->core);
++
++ if (gcmIS_ERROR(status))
++ {
++ if (Hardware->identity.chipRevision < 0x4600)
++ {
++ /* Not supported - we need the isolation bit. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ /* Soft reset. */
++ gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core));
++ }
++
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ /* Jump to the address from which the GPU should resume execution if it is not stuck. */
++ gcmkONERROR(gckHARDWARE_Execute(Hardware, Hardware->kernel->restoreAddress, 16));
++
++ gcmkPRINT("[galcore]: recovery done");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkPRINT("[galcore]: Hardware not reset successfully, give up");
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Test if we have a new Memory Controller. */
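++ /* (The test below reads bit 22 of chipMinorFeatures, the same bit the
++ gcvFEATURE_MC20 case in gckHARDWARE_IsFeatureAvailable checks; with
++ the newer memory controller no base address is required.) */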
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))))
++ {
++ /* No base address required. */
++ *BaseAddress = 0;
++ }
++ else
++ {
++ /* Get the base address from the OS. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, BaseAddress));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ )
++{
++ gctBOOL need = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL);
++
++ /* Make sure this is a load state. */
++ if (((((gctUINT32) (State)) >> (0 ? 31:27) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))))
++ {
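++ /* (Per the expansion above, bits 31:27 of State equal 0x01, i.e. this is
++ a load-state command; the switch below then keys on the state address
++ held in bits 15:0.) */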
++#if gcdENABLE_3D
++ /* Get the state address. */
++ switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) ))
++ {
++ case 0x0596:
++ case 0x0597:
++ case 0x0599:
++ case 0x059A:
++ case 0x05A9:
++ /* These states need a TRUE physical address. */
++ need = gcvTRUE;
++ break;
++ }
++#else
++ /* 2D addresses don't need a base address. */
++#endif
++ }
++
++ /* Return the flag. */
++ *NeedBase = need;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Hardware=0x%x, StartIsr=0x%x, StopIsr=0x%x, Context=0x%x",
++ Hardware, StartIsr, StopIsr, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (StartIsr == gcvNULL ||
++ StopIsr == gcvNULL ||
++ Context == gcvNULL)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ Hardware->startIsr = StartIsr;
++ Hardware->stopIsr = StopIsr;
++ Hardware->isrContext = Context;
++
++ /* Success. */
++ gcmkFOOTER();
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Compose
++**
++** Start a composition.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ )
++{
++#if gcdENABLE_3D
++ gceSTATUS status;
++ gctUINT32_PTR triggerState;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x Logical=0x%x"
++ " Offset=%d Size=%d EventID=%d",
++ Hardware, Physical, Logical, Offset, Size, EventID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(((Size + 8) & 63) == 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Program the trigger state. */
++ triggerState = (gctUINT32_PTR) ((gctUINT8_PTR) Logical + Offset + Size);
++ triggerState[0] = 0x0C03;
++ triggerState[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16))) | (((gctUINT32) ((gctUINT32) (EventID) & ((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16)))
++ ;
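++ /* (With the field macros expanded, triggerState[1] is simply
++ 0x01001131 | (EventID << 16): bits 1:0 = 0x1, bits 5:4 = 0x3, single
++ bits 8, 12 and 24 set, and the event ID in bits 20:16.) */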
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Hardware->os, ProcessID, gcvNULL,
++ (gctUINT32)Physical, Logical, Offset + Size
++ ));
++#endif
++
++ /* Start composition. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os, Hardware->core, 0x00554,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ /* Return the status. */
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_IsFeatureAvailable
++**
++** Verifies whether the specified feature is available in hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceFEATURE Feature
++** Feature to be verified.
++*/
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ )
++{
++ gctBOOL available;
++
++ gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Only features needed by common kernel logic are added here. */
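++ /* Each case below just tests one feature bit in chipFeatures or one of
++ the chipMinorFeatures* words; for example gcvFEATURE_MC20 is bit 22 of
++ chipMinorFeatures and gcvFEATURE_PIPE_2D is bit 9 of chipFeatures. */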
++ switch (Feature)
++ {
++ case gcvFEATURE_END_EVENT:
++ /*available = gcmVERIFYFIELDVALUE(Hardware->identity.chipMinorFeatures2,
++ GC_MINOR_FEATURES2, END_EVENT, AVAILABLE
++ );*/
++ available = gcvFALSE;
++ break;
++
++ case gcvFEATURE_MC20:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))));
++ break;
++
++ case gcvFEATURE_EARLY_Z:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ case gcvFEATURE_HZ:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))));
++ break;
++
++ case gcvFEATURE_NEW_HZ:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))));
++ break;
++
++ case gcvFEATURE_FAST_MSAA:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))));
++ break;
++
++ case gcvFEATURE_SMALL_MSAA:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++ break;
++
++ case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING:
++ /* This feature doesn't apply to 2D cores. */
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 14:14) & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++
++ if (Hardware->identity.chipModel == gcv1000 &&
++ (Hardware->identity.chipRevision == 0x5039 ||
++ Hardware->identity.chipRevision == 0x5040))
++ {
++ available = gcvFALSE;
++ }
++ break;
++
++ case gcvFEATURE_ACE:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++ break;
++
++ case gcvFEATURE_HALTI2:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_2D:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_3D:
++#if gcdENABLE_3D
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++#else
++ available = gcvFALSE;
++#endif
++ break;
++
++ case gcvFEATURE_FC_FLUSH_STALL:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 31:31) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))));
++ break;
++
++ default:
++ gcmkFATAL("Invalid feature has been requested.");
++ available = gcvFALSE;
++ }
++
++ /* Return result. */
++ gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE);
++ return available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpMMUException
++**
++** Dump the MMU debug info on an MMU exception.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ )
++{
++ gctUINT32 mmu = 0;
++ gctUINT32 mmuStatus = 0;
++ gctUINT32 address = 0;
++ gctUINT32 i = 0;
++ gctUINT32 mtlb = 0;
++ gctUINT32 stlb = 0;
++ gctUINT32 offset = 0;
++#if gcdPROCESS_ADDRESS_SPACE
++ gcsDATABASE_PTR database;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ Hardware->core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** MMU ERROR DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00188,
++ &mmuStatus));
++
++ gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus);
++
++ for (i = 0; i < 4; i += 1)
++ {
++ mmu = mmuStatus & 0xF;
++ mmuStatus >>= 4;
++
++ if (mmu == 0)
++ {
++ continue;
++ }
++
++ switch (mmu)
++ {
++ case 1:
++ gcmkPRINT(" MMU%d: slave not present\n", i);
++ break;
++
++ case 2:
++ gcmkPRINT(" MMU%d: page not present\n", i);
++ break;
++
++ case 3:
++ gcmkPRINT(" MMU%d: write violation\n", i);
++ break;
++
++ default:
++ gcmkPRINT(" MMU%d: unknown state\n", i);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00190 + i * 4,
++ &address));
++
++ mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++ offset = address & gcdMMU_OFFSET_4K_MASK;
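++ /* (This splits the faulting address into master-TLB index, slave-TLB
++ index and page offset using the 4K-page gcdMMU_* masks and shifts.) */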
++
++ gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address);
++
++ gcmkPRINT(" MTLB entry = %d\n", mtlb);
++
++ gcmkPRINT(" STLB entry = %d\n", stlb);
++
++ gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset);
++
++ gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < gcmCOUNTOF(Hardware->kernel->db->db); ++i)
++ {
++ for (database = Hardware->kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ gcmkPRINT(" database [%d] :", database->processID);
++ gckMMU_DumpPageTableEntry(database->mmu, address);
++ }
++ }
++#endif
++ }
++
++ gckHARDWARE_DumpGPUState(Hardware);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpGPUState
++**
++** Dump the GPU debug registers.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ )
++{
++ static gctCONST_STRING _cmdState[] =
++ {
++ "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST",
++ "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST",
++ "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST",
++ "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST",
++ "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST",
++ "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"
++ };
++
++ static gctCONST_STRING _cmdDmaState[] =
++ {
++ "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST"
++ };
++
++ static gctCONST_STRING _cmdFetState[] =
++ {
++ "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST"
++ };
++
++ static gctCONST_STRING _reqDmaState[] =
++ {
++ "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST"
++ };
++
++ static gctCONST_STRING _calState[] =
++ {
++ "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST"
++ };
++
++ static gctCONST_STRING _veReqState[] =
++ {
++ "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST"
++ };
++
++ static gcsiDEBUG_REGISTERS _dbgRegs[] =
++ {
++ { "RA", 0x474, 16, 0x448, 16, 0x12344321 },
++ { "TX", 0x474, 24, 0x44C, 16, 0x12211221 },
++ { "FE", 0x470, 0, 0x450, 16, 0xBABEF00D },
++ { "PE", 0x470, 16, 0x454, 16, 0xBABEF00D },
++ { "DE", 0x470, 8, 0x458, 16, 0xBABEF00D },
++ { "SH", 0x470, 24, 0x45C, 16, 0xDEADBEEF },
++ { "PA", 0x474, 0, 0x460, 16, 0x0000AAAA },
++ { "SE", 0x474, 8, 0x464, 16, 0x5E5E5E5E },
++ { "MC", 0x478, 0, 0x468, 16, 0x12345678 },
++ { "HI", 0x478, 8, 0x46C, 16, 0xAAAAAAAA }
++ };
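++ /* The (select register, shift, data register) triples above match the
++ select/read pairs used by the profiler and frame-info code in this
++ file, e.g. SH: select in bits 31:24 of 0x470, data read from 0x45C. */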
++
++ static gctUINT32 _otherRegs[] =
++ {
++ 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060,
++ 0x43c, 0x440, 0x444, 0x414,
++ };
++
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gctUINT32 idle = 0, axi = 0;
++ gctUINT32 dmaAddress1 = 0, dmaAddress2 = 0;
++ gctUINT32 dmaState1 = 0, dmaState2 = 0;
++ gctUINT32 dmaLow = 0, dmaHigh = 0;
++ gctUINT32 cmdState = 0, cmdDmaState = 0, cmdFetState = 0;
++ gctUINT32 dmaReqState = 0, calState = 0, veReqState = 0;
++ gctUINT i;
++ gctUINT pipe = 0, pixelPipes = 0;
++ gctUINT32 control = 0, oldControl = 0;
++ gckOS os = Hardware->os;
++ gceCORE core = Hardware->core;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ kernel = Hardware->kernel;
++
++ gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ pixelPipes = Hardware->identity.pixelPipes
++ ? Hardware->identity.pixelPipes
++ : 1;
++
++ /* Reset register values. */
++ idle = axi =
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 =
++ dmaLow = dmaHigh = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkONERROR(_VerifyDMA(
++ os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++ cmdState = dmaState2 & 0x1F;
++ cmdDmaState = (dmaState2 >> 8) & 0x03;
++ cmdFetState = (dmaState2 >> 10) & 0x03;
++ dmaReqState = (dmaState2 >> 12) & 0x03;
++ calState = (dmaState2 >> 14) & 0x03;
++ veReqState = (dmaState2 >> 16) & 0x03;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x004, &idle));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00C, &axi));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x668, &dmaLow));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x66C, &dmaHigh));
++
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT_N(4, " axi = 0x%08X\n", axi);
++
++ gcmkPRINT_N(4, " idle = 0x%08X\n", idle);
++ if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n");
++ if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n");
++ if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n");
++ if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n");
++ if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n");
++ if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n");
++ if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n");
++ if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n");
++ if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n");
++ if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n");
++ if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n");
++ if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n");
++ if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, " AXI low power mode\n");
++
++ if (
++ (dmaAddress1 == dmaAddress2)
++ && (dmaState1 == dmaState2)
++ )
++ {
++ gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ }
++ else
++ {
++ if (dmaAddress1 == dmaAddress2)
++ {
++ gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState2);
++ }
++ else
++ {
++ gcmkPRINT_N(0, " DMA is running; known addresses are:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2);
++ }
++ }
++
++ gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow);
++ gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh);
++ gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2);
++ gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]);
++ gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]);
++ gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]);
++ gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]);
++ gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]);
++ gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]);
++
++ /* Record control. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &oldControl);
++
++ for (pipe = 0; pipe < pixelPipes; pipe++)
++ {
++ gcmkPRINT_N(4, " Debug registers of pipe[%d]:\n", pipe);
++
++ /* Switch pipe. */
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0, &control));
++ control &= ~(0xF << 20);
++ control |= (pipe << 20);
++ gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, control));
++
++ for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1)
++ {
++ gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i]));
++ }
++
++ gcmkPRINT_N(0, " Other Registers:\n");
++ for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1)
++ {
++ gctUINT32 read;
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read));
++ gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read);
++ }
++ }
++
++ if (kernel->hardware->identity.chipFeatures & (1 << 4))
++ {
++ gctUINT32 read0, read1, write;
++
++ read0 = read1 = write = 0;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x43C, &read0));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x440, &read1));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x444, &write));
++
++ gcmkPRINT_N(4, " read0 = 0x%08X\n", read0);
++ gcmkPRINT_N(4, " read1 = 0x%08X\n", read1);
++ gcmkPRINT_N(4, " write = 0x%08X\n", write);
++ }
++
++ /* Restore control. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, oldControl));
++
++ /* Dump the call stack. */
++ gckOS_DumpCallStack(os);
++
++OnError:
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
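++/*******************************************************************************
++**
++** gckHARDWARE_ReadPerformanceRegister
++**
++** Helper that selects performance counter Index by writing it, shifted by
++** IndexShift, into the IndexAddress debug-control register and then reads
++** the counter value from PerformanceAddress. Selecting index 15 appears to
++** reset the counters, so the select field is returned to 0 afterwards.
++*/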
++static gceSTATUS
++gckHARDWARE_ReadPerformanceRegister(
++ IN gckHARDWARE Hardware,
++ IN gctUINT PerformanceAddress,
++ IN gctUINT IndexAddress,
++ IN gctUINT IndexShift,
++ IN gctUINT Index,
++ OUT gctUINT32_PTR Value
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x "
++ "IndexShift=%u Index=%u",
++ Hardware, PerformanceAddress, IndexAddress, IndexShift,
++ Index);
++
++ /* Write the index. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ IndexAddress,
++ Index << IndexShift));
++
++ /* Read the register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ PerformanceAddress,
++ Value));
++
++ /* Test for reset. */
++ if (Index == 15)
++ {
++ /* Index another register to get out of reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=0x%x", *Value);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ )
++{
++ gceSTATUS status;
++ gctUINT i, clock;
++ gcsHAL_FRAME_INFO info;
++#if gcdFRAME_DB_RESET
++ gctUINT reset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Get profile tick. */
++ gcmkONERROR(gckOS_GetProfileTick(&info.ticks));
++
++ /* Read SH counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 4,
++ &info.shaderCycles));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 9,
++ &info.vsInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 12,
++ &info.vsTextureCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 7,
++ &info.psInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 14,
++ &info.psTextureCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read PA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 3,
++ &info.vertexCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 4,
++ &info.primitiveCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 7,
++ &info.rejectedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 8,
++ &info.culledPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 6,
++ &info.clippedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 5,
++ &info.outPrimitives));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 15,
++ &reset));
++#endif
++
++ /* Read RA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 3,
++ &info.inPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 11,
++ &info.culledQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 1,
++ &info.totalQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 2,
++ &info.quadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 0,
++ &info.totalPixelCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Read TX counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 0,
++ &info.bilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 1,
++ &info.trilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 8,
++ &info.txHitCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 9,
++ &info.txMissCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 6,
++ &info.txBytes8));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
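++        /* The pipe index is programmed into bits 23:20 of the clock control
++        ** register (0x00000). */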
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* Read cycle registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &info.cycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &info.idleCycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &info.mcCycles[i]));
++
++ /* Read bandwidth registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0005C,
++ &info.readRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &info.readBytes8[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00050,
++ &info.writeRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &info.writeBytes8[i]));
++
++ /* Read PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 0,
++ &info.colorKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 2,
++ &info.colorDrawn[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 1,
++ &info.depthKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 3,
++ &info.depthDrawn[i]));
++ }
++
++    /* Zero out remaining reserved counters. */
++ for (; i < 8; ++i)
++ {
++ info.readBytes8[i] = 0;
++ info.writeBytes8[i] = 0;
++ info.cycles[i] = 0;
++ info.idleCycles[i] = 0;
++ info.mcCycles[i] = 0;
++ info.readRequests[i] = 0;
++ info.writeRequests[i] = 0;
++ info.colorKilled[i] = 0;
++ info.colorDrawn[i] = 0;
++ info.depthKilled[i] = 0;
++ info.depthDrawn[i] = 0;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset cycle and bandwidth counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 1));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ 0));
++
++#if gcdFRAME_DB_RESET
++ /* Reset PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Copy to user. */
++ gcmkONERROR(gckOS_CopyToUserData(Hardware->os,
++ &info,
++ FrameInfo,
++ gcmSIZEOF(info)));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdDVFS
++#define READ_FROM_EATER1 0
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ )
++{
++ gctUINT32 debug1;
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Load != gcvNULL);
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00110,
++ Load));
++#if READ_FROM_EATER1
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00134,
++ Load));
++#endif
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00114,
++ &debug1));
++
++ /* Patch result of 0x110 with result of 0x114. */
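++        /* Each byte of *Load is patched independently: when the matching byte
++        ** of debug1 reads 1, the corresponding byte of the load value is
++        ** forced to 1. */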
++ if ((debug1 & 0xFF) == 1)
++ {
++ *Load &= ~0xFF;
++ *Load |= 1;
++ }
++
++ if (((debug1 & 0xFF00) >> 8) == 1)
++ {
++ *Load &= ~(0xFF << 8);
++ *Load |= 1 << 8;
++ }
++
++ if (((debug1 & 0xFF0000) >> 16) == 1)
++ {
++ *Load &= ~(0xFF << 16);
++ *Load |= 1 << 16;
++ }
++
++ if (((debug1 & 0xFF000000) >> 24) == 1)
++ {
++ *Load &= ~(0xFF << 24);
++ *Load |= 1 << 24;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 Frequency
++ )
++{
++ gceSTATUS status;
++ gctUINT32 period;
++ gctUINT32 eater;
++
++#if READ_FROM_EATER1
++ gctUINT32 period1;
++ gctUINT32 eater1;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ period = 0;
++
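++    /* Pick the smallest period exponent so that (64 << period) covers
++    ** gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000. */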
++ while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) )
++ {
++ period++;
++ }
++
++#if READ_FROM_EATER1
++ /*
++     *  Period = F * 1000 * 1000 / (60 * 16 * 1024);
++ */
++ period1 = Frequency * 6250 / 6114;
++#endif
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Get current configure. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &eater));
++
++        /* Change period. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))));
++
++#if READ_FROM_EATER1
++ /* Config eater1. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ &eater1));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16)))));
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 data;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
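++    /* Update the DVFS configuration (register 0x010C): set bits 16, 18, 20
++    ** and 23, clear bits 19 and 22. */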
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "DVFS Configure=0x%X",
++ data);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_PrepareFunctions
++**
++**  Generate command buffer snippets which will be used by gckHARDWARE, by which
++**  gckHARDWARE can manipulate the GPU through FE commands without using gckCOMMAND,
++**  avoiding race conditions and deadlocks.
++**
++** Notice:
++**  1. Each snippet can only be executed when the GPU is idle.
++**  2. Execution is triggered via the AHB (0x658).
++**  3. Each snippet is followed by an END command so software can sync with the
++**     GPU by checking for GPU idle.
++**  4. It is transparent to the gckCOMMAND command buffers.
++**
++** Existing Snippets:
++** 1. MMU Configure
++**    For the new MMU, after the GPU is reset, the FE executes this command sequence to enable the MMU.
++*/
++gceSTATUS
++gckHARDWARE_PrepareFunctions(
++ gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckOS os;
++ gctUINT32 offset = 0;
++ gctUINT32 mmuBytes;
++ gctUINT32 endBytes;
++ gctUINT8_PTR logical;
++
++ gcmkHEADER_ARG("%x", Hardware);
++
++ os = Hardware->os;
++
++ gcmkVERIFY_OK(gckOS_GetPageSize(os, &Hardware->functionBytes));
++
++ /* Allocate a command buffer. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &Hardware->functionBytes,
++ &Hardware->functionPhysical,
++ &Hardware->functionLogical
++ ));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ os,
++ Hardware->functionLogical,
++ &Hardware->functionAddress
++ ));
++
++ if (Hardware->mmuVersion > 0)
++ {
++ /* MMU configure command sequence. */
++ logical = (gctUINT8_PTR)Hardware->functionLogical + offset;
++
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].address
++ = Hardware->functionAddress + offset;
++
++ gcmkONERROR(gckHARDWARE_SetMMUStates(
++ Hardware,
++ Hardware->kernel->mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Hardware->kernel->mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ logical,
++ &mmuBytes
++ ));
++
++ offset += mmuBytes;
++
++ logical = (gctUINT8_PTR)Hardware->functionLogical + offset;
++
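++        /* The first gckHARDWARE_End call passes a gcvNULL logical to query the
++        ** END size; the second emits the END command at 'logical'. */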
++ gcmkONERROR(gckHARDWARE_End(
++ Hardware,
++ gcvNULL,
++ &endBytes
++ ));
++
++ gcmkONERROR(gckHARDWARE_End(
++ Hardware,
++ logical,
++ &endBytes
++ ));
++
++ offset += endBytes;
++
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].bytes = mmuBytes + endBytes;
++ }
++
++ gcmkASSERT(offset < Hardware->functionBytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_hardware.h 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,160 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_h_
++#define __gc_hal_kernel_hardware_h_
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_hardware_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef enum {
++ gcvHARDWARE_FUNCTION_MMU,
++ gcvHARDWARE_FUNCTION_FLUSH,
++
++ gcvHARDWARE_FUNCTION_NUM,
++}
++gceHARDWARE_FUNCTION;
++
++
++typedef struct _gcsHARWARE_FUNCTION
++{
++ /* Entry of the function. */
++ gctUINT32 address;
++
++ /* Bytes of the function. */
++ gctUINT32 bytes;
++}
++gcsHARDWARE_FUNCTION;
++
++/* gckHARDWARE object. */
++struct _gckHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gctKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Pointer to gctOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Chip characteristics. */
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctBOOL allowFastClear;
++ gctBOOL allowCompression;
++ gctUINT32 powerBaseAddress;
++ gctBOOL extraEventStates;
++
++ /* Big endian */
++ gctBOOL bigEndian;
++
++ /* Chip status */
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gctUINT32 lastWaitLink;
++ gctUINT32 lastEnd;
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER globalSemaphore;
++
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++
++ gctUINT32 mmuVersion;
++
++    /* Whether to use the new MMU. It is meaningless
++    ** for the old MMU since the old MMU is always enabled.
++ */
++ gctBOOL enableMMU;
++
++ /* Type */
++ gceHARDWARE_TYPE type;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gctUINT32 powerOnFscaleVal;
++#endif
++ gctPOINTER pageTableDirty;
++
++#if gcdLINK_QUEUE_SIZE
++ struct _gckLINKQUEUE linkQueue;
++#endif
++
++ gctBOOL powerManagement;
++ gctBOOL powerManagementLock;
++ gctBOOL gpuProfiler;
++
++ gctBOOL endAfterFlushMmuCache;
++
++ gctUINT32 minFscaleValue;
++
++ gctPOINTER pendingEvent;
++
++ /* Function used by gckHARDWARE. */
++ gctPHYS_ADDR functionPhysical;
++ gctPOINTER functionLogical;
++ gctUINT32 functionAddress;
++ gctSIZE_T functionBytes;
++
++ gcsHARDWARE_FUNCTION functions[gcvHARDWARE_FUNCTION_NUM];
++};
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ );
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/arch/gc_hal_kernel_recorder.c 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,679 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++
++/*
++ * -----------------------
++ * HARDWARE STATE RECORDER
++ * -----------------------
++ *
++ * The state mirror buffer is used to 'mirror' hardware states since hardware
++ * states can't be dumped. It is a context buffer which stores the 'global'
++ * context.
++ *
++ * For each commit, the state recorder
++ * 1) Records the context buffer (if there is one) and the command buffers of this commit.
++ * 2) Parses those buffers to determine which states changed.
++ * 3) Stores the result in a mirror buffer.
++ *
++ * == Commit 0 ====================================================================
++ *
++ * Context Buffer 0
++ *
++ * Command Buffer 0
++ *
++ * Mirror Buffer 0 <- Context Buffer 0 + Command Buffer 0
++ *
++ * == Commit 1 ====================================================================
++ *
++ * Command Buffer 1
++ *
++ * Mirror Buffer 1 <- Command buffer 1 + Mirror Buffer 0
++ *
++ * == Commit 2 ====================================================================
++ *
++ * Context Buffer 2 (optional)
++ *
++ * Command Buffer 2
++ *
++ * Mirror Buffer 2 <- Command buffer 2 + Context Buffer 2 + Mirror Buffer 1
++ *
++ * == Commit N ====================================================================
++ *
++ * For Commit N, these buffers are needed to reproduce hardware's behavior in
++ * this commit.
++ *
++ *  Mirror Buffer [N - 1] : State mirror accumulated by past commits,
++ * which is used to restore hardware state.
++ * Context Buffer [N] :
++ * Command Buffer [N] : Command buffer executed by hardware in this commit.
++ *
++ *  If the order in which states are programmed matters, the hardware's behavior can't be
++ *  reproduced exactly, but the state values stored in the mirror buffer are still correct.
++ */
++
++/* Queue size. */
++#define gcdNUM_RECORDS 6
++
++typedef struct _gcsPARSER_HANDLER * gckPARSER_HANDLER;
++
++typedef void
++(*HandlerFunction)(
++ IN gckPARSER_HANDLER Handler,
++ IN gctUINT32 Addr,
++ IN gctUINT32 Data
++ );
++
++typedef struct _gcsPARSER_HANDLER
++{
++ gctUINT32 type;
++ gctUINT32 cmd;
++ gctPOINTER private;
++ HandlerFunction function;
++}
++gcsPARSER_HANDLER;
++
++typedef struct _gcsPARSER * gckPARSER;
++typedef struct _gcsPARSER
++{
++ gctUINT8_PTR currentCmdBufferAddr;
++
++ /* Current command. */
++ gctUINT32 lo;
++ gctUINT32 hi;
++
++ gctUINT8 cmdOpcode;
++ gctUINT16 cmdAddr;
++ gctUINT32 cmdSize;
++ gctUINT32 cmdRectCount;
++ gctUINT8 skip;
++ gctUINT32 skipCount;
++
++ gctBOOL allow;
++
++ /* Callback used by parser to handle a command. */
++ gckPARSER_HANDLER commandHandler;
++}
++gcsPARSER;
++
++typedef struct _gcsMIRROR
++{
++ gctUINT32_PTR logical[gcdNUM_RECORDS];
++ gctUINT32 bytes;
++ gcsSTATE_MAP_PTR map;
++ gctUINT32 stateCount;
++}
++gcsMIRROR;
++
++typedef struct _gcsDELTA
++{
++ gctUINT64 commitStamp;
++ gctUINT32_PTR command;
++ gctUINT32 commandBytes;
++ gctUINT32_PTR context;
++ gctUINT32 contextBytes;
++}
++gcsDELTA;
++
++typedef struct _gcsRECORDER
++{
++ gckOS os;
++ gcsMIRROR mirror;
++ gcsDELTA deltas[gcdNUM_RECORDS];
++
++ /* Index of current record. */
++ gctUINT index;
++
++ /* Number of records. */
++ gctUINT num;
++
++ /* Plugin used by gckPARSER. */
++ gcsPARSER_HANDLER recorderHandler;
++ gckPARSER parser;
++}
++gcsRECORDER;
++
++
++/******************************************************************************\
++***************************** Command Buffer Parser ****************************
++\******************************************************************************/
++
++/*
++** The command buffer parser checks the command buffer from the FE's point of view
++** to make sure there is no format error.
++**
++** The parser provides a callback mechanism, so plug-ins can be added to implement
++** other functions.
++*/
++
++static void
++_HandleLoadState(
++ IN OUT gckPARSER Parser
++ )
++{
++ gctUINT i;
++ gctUINT32_PTR data = (gctUINT32_PTR)Parser->currentCmdBufferAddr;
++ gctUINT32 cmdAddr = Parser->cmdAddr;
++
++ if (Parser->commandHandler == gcvNULL
++ || Parser->commandHandler->cmd != 0x01
++ )
++ {
++ /* No handler for this command. */
++ return;
++ }
++
++ for (i = 0; i < Parser->cmdSize; i++)
++ {
++ Parser->commandHandler->function(Parser->commandHandler, cmdAddr, *data);
++
++ /* Advance to next state. */
++ cmdAddr++;
++ data++;
++ }
++}
++
++static void
++_GetCommand(
++ IN OUT gckPARSER Parser
++ )
++{
++ gctUINT32 * buffer = (gctUINT32 *)Parser->currentCmdBufferAddr;
++
++ gctUINT16 cmdRectCount;
++ gctUINT16 cmdDataCount;
++
++ Parser->hi = buffer[0];
++ Parser->lo = buffer[1];
++
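++    /* The opcode is stored in bits 31:27 of the first command word. */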
++ Parser->cmdOpcode = (((((gctUINT32) (Parser->hi)) >> (0 ? 31:27)) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) );
++ Parser->cmdRectCount = 1;
++
++ switch (Parser->cmdOpcode)
++ {
++ case 0x01:
++ /* Extract count. */
++ Parser->cmdSize = (((((gctUINT32) (Parser->hi)) >> (0 ? 25:16)) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1)))))) );
++ if (Parser->cmdSize == 0)
++ {
++ /* 0 means 1024. */
++ Parser->cmdSize = 1024;
++ }
++ Parser->skip = (Parser->cmdSize & 0x1) ? 0 : 1;
++
++ /* Extract address. */
++ Parser->cmdAddr = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) );
++
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 4;
++ Parser->skipCount = Parser->cmdSize + Parser->skip;
++ break;
++
++ case 0x05:
++ Parser->cmdSize = 4;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x06:
++ Parser->cmdSize = 5;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x0C:
++ Parser->cmdSize = 3;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x09:
++ Parser->cmdSize = 2;
++ Parser->cmdAddr = 0x0F16;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x04:
++ Parser->cmdSize = 1;
++ Parser->cmdAddr = 0x0F06;
++
++ cmdRectCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ cmdDataCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 26:16)) & ((gctUINT32) ((((1 ? 26:16) - (0 ? 26:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:16) - (0 ? 26:16) + 1)))))) );
++
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2)
++ + cmdRectCount * 2
++ + gcmALIGN(cmdDataCount, 2);
++
++ Parser->cmdRectCount = cmdRectCount;
++ break;
++
++ case 0x03:
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
++ Parser->skipCount = 0;
++ break;
++
++ case 0x02:
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
++ Parser->skipCount = 0;
++ break;
++
++ default:
++ /* Unknown command is a risk. */
++ Parser->allow = gcvFALSE;
++ break;
++ }
++}
++
++static void
++_ParseCommand(
++ IN OUT gckPARSER Parser
++ )
++{
++ switch(Parser->cmdOpcode)
++ {
++ case 0x01:
++ _HandleLoadState(Parser);
++ break;
++ case 0x05:
++ case 0x06:
++ case 0x0C:
++ break;
++ case 0x04:
++ break;
++ default:
++ break;
++ }
++
++ /* Advance to next command. */
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr
++ + (Parser->skipCount << 2);
++}
++
++gceSTATUS
++gckPARSER_Parse(
++ IN gckPARSER Parser,
++ IN gctUINT8_PTR Buffer,
++ IN gctUINT32 Bytes
++ )
++{
++ gckPARSER parser = Parser;
++ gctUINT8_PTR end = (gctUINT8_PTR)Buffer + Bytes;
++
++ /* Initialize parser. */
++ parser->currentCmdBufferAddr = (gctUINT8_PTR)Buffer;
++ parser->skip = 0;
++ parser->allow = gcvTRUE;
++
++    /* Go through the command buffer until the end is reached
++    ** or an error is encountered. */
++ do
++ {
++ _GetCommand(parser);
++
++ _ParseCommand(parser);
++ }
++ while ((parser->currentCmdBufferAddr < end) && (parser->allow == gcvTRUE));
++
++ if (parser->allow == gcvFALSE)
++ {
++ /* Error detected. */
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckPARSER_RegisterCommandHandler
++**
++** Register a command handler which will be called when parser get a command.
++**
++*/
++gceSTATUS
++gckPARSER_RegisterCommandHandler(
++ IN gckPARSER Parser,
++ IN gckPARSER_HANDLER Handler
++ )
++{
++ Parser->commandHandler = Handler;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckPARSER_Construct(
++ IN gckOS Os,
++ IN gckPARSER_HANDLER Handler,
++ OUT gckPARSER * Parser
++ )
++{
++ gceSTATUS status;
++ gckPARSER pointer;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsPARSER), (gctPOINTER *)&pointer));
++
++    /* Put it here temporarily; there should be a more general plug-in mechanism. */
++ pointer->commandHandler = Handler;
++
++ *Parser = pointer;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++void
++gckPARSER_Destroy(
++ IN gckOS Os,
++ IN gckPARSER Parser
++ )
++{
++ gcmkOS_SAFE_FREE(Os, Parser);
++}
++
++/******************************************************************************\
++**************************** Hardware States Recorder **************************
++\******************************************************************************/
++
++static void
++_RecodeState(
++ IN gckPARSER_HANDLER Handler,
++ IN gctUINT32 Addr,
++ IN gctUINT32 Data
++ )
++{
++ gcmkVERIFY_OK(gckRECORDER_UpdateMirror(Handler->private, Addr, Data));
++}
++
++static gctUINT
++_Previous(
++ IN gctUINT Index
++ )
++{
++ if (Index == 0)
++ {
++ return gcdNUM_RECORDS - 1;
++ }
++
++ return Index - 1;
++}
++
++static gctUINT
++_Next(
++ IN gctUINT Index
++ )
++{
++ return (Index + 1) % gcdNUM_RECORDS;
++}
++
++gceSTATUS
++gckRECORDER_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ OUT gckRECORDER * Recorder
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gckRECORDER recorder = gcvNULL;
++ gctUINT32 mapSize;
++ gctUINT i;
++ gctBOOL virtualCommandBuffer = Hardware->kernel->virtualCommandBuffer;
++
++    /* TODO: We only need the context buffer and the state map; it should be possible to get
++    ** them without constructing a new context.
++    ** For now the context is leaked, since we can't free it when the command buffer is gone.
++ */
++
++ /* MMU is not ready now. */
++ Hardware->kernel->virtualCommandBuffer = gcvFALSE;
++
++ gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, 0, &context));
++
++ /* Restore. */
++ Hardware->kernel->virtualCommandBuffer = virtualCommandBuffer;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsRECORDER), (gctPOINTER *)&recorder));
++
++ gckOS_ZeroMemory(recorder, gcmSIZEOF(gcsRECORDER));
++
++ /* Copy state map. */
++ recorder->mirror.stateCount = context->stateCount;
++
++ mapSize = context->stateCount * gcmSIZEOF(gcsSTATE_MAP);
++
++ gcmkONERROR(gckOS_Allocate(Os, mapSize, (gctPOINTER *)&recorder->mirror.map));
++
++ gckOS_MemCopy(recorder->mirror.map, context->map, mapSize);
++
++ /* Copy context buffer. */
++ recorder->mirror.bytes = context->totalSize;
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->mirror.logical[i]));
++ gckOS_MemCopy(recorder->mirror.logical[i], context->buffer->logical, context->totalSize);
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ /* TODO : Optimize size. */
++ gcmkONERROR(gckOS_Allocate(Os, gcdCMD_BUFFER_SIZE, (gctPOINTER *)&recorder->deltas[i].command));
++ gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->deltas[i].context));
++ }
++
++ recorder->index = 0;
++ recorder->num = 0;
++
++ /* Initialize Parser plugin. */
++ recorder->recorderHandler.cmd = 0x01;
++ recorder->recorderHandler.private = recorder;
++ recorder->recorderHandler.function = _RecodeState;
++
++ gcmkONERROR(gckPARSER_Construct(Os, &recorder->recorderHandler, &recorder->parser));
++
++ recorder->os = Os;
++
++ *Recorder = recorder;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (recorder)
++ {
++ gckRECORDER_Destory(Os, recorder);
++ }
++
++ return status;
++}
++
++gceSTATUS
++gckRECORDER_Destory(
++ IN gckOS Os,
++ IN gckRECORDER Recorder
++ )
++{
++ gctUINT i;
++
++ if (Recorder->mirror.map)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->mirror.map);
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ if (Recorder->mirror.logical[i])
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->mirror.logical[i]);
++ }
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ if (Recorder->deltas[i].command)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].command);
++ }
++
++ if (Recorder->deltas[i].context)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].context);
++ }
++ }
++
++ if (Recorder->parser)
++ {
++ gckPARSER_Destroy(Os, Recorder->parser);
++ }
++
++ gcmkOS_SAFE_FREE(Os, Recorder);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckRECORDER_UpdateMirror(
++ IN gckRECORDER Recorder,
++ IN gctUINT32 State,
++ IN gctUINT32 Data
++ )
++{
++ gctUINT32 index;
++ gcsSTATE_MAP_PTR map = Recorder->mirror.map;
++ gctUINT32_PTR buffer = Recorder->mirror.logical[Recorder->index];
++
++ if (State >= Recorder->mirror.stateCount)
++ {
++ /* Ignore them just like HW does. */
++ return gcvSTATUS_OK;
++ }
++
++ index = map[State].index;
++
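++    /* An index of 0 means this state is not mapped into the context buffer,
++    ** so there is nothing to mirror for it. */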
++ if (index)
++ {
++ buffer[index] = Data;
++ }
++
++ return gcvSTATUS_OK;
++}
++
++void
++gckRECORDER_AdvanceIndex(
++ IN gckRECORDER Recorder,
++ IN gctUINT64 CommitStamp
++ )
++{
++ /* Get next record. */
++ gctUINT next = (Recorder->index + 1) % gcdNUM_RECORDS;
++
++ /* Record stamp of this commit. */
++ Recorder->deltas[Recorder->index].commitStamp = CommitStamp;
++
++ /* Mirror of next record is mirror of this record and delta in next record. */
++ gckOS_MemCopy(Recorder->mirror.logical[next],
++ Recorder->mirror.logical[Recorder->index], Recorder->mirror.bytes);
++
++ /* Advance to next record. */
++ Recorder->index = next;
++
++ Recorder->num = gcmMIN(Recorder->num + 1, gcdNUM_RECORDS - 1);
++
++
++ /* Reset delta. */
++ Recorder->deltas[Recorder->index].commandBytes = 0;
++ Recorder->deltas[Recorder->index].contextBytes = 0;
++}
++
++void
++gckRECORDER_Record(
++ IN gckRECORDER Recorder,
++ IN gctUINT8_PTR CommandBuffer,
++ IN gctUINT32 CommandBytes,
++ IN gctUINT8_PTR ContextBuffer,
++ IN gctUINT32 ContextBytes
++ )
++{
++ gcsDELTA * delta = &Recorder->deltas[Recorder->index];
++
++ if (CommandBytes != 0xFFFFFFFF)
++ {
++ gckPARSER_Parse(Recorder->parser, CommandBuffer, CommandBytes);
++ gckOS_MemCopy(delta->command, CommandBuffer, CommandBytes);
++ delta->commandBytes = CommandBytes;
++ }
++
++ if (ContextBytes != 0xFFFFFFFF)
++ {
++ gckPARSER_Parse(Recorder->parser, ContextBuffer, ContextBytes);
++ gckOS_MemCopy(delta->context, ContextBuffer, ContextBytes);
++ delta->contextBytes = ContextBytes;
++ }
++}
++
++void
++gckRECORDER_Dump(
++ IN gckRECORDER Recorder
++ )
++{
++ gctUINT last = Recorder->index;
++ gctUINT previous;
++ gctUINT i;
++ gcsMIRROR *mirror = &Recorder->mirror;
++ gcsDELTA *delta;
++ gckOS os = Recorder->os;
++
++ for (i = 0; i < Recorder->num; i++)
++ {
++ last = _Previous(last);
++ }
++
++ for (i = 0; i < Recorder->num; i++)
++ {
++ delta = &Recorder->deltas[last];
++
++ /* Dump record */
++ gcmkPRINT("#[commit %llu]", delta->commitStamp);
++
++ if (delta->commitStamp)
++ {
++ previous = _Previous(last);
++
++ gcmkPRINT("#[mirror]");
++ gckOS_DumpBuffer(os, mirror->logical[previous], mirror->bytes, gceDUMP_BUFFER_CONTEXT, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++ }
++
++ if (delta->contextBytes)
++ {
++ gckOS_DumpBuffer(os, delta->context, delta->contextBytes, gceDUMP_BUFFER_CONTEXT, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++ }
++
++ gckOS_DumpBuffer(os, delta->command, delta->commandBytes, gceDUMP_BUFFER_USER, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++
++ last = _Next(last);
++ }
++}
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_InitializeInfo
++**
++** Initialize architecture dependent command buffer information.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ /* Reset interrupts. */
++ Command->info.feBufferInt = -1;
++ Command->info.tsOverflowInt = -1;
++
++ /* Set command buffer attributes. */
++ Command->info.addressAlignment = 64;
++ Command->info.commandAlignment = 8;
++
++ /* Determine command alignment address mask. */
++ Command->info.addressMask = ((((gctUINT32) (Command->info.addressAlignment - 1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0 ) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Query the number of bytes needed by the STATE command. */
++ gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ Command, 0x0, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.stateCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RESTART command. */
++ gcmkERR_BREAK(gckVGCOMMAND_RestartCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.restartCommandSize
++ ));
++
++ /* Query the number of bytes needed by the FETCH command. */
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.fetchCommandSize
++ ));
++
++ /* Query the number of bytes needed by the CALL command. */
++ gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.callCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RETURN command. */
++ gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++ Command, gcvNULL,
++ &Command->info.returnCommandSize
++ ));
++
++ /* Query the number of bytes needed by the EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, gcvNULL, gcvBLOCK_PIXEL, -1,
++ &Command->info.eventCommandSize
++ ));
++
++ /* Query the number of bytes needed by the END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command, gcvNULL, -1,
++ &Command->info.endCommandSize
++ ));
++
++ /* Determine the tail reserve size. */
++ Command->info.staticTailSize = gcmMAX(
++ Command->info.fetchCommandSize,
++ gcmMAX(
++ Command->info.returnCommandSize,
++ Command->info.endCommandSize
++ )
++ );
++
++ /* Determine the maximum tail size. */
++ Command->info.dynamicTailSize
++ = Command->info.staticTailSize
++ + Command->info.eventCommandSize * gcvBLOCK_COUNT;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_StateCommand
++**
++** Append a STATE command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctUINT32 Pipe
++**      Hardware destination pipe.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** STATE command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 Address
++** Starting register address of the state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT32 Count
++** Number of states in state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the STATE command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the STATE command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Pipe=0x%x Logical=0x%x Address=0x%x Count=0x%x Bytes = 0x%x",
++ Command, Pipe, Logical, Address, Count, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append STATE. */
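++            /* STATE layout: opcode 0x3 in bits 31:28, register address in
++            ** bits 11:0, state count in bits 27:16, destination pipe in
++            ** bits 13:12. */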
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) ((gctUINT32) (Pipe) & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LOAD_STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_RestartCommand
++**
++** Form a RESTART command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RESTART command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this RESTART
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this RESTART command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RESTART command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RESTART command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++ gctUINT32 beginEndMark;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Determine Begin/End flag. */
++ beginEndMark = (FetchCount > 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)));
++
++ /* Append RESTART. */
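++            /* RESTART layout: opcode 0x9 in bits 31:28, fetch count in
++            ** bits 20:0, begin/end flag in bit 24; the second word holds
++            ** the fetch address. */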
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x9 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)))
++ | beginEndMark;
++
++ buffer[1]
++ = FetchAddress;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RESTART command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_FetchCommand
++**
++** Form a FETCH command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** FETCH command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this FETCH
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this FETCH command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the FETCH command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the FETCH command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append FETCH. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x5 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the FETCH command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LINK. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_CallCommand
++**
++** Append a CALL command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** CALL command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this CALL
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this CALL command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the CALL command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the CALL command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append CALL. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x6 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CALL command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_ReturnCommand
++**
++** Append a RETURN command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to an gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RETURN command at or gcvNULL to query the size of the command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RETURN command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RETURN command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Bytes = 0x%x",
++ Command, Logical, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append RETURN. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x7 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RETURN command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EventCommand
++**
++** Form an EVENT command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** EVENT command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gceBLOCK Block
++** Block that will generate the interrupt.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++**      for the EVENT command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Block=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, Block, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ typedef struct _gcsEVENTSTATES
++ {
++ /* Chips before VG21 use these values. */
++ gctUINT eventFromFE;
++ gctUINT eventFromPE;
++
++ /* VG21 chips and later use SOURCE field. */
++ gctUINT eventSource;
++ }
++ gcsEVENTSTATES;
++
++ static gcsEVENTSTATES states[] =
++ {
++ /* gcvBLOCK_COMMAND */
++ {
++ (gctUINT)~0,
++ (gctUINT)~0,
++ (gctUINT)~0
++ },
++
++ /* gcvBLOCK_TESSELLATOR */
++ {
++ 0x0,
++ 0x1,
++ 0x10
++ },
++
++ /* gcvBLOCK_TESSELLATOR2 */
++ {
++ 0x0,
++ 0x1,
++ 0x12
++ },
++
++ /* gcvBLOCK_TESSELLATOR3 */
++ {
++ 0x0,
++ 0x1,
++ 0x14
++ },
++
++ /* gcvBLOCK_RASTER */
++ {
++ 0x0,
++ 0x1,
++ 0x07,
++ },
++
++ /* gcvBLOCK_VG */
++ {
++ 0x0,
++ 0x1,
++ 0x0F
++ },
++
++ /* gcvBLOCK_VG2 */
++ {
++ 0x0,
++ 0x1,
++ 0x11
++ },
++
++ /* gcvBLOCK_VG3 */
++ {
++ 0x0,
++ 0x1,
++ 0x13
++ },
++
++ /* gcvBLOCK_PIXEL */
++ {
++ 0x0,
++ 0x1,
++ 0x07
++ },
++ };
++
++ /* Verify block ID. */
++ gcmkVERIFY_ARGUMENT(gcmIS_VALID_INDEX(Block, states));
++
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++
++ /* Determine chip version. */
++ if (Command->vg21)
++ {
++ /* Get the event source for the block. */
++ gctUINT eventSource = states[Block].eventSource;
++
++ /* Supported? */
++ if (eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) ((gctUINT32) (eventSource) & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ else
++ {
++ /* Get the event source for the block. */
++ gctUINT eventFromFE = states[Block].eventFromFE;
++ gctUINT eventFromPE = states[Block].eventFromPE;
++
++ /* Supported? */
++ if (eventFromFE == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (eventFromFE) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (eventFromPE) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Make sure the events are directly supported for the block. */
++ if (states[Block].eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++            /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine event source. */
++ if (Block == gcvBLOCK_COMMAND)
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++ else
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++            /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EndCommand
++**
++** Form an END command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** END command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append END. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR memory;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ memory = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ memory[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ memory[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append END. */
++ memory[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 16;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,319 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_command_vg_h_
++#define __gc_hal_kernel_hardware_command_vg_h_
++
++/******************************************************************************\
++******************* Task and Interrupt Management Structures. ******************
++\******************************************************************************/
++
++/* Task storage header. */
++typedef struct _gcsTASK_STORAGE * gcsTASK_STORAGE_PTR;
++typedef struct _gcsTASK_STORAGE
++{
++ /* Next allocated storage buffer. */
++ gcsTASK_STORAGE_PTR next;
++}
++gcsTASK_STORAGE;
++
++/* Task container header. */
++typedef struct _gcsTASK_CONTAINER * gcsTASK_CONTAINER_PTR;
++typedef struct _gcsTASK_CONTAINER
++{
++ /* The number of tasks left to be processed in the container. */
++ gctINT referenceCount;
++
++ /* Size of the buffer. */
++ gctUINT size;
++
++ /* Link to the previous and the next allocated containers. */
++ gcsTASK_CONTAINER_PTR allocPrev;
++ gcsTASK_CONTAINER_PTR allocNext;
++
++ /* Link to the previous and the next containers in the free list. */
++ gcsTASK_CONTAINER_PTR freePrev;
++ gcsTASK_CONTAINER_PTR freeNext;
++}
++gcsTASK_CONTAINER;
++
++/* Kernel space task master table entry. */
++typedef struct _gcsBLOCK_TASK_ENTRY * gcsBLOCK_TASK_ENTRY_PTR;
++typedef struct _gcsBLOCK_TASK_ENTRY
++{
++ /* Pointer to the current task container for the block. */
++ gcsTASK_CONTAINER_PTR container;
++
++ /* Pointer to the current task data within the container. */
++ gcsTASK_HEADER_PTR task;
++
++ /* Pointer to the last link task within the container. */
++ gcsTASK_LINK_PTR link;
++
++ /* Number of interrupts allocated for this block. */
++ gctUINT interruptCount;
++
++ /* The index of the current interrupt. */
++ gctUINT interruptIndex;
++
++ /* Interrupt semaphore. */
++ gctSEMAPHORE interruptSemaphore;
++
++ /* Interrupt value array. */
++ gctINT32 interruptArray[32];
++}
++gcsBLOCK_TASK_ENTRY;
++
++
++/******************************************************************************\
++********************* Command Queue Management Structures. *********************
++\******************************************************************************/
++
++/* Command queue kernel element pointer. */
++typedef struct _gcsKERNEL_CMDQUEUE * gcsKERNEL_CMDQUEUE_PTR;
++
++/* Command queue object handler function type. */
++typedef gceSTATUS (* gctOBJECT_HANDLER) (
++ gckVGKERNEL Kernel,
++ gcsKERNEL_CMDQUEUE_PTR Entry
++ );
++
++/* Command queue kernel element. */
++typedef struct _gcsKERNEL_CMDQUEUE
++{
++    /* Pointer to the command buffer for this queue entry. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Pointer to the object handler function. */
++ gctOBJECT_HANDLER handler;
++}
++gcsKERNEL_CMDQUEUE;
++
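++/* Usage sketch (illustrative): a handler stored in a gcsKERNEL_CMDQUEUE
++** entry must match the gctOBJECT_HANDLER signature above; a hypothetical
++** no-op handler would look like:
++**
++**     static gceSTATUS
++**     _ExampleHandler(
++**         gckVGKERNEL Kernel,
++**         gcsKERNEL_CMDQUEUE_PTR Entry
++**         )
++**     {
++**         return gcvSTATUS_OK;
++**     }
++**
++** and is installed by assigning it to the 'handler' field of the entry.
++*/
++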
++/* Command queue header. */
++typedef struct _gcsKERNEL_QUEUE_HEADER * gcsKERNEL_QUEUE_HEADER_PTR;
++typedef struct _gcsKERNEL_QUEUE_HEADER
++{
++ /* The size of the buffer in bytes. */
++ gctUINT size;
++
++ /* The number of pending entries to be processed. */
++ volatile gctUINT pending;
++
++ /* The current command queue entry. */
++ gcsKERNEL_CMDQUEUE_PTR currentEntry;
++
++ /* Next buffer. */
++ gcsKERNEL_QUEUE_HEADER_PTR next;
++}
++gcsKERNEL_QUEUE_HEADER;
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++/* gckVGCOMMAND object. */
++struct _gckVGCOMMAND
++{
++ /***************************************************************************
++ ** Object data and pointers.
++ */
++
++ gcsOBJECT object;
++ gckVGKERNEL kernel;
++ gckOS os;
++ gckVGHARDWARE hardware;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++
++ /***************************************************************************
++ ** Enable command queue dumping.
++ */
++
++ gctBOOL enableDumping;
++
++
++ /***************************************************************************
++ ** Bus Error interrupt.
++ */
++
++ gctINT32 busErrorInt;
++
++
++ /***************************************************************************
++ ** Command buffer information.
++ */
++
++ gcsCOMMAND_BUFFER_INFO info;
++
++
++ /***************************************************************************
++ ** Synchronization objects.
++ */
++
++ gctPOINTER queueMutex;
++ gctPOINTER taskMutex;
++ gctPOINTER commitMutex;
++
++
++ /***************************************************************************
++ ** Task management.
++ */
++
++ /* The head of the storage buffer linked list. */
++ gcsTASK_STORAGE_PTR taskStorage;
++
++ /* Allocation size. */
++ gctUINT taskStorageGranularity;
++ gctUINT taskStorageUsable;
++
++ /* The free container list. */
++ gcsTASK_CONTAINER_PTR taskFreeHead;
++ gcsTASK_CONTAINER_PTR taskFreeTail;
++
++ /* Task table */
++ gcsBLOCK_TASK_ENTRY taskTable[gcvBLOCK_COUNT];
++
++
++ /***************************************************************************
++ ** Command queue.
++ */
++
++ /* Pointer to the allocated queue memory. */
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++
++ /* Pointer to the current available queue from which new queue entries
++ will be allocated. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* If different from queueHead, points to the command queue which is
++ currently being executed by the hardware. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++
++ /* Points to the queue to merge the tail with when the tail is processed. */
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++
++ /* Queue overflow counter. */
++ gctUINT queueOverflow;
++
++
++ /***************************************************************************
++ ** Context.
++ */
++
++ /* Context counter used for unique ID. */
++ gctUINT64 contextCounter;
++
++ /* Current context ID. */
++ gctUINT64 currentContext;
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++ gctINT32 powerStallInt;
++ gcsCMDBUFFER_PTR powerStallBuffer;
++ gctSIGNAL powerStallSignal;
++
++};
++
++/******************************************************************************\
++************************ gckVGCOMMAND Object Internal API. ***********************
++\******************************************************************************/
++
++/* Initialize architecture dependent command buffer information. */
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ );
++
++/* Form a STATE command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a RESTART command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a FETCH command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a CALL command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a RETURN command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form an EVENT command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form an END command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#endif /* __gc_hal_kernel_hardware_command_vg_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,2119 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++ gcvPOWER_FLAG_NOP = 1 << 11,
++}
++gcePOWER_FLAGS;
++
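++/* Note (illustrative): the values above are single-bit flags, so a power
++** transition is described by OR-ing the required steps into one mask, for
++** example a hypothetical
++**
++**     gctUINT32 flags = gcvPOWER_FLAG_ACQUIRE | gcvPOWER_FLAG_STALL;
++**
++** requests both the acquire and the stall steps at once.
++*/
++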
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckOS Os
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ /* Read register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00104,
++ 0x00000000));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32 * ChipFeatures,
++ OUT gctUINT32 * ChipMinorFeatures,
++ OUT gctUINT32 * ChipMinorFeatures2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 chipIdentity;
++
++ do
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG, 0x00018, &chipIdentity));
++
++ /* Special case for older graphic cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ *ChipModel = gcv500;
++ *ChipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00020,
++ (gctUINT32 *) ChipModel));
++
++ /* Read CHIP_REV register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00024,
++ ChipRevision));
++ }
++
++ /* Read chip feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x0001C, ChipFeatures
++ ));
++
++ /* Read chip minor feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00034, ChipMinorFeatures
++ ));
++
++ /* Read chip minor feature register #2. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00074, ChipMinorFeatures2
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_VERBOSE,
++ "ChipModel=0x%08X\n"
++ "ChipRevision=0x%08X\n"
++ "ChipFeatures=0x%08X\n"
++ "ChipMinorFeatures=0x%08X\n"
++ "ChipMinorFeatures2=0x%08X\n",
++ *ChipModel,
++ *ChipRevision,
++ *ChipFeatures,
++ *ChipMinorFeatures,
++ *ChipMinorFeatures2
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_VGPowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckVGHARDWARE hardware = (gckVGHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++/******************************************************************************\
++****************************** gckVGHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Construct
++**
++** Construct a new gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** OUTPUT:
++**
++** gckVGHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckVGHARDWARE
++** object.
++*/
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ )
++{
++ gckVGHARDWARE hardware = gcvNULL;
++ gceSTATUS status;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x ", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ status = _ResetGPU(Os);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ /* Identify the hardware. */
++ gcmkERR_BREAK(_IdentifyHardware(Os,
++ &chipModel, &chipRevision,
++ &chipFeatures, &chipMinorFeatures, &chipMinorFeatures2
++ ));
++
++ /* Allocate the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckVGHARDWARE), (gctPOINTER *) &hardware
++ ));
++
++ /* Initialize the gckVGHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++
++ /* Set chip identity. */
++ hardware->chipModel = chipModel;
++ hardware->chipRevision = chipRevision;
++ hardware->chipFeatures = chipFeatures;
++ hardware->chipMinorFeatures = chipMinorFeatures;
++ hardware->chipMinorFeatures2 = chipMinorFeatures2;
++
++ hardware->powerMutex = gcvNULL;
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->chipPowerStateGlobal = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTime = 0;
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _VGPowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ /* Determine whether FE 2.0 is present. */
++ hardware->fe20 = ((((gctUINT32) (hardware->chipFeatures)) >> (0 ? 28:28) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))));
++
++ /* Determine whether VG 2.0 is present. */
++ hardware->vg20 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 13:13) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))));
++
++ /* Determine whether VG 2.1 is present. */
++ hardware->vg21 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++
++ /* Set default event mask. */
++ hardware->eventMask = 0xFFFFFFFF;
++
++ gcmkERR_BREAK(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++ /* Set fast clear to auto. */
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(hardware, -1));
++
++ gcmkERR_BREAK(gckOS_CreateMutex(Os, &hardware->powerMutex));
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Return pointer to the gckVGHARDWARE object. */
++ *Hardware = hardware;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++#if gcdPOWEROFF_TIMEOUT
++    if (hardware != gcvNULL && hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvFALSE, gcvFALSE));
++
++ if (hardware != gcvNULL && hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Free(Os, hardware));
++ }
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Destroy
++**
++**  Destroy a gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ if (Hardware->powerMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(
++ Hardware->os, Hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ if (Hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++ }
++
++ /* Free the object. */
++ status = gckOS_Free(Hardware->os, Hardware);
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++**      Pointer to a variable that will hold the alignment requirement for
++**      the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++**      Pointer to a variable that will hold the alignment requirement for
++**      the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++    gcmkHEADER_ARG("Hardware=0x%x InternalSize=0x%x InternalBaseAddress=0x%x InternalAlignment=0x%x "
++ "ExternalSize=0x%x ExternalBaseAddress=0x%x ExternalAlignment=0x%x HorizontalTileSize=0x%x VerticalTileSize=0x%x",
++ Hardware, InternalSize, InternalBaseAddress, InternalAlignment,
++ ExternalSize, ExternalBaseAddress, ExternalAlignment, HorizontalTileSize, VerticalTileSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gceCHIPMODEL * ChipModel
++** If 'ChipModel' is not gcvNULL, the variable it points to will
++** receive the model of the chip.
++**
++** gctUINT32 * ChipRevision
++** If 'ChipRevision' is not gcvNULL, the variable it points to will
++** receive the revision of the chip.
++**
++** gctUINT32 * ChipFeatures
++** If 'ChipFeatures' is not gcvNULL, the variable it points to will
++** receive the feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures
++** If 'ChipMinorFeatures' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures2
++** If 'ChipMinorFeatures2' is not gcvNULL, the variable it points to
++**      will receive the second minor feature set of the chip.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures2
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x ChipModel=0x%x ChipRevision=0x%x ChipFeatures = 0x%x ChipMinorFeatures = 0x%x ChipMinorFeatures2 = 0x%x",
++ Hardware, ChipModel, ChipRevision, ChipFeatures, ChipMinorFeatures, ChipMinorFeatures2);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Return chip model. */
++ if (ChipModel != gcvNULL)
++ {
++ *ChipModel = Hardware->chipModel;
++ }
++
++ /* Return revision number. */
++ if (ChipRevision != gcvNULL)
++ {
++ *ChipRevision = Hardware->chipRevision;
++ }
++
++ /* Return feature set. */
++ if (ChipFeatures != gcvNULL)
++ {
++ gctUINT32 features = Hardware->chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 since it did not have this *\
++ \* bit. */
++ if ((Hardware->chipModel == gcv500)
++ && (Hardware->chipRevision == 0)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ /* Mark 2D pipe as available for GC300 since it did not have this *\
++ \* bit. */
++ if (Hardware->chipModel == gcv300)
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ *ChipFeatures = features;
++ }
++
++ /* Return minor feature set. */
++ if (ChipMinorFeatures != gcvNULL)
++ {
++ *ChipMinorFeatures = Hardware->chipMinorFeatures;
++ }
++
++ /* Return minor feature set #2. */
++ if (ChipMinorFeatures2 != gcvNULL)
++ {
++ *ChipMinorFeatures2 = Hardware->chipMinorFeatures2;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertFormat
++**
++** Convert an API format to hardware parameters.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gceSURF_FORMAT Format
++** API format to convert.
++**
++** OUTPUT:
++**
++** gctUINT32 * BitsPerPixel
++** Pointer to a variable that will hold the number of bits per pixel.
++**
++** gctUINT32 * BytesPerTile
++** Pointer to a variable that will hold the number of bytes per tile.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ )
++{
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bytesPerTile;
++
++ gcmkHEADER_ARG("Hardware=0x%x Format=0x%x BitsPerPixel=0x%x BytesPerTile = 0x%x",
++ Hardware, Format, BitsPerPixel, BytesPerTile);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Dispatch on format. */
++ switch (Format)
++ {
++ case gcvSURF_A1:
++ case gcvSURF_L1:
++ /* 1-bpp format. */
++ bitsPerPixel = 1;
++ bytesPerTile = (1 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_A4:
++ /* 4-bpp format. */
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_INDEX8:
++ case gcvSURF_A8:
++ case gcvSURF_L8:
++ /* 8-bpp format. */
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_YV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_NV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ /* 4444 variations. */
++ case gcvSURF_X4R4G4B4:
++ case gcvSURF_A4R4G4B4:
++ case gcvSURF_R4G4B4X4:
++ case gcvSURF_R4G4B4A4:
++ case gcvSURF_B4G4R4X4:
++ case gcvSURF_B4G4R4A4:
++ case gcvSURF_X4B4G4R4:
++ case gcvSURF_A4B4G4R4:
++
++ /* 1555 variations. */
++ case gcvSURF_X1R5G5B5:
++ case gcvSURF_A1R5G5B5:
++ case gcvSURF_R5G5B5X1:
++ case gcvSURF_R5G5B5A1:
++ case gcvSURF_X1B5G5R5:
++ case gcvSURF_A1B5G5R5:
++ case gcvSURF_B5G5R5X1:
++ case gcvSURF_B5G5R5A1:
++
++ /* 565 variations. */
++ case gcvSURF_R5G6B5:
++ case gcvSURF_B5G6R5:
++
++ case gcvSURF_A8L8:
++ case gcvSURF_YUY2:
++ case gcvSURF_UYVY:
++ case gcvSURF_D16:
++ /* 16-bpp format. */
++ bitsPerPixel = 16;
++ bytesPerTile = (16 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_X8R8G8B8:
++ case gcvSURF_A8R8G8B8:
++ case gcvSURF_X8B8G8R8:
++ case gcvSURF_A8B8G8R8:
++ case gcvSURF_R8G8B8X8:
++ case gcvSURF_R8G8B8A8:
++ case gcvSURF_B8G8R8X8:
++ case gcvSURF_B8G8R8A8:
++ case gcvSURF_D32:
++ /* 32-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_D24S8:
++ /* 24-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT1:
++ case gcvSURF_ETC1:
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT2:
++ case gcvSURF_DXT3:
++ case gcvSURF_DXT4:
++ case gcvSURF_DXT5:
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ default:
++ /* Invalid format. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Set the result. */
++ if (BitsPerPixel != gcvNULL)
++ {
++ * BitsPerPixel = bitsPerPixel;
++ }
++
++ if (BytesPerTile != gcvNULL)
++ {
++ * BytesPerTile = bytesPerTile;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
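++/* Note: every case above follows the same arithmetic -- a tile is 4x4
++** pixels, so bytesPerTile = (bitsPerPixel * 4 * 4) / 8.  For example a
++** 16-bpp format such as gcvSURF_R5G6B5 takes (16 * 16) / 8 = 32 bytes per
++** tile, while a 4-bpp compressed format such as gcvSURF_DXT1 takes
++** (4 * 16) / 8 = 8 bytes per tile.
++*/
++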
++/*******************************************************************************
++**
++** gckVGHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Pool=0x%x Offset = 0x%x",
++ Hardware, Address, Pool, Offset);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 1:0)) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x2:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = ((((gctUINT32) (Address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
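++/* Note (illustrative): the two low-order bits of a hardware address select
++** the pool and the remaining bits carry the offset.  For example a
++** hypothetical address 0x10000002 splits into *Pool = gcvPOOL_VIRTUAL
++** (type 0x2) and *Offset = 0x10000000.
++*/
++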
++/*******************************************************************************
++**
++** gckVGHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address of the command buffer.
++**
++** gctSIZE_T Count
++** Number of command-sized data units to be executed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Count=0x%x",
++ Hardware, Address, Count);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ /* Enable all events. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00014,
++ Hardware->eventMask
++ ));
++
++ if (Hardware->fe20)
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00500,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00504,
++ Count
++ ));
++ }
++ else
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00654,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00658,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
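++/* Usage sketch (illustrative): the 'Address' argument is a hardware
++** address, not a CPU pointer.  Assuming a prepared command buffer at the
++** placeholder 'logical' holding 'count' command-sized units:
++**
++**     gctUINT32 address;
++**
++**     gcmkONERROR(gckVGHARDWARE_ConvertLogical(
++**         hardware, logical, gcvFALSE, &address
++**         ));
++**
++**     gcmkONERROR(gckVGHARDWARE_Execute(hardware, address, count));
++*/
++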
++/*******************************************************************************
++**
++** gckVGHARDWARE_AlignToTile
++**
++** Align the specified width and height to tile boundaries.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++**      Pointer to a gckVGHARDWARE object.
++**
++** gceSURF_TYPE Type
++** Type of alignment.
++**
++** gctUINT32 * Width
++** Pointer to the width to be aligned. If 'Width' is gcvNULL, no width
++** will be aligned.
++**
++** gctUINT32 * Height
++** Pointer to the height to be aligned. If 'Height' is gcvNULL, no height
++** will be aligned.
++**
++** OUTPUT:
++**
++** gctUINT32 * Width
++** Pointer to a variable that will receive the aligned width.
++**
++** gctUINT32 * Height
++** Pointer to a variable that will receive the aligned height.
++*/
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32 * Width,
++ IN OUT gctUINT32 * Height
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Type=0x%x Width=0x%x Height=0x%x",
++ Hardware, Type, Width, Height);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Width != gcvNULL)
++ {
++ /* Align the width. */
++ *Width = gcmALIGN(*Width, (Type == gcvSURF_TEXTURE) ? 4 : 16);
++ }
++
++ if (Height != gcvNULL)
++ {
++ /* Special case for VG images. */
++ if ((*Height == 0) && (Type == gcvSURF_IMAGE))
++ {
++ *Height = 4;
++ }
++ else
++ {
++ /* Align the height. */
++ *Height = gcmALIGN(*Height, 4);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
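++/* Note: gcmALIGN rounds up to the next multiple, so for a non-texture
++** surface a width of 100 is aligned to 112 (next multiple of 16) while a
++** gcvSURF_TEXTURE width is only aligned to 4; heights are aligned to 4 in
++** either case.
++*/
++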
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++**      Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the memory in user space.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d Address=0x%x",
++ Hardware, Logical, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ /* Convert logical address into a physical address. */
++ if (InUserSpace)
++ {
++ gcmkERR_BREAK(gckOS_UserLogicalToPhysical(
++ Hardware->os, Logical, &address
++ ));
++ }
++ else
++ {
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(
++ Hardware->os, Logical, &address
++ ));
++ }
++
++ /* Return hardware specific address. */
++ *Address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++**  gckVGHARDWARE Hardware
++**      Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++**      Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x SystemSize=0x%x SystemBaseAddress=0x%x",
++ Hardware, SystemSize, SystemBaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = (gctSIZE_T)(1 << 31);
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x",
++ Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ do
++ {
++ /* Convert the logical address into a hardware address. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertLogical(Hardware, Logical,
++ gcvFALSE, &address));
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00400,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00404,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00408,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x0040C,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00410,
++ gcmkFIXADDRESS(address)));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT32_PTR buffer;
++
++ /* Create a shortcut to the command buffer object. */
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &commandBuffer, (gctPOINTER *) &buffer
++ ));
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckVGHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Index=0x%x Offset=0x%x Address=0x%x",
++ Hardware, Index, Offset, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ address = (Index << 12) | Offset;
++
++ /* Set virtual type. */
++ address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Set the result. */
++ *Address = address;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
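++
++/* Illustrative sketch, not part of the original driver code: a worked example
++** of the address layout produced by gckVGHARDWARE_BuildVirtualAddress. The
++** example function and the sample index/offset values are assumptions for
++** illustration only. */
++#if 0
++static gceSTATUS
++_ExampleBuildVirtualAddress(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gctUINT32 address;
++
++ /* address = (Index << 12) | Offset, with bits 1:0 then overwritten by the
++ ** virtual address type 0x2. For Index = 5 and Offset = 0x34:
++ ** (5 << 12) | 0x34 = 0x5034, and setting bits 1:0 to 0x2 gives 0x5036. */
++ return gckVGHARDWARE_BuildVirtualAddress(Hardware, 5, 0x34, &address);
++}
++#endif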
++
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x Data=0x%x", Hardware, Data);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* Read register and return. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, Data);
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ )
++{
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ if (!(((((gctUINT32) (Hardware->chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ if (Enable == -1)
++ {
++ Enable = (Hardware->chipModel > gcv500) ||
++ ((Hardware->chipModel == gcv500) && (Hardware->chipRevision >= 3));
++ }
++
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ &debug));
++
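++ /* Bit 20 of the 0x00414 debug register is set below when fast clear is
++ ** being disabled (Enable == 0). */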
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++#ifdef AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION)));
++#endif
++
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ debug));
++
++ Hardware->allowFastClear = Enable;
++
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x IDs=0x%x", Hardware, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IDs != gcvNULL);
++
++ /* Read AQIntrAcknowledge register. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00010,
++ IDs);
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS _CommandStall(
++ gckVGHARDWARE Hardware)
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gctUINT32_PTR buffer;
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &command->powerStallBuffer,
++ (gctPOINTER *) &buffer
++ ));
++
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ command, buffer, gcvBLOCK_PIXEL,
++ command->powerStallInt, gcvNULL));
++
++ gcmkERR_BREAK(gckVGCOMMAND_Execute(
++ command,
++ command->powerStallBuffer
++ ));
++
++ /* Wait for the signal. */
++ gcmkERR_BREAK(gckOS_WaitSignal(
++ command->os,
++ command->powerStallSignal,
++ command->kernel->kernel->timeOut));
++
++
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag/*, clock*/;
++
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL commitMutex = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++
++ gctBOOL broadcast = gcvFALSE;
++ gctUINT32 process, thread;
++ gctBOOL global = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_NOP,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_NOP,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
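++
++ /* The table above is indexed as flags[current state][requested state]
++ ** (see the lookup further down); the row is the state the chip is in
++ ** now and the column the state being requested. For example, going from
++ ** gcvPOWER_ON to gcvPOWER_OFF selects gcvPOWER_FLAG_ACQUIRE, _STALL,
++ ** _STOP, _POWER_OFF and _CLOCK_OFF. */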
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d",
++ State);
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Acquire the power mutex. */
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE)
++ {
++ /* gcvPOWER_IDLE_BROADCAST comes from the IST, so waiting here
++ ** would deadlock if the lock holder calls gckCOMMAND_Stall(). */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ /*clock = clocks[State];*/
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* internal power control */
++ if (!global)
++ {
++ if (Hardware->chipPowerStateGlobal == gcvPOWER_OFF)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os,
++ command->commitMutex,
++ gcvINFINITE
++ ));
++
++ commitMutex = gcvTRUE;
++
++ gcmkONERROR(_CommandStall(Hardware));
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++
++ /* Initialize the GPU here; to be replaced by InitializeHardware later. */
++ gcmkONERROR(gckVGHARDWARE_SetMMU(Hardware, Hardware->kernel->mmu->pageTableLogical));
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(Hardware, -1));
++
++ /* Force the command queue to reload the next context. */
++ command->currentContext = 0;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ gcvCORE_VG,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++ if (global)
++ {
++ /* Save the new power state. */
++ Hardware->chipPowerStateGlobal = State;
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOfftimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ if (commitMutex)
++ {
++ /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++ /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL PowerManagement
++** Power management state.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, &idle));
++
++ /* Pipe must be idle: bits 0 and 8..11 of the idle register must all read 1. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 8:8)) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 10:10)) & ((gctUINT32) ((((1 ? 10:10) - (0 ? 10:10) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 10:10) - (0 ? 10:10) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ *IsIdle = gcvTRUE;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
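++
++/* Illustrative usage sketch, not part of the original driver code: a caller
++** might combine gckVGHARDWARE_QueryIdle with
++** gckVGHARDWARE_SetPowerManagementState to suspend the VG core only once it
++** has drained all work. The example function and its minimal error handling
++** are assumptions for illustration only. */
++#if 0
++static gceSTATUS
++_ExampleSuspendWhenIdle(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctBOOL isIdle;
++
++ /* Check the idle register (and the current power state). */
++ gcmkONERROR(gckVGHARDWARE_QueryIdle(Hardware, &isIdle));
++
++ if (isIdle)
++ {
++ /* Request the SUSPEND state; the function handles the required
++ ** stall, semaphore and clock transitions internally. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Hardware, gcvPOWER_SUSPEND));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#endif
++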
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h 2015-05-01 14:57:59.563427001 -0500
+@@ -0,0 +1,74 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_vg_h_
++#define __gc_hal_kernel_hardware_vg_h_
++
++/* gckHARDWARE object. */
++struct _gckVGHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckVGKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Chip characteristics. */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++ gctBOOL allowFastClear;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++ /* Event mask. */
++ gctUINT32 eventMask;
++
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gceCHIPPOWERSTATE chipPowerStateGlobal;
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++ gctPOINTER pageTableDirty;
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctBOOL powerManagement;
++};
++
++#endif /* __gc_hal_kernel_hardware_vg_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.c 2015-05-01 14:57:59.567427001 -0500
+@@ -0,0 +1,5040 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#define _gcmTXT2STR(t) #t
++#define gcmTXT2STR(t) _gcmTXT2STR(t)
++const char * _VERSION = "\n\0$VERSION$"
++ gcmTXT2STR(gcvVERSION_MAJOR) "."
++ gcmTXT2STR(gcvVERSION_MINOR) "."
++ gcmTXT2STR(gcvVERSION_PATCH) ":"
++ gcmTXT2STR(gcvVERSION_BUILD) "$\n";
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define gcmDEFINE2TEXT(d) #d
++gctCONST_STRING _DispatchText[] =
++{
++ gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_RELEASE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_DATA),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_STALL),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS),
++ gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D),
++#if VIVANTE_PROFILER_PERDRAW
++ gcmDEFINE2TEXT(gcvHAL_READ_PROFILER_REGISTER_SETTING),
++#endif
++ gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS),
++ gcmDEFINE2TEXT(gcvHAL_SET_IDLE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_KERNEL_SETTINGS),
++ gcmDEFINE2TEXT(gcvHAL_RESET),
++ gcmDEFINE2TEXT(gcvHAL_MAP_PHYSICAL),
++ gcmDEFINE2TEXT(gcvHAL_DEBUG),
++ gcmDEFINE2TEXT(gcvHAL_CACHE),
++ gcmDEFINE2TEXT(gcvHAL_TIMESTAMP),
++ gcmDEFINE2TEXT(gcvHAL_DATABASE),
++ gcmDEFINE2TEXT(gcvHAL_VERSION),
++ gcmDEFINE2TEXT(gcvHAL_CHIP_INFO),
++ gcmDEFINE2TEXT(gcvHAL_ATTACH),
++ gcmDEFINE2TEXT(gcvHAL_DETACH),
++ gcmDEFINE2TEXT(gcvHAL_COMPOSE),
++ gcmDEFINE2TEXT(gcvHAL_SET_TIMEOUT),
++ gcmDEFINE2TEXT(gcvHAL_GET_FRAME_INFO),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_COMMAND_BUFFER),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT_DONE),
++ gcmDEFINE2TEXT(gcvHAL_DUMP_GPU_STATE),
++ gcmDEFINE2TEXT(gcvHAL_DUMP_EVENT),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER),
++ gcmDEFINE2TEXT(gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER),
++ gcmDEFINE2TEXT(gcvHAL_SET_FSCALE_VALUE),
++ gcmDEFINE2TEXT(gcvHAL_GET_FSCALE_VALUE),
++ gcmDEFINE2TEXT(gcvHAL_NAME_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_IMPORT_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_RESET_TIME_STAMP),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER_EX),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER_EX),
++ gcmDEFINE2TEXT(gcvHAL_SYNC_POINT),
++ gcmDEFINE2TEXT(gcvHAL_CREATE_NATIVE_FENCE),
++ gcmDEFINE2TEXT(gcvHAL_DESTROY_MMU),
++ gcmDEFINE2TEXT(gcvHAL_SHBUF),
++};
++#endif
++
++#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC
++void
++_MonitorTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckKERNEL kernel = (gckKERNEL)Data;
++ gctUINT32 pendingInterrupt;
++ gctBOOL reset = gcvFALSE;
++ gctUINT32 mask;
++ gctUINT32 advance = kernel->timeOut/2;
++
++#if gcdENABLE_VG
++ if (kernel->core == gcvCORE_VG)
++ {
++ return;
++ }
++#endif
++
++ if (kernel->monitorTimerStop)
++ {
++ /* Stop. */
++ return;
++ }
++
++ gckOS_AtomGet(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt);
++
++ if (kernel->monitoring == gcvFALSE)
++ {
++ if (pendingInterrupt)
++ {
++ /* Begin to monitor GPU state. */
++ kernel->monitoring = gcvTRUE;
++
++ /* Record current state. */
++ kernel->lastCommitStamp = kernel->eventObj->lastCommitStamp;
++ kernel->restoreAddress = kernel->hardware->lastWaitLink;
++ gcmkVERIFY_OK(gckOS_AtomGet(
++ kernel->os,
++ kernel->hardware->pendingEvent,
++ &kernel->restoreMask
++ ));
++
++ /* Clear timeout. */
++ kernel->timer = 0;
++ }
++ }
++ else
++ {
++ if (pendingInterrupt)
++ {
++ gcmkVERIFY_OK(gckOS_AtomGet(
++ kernel->os,
++ kernel->hardware->pendingEvent,
++ &mask
++ ));
++
++ if (kernel->eventObj->lastCommitStamp == kernel->lastCommitStamp
++ && kernel->hardware->lastWaitLink == kernel->restoreAddress
++ && mask == kernel->restoreMask
++ )
++ {
++ /* GPU state has not changed, accumulate timeout. */
++ kernel->timer += advance;
++
++ if (kernel->timer >= kernel->timeOut)
++ {
++ /* GPU stuck, trigger reset. */
++ reset = gcvTRUE;
++ }
++ }
++ else
++ {
++ /* GPU state changed, cancel current timeout.*/
++ kernel->monitoring = gcvFALSE;
++ }
++ }
++ else
++ {
++ /* GPU finished all jobs, cancel current timeout. */
++ kernel->monitoring = gcvFALSE;
++ }
++ }
++
++ if (reset)
++ {
++ gckKERNEL_Recovery(kernel);
++
++ /* Work in this timeout is done. */
++ kernel->monitoring = gcvFALSE;
++ }
++
++ gcmkVERIFY_OK(gckOS_StartTimer(kernel->os, kernel->monitorTimer, advance));
++}
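++
++/* The function above is a simple software watchdog: it re-arms itself every
++** timeOut/2, starts monitoring once interrupts are pending, and accumulates
++** 'timer' while the commit stamp, wait-link address and pending-event mask
++** stay unchanged. Recovery is triggered once the accumulated time reaches
++** timeOut, i.e. after roughly timeOut (at most about 1.5 * timeOut) with no
++** visible GPU progress. */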
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++_MapCommandBuffer(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++ gctUINT32 physical;
++ gckMMU mmu;
++
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Kernel->os,
++ Kernel->command->queues[i].logical,
++ &physical
++ ));
++
++ gcmkONERROR(gckMMU_FlatMapping(mmu, physical));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#endif
++
++void
++_DumpDriverConfigure(
++ IN gckKERNEL Kernel
++ )
++{
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU DRV CONFIG ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT("Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++
++ gckOS_DumpParam();
++}
++
++void
++_DumpState(
++ IN gckKERNEL Kernel
++ )
++{
++ /* Dump GPU Debug registers. */
++ gcmkVERIFY_OK(gckHARDWARE_DumpGPUState(Kernel->hardware));
++
++ if (Kernel->virtualCommandBuffer)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_DumpExecutingBuffer(Kernel->command));
++ }
++
++ /* Dump Pending event. */
++ gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj));
++
++ /* Dump Process DB. */
++ gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel));
++
++#if gcdRECORD_COMMAND
++ /* Dump record. */
++ gckRECORDER_Dump(Kernel->command->recorder);
++#endif
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** IN gckDB SharedDB,
++** Pointer to a shared DB.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ )
++{
++ gckKERNEL kernel = gcvNULL;
++ gceSTATUS status;
++ gctSIZE_T i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ /* Allocate the gckKERNEL object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckKERNEL),
++ &pointer));
++
++ kernel = pointer;
++
++ /* Zero the object pointers. */
++ kernel->hardware = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->eventObj = gcvNULL;
++ kernel->mmu = gcvNULL;
++#if gcdDVFS
++ kernel->dvfs = gcvNULL;
++#endif
++ kernel->monitorTimer = gcvNULL;
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->core = Core;
++
++ if (SharedDB == gcvNULL)
++ {
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckDB),
++ &pointer));
++
++ kernel->db = pointer;
++ kernel->dbCreated = gcvTRUE;
++ kernel->db->freeDatabase = gcvNULL;
++ kernel->db->freeRecord = gcvNULL;
++ kernel->db->dbMutex = gcvNULL;
++ kernel->db->lastDatabase = gcvNULL;
++ kernel->db->idleTime = 0;
++ kernel->db->lastIdle = 0;
++ kernel->db->lastSlowdown = 0;
++
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ kernel->db->db[i] = gcvNULL;
++ }
++
++ /* Construct a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex));
++
++ /* Construct a video memory name database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->nameDatabase));
++
++ /* Construct a video memory name database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->nameDatabaseMutex));
++
++ /* Construct a pointer name database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->pointerDatabase));
++
++ /* Construct a pointer name database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->pointerDatabaseMutex));
++ }
++ else
++ {
++ kernel->db = SharedDB;
++ kernel->dbCreated = gcvFALSE;
++ }
++
++ for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i)
++ {
++ kernel->timers[i].startTime = 0;
++ kernel->timers[i].stopTime = 0;
++ }
++
++ /* Save context. */
++ kernel->context = Context;
++
++ /* Construct atom holding number of clients. */
++ kernel->atomClients = gcvNULL;
++ gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));
++
++#if gcdENABLE_VG
++ kernel->vg = gcvNULL;
++
++ if (Core == gcvCORE_VG)
++ {
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckVGKERNEL_Construct(Os, Context, kernel, &kernel->vg));
++
++ kernel->timeOut = gcdGPU_TIMEOUT;
++ }
++ else
++#endif
++ {
++ /* Construct the gckHARDWARE object. */
++ gcmkONERROR(
++ gckHARDWARE_Construct(Os, kernel->core, &kernel->hardware));
++
++ /* Set pointer to gckKERNEL object in gckHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ kernel->timeOut = kernel->hardware->type == gcvHARDWARE_2D
++ ? gcdGPU_2D_TIMEOUT
++ : gcdGPU_TIMEOUT
++ ;
++
++ /* Initialize virtual command buffer. */
++ /* TODO: Remove platform limitation after porting. */
++#if (defined(LINUX) || defined(__QNXNTO__))
++ kernel->virtualCommandBuffer = gcvTRUE;
++#else
++ kernel->virtualCommandBuffer = gcvFALSE;
++#endif
++
++#if gcdSECURITY
++ kernel->virtualCommandBuffer = gcvFALSE;
++#endif
++
++ /* Construct the gckCOMMAND object. */
++ gcmkONERROR(
++ gckCOMMAND_Construct(kernel, &kernel->command));
++
++ /* Construct the gckEVENT object. */
++ gcmkONERROR(
++ gckEVENT_Construct(kernel, &kernel->eventObj));
++
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
++
++ gcmkVERIFY_OK(gckOS_GetTime(&kernel->resetTimeStamp));
++
++ gcmkONERROR(gckHARDWARE_PrepareFunctions(kernel->hardware));
++
++ /* Initialize the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_InitializeHardware(kernel->hardware));
++
++#if gcdDVFS
++ if (gckHARDWARE_IsFeatureAvailable(kernel->hardware,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING))
++ {
++ gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs));
++ gcmkONERROR(gckDVFS_Start(kernel->dvfs));
++ }
++#endif
++ }
++
++#if VIVANTE_PROFILER
++ /* Initialize profile setting */
++ kernel->profileEnable = gcvFALSE;
++ kernel->profileCleanRegister = gcvTRUE;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkONERROR(gckOS_CreateSyncTimeline(Os, &kernel->timeline));
++#endif
++
++ kernel->recovery = gcvTRUE;
++ kernel->stuckDump = 1;
++
++ kernel->virtualBufferHead =
++ kernel->virtualBufferTail = gcvNULL;
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Os, (gctPOINTER)&kernel->virtualBufferLock));
++
++#if gcdSECURITY
++ /* Connect to security service for this GPU. */
++ gcmkONERROR(gckKERNEL_SecurityOpen(kernel, kernel->core, &kernel->securityChannel));
++#endif
++
++#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC
++ if (kernel->timeOut)
++ {
++ gcmkVERIFY_OK(gckOS_CreateTimer(
++ Os,
++ (gctTIMERFUNCTION)_MonitorTimerFunction,
++ (gctPOINTER)kernel,
++ &kernel->monitorTimer
++ ));
++
++ kernel->monitoring = gcvFALSE;
++
++ kernel->monitorTimerStop = gcvFALSE;
++
++ gcmkVERIFY_OK(gckOS_StartTimer(
++ Os,
++ kernel->monitorTimer,
++ 100
++ ));
++ }
++#endif
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (kernel != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Core != gcvCORE_VG)
++#endif
++ {
++ if (kernel->eventObj != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_Destroy(kernel->eventObj));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(kernel->hardware->os,
++ kernel->hardware->core,
++ gcvFALSE,
++ gcvFALSE));
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware));
++ }
++ }
++
++ if (kernel->atomClients != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients));
++ }
++
++ if (kernel->dbCreated && kernel->db != gcvNULL)
++ {
++ if (kernel->db->dbMutex != gcvNULL)
++ {
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->db->dbMutex));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel->db));
++ }
++
++ if (kernel->virtualBufferLock != gcvNULL)
++ {
++ /* Destroy the virtual command buffer mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->virtualBufferLock));
++ }
++
++#if gcdDVFS
++ if (kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (kernel->timeline)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Os, kernel->timeline));
++ }
++#endif
++
++ if (kernel->monitorTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, kernel->monitorTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, kernel->monitorTimer));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel));
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
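++
++/* For reference, gckKERNEL_Construct above builds its state in this order:
++** process database (unless a shared one is passed in) -> client atom ->
++** per-core objects (gckVGKERNEL for the VG core, otherwise gckHARDWARE,
++** gckCOMMAND, gckEVENT and gckMMU followed by hardware initialization) ->
++** optional DVFS, native fence timeline, security channel and the stuck
++** detection timer. The OnError path tears down whatever was created. */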
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ )
++{
++ gctSIZE_T i;
++ gcsDATABASE_PTR database, databaseNext;
++ gcsDATABASE_RECORD_PTR record, recordNext;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex));
++#endif
++
++ /* Destroy the database. */
++ if (Kernel->dbCreated)
++ {
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ if (Kernel->db->db[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID));
++ }
++ }
++
++ /* Free all databases. */
++ for (database = Kernel->db->freeDatabase;
++ database != gcvNULL;
++ database = databaseNext)
++ {
++ databaseNext = database->next;
++
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->counterMutex));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database));
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->lastDatabase->counterMutex));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase));
++ }
++
++ /* Free all database records. */
++ for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext)
++ {
++ recordNext = record->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Destroy video memory name database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->nameDatabase));
++
++ /* Destroy video memory name database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->nameDatabaseMutex));
++
++
++ /* Destroy id-pointer database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase));
++
++ /* Destroy id-pointer database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++
++ /* Destroy the database. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db));
++
++ /* Notify stuck timer to quit. */
++ Kernel->monitorTimerStop = gcvTRUE;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ gcmkVERIFY_OK(gckVGKERNEL_Destroy(Kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Destroy the gckMMU object. */
++ gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
++
++ /* Destroy the gckCOMMAND object. */
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));
++
++ /* Destroy the gckEVENT object. */
++ gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj));
++
++ /* Destroy the gckHARDWARE object. */
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));
++ }
++
++ /* Destroy the client atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));
++
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->virtualBufferLock));
++
++#if gcdDVFS
++ if (Kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline));
++#endif
++
++#if gcdSECURITY
++ gcmkVERIFY_OK(gckKERNEL_SecurityClose(Kernel->securityChannel));
++#endif
++
++ if (Kernel->monitorTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->monitorTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->monitorTimer));
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_AllocateLinearMemory
++**
++** Walk all required memory pools to allocate the requested amount of video
++** memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID of the current process.
++**
++** gcePOOL * Pool
++** Pointer to the pool to allocate from.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctUINT32 Alignment
++** Required alignment in bytes.
++**
++** gceSURF_TYPE Type
++** Surface type of the allocation.
++**
++** gctUINT32 Flag
++** Allocation flags (gcvALLOC_FLAG_*).
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that receives the pool the memory was actually
++** allocated from.
++**
++** gctUINT32 * Node
++** Pointer to a variable that receives the video memory handle.
++*/
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Flag,
++ OUT gctUINT32 * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++ gctINT loopCount;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL tileStatusInVirtual;
++ gctBOOL contiguous = gcvFALSE;
++ gctBOOL cacheable = gcvFALSE;
++ gctSIZE_T bytes = Bytes;
++ gctUINT32 handle = 0;
++ gceDATABASE_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=%d Bytes=%lu Alignment=%lu Type=%d",
++ Kernel, *Pool, Bytes, Alignment, Type);
++
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes != 0);
++
++ /* Get basic type. */
++ Type &= 0xFF;
++
++ /* Check flags. */
++ contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
++ cacheable = Flag & gcvALLOC_FLAG_CACHEABLE;
++
++AllocateMemory:
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_CONTIGUOUS:
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ default:
++ loopCount = 1;
++ break;
++ }
++
++ while (loopCount-- > 0)
++ {
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkONERROR(
++ gckVIDMEM_ConstructVirtual(Kernel, Flag | gcvALLOC_FLAG_NON_CONTIGUOUS, Bytes, &node));
++
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = Type;
++
++ /* Success. */
++ break;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdCONTIGUOUS_SIZE_LIMIT
++ if (Bytes > gcdCONTIGUOUS_SIZE_LIMIT && contiguous == gcvFALSE)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ /* Create a gcuVIDMEM_NODE from contiguous memory. */
++ status = gckVIDMEM_ConstructVirtual(
++ Kernel,
++ Flag | gcvALLOC_FLAG_CONTIGUOUS,
++ Bytes,
++ &node);
++ }
++
++ if (gcmIS_SUCCESS(status))
++ {
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = Type;
++
++ /* Memory allocated. */
++ break;
++ }
++ }
++
++ else
++ /* gcvPOOL_SYSTEM can't be cacheable. */
++ if (cacheable == gcvFALSE)
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Allocate memory. */
++#if defined(gcdLINEAR_SIZE_LIMIT)
++ /* 512 KB */
++ if (Bytes > gcdLINEAR_SIZE_LIMIT)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ status = gckVIDMEM_AllocateLinear(Kernel,
++ videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ (*Pool == gcvPOOL_SYSTEM),
++ &node);
++ }
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Memory allocated. */
++ node->VidMem.pool = pool;
++ bytes = node->VidMem.bytes;
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++
++ else
++ if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++
++ else
++ if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to contiguous memory. */
++ pool = gcvPOOL_CONTIGUOUS;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ tileStatusInVirtual = gcvFALSE;
++ }
++ else
++#endif
++ {
++ tileStatusInVirtual =
++ gckHARDWARE_IsFeatureAvailable(Kernel->hardware,
++ gcvFEATURE_MC20);
++ }
++
++ if (Type == gcvSURF_TILE_STATUS && tileStatusInVirtual != gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (contiguous)
++ {
++ break;
++ }
++
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++
++ else
++ {
++ /* Out of pools. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ if (contiguous)
++ {
++ /* Broadcast OOM message. */
++ status = gckOS_Broadcast(Kernel->os, Kernel->hardware, gcvBROADCAST_OUT_OF_MEMORY);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Give the system a moment to free some memory, then retry. */
++ gckOS_Delay(gcvNULL, 1);
++ goto AllocateMemory;
++ }
++ }
++
++ /* Nothing allocated. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Allocate handle for this video memory. */
++ gcmkONERROR(
++ gckVIDMEM_NODE_Allocate(Kernel, node, Type, pool, &handle));
++
++ /* Return node and pool used for allocation. */
++ *Node = handle;
++ *Pool = pool;
++
++ /* Encode surface type and pool to database type. */
++ type = gcvDB_VIDEO_MEMORY
++ | (Type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++ | (pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++
++ /* Record in process db. */
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ ProcessID,
++ type,
++ gcmINT2PTR(handle),
++ gcvNULL,
++ bytes));
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Pool=%d *Node=0x%x", *Pool, *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (handle)
++ {
++ /* Destroy handle allocated. */
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, handle));
++ }
++
++ if (node)
++ {
++ /* Free video memory allocated. */
++ gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, node));
++ }
++
++ /* Some cases, such as Chrome running WebGL tests, request so much memory that
++ * the OOM killer is invoked and kills the process. The user would rather have
++ * the case handle the failure itself than crash, so report the out-of-memory
++ * condition back to it. */
++ if ( status == gcvSTATUS_OUT_OF_MEMORY && (Flag & gcvALLOC_FLAG_MEMLIMIT) )
++ gcmkPRINT("The running case is out_of_memory");
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
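++
++/* Illustrative usage sketch, not part of the original driver code: allocating
++** a small linear buffer from the default pool. The example function, the
++** surface type, size, alignment and the zero Flag value are assumptions for
++** illustration only. Note how the function walks the pools (LOCAL_INTERNAL ->
++** LOCAL_EXTERNAL -> SYSTEM -> CONTIGUOUS -> VIRTUAL) until one satisfies the
++** request, and returns the pool actually used in *Pool. */
++#if 0
++static gceSTATUS
++_ExampleAllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gcePOOL pool = gcvPOOL_DEFAULT;
++ gctUINT32 node = 0;
++
++ /* Ask for 4 KB, 64-byte aligned, as a texture surface. */
++ return gckKERNEL_AllocateLinearMemory(
++ Kernel, ProcessID, &pool, 4096, 64, gcvSURF_TEXTURE, 0, &node);
++}
++#endif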
++
++/*******************************************************************************
++**
++** gckKERNEL_ReleaseVideoMemory
++**
++** Release handle of a video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID of current process.
++**
++** gctUINT32 Handle
++** Handle of video memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_ReleaseVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject;
++ gceDATABASE_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d Handle=%d",
++ Kernel, ProcessID, Handle);
++
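++    /* Look up the node object for this handle, remove the matching
++       per-process database record (encoded from the node's type and pool),
++       then drop the handle and node references taken at allocation time. */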
++ gcmkONERROR(
++ gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Handle, &nodeObject));
++
++ type = gcvDB_VIDEO_MEMORY
++ | (nodeObject->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++ | (nodeObject->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ ProcessID,
++ type,
++ gcmINT2PTR(Handle)));
++
++ gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, Handle);
++
++ gckVIDMEM_NODE_Dereference(Kernel, nodeObject);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_LockVideoMemory
++**
++**  Lock a video memory node. This generates a CPU virtual address for use
++**  by software and a GPU address for use by the GPU.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gceCORE Core
++** GPU to which video memory is locked.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_LockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject = gcvNULL;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL locked = gcvFALSE;
++ gctBOOL asynchronous = gcvFALSE;
++#ifndef __QNXNTO__
++ gctPOINTER pointer = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d",
++ Kernel, ProcessID);
++
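++    /* Reference the handle, lock the node to obtain its GPU address (and
++       physical address), map the memory into the caller's address space and
++       record the lock in the per-process database. On error the lock is
++       rolled back, including the asynchronous (bottom half) unlock. */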
++ gcmkONERROR(
++ gckVIDMEM_HANDLE_LookupAndReference(Kernel,
++ Interface->u.LockVideoMemory.node,
++ &nodeObject));
++
++ node = nodeObject->node;
++
++ Interface->u.LockVideoMemory.gid = 0;
++
++ /* Lock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Lock(Kernel,
++ nodeObject,
++ Interface->u.LockVideoMemory.cacheable,
++ &Interface->u.LockVideoMemory.address,
++ &Interface->u.LockVideoMemory.gid,
++ &Interface->u.LockVideoMemory.physicalAddress));
++
++ locked = gcvTRUE;
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ ProcessID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemoryEx(Kernel,
++ Core,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &pointer));
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(pointer);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckVIDMEM_Node_Lock(
++ Kernel,
++ nodeObject,
++ &Interface->u.LockVideoMemory.address
++ ));
++#endif
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ (gctUINT32)(Interface->u.LockVideoMemory.memory);
++#endif
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ ProcessID, gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmINT2PTR(Interface->u.LockVideoMemory.node),
++ gcvNULL,
++ 0));
++
++ gckVIDMEM_HANDLE_Reference(
++ Kernel, ProcessID, (gctUINT32)Interface->u.LockVideoMemory.node);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ /* Roll back the lock. */
++ gcmkVERIFY_OK(gckVIDMEM_Unlock(Kernel,
++ nodeObject,
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous));
++
++ if (gcvTRUE == asynchronous)
++ {
++ /* Bottom Half */
++ gcmkVERIFY_OK(gckVIDMEM_Unlock(Kernel,
++ nodeObject,
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++ }
++ }
++
++ if (nodeObject != gcvNULL)
++ {
++ gckVIDMEM_NODE_Dereference(Kernel, nodeObject);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnlockVideoMemory
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID of current process.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_UnlockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject;
++ gcuVIDMEM_NODE_PTR node;
++
++ gcmkHEADER_ARG("Kernel=0x%08X ProcessID=%d",
++ Kernel, ProcessID);
++
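++    /* The unlock may complete asynchronously: gckVIDMEM_Unlock reports
++       through 'asynchroneous' whether the final release is deferred to the
++       event (bottom half) path. */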
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
++ Kernel,
++ ProcessID,
++ (gctUINT32)Interface->u.UnlockVideoMemory.node,
++ &nodeObject));
++
++ node = nodeObject->node;
++
++ /* Unlock video memory. */
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkONERROR(gckVIDMEM_Unlock(
++ Kernel,
++ nodeObject,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gctINT i;
++ gcuDATABASE_INFO tmp;
++
++ gceDATABASE_TYPE type[3] = {
++ gcvDB_VIDEO_MEMORY | (gcvPOOL_SYSTEM << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
++ gcvDB_VIDEO_MEMORY | (gcvPOOL_CONTIGUOUS << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
++ gcvDB_VIDEO_MEMORY | (gcvPOOL_VIRTUAL << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
++ };
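++
++    /* type[0..2] select the system, contiguous and virtual pools; their
++       counters are returned in vidMemPool[0..2], and virtual command buffer
++       usage is folded into the virtual pool and overall video memory totals
++       below. */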
++
++ gcmkHEADER();
++
++ /* Query video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_VIDEO_MEMORY,
++ &Interface->u.Database.vidMem));
++
++ /* Query non-paged memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_NON_PAGED,
++ &Interface->u.Database.nonPaged));
++
++ /* Query contiguous memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_CONTIGUOUS,
++ &Interface->u.Database.contiguous));
++
++ /* Query GPU idle time. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_IDLE,
++ &Interface->u.Database.gpuIdle));
++ for (i = 0; i < 3; i++)
++ {
++ /* Query each video memory pool. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ type[i],
++ &Interface->u.Database.vidMemPool[i]));
++ }
++
++ /* Query virtual command buffer pool. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_COMMAND_BUFFER,
++ &tmp));
++
++ Interface->u.Database.vidMemPool[2].counters.bytes += tmp.counters.bytes;
++ Interface->u.Database.vidMemPool[2].counters.maxBytes += tmp.counters.maxBytes;
++ Interface->u.Database.vidMemPool[2].counters.totalBytes += tmp.counters.totalBytes;
++
++ Interface->u.Database.vidMem.counters.bytes += tmp.counters.bytes;
++ Interface->u.Database.vidMem.counters.maxBytes += tmp.counters.maxBytes;
++ Interface->u.Database.vidMem.counters.totalBytes += tmp.counters.totalBytes;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gckKERNEL_DumpVidMemUsage(Kernel, Interface->u.Database.processID);
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_ConfigPowerManagement(
++ IN gckKERNEL Kernel,
++ IN OUT gcsHAL_INTERFACE * Interface
++)
++{
++ gceSTATUS status;
++ gctBOOL enable = Interface->u.ConfigPowerManagement.enable;
++
++ gcmkHEADER();
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(Kernel->hardware, enable));
++
++ if (enable == gcvTRUE)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Kernel->hardware, gcvPOWER_ON));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL FromUser
++**          Whether the call originates from user space.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctSIZE_T bytes;
++ gctPOINTER logical = gcvNULL;
++ gctPOINTER info = gcvNULL;
++#if (gcdENABLE_3D || gcdENABLE_2D)
++ gckCONTEXT context = gcvNULL;
++#endif
++ gckKERNEL kernel = Kernel;
++ gctUINT32 address;
++ gctUINT32 processID;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++ gctPOINTER logical;
++#endif
++ gctUINT32 paddr = gcvINVALID_ADDRESS;
++#if !USE_NEW_LINUX_SIGNAL
++ gctSIGNAL signal;
++#endif
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++
++ gckVIDMEM_NODE nodeObject;
++ gctBOOL powerMutexAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
++ Kernel, FromUser, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Dispatching command %d (%s)",
++ Interface->command, _DispatchText[Interface->command]);
++#endif
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE);
++#endif
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++#endif
++
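++    /* Each gcvHAL_* command below unpacks its arguments from Interface->u,
++       calls into the gckOS / gckHARDWARE / gckVIDMEM layers and, for
++       commands that create or destroy resources, updates the per-process
++       database so the resources can be reclaimed when the process detaches. */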
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Kernel->os,
++ &Interface->u.GetBaseAddress.baseAddress));
++ break;
++
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkONERROR(
++ gckHARDWARE_QueryChipIdentity(
++ Kernel->hardware,
++ &Interface->u.QueryChipIdentity));
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.MapMemory.physical);
++
++ /* Map memory. */
++ gcmkONERROR(
++ gckKERNEL_MapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes,
++ &logical));
++
++ Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ logical,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes));
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.UnmapMemory.physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++
++ /* Unmap memory. */
++ gcmkONERROR(
++ gckKERNEL_UnmapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.UnmapMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ break;
++
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes;
++
++ /* Allocate non-paged memory. */
++ gcmkONERROR(
++ gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateNonPagedMemory.bytes = bytes;
++ Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physical),
++ bytes));
++ break;
++
++ case gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER:
++ bytes = (gctSIZE_T) Interface->u.AllocateVirtualCommandBuffer.bytes;
++
++ gcmkONERROR(
++ gckKERNEL_AllocateVirtualCommandBuffer(
++ Kernel,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateVirtualCommandBuffer.bytes = bytes;
++ Interface->u.AllocateVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateVirtualCommandBuffer.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_COMMAND_BUFFER,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateVirtualCommandBuffer.physical),
++ bytes));
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Free non-paged memory. */
++ gcmkONERROR(
++ gckOS_FreeNonPagedMemory(Kernel->os,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateContiguousMemory.bytes;
++
++ /* Allocate contiguous memory. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateContiguousMemory.bytes = bytes;
++ Interface->u.AllocateContiguousMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateContiguousMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ logical,
++ gcvTRUE,
++ &Interface->u.AllocateContiguousMemory.address));
++
++ gcmkVERIFY_OK(gckKERNEL_AddProcessDB(
++ Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateContiguousMemory.physical),
++ bytes));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++
++ /* Free contiguous memory. */
++ gcmkONERROR(
++ gckOS_FreeContiguous(Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkONERROR(
++ gckKERNEL_AllocateLinearMemory(Kernel, processID,
++ &Interface->u.AllocateLinearVideoMemory.pool,
++ Interface->u.AllocateLinearVideoMemory.bytes,
++ Interface->u.AllocateLinearVideoMemory.alignment,
++ Interface->u.AllocateLinearVideoMemory.type,
++ Interface->u.AllocateLinearVideoMemory.flag,
++ &Interface->u.AllocateLinearVideoMemory.node));
++ break;
++
++ case gcvHAL_RELEASE_VIDEO_MEMORY:
++ /* Release video memory. */
++ gcmkONERROR(gckKERNEL_ReleaseVideoMemory(
++ Kernel, processID,
++ (gctUINT32)Interface->u.ReleaseVideoMemory.node
++ ));
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ /* Lock video memory. */
++ gcmkONERROR(gckKERNEL_LockVideoMemory(Kernel, Kernel->core, processID, FromUser, Interface));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ gcmkONERROR(gckKERNEL_UnlockVideoMemory(Kernel, processID, Interface));
++ break;
++
++ case gcvHAL_EVENT_COMMIT:
++ /* Commit an event queue. */
++#if gcdMULTI_GPU
++ if (Interface->u.Event.gpuMode == gcvMULTI_GPU_MODE_INDEPENDENT)
++ {
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue),
++ Interface->u.Event.chipEnable));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue),
++ gcvCORE_3D_ALL_MASK));
++ }
++#else
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue)));
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++#if gcdMULTI_GPU
++ if (Interface->u.Commit.gpuMode == gcvMULTI_GPU_MODE_INDEPENDENT)
++ {
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID,
++ Interface->u.Commit.chipEnable));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID,
++ gcvCORE_3D_ALL_MASK));
++ }
++#else
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID));
++#endif
++
++ break;
++
++ case gcvHAL_STALL:
++ /* Stall the command queue. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE));
++#endif
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkONERROR(
++ gckOS_MapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ Interface->u.MapUserMemory.physical,
++ (gctSIZE_T) Interface->u.MapUserMemory.size,
++ &info,
++ &Interface->u.MapUserMemory.address));
++
++ Interface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.MapUserMemory.info),
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ (gctSIZE_T) Interface->u.MapUserMemory.size));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ address = Interface->u.UnmapUserMemory.address;
++ info = gcmNAME_TO_PTR(Interface->u.UnmapUserMemory.info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.UnmapUserMemory.info)));
++ /* Unmap user memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size,
++ info,
++ address));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.UnmapUserMemory.info);
++ break;
++
++#if !USE_NEW_LINUX_SIGNAL
++ case gcvHAL_USER_SIGNAL:
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkONERROR(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkONERROR(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++
++ break;
++
++ case gcvUSER_SIGNAL_MAP:
++ gcmkONERROR(
++ gckOS_MapSignal(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ (gctHANDLE)(gctUINTPTR_T)processID,
++ &signal));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_UNMAP:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ break;
++#endif
++
++ case gcvHAL_SET_POWER_MANAGEMENT_STATE:
++ /* Set the power management state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(
++ Kernel->hardware,
++ Interface->u.SetPowerManagement.state));
++ break;
++
++ case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
++        /* Assume the chip is not idle. */
++ Interface->u.QueryPowerManagement.isIdle = gcvFALSE;
++
++ /* Query the power management state. */
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &Interface->u.QueryPowerManagement.state));
++
++ /* Query the idle state. */
++ gcmkONERROR(
++ gckHARDWARE_QueryIdle(Kernel->hardware,
++ &Interface->u.QueryPowerManagement.isIdle));
++ break;
++
++ case gcvHAL_READ_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
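++        /* Register access from user space is honoured only while the power
++           mutex is held and the chip is powered on; otherwise zeroed data
++           and gcvSTATUS_CHIP_NOT_READY are returned. */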
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Read a register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Kernel->os,
++ Kernel->core,
++ Interface->u.ReadRegisterData.address,
++ &Interface->u.ReadRegisterData.data));
++ }
++ else
++ {
++                /* Chip is not powered on. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ /* No access from user land to read registers. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++#if gcdMULTI_GPU
++ case gcvHAL_READ_REGISTER_EX:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++ gctUINT32 coreId = 0;
++ gctUINT32 coreSelect = Interface->u.ReadRegisterDataEx.coreSelect;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ for (; coreSelect != 0; coreSelect >>= 1, coreId++)
++ {
++ if (coreSelect & 1UL)
++ {
++ /* Read a register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(
++ Kernel->os,
++ Kernel->core,
++ coreId,
++ Interface->u.ReadRegisterDataEx.address,
++ &Interface->u.ReadRegisterDataEx.data[coreId]));
++ }
++ }
++ }
++ else
++ {
++ for (coreId = 0; coreId < gcdMULTI_GPU; coreId++)
++ {
++                    /* Chip is not powered on. */
++ Interface->u.ReadRegisterDataEx.data[coreId] = 0;
++ }
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ gctUINT32 coreId;
++
++ /* No access from user land to read registers. */
++ for (coreId = 0; coreId < gcdMULTI_GPU; coreId++)
++ {
++ Interface->u.ReadRegisterDataEx.data[coreId] = 0;
++ }
++
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_WRITE_REGISTER_EX:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++ gctUINT32 coreId = 0;
++ gctUINT32 coreSelect = Interface->u.WriteRegisterDataEx.coreSelect;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ for (; coreSelect != 0; coreSelect >>= 1, coreId++)
++ {
++ if (coreSelect & 1UL)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterByCoreId(
++ Kernel->os,
++ Kernel->core,
++ coreId,
++ Interface->u.WriteRegisterDataEx.address,
++ Interface->u.WriteRegisterDataEx.data[coreId]));
++ }
++ }
++ }
++ else
++ {
++                /* Chip is not powered on. */
++ for (coreId = 0; coreId < gcdMULTI_GPU; coreId++)
++ {
++ Interface->u.WriteRegisterDataEx.data[coreId] = 0;
++ }
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++#endif
++
++ case gcvHAL_WRITE_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE));
++ powerMutexAcquired = gcvTRUE;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Kernel->os,
++ Kernel->core,
++ Interface->u.WriteRegisterData.address,
++ Interface->u.WriteRegisterData.data));
++ }
++ else
++ {
++                /* Chip is not powered on. */
++ Interface->u.WriteRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ powerMutexAcquired = gcvFALSE;
++ }
++#else
++ /* No access from user land to write registers. */
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_READ_ALL_PROFILE_REGISTERS:
++#if VIVANTE_PROFILER && VIVANTE_PROFILER_CONTEXT
++ /* Read profile data according to the context. */
++ gcmkONERROR(
++ gckHARDWARE_QueryContextProfile(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ gcmNAME_TO_PTR(Interface->u.RegisterProfileData.context),
++ &Interface->u.RegisterProfileData.counters));
++#elif VIVANTE_PROFILER
++ /* Read all 3D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_QueryProfileRegisters(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ &Interface->u.RegisterProfileData.counters));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_PROFILE_REGISTERS_2D:
++#if VIVANTE_PROFILER
++ /* Read all 2D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_ProfileEngine2D(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.RegisterProfileData2D.hwProfile2D)));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_GET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Get profile setting */
++ Interface->u.GetProfileSetting.enable = Kernel->profileEnable;
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_SET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Set profile setting */
++ if(Kernel->hardware->gpuProfiler)
++ {
++ Kernel->profileEnable = Interface->u.SetProfileSetting.enable;
++#if VIVANTE_PROFILER_NEW
++ if (Kernel->profileEnable)
++ gckHARDWARE_InitProfiler(Kernel->hardware);
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_NOT_SUPPORTED;
++ break;
++ }
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++#if VIVANTE_PROFILER_PERDRAW
++ case gcvHAL_READ_PROFILER_REGISTER_SETTING:
++ #if VIVANTE_PROFILER
++ Kernel->profileCleanRegister = Interface->u.SetProfilerRegisterClear.bclear;
++ #endif
++ status = gcvSTATUS_OK;
++ break;
++#endif
++
++ case gcvHAL_QUERY_KERNEL_SETTINGS:
++ /* Get kernel settings. */
++ gcmkONERROR(
++ gckKERNEL_QuerySettings(Kernel,
++ &Interface->u.QueryKernelSettings.settings));
++ break;
++
++ case gcvHAL_RESET:
++ /* Reset the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_Reset(Kernel->hardware));
++ break;
++
++ case gcvHAL_DEBUG:
++ /* Set debug level and zones. */
++ if (Interface->u.Debug.set)
++ {
++ gckOS_SetDebugLevel(Interface->u.Debug.level);
++ gckOS_SetDebugZones(Interface->u.Debug.zones,
++ Interface->u.Debug.enable);
++ }
++
++ if (Interface->u.Debug.message[0] != '\0')
++ {
++ /* Print a message to the debugger. */
++ if (Interface->u.Debug.type == gcvMESSAGE_TEXT)
++ {
++ gckOS_CopyPrint(Interface->u.Debug.message);
++ }
++ else
++ {
++ gckOS_DumpBuffer(Kernel->os,
++ Interface->u.Debug.message,
++ Interface->u.Debug.messageSize,
++ gceDUMP_BUFFER_FROM_USER,
++ gcvTRUE);
++ }
++ }
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_DUMP_GPU_STATE:
++ {
++ gceCHIPPOWERSTATE power;
++
++ _DumpDriverConfigure(Kernel);
++
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &power
++ ));
++
++ if (power == gcvPOWER_ON)
++ {
++ Interface->u.ReadRegisterData.data = 1;
++
++ _DumpState(Kernel);
++ }
++ else
++ {
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++
++ gcmkPRINT("[galcore]: Can't dump state if GPU isn't POWER ON.");
++ }
++ }
++ break;
++
++ case gcvHAL_DUMP_EVENT:
++ break;
++
++ case gcvHAL_CACHE:
++
++ logical = gcmUINT64_TO_PTR(Interface->u.Cache.logical);
++
++ if (Interface->u.Cache.node)
++ {
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
++ Kernel,
++ processID,
++ Interface->u.Cache.node,
++ &nodeObject));
++
++ if (nodeObject->node->VidMem.memory->object.type == gcvOBJ_VIDMEM
++ || nodeObject->node->Virtual.contiguous
++ )
++ {
++ /* If memory is contiguous, get physical address. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Kernel->os, logical, (gctUINT32*)&paddr));
++ }
++ }
++
++ bytes = (gctSIZE_T) Interface->u.Cache.bytes;
++ switch(Interface->u.Cache.operation)
++ {
++ case gcvCACHE_FLUSH:
++ /* Clean and invalidate the cache. */
++ status = gckOS_CacheFlush(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_CLEAN:
++ /* Clean the cache. */
++ status = gckOS_CacheClean(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_INVALIDATE:
++ /* Invalidate the cache. */
++ status = gckOS_CacheInvalidate(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++
++ case gcvCACHE_MEMORY_BARRIER:
++ status = gckOS_MemoryBarrier(Kernel->os,
++ logical);
++ break;
++ default:
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ /* Check for invalid timer. */
++ if ((Interface->u.TimeStamp.timer >= gcmCOUNTOF(Kernel->timers))
++ || (Interface->u.TimeStamp.request != 2))
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Return timer results and reset timer. */
++ {
++ gcsTIMER_PTR timer = &(Kernel->timers[Interface->u.TimeStamp.timer]);
++ gctUINT64 timeDelta = 0;
++
++ if (timer->stopTime < timer->startTime )
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ timeDelta = timer->stopTime - timer->startTime;
++
++ /* Check truncation overflow. */
++ Interface->u.TimeStamp.timeDelta = (gctINT32) timeDelta;
++            /* Only bits 0 through 30 are available. */
++ if (timeDelta>>31)
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ break;
++
++ case gcvHAL_DATABASE:
++ gcmkONERROR(gckKERNEL_QueryDatabase(Kernel, processID, Interface));
++ break;
++
++ case gcvHAL_VERSION:
++ Interface->u.Version.major = gcvVERSION_MAJOR;
++ Interface->u.Version.minor = gcvVERSION_MINOR;
++ Interface->u.Version.patch = gcvVERSION_PATCH;
++ Interface->u.Version.build = gcvVERSION_BUILD;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "KERNEL version %d.%d.%d build %u %s %s",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH,
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME);
++#endif
++ break;
++
++ case gcvHAL_CHIP_INFO:
++        /* Report a single chip when multi-core is not supported. */
++ Interface->u.ChipInfo.count = 1;
++ Interface->u.ChipInfo.types[0] = Kernel->hardware->type;
++ break;
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++ case gcvHAL_ATTACH:
++ /* Attach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Attach(Kernel->command,
++ &context,
++ &bytes,
++ processID));
++
++ Interface->u.Attach.stateCount = bytes;
++ Interface->u.Attach.context = gcmPTR_TO_NAME(context);
++
++ if (Interface->u.Attach.map == gcvTRUE)
++ {
++ gcmkVERIFY_OK(
++ gckCONTEXT_MapBuffer(context,
++ Interface->u.Attach.physicals,
++ Interface->u.Attach.logicals,
++ &Interface->u.Attach.bytes));
++ }
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Attach.context),
++ gcvNULL,
++ 0));
++ break;
++#endif
++
++ case gcvHAL_DETACH:
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Detach.context)));
++
++ /* Detach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Detach(Kernel->command,
++ gcmNAME_TO_PTR(Interface->u.Detach.context)));
++
++ gcmRELEASE_NAME(Interface->u.Detach.context);
++ break;
++
++ case gcvHAL_COMPOSE:
++ Interface->u.Compose.physical = gcmPTR_TO_UINT64(gcmNAME_TO_PTR(Interface->u.Compose.physical));
++ /* Start composition. */
++ gcmkONERROR(
++ gckEVENT_Compose(Kernel->eventObj,
++ &Interface->u.Compose));
++ break;
++
++ case gcvHAL_SET_TIMEOUT:
++        /* Set the timeout value supplied by the user. */
++ gckKERNEL_SetTimeOut(Kernel, Interface->u.SetTimeOut.timeOut);
++ break;
++
++ case gcvHAL_GET_FRAME_INFO:
++ gcmkONERROR(gckHARDWARE_GetFrameInfo(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.GetFrameInfo.frameInfo)));
++ break;
++
++ case gcvHAL_SET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_SetFscaleValue(Kernel->hardware,
++ Interface->u.SetFscaleValue.value);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++ case gcvHAL_GET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_GetFscaleValue(Kernel->hardware,
++ &Interface->u.GetFscaleValue.value,
++ &Interface->u.GetFscaleValue.minValue,
++ &Interface->u.GetFscaleValue.maxValue);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_NAME_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Name(Kernel,
++ Interface->u.NameVideoMemory.handle,
++ &Interface->u.NameVideoMemory.name));
++ break;
++
++ case gcvHAL_IMPORT_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Import(Kernel,
++ Interface->u.ImportVideoMemory.name,
++ &Interface->u.ImportVideoMemory.handle));
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ gcmINT2PTR(Interface->u.ImportVideoMemory.handle),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_GET_VIDEO_MEMORY_FD:
++ gcmkONERROR(gckVIDMEM_NODE_GetFd(
++ Kernel,
++ Interface->u.GetVideoMemoryFd.handle,
++ &Interface->u.GetVideoMemoryFd.fd
++ ));
++
++ /* No need to add it to processDB because OS will release all fds when
++ ** process quits.
++ */
++ break;
++
++ case gcvHAL_QUERY_RESET_TIME_STAMP:
++ Interface->u.QueryResetTimeStamp.timeStamp = Kernel->resetTimeStamp;
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical);
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++
++ gcmkONERROR(gckOS_DestroyUserVirtualMapping(
++ Kernel->os,
++ buffer->physical,
++ (gctSIZE_T)Interface->u.FreeVirtualCommandBuffer.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++
++ gcmkONERROR(gckKERNEL_DestroyVirtualCommandBuffer(
++ Kernel,
++ (gctSIZE_T)Interface->u.FreeVirtualCommandBuffer.bytes,
++ (gctPHYS_ADDR)buffer,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++
++ gcmRELEASE_NAME(Interface->u.FreeVirtualCommandBuffer.physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ switch (Interface->u.SyncPoint.command)
++ {
++ case gcvSYNC_POINT_CREATE:
++ gcmkONERROR(gckOS_CreateSyncPoint(Kernel->os, &syncPoint));
++
++ Interface->u.SyncPoint.syncPoint = gcmPTR_TO_UINT64(syncPoint);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSYNC_POINT_DESTROY:
++ syncPoint = gcmUINT64_TO_PTR(Interface->u.SyncPoint.syncPoint);
++
++ gcmkONERROR(gckOS_DestroySyncPoint(Kernel->os, syncPoint));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CREATE_NATIVE_FENCE:
++ {
++ gctINT fenceFD;
++ gctSYNC_POINT syncPoint =
++ gcmUINT64_TO_PTR(Interface->u.CreateNativeFence.syncPoint);
++
++ gcmkONERROR(
++ gckOS_CreateNativeFence(Kernel->os,
++ Kernel->timeline,
++ syncPoint,
++ &fenceFD));
++
++ Interface->u.CreateNativeFence.fenceFD = fenceFD;
++ }
++ break;
++#endif
++
++ case gcvHAL_SHBUF:
++ {
++ gctSHBUF shBuf;
++ gctPOINTER uData;
++ gctUINT32 bytes;
++
++ switch (Interface->u.ShBuf.command)
++ {
++ case gcvSHBUF_CREATE:
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Create. */
++ gcmkONERROR(gckKERNEL_CreateShBuffer(Kernel, bytes, &shBuf));
++
++ Interface->u.ShBuf.id = gcmPTR_TO_UINT64(shBuf);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_DESTROY:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Check db first to avoid illegal destroy in the process. */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf));
++
++ gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf));
++ break;
++
++ case gcvSHBUF_MAP:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Map for current process access. */
++ gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_WRITE:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Write. */
++ gcmkONERROR(
++ gckKERNEL_WriteShBuffer(Kernel, shBuf, uData, bytes));
++ break;
++
++ case gcvSHBUF_READ:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Read. */
++ gcmkONERROR(
++ gckKERNEL_ReadShBuffer(Kernel,
++ shBuf,
++ uData,
++ bytes,
++ &bytes));
++
++ /* Return copied size. */
++ Interface->u.ShBuf.bytes = bytes;
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CONFIG_POWER_MANAGEMENT:
++ gcmkONERROR(gckKERNEL_ConfigPowerManagement(Kernel, Interface));
++ break;
++
++ default:
++ /* Invalid command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++OnError:
++ /* Save status. */
++ Interface->status = status;
++
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_ReleaseMutex(Kernel->os, Kernel->debugMutex);
++#endif
++
++ if (powerMutexAcquired == gcvTRUE)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcess
++**
++** Attach or detach a process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL Attach
++**          gcvTRUE if a new process is attached or gcvFALSE when a process
++**          is detached.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d", Kernel, Attach);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_AttachProcessEx(Kernel, Attach, processID));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcessEx
++**
++** Attach or detach a process with the given PID. Can be paired with gckKERNEL_AttachProcess
++** provided the programmer is aware of the consequences.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL Attach
++**          gcvTRUE if a new process is attached or gcvFALSE when a process
++**          is detached.
++**
++** gctUINT32 PID
++** PID of the process to attach or detach.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ )
++{
++ gceSTATUS status;
++ gctINT32 old;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d PID=%d", Kernel, Attach, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
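++    /* Each attach creates the caller's process database; the first attach
++       also broadcasts gcvBROADCAST_FIRST_PROCESS. On detach the database is
++       destroyed, pending events are submitted, and the last detach
++       broadcasts gcvBROADCAST_LAST_PROCESS and flushes the debug cache. */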
++ if (Attach)
++ {
++ /* Increment the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomIncrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 0)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_FIRST_PROCESS));
++ }
++ }
++
++ if (Kernel->dbCreated)
++ {
++ /* Create the process database. */
++ gcmkONERROR(gckKERNEL_CreateProcessDB(Kernel, PID));
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Map kernel command buffer in the process's own MMU. */
++ gcmkONERROR(_MapCommandBuffer(Kernel));
++#endif
++ }
++ else
++ {
++ if (Kernel->dbCreated)
++ {
++ /* Clean up the process database. */
++ gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, PID));
++
++            /* Save the last known process ID. */
++ Kernel->db->lastProcessID = PID;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++#if gcdMULTI_GPU
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK);
++#else
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE);
++#endif
++
++ if (status == gcvSTATUS_INTERRUPTED && Kernel->eventObj->submitTimer)
++ {
++ gcmkONERROR(gckOS_StartTimer(Kernel->os,
++ Kernel->eventObj->submitTimer,
++ 1));
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++ }
++
++ /* Decrement the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomDecrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 1)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ /* Last client detached, switch to SUSPEND power state. */
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_LAST_PROCESS));
++ }
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++ static gctBOOL baseAddressValid = gcvFALSE;
++ static gctUINT32 baseAddress;
++ gctBOOL needBase;
++ gcskLOGICAL_CACHE_PTR slot;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x *Data=0x%x",
++ Kernel, Cache, gcmOPT_POINTER(Data));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (!baseAddressValid)
++ {
++ /* Get base address. */
++ gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress));
++
++ baseAddressValid = gcvTRUE;
++ }
++
++ /* Does this state load need a base address? */
++ gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware,
++ ((gctUINT32_PTR) Data)[-1],
++ &needBase));
++
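++    /* Depending on gcdSECURE_CACHE_METHOD one of four lookup strategies is
++       compiled in below: an LRU list, a linear scan with age-based eviction,
++       a hash table with per-bucket chains, or a direct-mapped table indexed
++       from the logical address. Each resolves the logical address to the
++       DMA address cached in a slot. */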
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ {
++ gcskLOGICAL_CACHE_PTR next;
++ gctINT i;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next, next = gcvNULL;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next
++ )
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ /* Use the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ {
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR next = gcvNULL;
++ gcskLOGICAL_CACHE_PTR oldestSlot = gcvNULL;
++ slot = gcvNULL;
++
++ if (Cache->cacheIndex != gcvNULL)
++ {
++ /* Walk the cache forwards. */
++ for (i = 1, slot = Cache->cacheIndex;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++
++ if (next == gcvNULL)
++ {
++ /* Walk the cache backwards. */
++ for (slot = Cache->cacheIndex->prev;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->prev)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ if (Cache->cacheFree != 0)
++ {
++ slot = &Cache->cache[Cache->cacheFree];
++ gcmkASSERT(slot->logical == gcvNULL);
++
++ ++ Cache->cacheFree;
++ if (Cache->cacheFree >= gcmCOUNTOF(Cache->cache))
++ {
++ Cache->cacheFree = 0;
++ }
++ }
++ else
++ {
++ /* Use the oldest cache slot. */
++ gcmkASSERT(oldestSlot != gcvNULL);
++ slot = oldestSlot;
++
++ /* Unlink from the chain. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append to the end. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Save time stamp. */
++ slot->stamp = ++ Cache->cacheStamp;
++
++ /* Save current slot for next lookup. */
++ Cache->cacheIndex = slot;
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ {
++ gctINT i;
++ gctUINT32 data = gcmPTR2INT32(*Data);
++ gctUINT32 key, index;
++ gcskLOGICAL_CACHE_PTR hash;
++
++ /* Generate a hash key. */
++ key = (data >> 24) + (data >> 16) + (data >> 8) + data;
++ index = key % gcmCOUNTOF(Cache->hash);
++
++ /* Get the hash entry. */
++ hash = &Cache->hash[index];
++
++ for (slot = hash->nextHash, i = 0;
++ (slot != gcvNULL) && (i < gcdSECURE_CACHE_SLOTS);
++ slot = slot->nextHash, ++i
++ )
++ {
++ if (slot->logical == (*Data))
++ {
++ break;
++ }
++ }
++
++ if (slot == gcvNULL)
++ {
++ /* Grab from the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Unlink slot from any hash table it is part of. */
++ if (slot->prevHash != gcvNULL)
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++
++ if (hash->nextHash != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Hash Collision: logical=0x%x key=0x%08x",
++ *Data, key);
++ }
++
++ /* Insert the slot at the head of the hash list. */
++ slot->nextHash = hash->nextHash;
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot;
++ }
++ slot->prevHash = hash;
++ hash->nextHash = slot;
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ {
++ gctUINT32 index = (gcmPTR2INT32(*Data) % gcdSECURE_CACHE_SLOTS) + 1;
++
++ /* Get cache slot. */
++ slot = &Cache->cache[index];
++
++ /* Check for cache miss. */
++ if (slot->logical != *Data)
++ {
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++ }
++#endif
++
++ /* Return DMA address. */
++ *Data = gcmINT2PTR(slot->dma + (needBase ? baseAddress : 0));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR slot;
++ gctUINT8_PTR ptr;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x Logical=0x%x Bytes=%lu",
++ Kernel, Cache, Logical, Bytes);
++
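++    /* A gcvNULL Logical flushes every cache slot and resets the cache state;
++       otherwise only slots whose logical address falls inside
++       [Logical, Logical + Bytes) are evicted. */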
++ /* Do we need to flush the entire cache? */
++ if (Logical == gcvNULL)
++ {
++ /* Clear all cache slots. */
++ for (i = 1; i <= gcdSECURE_CACHE_SLOTS; ++i)
++ {
++ Cache->cache[i].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ Cache->cache[i].nextHash = gcvNULL;
++ Cache->cache[i].prevHash = gcvNULL;
++#endif
++        }
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero the hash table. */
++ for (i = 0; i < gcmCOUNTOF(Cache->hash); ++i)
++ {
++ Cache->hash[i].nextHash = gcvNULL;
++ }
++#endif
++
++ /* Reset the cache functionality. */
++ Cache->cacheIndex = gcvNULL;
++ Cache->cacheFree = 1;
++ Cache->cacheStamp = 0;
++ }
++
++ else
++ {
++ gctUINT8_PTR low = (gctUINT8_PTR) Logical;
++ gctUINT8_PTR high = low + Bytes;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ gcskLOGICAL_CACHE_PTR next;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next
++ )
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ gcskLOGICAL_CACHE_PTR next;
++
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Test if this slot is the current slot. */
++ if (slot == Cache->cacheIndex)
++ {
++ /* Move to next or previous slot. */
++ Cache->cacheIndex = (slot->next->logical != gcvNULL)
++ ? slot->next
++ : (slot->prev->logical != gcvNULL)
++ ? slot->prev
++ : gcvNULL;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Insert slot to head of cache. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->stamp = 0;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ gctINT j;
++ gcskLOGICAL_CACHE_PTR hash, next;
++
++ /* Walk all hash tables. */
++ for (i = 0, hash = Cache->hash;
++ i < gcmCOUNTOF(Cache->hash);
++ ++i, ++hash)
++ {
++ /* Walk all slots in the hash. */
++ for (j = 0, slot = hash->nextHash;
++ (j < gcdSECURE_CACHE_SLOTS) && (slot != gcvNULL);
++ ++j, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot from hash table. */
++ if (slot->prevHash == hash)
++ {
++ hash->nextHash = slot->nextHash;
++ }
++ else
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->prevHash = gcvNULL;
++ slot->nextHash = gcvNULL;
++ }
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ gctUINT32 index;
++
++ /* Loop while inside the range. */
++ for (i = 1; (low < high) && (i <= gcdSECURE_CACHE_SLOTS); ++i)
++ {
++ /* Get index into cache for this range. */
++ index = (gcmPTR2INT32(low) % gcdSECURE_CACHE_SLOTS) + 1;
++ slot = &Cache->cache[index];
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Remove entry from cache. */
++ slot->logical = gcvNULL;
++ }
++
++ /* Next block. */
++ low += gcdSECURE_CACHE_SLOTS;
++ }
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gckEVENT eventObj;
++ gckHARDWARE hardware;
++#if gcdSECURE_USER
++ gctUINT32 processID;
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctUINT32 mask = 0;
++ gckCOMMAND command;
++ gckENTRYDATA data;
++ gctUINT32 i = 0, count = 0;
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 oldValue;
++#endif
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++    /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Grab gckEVENT object. */
++ eventObj = Kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab gckCOMMAND object. */
++ command = Kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++#if gcdSECURE_USER
++ /* Flush the secure mapping cache. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++ gcmkONERROR(gckKERNEL_FlushTranslationCache(Kernel, cache, gcvNULL, 0));
++#endif
++
++ if (Kernel->stuckDump == gcdSTUCK_DUMP_MINIMAL)
++ {
++ gcmkPRINT("[galcore]: GPU[%d] hang, automatic recovery.", Kernel->core);
++ }
++ else
++ {
++ _DumpDriverConfigure(Kernel);
++ _DumpState(Kernel);
++ }
++
++ if (Kernel->recovery == gcvFALSE)
++ {
++ gcmkPRINT("[galcore]: Stop driver to keep scene.");
++
++ for (;;)
++ {
++ gckOS_Delay(Kernel->os, 10000);
++ }
++ }
++
++ /* Clear queue. */
++ do
++ {
++ status = gckENTRYQUEUE_Dequeue(&command->queue, &data);
++ }
++ while (status == gcvSTATUS_OK);
++
++    /* Issue a soft reset to the GPU. */
++ gcmkONERROR(gckHARDWARE_Reset(hardware));
++
++ mask = Kernel->restoreMask;
++
++ for (i = 0; i < 32; i++)
++ {
++ if (mask & (1 << i))
++ {
++ count++;
++ }
++ }
++
++ /* Handle all outstanding events now. */
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (Kernel->core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending3D[i], mask));
++ }
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, mask));
++ }
++#else
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, mask));
++#endif
++#else
++#if gcdMULTI_GPU
++ if (Kernel->core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ eventObj->pending3D[i] = mask;
++ }
++ }
++ else
++ {
++ eventObj->pending = mask;
++ }
++#else
++ eventObj->pending = mask;
++#endif
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ while (count--)
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Kernel->os,
++ eventObj->interruptCount,
++ &oldValue
++ ));
++ }
++
++ gckOS_AtomClearMask(Kernel->hardware->pendingEvent, mask);
++#endif
++
++ gcmkONERROR(gckEVENT_Notify(eventObj, 1));
++
++ gcmkVERIFY_OK(gckOS_GetTime(&Kernel->resetTimeStamp));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
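++/* Illustrative sketch only (not part of this patch): one way a periodic
++** hang-check path could use gckKERNEL_Recovery(). How "Kernel" is
++** obtained and how lack of progress is detected are assumptions of the
++** example; only gckKERNEL_Recovery() itself is taken from this file.
++*/
++#if 0
++static void
++_SampleHangCheck(
++    IN gckKERNEL Kernel,
++    IN gctBOOL MadeProgress
++    )
++{
++    if (!MadeProgress)
++    {
++        /* The GPU did not advance within the timeout window:
++        ** soft-reset it and replay the outstanding events. */
++        gcmkVERIFY_OK(gckKERNEL_Recovery(Kernel));
++    }
++}
++#endif
++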
++/*******************************************************************************
++**
++** gckKERNEL_OpenUserData
++**
++** Get access to the user data.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctPOINTER StaticStorage
++** Pointer to the kernel storage where the data is to be copied if
++** NeedCopy is gcvTRUE.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to the kernel pointer that will be pointing to the data.
++*/
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d StaticStorage=0x%08X "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, StaticStorage, UserPointer, Size, KernelPointer
++ );
++
++    /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(!NeedCopy || (StaticStorage != gcvNULL));
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ if (NeedCopy)
++ {
++ /* Copy the user data to the static storage. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Kernel->os, StaticStorage, UserPointer, Size
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = StaticStorage;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map the user pointer. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ Kernel->os, UserPointer, Size, &pointer
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = pointer;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_CloseUserData
++**
++** Release resources associated with the user data connection opened by
++** gckKERNEL_OpenUserData.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctBOOL FlushData
++** If gcvTRUE, the data is written back to the user.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Kernel pointer to the data.
++*/
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d FlushData=%d "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, FlushData, UserPointer, Size, KernelPointer
++ );
++
++    /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Get a shortcut to the kernel pointer. */
++ pointer = * KernelPointer;
++
++ if (pointer != gcvNULL)
++ {
++ if (NeedCopy)
++ {
++ if (FlushData)
++ {
++ gcmkONERROR(gckOS_CopyToUserData(
++ Kernel->os, * KernelPointer, UserPointer, Size
++ ));
++ }
++ }
++ else
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Kernel->os,
++ UserPointer,
++ Size,
++ * KernelPointer
++ ));
++ }
++
++ /* Reset the kernel pointer. */
++ * KernelPointer = gcvNULL;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
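++/* Illustrative usage sketch (assumed caller, not part of this patch):
++** the expected pairing of gckKERNEL_OpenUserData/gckKERNEL_CloseUserData
++** around access to a user-mode record. The dispatcher-style caller and
++** the size of the static storage are assumptions; when NeedCopy is
++** gcvTRUE the caller must guarantee Size fits into StaticStorage.
++*/
++#if 0
++static gceSTATUS
++_SampleAccessUserRecord(
++    IN gckKERNEL Kernel,
++    IN gctBOOL NeedCopy,
++    IN gctPOINTER UserPointer,
++    IN gctSIZE_T Size
++    )
++{
++    gceSTATUS status;
++    gctUINT64 storage[16];
++    gctPOINTER record = gcvNULL;
++
++    /* Map or copy the user data into kernel space. */
++    gcmkONERROR(gckKERNEL_OpenUserData(
++        Kernel, NeedCopy, storage, UserPointer, Size, &record
++        ));
++
++    /* ... operate on 'record' here ... */
++
++    /* Write the results back (FlushData=gcvTRUE) and drop the mapping. */
++    gcmkONERROR(gckKERNEL_CloseUserData(
++        Kernel, NeedCopy, gcvTRUE, UserPointer, Size, &record
++        ));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    if (record != gcvNULL)
++    {
++        /* Roll back without flushing partial results. */
++        gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++            Kernel, NeedCopy, gcvFALSE, UserPointer, Size, &record
++            ));
++    }
++
++    return status;
++}
++#endif
++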
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ )
++{
++ gcmkHEADER_ARG("Kernel=0x%x timeOut=%d", Kernel, timeOut);
++#if gcdGPU_TIMEOUT
++ Kernel->timeOut = timeOut;
++#endif
++ gcmkFOOTER_NO();
++}
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckOS os = Kernel->os;
++ gceSTATUS status;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T pageCount;
++ gctSIZE_T bytes = *Bytes;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = gcvNULL;
++ gckMMU mmu;
++ gctUINT32 flag = gcvALLOC_FLAG_NON_CONTIGUOUS;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(os,
++ sizeof(gckVIRTUAL_COMMAND_BUFFER),
++ (gctPOINTER)&buffer));
++
++ gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_COMMAND_BUFFER)));
++
++ buffer->bytes = bytes;
++
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(os,
++ flag,
++ bytes,
++ gcvNULL,
++ &buffer->physical));
++
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_CreateUserVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->userLogical = logical;
++ }
++ else
++ {
++ gcmkONERROR(gckOS_CreateKernelVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->kernelLogical = logical;
++ }
++
++ buffer->pageCount = pageCount;
++ buffer->kernel = Kernel;
++
++ gcmkONERROR(gckOS_GetProcessID(&buffer->pid));
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu));
++ buffer->mmu = mmu;
++#else
++ mmu = Kernel->mmu;
++#endif
++
++ gcmkONERROR(gckMMU_AllocatePages(mmu,
++ pageCount,
++ &buffer->pageTable,
++ &buffer->gpuAddress));
++
++
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ buffer->physical,
++ pageCount,
++ buffer->gpuAddress,
++ buffer->pageTable));
++
++ gcmkONERROR(gckMMU_Flush(mmu, gcvSURF_INDEX));
++
++ *Physical = buffer;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "gpuAddress = %x pageCount = %d kernelLogical = %x userLogical=%x",
++ buffer->gpuAddress, buffer->pageCount,
++ buffer->kernelLogical, buffer->userLogical);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ if (Kernel->virtualBufferHead == gcvNULL)
++ {
++ Kernel->virtualBufferHead =
++ Kernel->virtualBufferTail = buffer;
++ }
++ else
++ {
++ buffer->prev = Kernel->virtualBufferTail;
++ Kernel->virtualBufferTail->next = buffer;
++ Kernel->virtualBufferTail = buffer;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (buffer->gpuAddress)
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(mmu, buffer->pageTable, buffer->pageCount));
++#else
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu, buffer->pageTable, buffer->pageCount));
++#endif
++ }
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyUserVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ buffer->userLogical));
++ }
++
++ if (buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ bytes,
++ buffer->kernelLogical));
++ }
++
++ if (buffer->physical)
++ {
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, bytes));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ gckOS os;
++ gckKERNEL kernel;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)Physical;
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(buffer != gcvNULL);
++
++ kernel = buffer->kernel;
++ os = kernel->os;
++
++ if (!buffer->userLogical)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ Bytes,
++ Logical));
++ }
++
++#if !gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(kernel->mmu, buffer->pageTable, buffer->pageCount));
++#endif
++
++ gcmkVERIFY_OK(gckOS_UnmapPages(os, buffer->pageCount, buffer->gpuAddress));
++
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, Bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, kernel->virtualBufferLock, gcvINFINITE));
++
++ if (buffer == kernel->virtualBufferHead)
++ {
++ if ((kernel->virtualBufferHead = buffer->next) == gcvNULL)
++ {
++ kernel->virtualBufferTail = gcvNULL;
++ }
++ }
++ else
++ {
++ buffer->prev->next = buffer->next;
++
++ if (buffer == kernel->virtualBufferTail)
++ {
++ kernel->virtualBufferTail = buffer->prev;
++ }
++ else
++ {
++ buffer->next->prev = buffer->prev;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, kernel->virtualBufferLock));
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
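++/* Illustrative sketch (assumed caller, not part of this patch): the
++** allocate/destroy pairing for a virtual command buffer. Note that the
++** 'Physical' output of gckKERNEL_AllocateVirtualCommandBuffer is really
++** the gckVIRTUAL_COMMAND_BUFFER descriptor and must be handed back to
++** gckKERNEL_DestroyVirtualCommandBuffer unchanged.
++*/
++#if 0
++static gceSTATUS
++_SampleVirtualCommandBuffer(
++    IN gckKERNEL Kernel
++    )
++{
++    gceSTATUS status;
++    gctSIZE_T bytes = 4096;
++    gctPHYS_ADDR physical = gcvNULL;
++    gctPOINTER logical = gcvNULL;
++
++    /* Allocate a kernel-side (InUserSpace=gcvFALSE) buffer. */
++    gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++        Kernel, gcvFALSE, &bytes, &physical, &logical
++        ));
++
++    /* ... fill 'logical' with commands ... */
++
++    /* Unmap, free the pages and unlink the descriptor again. */
++    gcmkONERROR(gckKERNEL_DestroyVirtualCommandBuffer(
++        Kernel, bytes, physical, logical
++        ));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++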
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctPOINTER start;
++ gctUINT32 pid;
++
++ gcmkHEADER_ARG("Logical = %x InUserSpace=%d.", Logical, InUserSpace);
++
++ gcmkVERIFY_OK(gckOS_GetProcessID(&pid));
++
++ status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++    /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ if (InUserSpace)
++ {
++ start = buffer->userLogical;
++ }
++ else
++ {
++ start = buffer->kernelLogical;
++ }
++
++ if (start == gcvNULL)
++ {
++ continue;
++ }
++
++ if (Logical >= start
++ && (Logical < (gctPOINTER)((gctUINT8_PTR)start + buffer->pageCount * 4096))
++ && pid == buffer->pid
++ )
++ {
++ * Address = buffer->gpuAddress + (gctUINT32)((gctUINT8_PTR)Logical - (gctUINT8_PTR)start);
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ )
++{
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 start;
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ start = (gctUINT32)buffer->gpuAddress;
++
++ if (GpuAddress >= start && GpuAddress < (start + buffer->pageCount * 4096))
++ {
++            /* Found a matching range. */
++ *Buffer = buffer;
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ return status;
++}
++
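++/* Illustrative sketch (assumed caller, not part of this patch):
++** translating a logical pointer that lies inside one of the virtual
++** command buffers above into its GPU address, and looking up the owning
++** buffer from a GPU address again.
++*/
++#if 0
++static gceSTATUS
++_SampleTranslateAddress(
++    IN gckKERNEL Kernel,
++    IN gctPOINTER UserLogical
++    )
++{
++    gceSTATUS status;
++    gctUINT32 gpuAddress;
++    gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++
++    /* CPU (user) pointer -> GPU address. Returns
++    ** gcvSTATUS_INVALID_ADDRESS if the pointer is not inside any
++    ** virtual command buffer owned by the calling process. */
++    gcmkONERROR(gckKERNEL_GetGPUAddress(
++        Kernel, UserLogical, gcvTRUE, &gpuAddress
++        ));
++
++    /* GPU address -> owning virtual command buffer descriptor. */
++    gcmkONERROR(gckKERNEL_QueryGPUAddress(Kernel, gpuAddress, &buffer));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++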
++#if gcdLINK_QUEUE_SIZE
++static void
++gckLINKQUEUE_Dequeue(
++ IN gckLINKQUEUE LinkQueue
++ )
++{
++ gcmkASSERT(LinkQueue->count == gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count--;
++ LinkQueue->front = (LinkQueue->front + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ )
++{
++ if (LinkQueue->count == gcdLINK_QUEUE_SIZE)
++ {
++ gckLINKQUEUE_Dequeue(LinkQueue);
++ }
++
++ gcmkASSERT(LinkQueue->count < gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count++;
++
++ LinkQueue->data[LinkQueue->rear].start = start;
++ LinkQueue->data[LinkQueue->rear].end = end;
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessID(&LinkQueue->data[LinkQueue->rear].pid));
++
++ LinkQueue->rear = (LinkQueue->rear + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ )
++{
++ gcmkASSERT(Index >= 0 && Index < gcdLINK_QUEUE_SIZE);
++
++ *Data = &LinkQueue->data[(Index + LinkQueue->front) % gcdLINK_QUEUE_SIZE];
++}
++#endif
++
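++/* Illustrative sketch (assumed caller, gcdLINK_QUEUE_SIZE enabled): the
++** link queue keeps the most recent gcdLINK_QUEUE_SIZE LINK locations for
++** debugging and silently drops the oldest entry when it is full, so
++** index 0 handed to gckLINKQUEUE_GetData is always the oldest surviving
++** record.
++*/
++#if 0
++static void
++_SampleDumpLinks(
++    IN gckLINKQUEUE LinkQueue
++    )
++{
++    gctUINT32 i;
++    gckLINKDATA data;
++
++    for (i = 0; i < LinkQueue->count; i++)
++    {
++        /* Oldest entry first. */
++        gckLINKQUEUE_GetData(LinkQueue, i, &data);
++
++        gcmkPRINT("link %u: start=0x%08X end=0x%08X pid=%u",
++                  i, data->start, data->end, data->pid);
++    }
++}
++#endif
++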
++/*
++* gckENTRYQUEUE_Enqueue is called with Command->mutexQueue acquired.
++*/
++gceSTATUS
++gckENTRYQUEUE_Enqueue(
++ IN gckKERNEL Kernel,
++ IN gckENTRYQUEUE Queue,
++ IN gctUINT32 physical,
++ IN gctUINT32 bytes
++ )
++{
++ gctUINT32 next = (Queue->rear + 1) % gcdENTRY_QUEUE_SIZE;
++
++ if (next == Queue->front)
++ {
++ /* Queue is full. */
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++
++ /* Copy data. */
++ Queue->data[Queue->rear].physical = physical;
++ Queue->data[Queue->rear].bytes = bytes;
++
++ gcmkVERIFY_OK(gckOS_MemoryBarrier(Kernel->os, &Queue->rear));
++
++ /* Update rear. */
++ Queue->rear = next;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckENTRYQUEUE_Dequeue(
++ IN gckENTRYQUEUE Queue,
++ OUT gckENTRYDATA * Data
++ )
++{
++ if (Queue->front == Queue->rear)
++ {
++ /* Queue is empty. */
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++
++ /* Copy data. */
++ *Data = &Queue->data[Queue->front];
++
++ /* Update front. */
++ Queue->front = (Queue->front + 1) % gcdENTRY_QUEUE_SIZE;
++
++ return gcvSTATUS_OK;
++}
++
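++/* Illustrative sketch (assumed caller, placeholder values): the entry
++** queue is a one-slot-free ring buffer. It is empty when front == rear
++** and full when (rear + 1) % gcdENTRY_QUEUE_SIZE == front, so at most
++** gcdENTRY_QUEUE_SIZE - 1 entries can be pending at any time.
++*/
++#if 0
++static void
++_SampleEntryQueue(
++    IN gckKERNEL Kernel,
++    IN gckENTRYQUEUE Queue
++    )
++{
++    gceSTATUS status;
++    gckENTRYDATA data;
++
++    /* Producer side (runs with Command->mutexQueue held). */
++    status = gckENTRYQUEUE_Enqueue(Kernel, Queue, 0x10000000, 64);
++
++    if (status == gcvSTATUS_INVALID_REQUEST)
++    {
++        /* The queue was full; the entry was not stored. */
++    }
++
++    /* Consumer side: drain everything currently queued. */
++    while (gckENTRYQUEUE_Dequeue(Queue, &data) == gcvSTATUS_OK)
++    {
++        gcmkPRINT("entry: physical=0x%08X bytes=%u",
++                  data->physical, data->bytes);
++    }
++}
++#endif
++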
++/******************************************************************************\
++*************************** Pointer - ID translation ***************************
++\******************************************************************************/
++#define gcdID_TABLE_LENGTH 1024
++typedef struct _gcsINTEGERDB * gckINTEGERDB;
++typedef struct _gcsINTEGERDB
++{
++ gckOS os;
++ gctPOINTER* table;
++ gctPOINTER mutex;
++ gctUINT32 tableLen;
++ gctUINT32 currentID;
++ gctUINT32 unused;
++}
++gcsINTEGERDB;
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Allocate a database. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gcsINTEGERDB), (gctPOINTER *)&database));
++
++ gcmkONERROR(gckOS_ZeroMemory(database, gcmSIZEOF(gcsINTEGERDB)));
++
++ /* Allocate a pointer table. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH, (gctPOINTER *)&database->table));
++
++ gcmkONERROR(gckOS_ZeroMemory(database->table, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH));
++
++ /* Allocate a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->mutex));
++
++ /* Initialize. */
++ database->currentID = 0;
++ database->unused = gcdID_TABLE_LENGTH;
++ database->os = Kernel->os;
++ database->tableLen = gcdID_TABLE_LENGTH;
++
++ *Database = database;
++
++ gcmkFOOTER_ARG("*Database=0x%08X", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Rollback. */
++ if (database)
++ {
++ if (database->table)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ )
++{
++ gckINTEGERDB database = Database;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Destroy pointer table. */
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++
++ /* Destroy database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->mutex));
++
++ /* Destroy database. */
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctUINT32 i, unused, currentID, tableLen;
++ gctPOINTER * table;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Pointer=0x%08X", Database, Pointer);
++
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (database->unused < 1)
++ {
++ /* Extend table. */
++ gcmkONERROR(
++ gckOS_Allocate(os,
++ gcmSIZEOF(gctPOINTER) * (database->tableLen + gcdID_TABLE_LENGTH),
++ (gctPOINTER *)&table));
++
++ gcmkONERROR(gckOS_ZeroMemory(table + database->tableLen,
++ gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH));
++
++ /* Copy data from old table. */
++ gckOS_MemCopy(table,
++ database->table,
++ database->tableLen * gcmSIZEOF(gctPOINTER));
++
++ gcmkOS_SAFE_FREE(os, database->table);
++
++        /* Update database with the newly allocated table. */
++ database->table = table;
++ database->currentID = database->tableLen;
++ database->tableLen += gcdID_TABLE_LENGTH;
++ database->unused += gcdID_TABLE_LENGTH;
++ }
++
++ table = database->table;
++ currentID = database->currentID;
++ tableLen = database->tableLen;
++ unused = database->unused;
++
++ /* Connect id with pointer. */
++ table[currentID] = Pointer;
++
++ *Id = currentID + 1;
++
++ /* Update the currentID. */
++ if (--unused > 0)
++ {
++ for (i = 0; i < tableLen; i++)
++ {
++ if (++currentID >= tableLen)
++ {
++                /* Wrap to the beginning. */
++ currentID = 0;
++ }
++
++ if (table[currentID] == gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ database->table = table;
++ database->currentID = currentID;
++ database->tableLen = tableLen;
++ database->unused = unused;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*Id=%d", *Id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ database->table[Id] = gcvNULL;
++
++ if (database->unused++ == 0)
++ {
++ database->currentID = Id;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctPOINTER pointer;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ pointer = database->table[Id];
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ if (pointer)
++ {
++ *Pointer = pointer;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ gcmkFOOTER_ARG("*Pointer=0x%08X", *Pointer);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gctUINT32 name;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Pointer=0x%X", Kernel, Pointer);
++
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(database, Pointer, &name));
++
++ gcmkFOOTER_ARG("name=%d", name);
++ return name;
++
++OnError:
++ gcmkFOOTER();
++ return 0;
++}
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ /* Lookup in database to get pointer. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, &pointer));
++
++ gcmkFOOTER_ARG("pointer=0x%X", pointer);
++ return pointer;
++
++OnError:
++ gcmkFOOTER();
++ return gcvNULL;
++}
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=0x%X", Kernel, Name);
++
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Name));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
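++/* Illustrative sketch (assumed caller, not part of this patch): the
++** pointer<->name translation built on the integer database above. A
++** kernel pointer is registered to obtain a small integer "name" that
++** can safely cross the user/kernel boundary, resolved back later, and
++** finally released.
++*/
++#if 0
++static void
++_SampleNameObject(
++    IN gckKERNEL Kernel,
++    IN gctPOINTER Object
++    )
++{
++    gctUINT32 name;
++    gctPOINTER pointer;
++
++    /* Register the pointer; 0 means the allocation failed. */
++    name = gckKERNEL_AllocateNameFromPointer(Kernel, Object);
++
++    if (name != 0)
++    {
++        /* Resolve the name back to the original pointer. */
++        pointer = gckKERNEL_QueryPointerFromName(Kernel, name);
++        gcmkASSERT(pointer == Object);
++
++        /* Release the name when the object goes away. */
++        gcmkVERIFY_OK(gckKERNEL_DeleteName(Kernel, name));
++    }
++}
++#endif
++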
++gceSTATUS
++gckKERNEL_SetRecovery(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Recovery,
++ IN gctUINT32 StuckDump
++ )
++{
++ Kernel->recovery = Recovery;
++
++ if (Recovery == gcvFALSE)
++ {
++ /* Dump stuck information if Recovery is disabled. */
++ Kernel->stuckDump = gcmMAX(StuckDump, gcdSTUCK_DUMP_MIDDLE);
++ }
++
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++***** Shared Buffer ************************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_CreateShBuffer
++**
++** Create shared buffer.
++**      The shared buffer can be used across processes. Other processes need to
++**      call gckKERNEL_MapShBuffer before using it.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctUINT32 Size
++** Specify the shared buffer size.
++**
++** OUTPUT:
++**
++** gctSHBUF * ShBuf
++**          Pointer to hold the returned shared buffer handle.
++*/
++gceSTATUS
++gckKERNEL_CreateShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Size,
++ OUT gctSHBUF * ShBuf
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%X, Size=%u", Kernel, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (Size == 0)
++ {
++ /* Invalid size. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ else if (Size > 1024)
++ {
++        /* Limit the shared buffer size. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create a shared buffer structure. */
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof (gcsSHBUF),
++ (gctPOINTER *)&shBuf));
++
++ /* Initialize shared buffer. */
++ shBuf->id = 0;
++ shBuf->reference = gcvNULL;
++ shBuf->size = Size;
++ shBuf->data = gcvNULL;
++
++ /* Allocate integer id for this shared buffer. */
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(Kernel->db->pointerDatabase,
++ shBuf,
++ &shBuf->id));
++
++ /* Allocate atom. */
++ gcmkONERROR(gckOS_AtomConstruct(Kernel->os, &shBuf->reference));
++
++ /* Set default reference count to 1. */
++ gcmkVERIFY_OK(gckOS_AtomSet(Kernel->os, shBuf->reference, 1));
++
++ /* Return integer id. */
++ *ShBuf = (gctSHBUF)(gctUINTPTR_T)shBuf->id;
++
++ gcmkFOOTER_ARG("*ShBuf=%u", shBuf->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (shBuf != gcvNULL)
++ {
++ if (shBuf->id != 0)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_FreeIntegerId(Kernel->db->pointerDatabase,
++ shBuf->id));
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, shBuf);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_DestroyShBuffer
++**
++** Destroy shared buffer.
++**      This decreases the reference count of the specified shared buffer and
++**      performs the actual destroy when no references remain.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_DestroyShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctINT32 oldValue = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ /* Decrease the reference count. */
++ gckOS_AtomDecrement(Kernel->os, shBuf->reference, &oldValue);
++
++ if (oldValue == 1)
++ {
++ /* Free integer id. */
++ gcmkVERIFY_OK(
++ gckKERNEL_FreeIntegerId(Kernel->db->pointerDatabase,
++ shBuf->id));
++
++ /* Free atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, shBuf->reference));
++
++ if (shBuf->data)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, shBuf->data);
++ shBuf->data = gcvNULL;
++ }
++
++ /* Free the shared buffer. */
++ gcmkOS_SAFE_FREE(Kernel->os, shBuf);
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapShBuffer
++**
++**      Map a shared buffer into this process so that it can be used here.
++**      This increases the reference count of the specified shared buffer.
++**      Call gckKERNEL_DestroyShBuffer to drop the reference.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be mapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_MapShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctINT32 oldValue = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ /* Increase the reference count. */
++ gckOS_AtomIncrement(Kernel->os, shBuf->reference, &oldValue);
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_WriteShBuffer
++**
++** Write user data into shared buffer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be written to.
++**
++** gctPOINTER UserData
++** User mode pointer to hold the source data.
++**
++** gctUINT32 ByteCount
++** Specify number of bytes to write. If this is larger than
++** shared buffer size, gcvSTATUS_INVALID_ARGUMENT is returned.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_WriteShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u UserData=0x%X ByteCount=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf, UserData, ByteCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ if ((ByteCount > shBuf->size) ||
++ (ByteCount == 0) ||
++ (UserData == gcvNULL))
++ {
++ /* Exceeds buffer max size or invalid. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (shBuf->data == gcvNULL)
++ {
++        /* Allocate the full buffer size on the first write so that later,
++        ** larger writes within 'size' stay in bounds. */
++        gcmkONERROR(gckOS_Allocate(Kernel->os, shBuf->size, &shBuf->data));
++ }
++
++ /* Copy data from user. */
++ gcmkONERROR(
++ gckOS_CopyFromUserData(Kernel->os,
++ shBuf->data,
++ UserData,
++ ByteCount));
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_ReadShBuffer
++**
++** Read data from shared buffer and copy to user pointer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSHBUF ShBuf
++** Specify the shared buffer to be read from.
++**
++** gctPOINTER UserData
++** User mode pointer to save output data.
++**
++** gctUINT32 ByteCount
++**          Specify the number of bytes to read.
++**          If this is larger than the shared buffer size, only the available
++**          bytes are copied. If smaller, the requested size is copied.
++**
++** OUTPUT:
++**
++** gctUINT32 * BytesRead
++** Pointer to hold how many bytes actually read from shared buffer.
++*/
++gceSTATUS
++gckKERNEL_ReadShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount,
++ OUT gctUINT32 * BytesRead
++ )
++{
++ gceSTATUS status;
++ gcsSHBUF_PTR shBuf;
++ gctUINT32 bytes;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ShBuf=%u UserData=0x%X ByteCount=%u",
++ Kernel, (gctUINT32)(gctUINTPTR_T) ShBuf, UserData, ByteCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(ShBuf != gcvNULL);
++
++ /* Acquire mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os,
++ Kernel->db->pointerDatabaseMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find shared buffer structure. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(Kernel->db->pointerDatabase,
++ (gctUINT32)(gctUINTPTR_T)ShBuf,
++ (gctPOINTER)&shBuf));
++
++ gcmkASSERT(shBuf->id == (gctUINT32)(gctUINTPTR_T)ShBuf);
++
++ if (shBuf->data == gcvNULL)
++ {
++ *BytesRead = 0;
++
++ /* No data in shared buffer, skip copy. */
++ status = gcvSTATUS_SKIP;
++ goto OnError;
++ }
++ else if (ByteCount == 0)
++ {
++ /* Invalid size to read. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Determine bytes to copy. */
++ bytes = (ByteCount < shBuf->size) ? ByteCount : shBuf->size;
++
++ /* Copy data to user. */
++ gcmkONERROR(
++ gckOS_CopyToUserData(Kernel->os,
++ shBuf->data,
++ UserData,
++ bytes));
++
++ /* Return copied size. */
++ *BytesRead = bytes;
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*BytesRead=%u", bytes);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
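++/* Illustrative sketch (assumed callers, placeholder sizes): the intended
++** cross-process use of the shared buffer API. In practice the two halves
++** run in different processes through separate ioctls; they are shown in
++** one function only to make the pairing visible. Every
++** gckKERNEL_MapShBuffer reference is balanced by a
++** gckKERNEL_DestroyShBuffer, and the storage is freed with the last
++** reference.
++*/
++#if 0
++static gceSTATUS
++_SampleShareData(
++    IN gckKERNEL Kernel,
++    IN gctPOINTER UserData,     /* User pointer in the producing process. */
++    IN gctPOINTER UserBuffer    /* User pointer in the consuming process. */
++    )
++{
++    gceSTATUS status;
++    gctSHBUF shBuf = gcvNULL;
++    gctUINT32 bytesRead;
++
++    /* Producer: create a 256-byte shared buffer and fill it. */
++    gcmkONERROR(gckKERNEL_CreateShBuffer(Kernel, 256, &shBuf));
++    gcmkONERROR(gckKERNEL_WriteShBuffer(Kernel, shBuf, UserData, 256));
++
++    /* Consumer: take a reference before touching the handle ... */
++    gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf));
++
++    /* ... copy the contents out ... */
++    gcmkONERROR(gckKERNEL_ReadShBuffer(
++        Kernel, shBuf, UserBuffer, 256, &bytesRead
++        ));
++
++    /* ... and drop the consumer reference again. */
++    gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf));
++
++    /* Producer: drop the creating reference; this frees the buffer. */
++    gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++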
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command.c 2015-05-01 14:57:59.567427001 -0500
+@@ -0,0 +1,3423 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_context.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _NewQueue
++**
++** Allocate a new command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object.
++**
++** OUTPUT:
++**
++** gckCOMMAND Command
++** gckCOMMAND object has been updated with a new command queue.
++*/
++static gceSTATUS
++_NewQueue(
++ IN OUT gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctINT currentIndex, newIndex;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Switch to the next command buffer. */
++ currentIndex = Command->index;
++ newIndex = (currentIndex + 1) % gcdCOMMAND_QUEUES;
++
++ /* Wait for availability. */
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.waitsignal]");
++#endif
++
++ gcmkONERROR(gckOS_WaitSignal(
++ Command->os,
++ Command->queues[newIndex].signal,
++ gcvINFINITE
++ ));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ if (newIndex < currentIndex)
++ {
++ Command->wrapCount += 1;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 2 * 4,
++ "%s(%d): queue array wrapped around.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): total queue wrap arounds %d.\n",
++ __FUNCTION__, __LINE__, Command->wrapCount
++ );
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): switched to queue %d.\n",
++ __FUNCTION__, __LINE__, newIndex
++ );
++#endif
++
++ /* Update gckCOMMAND object with new command queue. */
++ Command->index = newIndex;
++ Command->newQueue = gcvTRUE;
++ Command->logical = Command->queues[newIndex].logical;
++ Command->address = Command->queues[newIndex].address;
++ Command->offset = 0;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Command->os,
++ Command->logical,
++ (gctUINT32 *) &Command->physical
++ ));
++
++ if (currentIndex != -1)
++ {
++ /* Mark the command queue as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ Command->kernel->eventObj,
++ Command->queues[currentIndex].signal,
++ gcvKERNEL_COMMAND
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("Command->index=%d", Command->index);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IncrementCommitAtom(
++ IN gckCOMMAND Command,
++ IN gctBOOL Increment
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctINT32 atomValue;
++ gctBOOL powerAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++    /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, hardware->powerMutex, gcvINFINITE
++ ));
++ powerAcquired = gcvTRUE;
++
++ /* Increment the commit atom. */
++ if (Increment)
++ {
++ gcmkONERROR(gckOS_AtomIncrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ powerAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerAcquired)
++ {
++ /* Release the power mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++_ProcessHints(
++ IN gckCOMMAND Command,
++ IN gctUINT32 ProcessID,
++ IN gcoCMDBUF CommandBuffer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel;
++ gctBOOL needCopy = gcvFALSE;
++ gcskSECURE_CACHE_PTR cache;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR hintedData;
++ gctUINT32_PTR hintArray;
++ gctUINT i, hintCount;
++
++ gcmkHEADER_ARG(
++ "Command=0x%08X ProcessID=%d CommandBuffer=0x%08X",
++ Command, ProcessID, CommandBuffer
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Reset state array pointer. */
++ hintArray = gcvNULL;
++
++ /* Get the kernel object. */
++ kernel = Command->kernel;
++
++ /* Get the cache form the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++
++ /* Determine the start of the command buffer. */
++ commandBufferLogical
++ = (gctUINT8_PTR) CommandBuffer->logical
++ + CommandBuffer->startOffset;
++
++ /* Determine the number of records in the state array. */
++ hintCount = CommandBuffer->hintArrayTail - CommandBuffer->hintArray;
++
++    /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++ /* Get access to the state array. */
++ if (needCopy)
++ {
++ gctUINT copySize;
++
++ if (Command->hintArrayAllocated &&
++ (Command->hintArraySize < CommandBuffer->hintArraySize))
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++            Command->hintArrayAllocated = gcvFALSE;
++            Command->hintArraySize = 0;
++ }
++
++ if (!Command->hintArrayAllocated)
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_Allocate(
++ Command->os,
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ Command->hintArray = gcmPTR_TO_UINT64(pointer);
++ Command->hintArrayAllocated = gcvTRUE;
++ Command->hintArraySize = CommandBuffer->hintArraySize;
++ }
++
++ hintArray = gcmUINT64_TO_PTR(Command->hintArray);
++ copySize = hintCount * gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ hintArray,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ copySize
++ ));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ hintArray = pointer;
++ }
++
++ /* Scan through the buffer. */
++ for (i = 0; i < hintCount; i += 1)
++ {
++ /* Determine the location of the hinted data. */
++ hintedData = commandBufferLogical + hintArray[i];
++
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) hintedData
++ ));
++ }
++
++OnError:
++ /* Get access to the state array. */
++ if (!needCopy && (hintArray != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ hintArray
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_FlushMMU(
++ IN gckCOMMAND Command
++ )
++{
++#if gcdSECURITY
++ return gcvSTATUS_OK;
++#else
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckHARDWARE hardware = Command->kernel->hardware;
++ gctBOOL pause = gcvFALSE;
++
++ gctUINT8_PTR pointer;
++ gctUINT32 eventBytes;
++ gctUINT32 endBytes;
++ gctUINT32 bufferSize;
++ gctUINT32 executeBytes;
++ gctUINT32 waitLinkBytes;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++        /* Page table is updated; flush the MMU before commit. */
++ gcmkONERROR(gckHARDWARE_FlushMMU(hardware));
++
++ if ((oldValue & gcvPAGE_TABLE_DIRTY_BIT_FE)
++ && (hardware->endAfterFlushMmuCache)
++ )
++ {
++ pause = gcvTRUE;
++ }
++ }
++
++ if (pause)
++ {
++ /* Query size. */
++ gcmkONERROR(gckHARDWARE_Event(hardware, gcvNULL, 0, gcvKERNEL_PIXEL, &eventBytes));
++ gcmkONERROR(gckHARDWARE_End(hardware, gcvNULL, &endBytes));
++
++ executeBytes = eventBytes + endBytes;
++
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ Command->offset + executeBytes,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Reserve space. */
++ gcmkONERROR(gckCOMMAND_Reserve(
++ Command,
++ executeBytes,
++ (gctPOINTER *)&pointer,
++ &bufferSize
++ ));
++
++ /* Append EVENT(29). */
++ gcmkONERROR(gckHARDWARE_Event(
++ hardware,
++ pointer,
++ 29,
++ gcvKERNEL_PIXEL,
++ &eventBytes
++ ));
++
++ /* Append END. */
++ pointer += eventBytes;
++ gcmkONERROR(gckHARDWARE_End(hardware, pointer, &endBytes));
++
++ /* Store address to queue. */
++ gcmkONERROR(gckENTRYQUEUE_Enqueue(
++ Command->kernel,
++ &Command->queue,
++ Command->address + Command->offset + executeBytes,
++ waitLinkBytes
++ ));
++
++ gcmkONERROR(gckCOMMAND_Execute(Command, executeBytes));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++#endif
++}
++
++static void
++_DumpBuffer(
++ IN gctPOINTER Buffer,
++ IN gctUINT32 GpuAddress,
++ IN gctSIZE_T Size
++ )
++{
++ gctSIZE_T i, line, left;
++ gctUINT32_PTR data = Buffer;
++
++ line = Size / 32;
++ left = Size % 32;
++
++ for (i = 0; i < line; i++)
++ {
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
++ data += 8;
++ GpuAddress += 8 * 4;
++ }
++
++ switch(left)
++ {
++ case 28:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
++ break;
++ case 24:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5]);
++ break;
++ case 20:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4]);
++ break;
++ case 16:
++ gcmkPRINT("%X : %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3]);
++ break;
++ case 12:
++ gcmkPRINT("%X : %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2]);
++ break;
++ case 8:
++ gcmkPRINT("%X : %08X %08X ",
++ GpuAddress, data[0], data[1]);
++ break;
++ case 4:
++ gcmkPRINT("%X : %08X ",
++ GpuAddress, data[0]);
++ break;
++ default:
++ break;
++ }
++}
++
++static void
++_DumpKernelCommandBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++ gctUINT32 physical = 0;
++ gctPOINTER entry = gcvNULL;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ entry = Command->queues[i].logical;
++
++ gckOS_GetPhysicalAddress(Command->os, entry, &physical);
++
++ gcmkPRINT("Kernel command buffer %d\n", i);
++
++ _DumpBuffer(entry, physical, Command->pageSize);
++ }
++}
++
++/******************************************************************************\
++****************************** gckCOMMAND API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckCOMMAND_Construct
++**
++** Construct a new gckCOMMAND object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckCOMMAND * Command
++** Pointer to a variable that will hold the pointer to the gckCOMMAND
++** object.
++*/
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ )
++{
++ gckOS os;
++ gckCOMMAND command = gcvNULL;
++ gceSTATUS status;
++ gctINT i;
++ gctPOINTER pointer = gcvNULL;
++ gctSIZE_T pageSize;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ /* Extract the gckOS object. */
++ os = Kernel->os;
++
++ /* Allocate the gckCOMMAND structure. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), &pointer));
++ command = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(command, gcmSIZEOF(struct _gckCOMMAND)));
++
++ /* Initialize the gckCOMMAND object.*/
++ command->object.type = gcvOBJ_COMMAND;
++ command->kernel = Kernel;
++ command->os = os;
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Kernel->hardware,
++ &command->alignment,
++ &command->reservedHead,
++ &command->reservedTail
++ ));
++
++ /* Create the command queue mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexQueue));
++
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++    /* Create the context sequence mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContextSeq));
++#endif
++
++ /* Create the power management semaphore. */
++ gcmkONERROR(gckOS_CreateSemaphore(os, &command->powerSemaphore));
++
++ /* Create the commit atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &command->atomCommit));
++
++    /* Get the page size from the OS. */
++ gcmkONERROR(gckOS_GetPageSize(os, &pageSize));
++
++ gcmkSAFECASTSIZET(command->pageSize, pageSize);
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&command->kernelProcessID));
++
++    /* No pipe selected yet. */
++ command->pipeSelect = gcvPIPE_INVALID;
++
++ /* Pre-allocate the command queues. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &pageSize,
++ &command->queues[i].physical,
++ &command->queues[i].logical
++ ));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ command->queues[i].logical,
++ gcvFALSE,
++ &command->queues[i].address
++ ));
++
++ gcmkONERROR(gckOS_CreateSignal(
++ os, gcvFALSE, &command->queues[i].signal
++ ));
++
++ gcmkONERROR(gckOS_Signal(
++ os, command->queues[i].signal, gcvTRUE
++ ));
++ }
++
++#if gcdRECORD_COMMAND
++ gcmkONERROR(gckRECORDER_Construct(os, Kernel->hardware, &command->recorder));
++#endif
++
++ /* No command queue in use yet. */
++ command->index = -1;
++ command->logical = gcvNULL;
++ command->newQueue = gcvFALSE;
++
++ /* Command is not yet running. */
++ command->running = gcvFALSE;
++
++ /* Command queue is idle. */
++ command->idle = gcvTRUE;
++
++ /* Commit stamp is zero. */
++ command->commitStamp = 0;
++
++ /* END event signal not created. */
++ command->endEventSignal = gcvNULL;
++
++ command->queue.front = 0;
++ command->queue.rear = 0;
++ command->queue.count = 0;
++
++ /* Return pointer to the gckCOMMAND object. */
++ *Command = command;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Command=0x%x", *Command);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ if (command->atomCommit != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, command->atomCommit));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(os, command->powerSemaphore));
++ }
++
++ if (command->mutexContext != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContext));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (command->mutexContextSeq != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContextSeq));
++ }
++#endif
++
++ if (command->mutexQueue != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexQueue));
++ }
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ if (command->queues[i].signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ os, command->queues[i].signal
++ ));
++ }
++
++ if (command->queues[i].logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ os,
++ command->pageSize,
++ command->queues[i].physical,
++ command->queues[i].logical
++ ));
++ }
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, command));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Destroy
++**
++** Destroy an gckCOMMAND object.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to an gckCOMMAND object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Stop the command queue. */
++ gcmkVERIFY_OK(gckCOMMAND_Stop(Command, gcvFALSE));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkASSERT(Command->queues[i].signal != gcvNULL);
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->queues[i].signal
++ ));
++
++ gcmkASSERT(Command->queues[i].logical != gcvNULL);
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Command->os,
++ Command->pageSize,
++ Command->queues[i].physical,
++ Command->queues[i].logical
++ ));
++ }
++
++ /* END event signal. */
++ if (Command->endEventSignal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->endEventSignal
++ ));
++ }
++
++ /* Delete the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (Command->mutexContextSeq != gcvNULL)
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContextSeq));
++#endif
++
++ /* Delete the command queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));
++
++ /* Destroy the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Command->os, Command->powerSemaphore));
++
++ /* Destroy the commit atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Command->os, Command->atomCommit));
++
++#if gcdSECURE_USER
++ /* Free state array. */
++ if (Command->hintArrayAllocated)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArrayAllocated = gcvFALSE;
++ }
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER_Destory(Command->os, Command->recorder);
++#endif
++
++ /* Mark object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCOMMAND object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, Command));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
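++/* Illustrative sketch (assumed bring-up path, not part of this patch):
++** the expected lifecycle of a gckCOMMAND object. Construction allocates
++** the command queues and synchronization objects, gckCOMMAND_Start kicks
++** off execution, and gckCOMMAND_Destroy stops the queue itself before
++** freeing everything.
++*/
++#if 0
++static gceSTATUS
++_SampleCommandLifecycle(
++    IN gckKERNEL Kernel
++    )
++{
++    gceSTATUS status;
++    gckCOMMAND command = gcvNULL;
++
++    /* Create the command queue object for this core. */
++    gcmkONERROR(gckCOMMAND_Construct(Kernel, &command));
++
++    /* Start executing the command queue. */
++    gcmkONERROR(gckCOMMAND_Start(command));
++
++    /* ... commits are submitted here ... */
++
++    /* Destroy stops the queue internally and releases all resources. */
++    gcmkONERROR(gckCOMMAND_Destroy(command));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++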
++/*******************************************************************************
++**
++** gckCOMMAND_EnterCommit
++**
++** Acquire command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++**          Pointer to an gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctBOOL atomIncremented = gcvFALSE;
++ gctBOOL semaAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (!FromPower)
++ {
++ /* Increment COMMIT atom to let power management know that a commit is
++ ** in progress. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvTRUE));
++ atomIncremented = gcvTRUE;
++
++ /* Notify the system the GPU has a commit. */
++ gcmkONERROR(gckOS_Broadcast(Command->os,
++ hardware,
++ gcvBROADCAST_GPU_COMMIT));
++
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(Command->os,
++ Command->powerSemaphore));
++ semaAcquired = gcvTRUE;
++ }
++
++ /* Grab the command queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Command->os,
++ Command->mutexQueue,
++ gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (semaAcquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++ }
++
++ if (atomIncremented)
++ {
++ /* Decrement the commit atom. */
++ gcmkVERIFY_OK(_IncrementCommitAtom(
++ Command, gcvFALSE
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_ExitCommit
++**
++** Release command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to the gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Release the command queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
++
++ if (!FromPower)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(Command->os,
++ Command->powerSemaphore));
++
++ /* Decrement the commit atom. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvFALSE));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Start
++**
++** Start up the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object to start.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctUINT32 waitOffset = 0;
++ gctUINT32 waitLinkBytes;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->running)
++ {
++ /* Command queue already running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (Command->logical == gcvNULL)
++ {
++ /* Start at beginning of a new queue. */
++ gcmkONERROR(_NewQueue(Command));
++ }
++
++ /* Start at beginning of page. */
++ Command->offset = 0;
++
++ /* Set available number of bytes for the WAIT/LINK command sequence. */
++ waitLinkBytes = Command->pageSize;
++
++ /* Append WAIT/LINK. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ Command->logical,
++ 0,
++ &waitLinkBytes,
++ &waitOffset,
++ &Command->waitSize
++ ));
++
++ Command->waitLogical = (gctUINT8_PTR) Command->logical + waitOffset;
++ Command->waitPhysical = (gctUINT8_PTR) Command->physical + waitOffset;
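++
++ /* Record where this WAIT lives: gckCOMMAND_Commit() and gckCOMMAND_Execute()
++ ** later rewrite it into a LINK to splice new work into the running queue. */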
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->physical,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Adjust offset. */
++ Command->offset = waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++#if gcdSECURITY
++ /* Start FE by calling security service. */
++ gckKERNEL_SecurityStartCommand(
++ Command->kernel
++ );
++#else
++ /* Enable command processor. */
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->address,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Command queue is running. */
++ Command->running = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stop
++**
++** Stop the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object to stop.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ )
++{
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (!Command->running)
++ {
++ /* Command queue is not running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
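++
++ /* Two stop paths: if the hardware supports END events, schedule an END EVENT
++ ** that triggers endEventSignal; otherwise overwrite the last WAIT with an END
++ ** and wait for the GPU to go idle. */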
++
++ if (gckHARDWARE_IsFeatureAvailable(hardware,
++ gcvFEATURE_END_EVENT) == gcvSTATUS_TRUE)
++ {
++ /* Allocate the signal. */
++ if (Command->endEventSignal == gcvNULL)
++ {
++ gcmkONERROR(gckOS_CreateSignal(Command->os,
++ gcvTRUE,
++ &Command->endEventSignal));
++ }
++
++ /* Append the END EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Stop(Command->kernel->eventObj,
++ Command->kernelProcessID,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->endEventSignal,
++ &Command->waitSize));
++ }
++ else
++ {
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ hardware, Command->waitLogical, &Command->waitSize
++ ));
++
++#if gcdSECURITY
++ gcmkONERROR(gckKERNEL_SecurityExecute(
++ Command->kernel, Command->waitLogical, 8
++ ));
++#endif
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(Command->kernel->hardware,
++ Command->logical,
++ Command->offset));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ /* Wait for idle. */
++ gcmkONERROR(gckHARDWARE_GetIdle(hardware, !FromRecovery, &idle));
++ }
++
++ /* Command queue is no longer running. */
++ Command->running = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Commit
++**
++** Commit a command buffer to the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** gcoCMDBUF CommandBuffer
++** Pointer to a gcoCMDBUF object.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
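++/* Overall commit flow (as implemented below): take the commit atom and the
++** power management semaphore, lock the command queue and context mutexes,
++** link the (possibly updated) context buffer and the user command buffer into
++** the kernel queue, append a fresh WAIT/LINK, then rewrite the previous WAIT
++** into a LINK so the FE picks up the new work. */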
++#if gcdMULTI_GPU
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ )
++#endif
++{
++ gceSTATUS status;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL contextAcquired = gcvFALSE;
++ gckHARDWARE hardware;
++ gctBOOL needCopy = gcvFALSE;
++ gcsQUEUE_PTR eventRecord = gcvNULL;
++ gcsQUEUE _eventRecord;
++ gcsQUEUE_PTR nextEventRecord;
++ gctBOOL commandBufferMapped = gcvFALSE;
++ gcoCMDBUF commandBufferObject = gcvNULL;
++
++#if !gcdNULL_DRIVER
++ gcsCONTEXT_PTR contextBuffer;
++ struct _gcoCMDBUF _commandBufferObject;
++ gctPHYS_ADDR commandBufferPhysical;
++ gctUINT8_PTR commandBufferLogical = gcvNULL;
++ gctUINT32 commandBufferAddress = 0;
++ gctUINT8_PTR commandBufferLink = gcvNULL;
++ gctUINT commandBufferSize;
++ gctSIZE_T nopBytes;
++ gctUINT32 pipeBytes;
++ gctUINT32 linkBytes;
++ gctSIZE_T bytes;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR entryPhysical;
++#endif
++ gctPOINTER entryLogical;
++ gctUINT32 entryAddress;
++ gctUINT32 entryBytes;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR exitPhysical;
++#endif
++ gctPOINTER exitLogical;
++ gctUINT32 exitAddress;
++ gctUINT32 exitBytes;
++ gctPHYS_ADDR waitLinkPhysical;
++ gctPOINTER waitLinkLogical;
++ gctUINT32 waitLinkAddress;
++ gctUINT32 waitLinkBytes;
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctUINT32 waitSize;
++
++#ifdef __QNXNTO__
++ gctPOINTER userCommandBufferLogical = gcvNULL;
++ gctBOOL userCommandBufferLogicalMapped = gcvFALSE;
++ gctPOINTER userCommandBufferLink = gcvNULL;
++ gctBOOL userCommandBufferLinkMapped = gcvFALSE;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gctSIZE_T mmuConfigureBytes;
++ gctPOINTER mmuConfigureLogical = gcvNULL;
++ gctUINT32 mmuConfigureAddress;
++ gctPOINTER mmuConfigurePhysical = 0;
++ gctSIZE_T mmuConfigureWaitLinkOffset;
++ gckMMU mmu;
++ gctSIZE_T reservedBytes;
++ gctUINT32 oldValue;
++#endif
++
++#if gcdDUMP_COMMAND
++ gctPOINTER contextDumpLogical = gcvNULL;
++ gctSIZE_T contextDumpBytes = 0;
++ gctPOINTER bufferDumpLogical = gcvNULL;
++ gctSIZE_T bufferDumpBytes = 0;
++# endif
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gctBOOL sequenceAcquired = gcvFALSE;
++#endif
++
++ gctPOINTER pointer = gcvNULL;
++
++#if gcdMULTI_GPU
++ gctSIZE_T chipEnableBytes;
++#endif
++
++ gcmkHEADER_ARG(
++ "Command=0x%x CommandBuffer=0x%x ProcessID=%d",
++ Command, CommandBuffer, ProcessID
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->kernel->hardware->type == gcvHARDWARE_2D)
++ {
++ /* There is no context for 2D. */
++ Context = gcvNULL;
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Command->kernel, &mmu));
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ mmu->pageTableDirty[Command->kernel->core],
++ 0,
++ &oldValue));
++#else
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ if((Command->kernel->hardware->gpuProfiler) && (Command->kernel->profileEnable))
++ {
++ /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContextSeq, gcvINFINITE
++ ));
++ sequenceAcquired = gcvTRUE;
++ }
++#endif
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ contextAcquired = gcvTRUE;
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++
++ /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++#if gcdNULL_DRIVER
++ /* Context switch required? */
++ if ((Context != gcvNULL) && (Command->currContext != Context))
++ {
++ /* Yes, merge in the deltas. */
++ gckCONTEXT_Update(Context, ProcessID, StateDelta);
++
++ /* Update the current context. */
++ Command->currContext = Context;
++ }
++#else
++ if (needCopy)
++ {
++ commandBufferObject = &_commandBufferObject;
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ commandBufferObject,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF)
++ ));
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ }
++ else
++ {
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ &pointer
++ ));
++
++ commandBufferObject = pointer;
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ commandBufferMapped = gcvTRUE;
++ }
++
++ /* Query the size of NOP command. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware, gcvNULL, &nopBytes
++ ));
++
++ /* Query the size of pipe select command sequence. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ hardware, gcvNULL, gcvPIPE_3D, &pipeBytes
++ ));
++
++ /* Query the size of LINK command. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware, gcvNULL, 0, 0, &linkBytes
++ ));
++
++#if gcdMULTI_GPU
++ /* Query the size of chip enable command sequence. */
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware, gcvNULL, 0, &chipEnableBytes
++ ));
++#endif
++
++ /* Compute the command buffer entry and the size. */
++ commandBufferLogical
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->startOffset;
++
++ /* Get the hardware address. */
++ if (Command->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckKERNEL_GetGPUAddress(
++ Command->kernel,
++ commandBufferLogical,
++ gcvTRUE,
++ &commandBufferAddress
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ hardware,
++ commandBufferLogical,
++ gcvTRUE,
++ &commandBufferAddress
++ ));
++ }
++
++ /* Get the physical address. */
++ gcmkONERROR(gckOS_UserLogicalToPhysical(
++ Command->os,
++ commandBufferLogical,
++ (gctUINT32_PTR)&commandBufferPhysical
++ ));
++
++#ifdef __QNXNTO__
++ userCommandBufferLogical = (gctPOINTER) commandBufferLogical;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ userCommandBufferLogical,
++ 0,
++ &pointer));
++
++ commandBufferLogical = pointer;
++
++ userCommandBufferLogicalMapped = gcvTRUE;
++#endif
++
++ commandBufferSize
++ = commandBufferObject->offset
++ + Command->reservedTail
++ - commandBufferObject->startOffset;
++
++ gcmkONERROR(_FlushMMU(Command));
++
++ /* Get the current offset. */
++ offset = Command->offset;
++
++ /* Compute number of bytes left in current kernel command queue. */
++ bytes = Command->pageSize - offset;
++
++#if gcdMULTI_GPU
++ if (Command->kernel->core == gcvCORE_MAJOR)
++ {
++ commandBufferSize += chipEnableBytes;
++
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ commandBufferLogical + pipeBytes,
++ ChipEnable,
++ &chipEnableBytes
++ ));
++
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ commandBufferLogical + commandBufferSize - linkBytes - chipEnableBytes,
++ gcvCORE_3D_ALL_MASK,
++ &chipEnableBytes
++ ));
++ }
++ else
++ {
++ commandBufferSize += nopBytes;
++
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ commandBufferLogical + pipeBytes,
++ &nopBytes
++ ));
++
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ commandBufferLogical + commandBufferSize - linkBytes - nopBytes,
++ &nopBytes
++ ));
++ }
++#endif
++
++ /* Query the size of WAIT/LINK command sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ offset,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < waitLinkBytes)
++ {
++ /* No, create a new one. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Get the new current offset. */
++ offset = Command->offset;
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - offset;
++ gcmkASSERT(bytes >= waitLinkBytes);
++ }
++
++ /* Compute the location of the WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + offset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + offset;
++ waitLinkAddress = Command->address + offset;
++
++ /* Context switch required? */
++ if (Context == gcvNULL)
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryAddress = commandBufferAddress + offset;
++ entryBytes = commandBufferSize - offset;
++
++ Command->currContext = gcvNULL;
++ }
++ else if (Command->currContext != Context)
++ {
++ /* Temporarily disable context length optimization. */
++ Context->dirty = gcvTRUE;
++
++ /* Get the current context buffer. */
++ contextBuffer = Context->buffer;
++
++ /* Yes, merge in the deltas. */
++ gcmkONERROR(gckCONTEXT_Update(Context, ProcessID, StateDelta));
++
++ /* Determine context entry and exit points. */
++ if (0)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D and 3D are used.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++
++ /* Mark context as not dirty. */
++ Context->dirty = gcvFALSE;
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D only command buffer.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of the 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++ /* Not using 2D. */
++ else
++ {
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 3D only command buffer.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + Context->pipeSelectBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryAddress = contextBuffer->address + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: "XD" command buffer - neither 2D nor 3D.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_3D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom3D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom3D;
++
++ entryAddress
++ = contextBuffer->address
++ + Context->entryOffsetXDFrom3D;
++
++ entryBytes
++ = Context->bufferSize
++ - Context->entryOffsetXDFrom3D;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom2D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom2D;
++
++ entryAddress
++ = contextBuffer->address
++ + Context->entryOffsetXDFrom2D;
++
++ entryBytes
++ = Context->totalSize
++ - Context->entryOffsetXDFrom2D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the context buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)entryPhysical,
++ entryLogical,
++ entryBytes
++ ));
++#endif
++
++ /* Update the current context. */
++ Command->currContext = Context;
++
++#if gcdDUMP_COMMAND
++ contextDumpLogical = entryLogical;
++ contextDumpBytes = entryBytes;
++#endif
++
++#if gcdSECURITY
++ /* Commit context buffer to trust zone. */
++ gckKERNEL_SecurityExecute(
++ Command->kernel,
++ entryLogical,
++ entryBytes - 8
++ );
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER_Record(
++ Command->recorder,
++ gcvNULL,
++ 0xFFFFFFFF,
++ entryLogical,
++ entryBytes - 8
++ );
++#endif
++ }
++
++ /* Same context. */
++ else
++ {
++ /* Determine context entry and exit points. */
++ if (commandBufferObject->using2D && Context->dirty2D)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryAddress = contextBuffer->address + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryAddress = contextBuffer->address;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of the 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++ else
++ {
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryAddress = contextBuffer->address + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferAddress + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryAddress = commandBufferAddress + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ }
++ }
++
++#if gcdDUMP_COMMAND
++ bufferDumpLogical = commandBufferLogical + offset;
++ bufferDumpBytes = commandBufferSize - offset;
++#endif
++
++#if gcdSECURE_USER
++ /* Process user hints. */
++ gcmkONERROR(_ProcessHints(Command, ProcessID, commandBufferObject));
++#endif
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = Command->physical;
++#endif
++
++ exitLogical = Command->logical;
++ exitAddress = Command->address;
++ exitBytes = Command->offset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump to the new
++ WAIT/LINK command sequence. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = waitLinkPhysical;
++#endif
++ exitLogical = waitLinkLogical;
++ exitAddress = waitLinkAddress;
++ exitBytes = waitLinkBytes;
++ }
++
++ /* Add a new WAIT/LINK command sequence. When the command buffer which is
++ currently being scheduled is fully executed by the GPU, the FE will
++ jump to this WAIT/LINK sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ waitLinkLogical,
++ offset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitSize
++ ));
++
++ /* Compute the location of the WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = (gctUINT8_PTR) waitLinkLogical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command queue cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)exitPhysical,
++ exitLogical,
++ exitBytes
++ ));
++#endif
++
++ /* Determine the location of the LINK command in the command buffer. */
++ commandBufferLink
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->offset;
++
++#ifdef __QNXNTO__
++ userCommandBufferLink = (gctPOINTER) commandBufferLink;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ userCommandBufferLink,
++ 0,
++ &pointer));
++
++ commandBufferLink = pointer;
++
++ userCommandBufferLinkMapped = gcvTRUE;
++#endif
++
++#if gcdMULTI_GPU
++ if (Command->kernel->core == gcvCORE_MAJOR)
++ {
++ commandBufferLink += chipEnableBytes;
++ }
++ else
++ {
++ commandBufferLink += nopBytes;
++ }
++#endif
++
++ /* Generate a LINK from the end of the command buffer being scheduled
++ back to the kernel command queue. */
++#if !gcdSECURITY
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ commandBufferLink,
++ exitAddress,
++ exitBytes,
++ &linkBytes
++ ));
++#endif
++
++#ifdef __QNXNTO__
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLink,
++ 0,
++ commandBufferLink));
++
++ userCommandBufferLinkMapped = gcvFALSE;
++#endif
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ ProcessID,
++ gcvNULL,
++ (gctUINT32)commandBufferPhysical,
++ commandBufferLogical,
++ commandBufferSize
++ ));
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER_Record(
++ Command->recorder,
++ commandBufferLogical + offset,
++ commandBufferSize - offset - 8,
++ gcvNULL,
++ 0xFFFFFFFF
++ );
++
++ gckRECORDER_AdvanceIndex(Command->recorder, Command->commitStamp);
++
++ Command->commitStamp++;
++#endif
++
++#if gcdSECURITY
++ /* Submit command buffer to trust zone. */
++ gckKERNEL_SecurityExecute(
++ Command->kernel,
++ commandBufferLogical + offset,
++ commandBufferSize - offset - 8
++ );
++#else
++ /* Generate a LINK from the previous WAIT/LINK command sequence to the
++ entry determined above (either the context or the command buffer).
++ This LINK replaces the WAIT instruction from the previous WAIT/LINK
++ pair, therefore we use WAIT metrics for generation of this LINK.
++ This action will execute the entire sequence. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ Command->waitLogical,
++ entryAddress,
++ entryBytes,
++ &Command->waitSize
++ ));
++#endif
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ contextDumpLogical,
++ contextDumpBytes,
++ gceDUMP_BUFFER_CONTEXT,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ bufferDumpLogical,
++ bufferDumpBytes,
++ gceDUMP_BUFFER_USER,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ waitLinkLogical,
++ waitLinkBytes,
++ gceDUMP_BUFFER_WAITLINK,
++ gcvFALSE
++ );
++
++ /* Update the current pipe. */
++ Command->pipeSelect = commandBufferObject->exitPipe;
++
++ /* Update command queue offset. */
++ Command->offset += waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update address of last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitSize;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.commit]");
++#endif
++#endif /* gcdNULL_DRIVER */
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ contextAcquired = gcvFALSE;
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++#if VIVANTE_PROFILER_CONTEXT
++ if(sequenceAcquired)
++ {
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE, ChipEnable));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE));
++#endif
++ if (Command->currContext)
++ {
++ gcmkONERROR(gckHARDWARE_UpdateContextProfile(
++ hardware,
++ Command->currContext));
++ }
++
++ /* Release the context sequence mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ sequenceAcquired = gcvFALSE;
++ }
++#endif
++
++ /* Loop while there are records in the queue. */
++ while (EventQueue != gcvNULL)
++ {
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ eventRecord = &_eventRecord;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os, eventRecord, EventQueue, gcmSIZEOF(gcsQUEUE)
++ ));
++ }
++ else
++ {
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ eventRecord = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(gckEVENT_AddList(
++ Command->kernel->eventObj, &eventRecord->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE
++ ));
++
++ /* Next record in the queue. */
++ nextEventRecord = gcmUINT64_TO_PTR(eventRecord->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os, EventQueue, gcmSIZEOF(gcsQUEUE), (gctPOINTER *) eventRecord
++ ));
++
++ eventRecord = gcvNULL;
++ }
++
++ EventQueue = nextEventRecord;
++ }
++
++ if (Command->kernel->eventObj->queueHead == gcvNULL
++ && Command->kernel->hardware->powerManagement == gcvTRUE
++ )
++ {
++ /* Commit done event by which work thread knows all jobs done. */
++ gcmkVERIFY_OK(
++ gckEVENT_CommitDone(Command->kernel->eventObj, gcvKERNEL_PIXEL));
++ }
++
++ /* Submit events. */
++#if gcdMULTI_GPU
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE, ChipEnable);
++#else
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE);
++#endif
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkTRACE(
++ gcvLEVEL_INFO,
++ "%s(%d): Intterupted in gckEVENT_Submit",
++ __FUNCTION__, __LINE__
++ );
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++
++#ifdef __QNXNTO__
++ if (userCommandBufferLogicalMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLogical,
++ 0,
++ commandBufferLogical));
++
++ userCommandBufferLogicalMapped = gcvFALSE;
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++
++ commandBufferMapped = gcvFALSE;
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((eventRecord != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) eventRecord
++ ));
++ }
++
++ if (contextAcquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (sequenceAcquired)
++ {
++ /* Release the context sequence mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ }
++#endif
++
++#ifdef __QNXNTO__
++ if (userCommandBufferLinkMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLink,
++ 0,
++ commandBufferLink));
++ }
++
++ if (userCommandBufferLogicalMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ userCommandBufferLogical,
++ 0,
++ commandBufferLogical));
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Reserve
++**
++** Reserve space in the command queue. Also acquire the command queue mutex.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** gctPOINTER * Buffer
++** Pointer to a variable that will receive the address of the reserved
++** space.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable that will receive the number of bytes
++** available in the command queue.
++*/
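++/* A reservation is normally paired with gckCOMMAND_Execute() below, which
++** appends the WAIT/LINK after the reserved area and converts the previous
++** WAIT into a LINK so the reserved commands start executing. */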
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctUINT32 * BufferSize
++ )
++{
++ gceSTATUS status;
++ gctUINT32 bytes;
++ gctUINT32 requiredBytes;
++ gctUINT32 requestedAligned;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute the aligned number of requested bytes. */
++ requestedAligned = gcmALIGN(RequestedBytes, Command->alignment);
++
++ /* Another WAIT/LINK command sequence will have to be appended after
++ the requested area being reserved. Compute the number of bytes
++ required for WAIT/LINK at the location after the reserved area. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ gcvNULL,
++ Command->offset + requestedAligned,
++ &requiredBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Compute total number of bytes required. */
++ requiredBytes += requestedAligned;
++
++ /* Compute number of bytes available in command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < requiredBytes)
++ {
++ /* Create a new command queue. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Still not enough space? */
++ if (bytes < requiredBytes)
++ {
++ /* Rare case, not enough room in command queue. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++ }
++
++ /* Return pointer to empty slot command queue. */
++ *Buffer = (gctUINT8 *) Command->logical + Command->offset;
++
++ /* Return number of bytes left in command queue. */
++ *BufferSize = bytes;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Execute
++**
++** Execute a previously reserved command queue by appending a WAIT/LINK command
++** sequence after it and modifying the last WAIT into a LINK command. The
++** command FIFO mutex will be released whether this function succeeds or not.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequestedBytes
++ )
++{
++ gceSTATUS status;
++
++ gctPHYS_ADDR waitLinkPhysical;
++ gctUINT8_PTR waitLinkLogical;
++ gctUINT32 waitLinkOffset;
++ gctUINT32 waitLinkBytes;
++
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctUINT32 waitBytes;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR execPhysical;
++#endif
++ gctPOINTER execLogical;
++ gctUINT32 execAddress;
++ gctUINT32 execBytes;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute offset for WAIT/LINK. */
++ waitLinkOffset = Command->offset + RequestedBytes;
++
++ /* Compute number of bytes left in command queue. */
++ waitLinkBytes = Command->pageSize - waitLinkOffset;
++
++ /* Compute the location of the WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + waitLinkOffset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + waitLinkOffset;
++
++ /* Append WAIT/LINK in command queue. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ waitLinkLogical,
++ waitLinkOffset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitBytes
++ ));
++
++ /* Compute the location of the WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = waitLinkLogical + waitOffset;
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = Command->physical;
++#endif
++ execLogical = Command->logical;
++ execAddress = Command->address;
++ execBytes = waitLinkOffset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump directly to the
++ reserved area. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = (gctUINT8 *) Command->physical + Command->offset;
++#endif
++ execLogical = (gctUINT8 *) Command->logical + Command->offset;
++ execAddress = Command->address + Command->offset;
++ execBytes = RequestedBytes + waitLinkBytes;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)execPhysical,
++ execLogical,
++ execBytes
++ ));
++#endif
++
++ /* Convert the last WAIT into a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Command->kernel->hardware,
++ Command->waitLogical,
++ execAddress,
++ execBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ (gctUINT32)Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ execLogical,
++ execBytes,
++ gceDUMP_BUFFER_KERNEL,
++ gcvFALSE
++ );
++
++ /* Update the pointer to the last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitBytes;
++
++ /* Update the command queue. */
++ Command->offset += RequestedBytes + waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Command->kernel->hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.execute]");
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stall
++**
++** The calling thread will be suspended until the command queue has been
++** completed.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++#endif
++{
++#if gcdNULL_DRIVER
++ /* Do nothing with infinite hardware. */
++ return gcvSTATUS_OK;
++#else
++ gckOS os;
++ gckHARDWARE hardware;
++ gckEVENT eventObject;
++ gceSTATUS status;
++ gctSIGNAL signal = gcvNULL;
++ gctUINT timer = 0;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Extract the gckOS object pointer. */
++ os = Command->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Extract the gckEVENT object pointer. */
++ eventObject = Command->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObject, gcvOBJ_EVENT);
++
++ /* Allocate the signal. */
++ gcmkONERROR(gckOS_CreateSignal(os, gcvTRUE, &signal));
++
++ /* Append the EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Signal(eventObject, signal, gcvKERNEL_PIXEL));
++
++ /* Submit the event queue. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower, ChipEnable));
++#else
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower));
++#endif
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.stall]");
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ /* Error. */
++ goto OnError;
++ }
++
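++ /* Wait in gcdGPU_ADVANCETIMER slices; on each timeout the idle register is
++ ** logged (debug builds only) before waiting again. */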
++ do
++ {
++ /* Wait for the signal. */
++ status = gckOS_WaitSignal(os, signal, gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT32 idle;
++
++ /* Read idle register. */
++ gcmkVERIFY_OK(gckHARDWARE_GetIdle(
++ hardware, gcvFALSE, &idle
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): idle=%08x",
++ __FUNCTION__, __LINE__, idle
++ );
++
++ gcmkVERIFY_OK(gckOS_MemoryBarrier(os, gcvNULL));
++#endif
++
++ /* Advance timer. */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++ else if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ }
++ while (gcmIS_ERROR(status));
++
++ /* Bail out on timeout. */
++ if (gcmIS_ERROR(status))
++ {
++ /* Broadcast the stuck GPU. */
++ gcmkONERROR(gckOS_Broadcast(
++ os, hardware, gcvBROADCAST_GPU_STUCK
++ ));
++ }
++
++ /* Delete the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ /* Free the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Attach
++**
++** Attach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive a pointer to a new
++** gckCONTEXT object.
++**
++** gctSIZE_T * StateCount
++** Pointer to a variable that will receive the number of states
++** in the context buffer.
++*/
++#if (gcdENABLE_3D || gcdENABLE_2D)
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Construct(
++ Command->os,
++ Command->kernel->hardware,
++ ProcessID,
++ Context
++ ));
++
++ /* Return the number of states in the context. */
++ * StateCount = (* Context)->stateCount;
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%x", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckCOMMAND_Detach
++**
++** Detach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x", Command, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Destroy the gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Destroy(Context));
++
++ if (Command->currContext == Context)
++ {
++ /* Detach from the gckCOMMAND object if the destroyed context is the current context. */
++ Command->currContext = gcvNULL;
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_DumpExecutingBuffer
++**
++** Dump the command buffer which GPU is executing.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 gpuAddress;
++ gctSIZE_T pageCount;
++ gctPOINTER entry;
++ gckOS os = Command->os;
++ gckKERNEL kernel = Command->kernel;
++ gctINT pid;
++ gctUINT32 i, rear;
++ gctUINT32 start, end;
++ gctUINT32 dumpFront, dumpRear;
++ gckLINKQUEUE queue = &kernel->hardware->linkQueue;
++ gckLINKQUEUE queueMirror;
++ gctUINT32 bytes;
++ gckLINKDATA linkData;
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("**** COMMAND BUF DUMP ****\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));
++
++ gcmkPRINT("DMA Address 0x%08X", gpuAddress);
++
++ if (Command->kernel->stuckDump > gcdSTUCK_DUMP_MIDDLE)
++ {
++ gcmkPRINT("Dump Level is %d", Command->kernel->stuckDump);
++
++ /* Duplicate queue because it will be changed.*/
++ gcmkONERROR(gckOS_AllocateMemory(os,
++ sizeof(struct _gckLINKQUEUE),
++ (gctPOINTER *)&queueMirror));
++
++ gckOS_MemCopy(queueMirror,
++ queue,
++ sizeof(struct _gckLINKQUEUE));
++
++ /* If a kernel command buffer links to a context buffer and then to a user command
++ ** buffer, the second link is recorded in the queue first, so the order must be fixed up here.
++ ** In Queue: C1 U1 U2 C2 U3 U4 U5 C3
++ ** Real: C1 X1 U1 C2 U2 U3 U4 C3 U5
++ ** Command buffer X1, which follows C1, is already out of the queue, so C1 is meaningless.
++ */
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Can't find it in virtual command buffer list, ignore it. */
++ continue;
++ }
++
++ if (buffer->kernelLogical)
++ {
++ /* It is a context buffer. */
++ if (i == 0)
++ {
++ /* The real command buffer is out, so clear this slot. */
++ linkData->start = 0;
++ linkData->end = 0;
++ linkData->pid = 0;
++ }
++ else
++ {
++ /* Switch the context buffer and the command buffer. */
++ struct _gckLINKDATA tmp = *linkData;
++ gckLINKDATA linkDataPrevious;
++
++ gckLINKQUEUE_GetData(queueMirror, i - 1, &linkDataPrevious);
++ *linkData = *linkDataPrevious;
++ *linkDataPrevious = tmp;
++ }
++ }
++ }
++
++ /* Clear search result. */
++ dumpFront = dumpRear = gcvINFINITE;
++
++ gcmkPRINT("Link Stack:");
++
++ /* Search for the stuck address in the link queue, starting from the rear. */
++ rear = gcdLINK_QUEUE_SIZE - 1;
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ start = linkData->start;
++ end = linkData->end;
++ pid = linkData->pid;
++
++ if (gpuAddress >= start && gpuAddress < end)
++ {
++ /* Found the latest matching command buffer. */
++ gcmkPRINT(" %d, [%08X - %08X]", pid, start, end);
++
++ /* Initialize dump information. */
++ dumpFront = dumpRear = rear;
++ }
++
++ /* Advance to previous one. */
++ rear--;
++
++ if (dumpFront != gcvINFINITE)
++ {
++ break;
++ }
++ }
++
++ if (dumpFront == gcvINFINITE)
++ {
++ /* Can't find a matching record in the link queue; dump the kernel command buffer. */
++ _DumpKernelCommandBuffer(Command);
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ }
++
++ /* Search the last context buffer linked. */
++ while (rear > 0)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ gcmkPRINT(" %d, [%08X - %08X]",
++ linkData->pid,
++ linkData->start,
++ linkData->end);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_SUCCESS(status) && buffer->kernelLogical)
++ {
++ /* Found a context buffer. */
++ dumpFront = rear;
++ break;
++ }
++
++ rear--;
++ }
++
++ if (dumpFront == dumpRear)
++ {
++ /* No context buffer was found; dump all we got. */
++ dumpFront = 0;
++ }
++
++ /* Dump from the last context buffer to the last command buffer where the hang happened. */
++ for (i = dumpFront; i <= dumpRear; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ /* Get gpu address of this command buffer. */
++ gpuAddress = linkData->start;
++ bytes = linkData->end - gpuAddress;
++
++ /* Get the whole buffer. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("Buffer [%08X - %08X] is lost or not belong to current process",
++ linkData->start,
++ linkData->end);
++ continue;
++ }
++
++ /* Get kernel logical for dump. */
++ if (buffer->kernelLogical)
++ {
++ /* Get kernel logical directly if it is a context buffer. */
++ entry = buffer->kernelLogical;
++ gcmkPRINT("Context Buffer:");
++ }
++ else
++ {
++ /* Make it accessible by the kernel if it is a user command buffer. */
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ &entry,
++ &pageCount));
++ gcmkPRINT("User Command Buffer:");
++ }
++
++ /* Dump from the entry. */
++ _DumpBuffer((gctUINT8_PTR)entry + (gpuAddress - buffer->gpuAddress), gpuAddress, bytes);
++
++ /* Release the kernel logical address if necessary. */
++ if (!buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ entry));
++ }
++ }
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ OnError:
++ return status;
++ }
++ else
++ {
++ gcmkPRINT("Dump Level is %d, dump memory near the stuck address",
++ Command->kernel->stuckDump);
++
++ /* Without link queue information, we don't know the entry of the last command
++ ** buffer; just dump the page where the GPU is stuck. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ &entry,
++ &pageCount));
++
++ if (entry)
++ {
++ gctUINT32 offset = gpuAddress - buffer->gpuAddress;
++ gctPOINTER entryDump = entry;
++
++ /* Dump one page. */
++ gctUINT32 bytes = 4096;
++
++ /* Align to page. */
++ offset &= 0xfffff000;
++
++ /* Kernel address of the page where the stall point is. */
++ entryDump = (gctUINT8_PTR)entryDump + offset;
++
++ /* Align to page. */
++ gpuAddress &= 0xfffff000;
++
++ gcmkPRINT("User Command Buffer:\n");
++ _DumpBuffer(entryDump, gpuAddress, bytes);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(os,
++ buffer->physical,
++ buffer->bytes,
++ entry));
++ }
++ else
++ {
++ _DumpKernelCommandBuffer(Command);
++ }
++
++ return gcvSTATUS_OK;
++ }
++}
++
++gceSTATUS
++gckCOMMAND_AddressInKernelCommandBuffer(
++ IN gckCOMMAND Command,
++ IN gctUINT32 Address,
++ OUT gctBOOL *In
++ )
++{
++ gctBOOL in = gcvFALSE;
++ gctINT i;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ if ((Address >= Command->queues[i].address)
++ && (Address < (Command->queues[i].address + Command->pageSize))
++ )
++ {
++ in = gcvTRUE;
++ break;
++ }
++ }
++
++ *In = in;
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_command_vg.c 2015-05-01 14:57:59.571427001 -0500
+@@ -0,0 +1,3787 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++*********************************** Debugging **********************************
++\******************************************************************************/
++
++#define gcvDISABLE_TIMEOUT 1
++#define gcvDUMP_COMMAND_BUFFER 0
++#define gcvDUMP_COMMAND_LINES 0
++
++
++#if gcvDEBUG || defined(EMULATOR) || gcvDISABLE_TIMEOUT
++# define gcvQUEUE_TIMEOUT ~0
++#else
++# define gcvQUEUE_TIMEOUT 10
++#endif
++
++
++/******************************************************************************\
++********************************** Definitions *********************************
++\******************************************************************************/
++
++/* Minimum buffer size. */
++#define gcvMINUMUM_BUFFER \
++ gcmSIZEOF(gcsKERNEL_QUEUE_HEADER) + \
++ gcmSIZEOF(gcsKERNEL_CMDQUEUE) * 2
++
++#define gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ static gceSTATUS \
++ _EventHandler_##Block##_##Number( \
++ IN gckVGKERNEL Kernel \
++ )
++
++#define gcmDEFINE_INTERRUPT_HANDLER(Block, Number) \
++ gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ { \
++ return _EventHandler_Block( \
++ Kernel, \
++ &Kernel->command->taskTable[gcvBLOCK_##Block], \
++ gcvFALSE \
++ ); \
++ }
++
++#define gcmDEFINE_INTERRUPT_HANDLER_ENTRY(Block, Number) \
++ { gcvBLOCK_##Block, _EventHandler_##Block##_##Number }
++
++/* Block interrupt handling table entry. */
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER * gcsBLOCK_INTERRUPT_HANDLER_PTR;
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER
++{
++ gceBLOCK block;
++ gctINTERRUPT_HANDLER handler;
++}
++gcsBLOCK_INTERRUPT_HANDLER;
++
++/* Queue control functions. */
++typedef struct _gcsQUEUE_UPDATE_CONTROL * gcsQUEUE_UPDATE_CONTROL_PTR;
++typedef struct _gcsQUEUE_UPDATE_CONTROL
++{
++ gctOBJECT_HANDLER execute;
++ gctOBJECT_HANDLER update;
++ gctOBJECT_HANDLER lastExecute;
++ gctOBJECT_HANDLER lastUpdate;
++}
++gcsQUEUE_UPDATE_CONTROL;
++
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_FlushMMU(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckVGHARDWARE hardware = Command->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* The page table was updated; flush the MMU before commit. */
++ gcmkONERROR(gckVGHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_WaitForIdle(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 idle;
++ gctUINT timeout = 0;
++
++ /* Loop while not idle. */
++ while (Queue->pending)
++ {
++ /* Did we reach the timeout limit? */
++ if (timeout == gcvQUEUE_TIMEOUT)
++ {
++ /* Hardware is probably dead... */
++ return gcvSTATUS_TIMEOUT;
++ }
++
++ /* Sleep for 100ms. */
++ gcmkERR_BREAK(gckOS_Delay(Command->os, 100));
++
++ /* Not the first loop? */
++ if (timeout > 0)
++ {
++ /* Read IDLE register. */
++ gcmkVERIFY_OK(gckVGHARDWARE_GetIdle(Command->hardware, &idle));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_COMMAND,
++ "%s: timeout, IDLE=%08X\n",
++ __FUNCTION__, idle
++ );
++ }
++
++ /* Increment the timeout counter. */
++ timeout += 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gctINT32
++_GetNextInterrupt(
++ IN gckVGCOMMAND Command,
++ IN gceBLOCK Block
++ )
++{
++ gctUINT index;
++ gcsBLOCK_TASK_ENTRY_PTR entry;
++ gctINT32 interrupt;
++
++ /* Get the block entry. */
++ entry = &Command->taskTable[Block];
++
++ /* Make sure we have initialized interrupts. */
++ gcmkASSERT(entry->interruptCount > 0);
++
++ /* Decrement the interrupt usage semaphore. */
++ gcmkVERIFY_OK(gckOS_DecrementSemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++
++ /* Get the value index. */
++ index = entry->interruptIndex;
++
++ /* Get the interrupt value. */
++ interrupt = entry->interruptArray[index];
++
++ /* Must be a valid value. */
++ gcmkASSERT((interrupt >= 0) && (interrupt <= 31));
++
++ /* Advance the index to the next value. */
++ index += 1;
++
++ /* Set the new index. */
++ entry->interruptIndex = (index == entry->interruptCount)
++ ? 0
++ : index;
++
++ /* Return interrupt value. */
++ return interrupt;
++}
++
++
++/******************************************************************************\
++***************************** Task Storage Management **************************
++\******************************************************************************/
++
++/* Minimum task buffer size. */
++#define gcvMIN_TASK_BUFFER \
++( \
++ gcmSIZEOF(gcsTASK_CONTAINER) + 128 \
++)
++
++/* Free list terminator. */
++#define gcvFREE_TASK_TERMINATOR \
++( \
++ (gcsTASK_CONTAINER_PTR) gcmINT2PTR(~0) \
++)
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------- Allocated Task Buffer List Management ------------------*/
++
++static void
++_InsertTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR AddAfter,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR addBefore;
++
++ /* Cannot add before the first buffer. */
++ gcmkASSERT(AddAfter != gcvNULL);
++
++ /* Create a shortcut to the next buffer. */
++ addBefore = AddAfter->allocNext;
++
++ /* Initialize the links. */
++ Buffer->allocPrev = AddAfter;
++ Buffer->allocNext = addBefore;
++
++ /* Link to the previous buffer. */
++ AddAfter->allocNext = Buffer;
++
++ /* Link to the next buffer. */
++ if (addBefore != gcvNULL)
++ {
++ addBefore->allocPrev = Buffer;
++ }
++}
++
++static void
++_RemoveTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++
++ /* Cannot remove the first buffer. */
++ gcmkASSERT(Buffer->allocPrev != gcvNULL);
++
++ /* Create shortcuts to the previous and next buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Tail buffer? */
++ if (next == gcvNULL)
++ {
++ /* Remove from the list. */
++ prev->allocNext = gcvNULL;
++ }
++
++ /* Buffer from the middle. */
++ else
++ {
++ prev->allocNext = next;
++ next->allocPrev = prev;
++ }
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*--------------------- Free Task Buffer List Management ---------------------*/
++
++static void
++_AppendToFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Cannot be a part of the free list already. */
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* First buffer to add? */
++ if (Command->taskFreeHead == gcvNULL)
++ {
++ /* Terminate the links. */
++ Buffer->freePrev = gcvFREE_TASK_TERMINATOR;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Initialize the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = Buffer;
++ }
++
++ /* Not the first, add after the tail. */
++ else
++ {
++ /* Initialize the new tail buffer. */
++ Buffer->freePrev = Command->taskFreeTail;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Add after the tail. */
++ Command->taskFreeTail->freeNext = Buffer;
++ Command->taskFreeTail = Buffer;
++ }
++}
++
++static void
++_RemoveFromFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Has to be a part of the free list. */
++ gcmkASSERT(Buffer->freePrev != gcvNULL);
++ gcmkASSERT(Buffer->freeNext != gcvNULL);
++
++ /* Head buffer? */
++ if (Buffer->freePrev == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Tail buffer as well? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Reset the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = gcvNULL;
++ }
++
++ /* No, just the head. */
++ else
++ {
++ /* Update the head. */
++ Command->taskFreeHead = Buffer->freeNext;
++
++ /* Terminate the next buffer. */
++ Command->taskFreeHead->freePrev = gcvFREE_TASK_TERMINATOR;
++ }
++ }
++
++ /* Not the head. */
++ else
++ {
++ /* Tail buffer? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Update the tail. */
++ Command->taskFreeTail = Buffer->freePrev;
++
++ /* Terminate the previous buffer. */
++ Command->taskFreeTail->freeNext = gcvFREE_TASK_TERMINATOR;
++ }
++
++ /* A buffer in the middle. */
++ else
++ {
++ /* Remove the buffer from the list. */
++ Buffer->freePrev->freeNext = Buffer->freeNext;
++ Buffer->freeNext->freePrev = Buffer->freePrev;
++ }
++ }
++
++ /* Reset free list pointers. */
++ Buffer->freePrev = gcvNULL;
++ Buffer->freeNext = gcvNULL;
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- Task Buffer Allocation --------------------------*/
++
++static void
++_SplitTaskBuffer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer,
++ IN gctUINT Size
++ )
++{
++ /* Determine the size of the new buffer. */
++ gctINT splitBufferSize = Buffer->size - Size;
++ gcmkASSERT(splitBufferSize >= 0);
++
++ /* Is the split buffer big enough to become a separate buffer? */
++ if (splitBufferSize >= gcvMIN_TASK_BUFFER)
++ {
++ /* Place the new path data. */
++ gcsTASK_CONTAINER_PTR splitBuffer = (gcsTASK_CONTAINER_PTR)
++ (
++ (gctUINT8_PTR) Buffer + Size
++ );
++
++ /* Set the trimmed buffer size. */
++ Buffer->size = Size;
++
++ /* Initialize the split buffer. */
++ splitBuffer->referenceCount = 0;
++ splitBuffer->size = splitBufferSize;
++ splitBuffer->freePrev = gcvNULL;
++ splitBuffer->freeNext = gcvNULL;
++
++ /* Link in. */
++ _InsertTaskBuffer(Buffer, splitBuffer);
++ _AppendToFreeList(Command, splitBuffer);
++ }
++}
++
++static gceSTATUS
++_AllocateTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ OUT gcsTASK_CONTAINER_PTR * Buffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x, Buffer ==0x%x", Command, Size, Buffer);
++
++ /* Verify arguments. */
++ gcmkVERIFY_ARGUMENT(Buffer != gcvNULL);
++
++ do
++ {
++ gcsTASK_STORAGE_PTR storage;
++ gcsTASK_CONTAINER_PTR buffer;
++
++ /* Adjust the size. */
++ Size += gcmSIZEOF(gcsTASK_CONTAINER);
++
++ /* Adjust the allocation size if not big enough. */
++ if (Size > Command->taskStorageUsable)
++ {
++ Command->taskStorageGranularity
++ = gcmALIGN(Size + gcmSIZEOF(gcsTASK_STORAGE), 1024);
++
++ Command->taskStorageUsable
++ = Command->taskStorageGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++ }
++
++ /* Is there a free buffer available? */
++ else if (Command->taskFreeHead != gcvNULL)
++ {
++ /* Set the initial free buffer. */
++ gcsTASK_CONTAINER_PTR buffer = Command->taskFreeHead;
++
++ do
++ {
++ /* Is the buffer big enough? */
++ if (buffer->size >= Size)
++ {
++ /* Remove the buffer from the free list. */
++ _RemoveFromFreeList(Command, buffer);
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++
++ /* Get the next free buffer. */
++ buffer = buffer->freeNext;
++ }
++ while (buffer != gcvFREE_TASK_TERMINATOR);
++ }
++
++ /* Allocate a container. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Command->os,
++ Command->taskStorageGranularity,
++ (gctPOINTER *) &storage
++ ));
++
++ /* Link in the storage buffer. */
++ storage->next = Command->taskStorage;
++ Command->taskStorage = storage;
++
++ /* Place the task buffer. */
++ buffer = (gcsTASK_CONTAINER_PTR) (storage + 1);
++
++ /* Determine the size of the buffer. */
++ buffer->size
++ = Command->taskStorageGranularity
++ - gcmSIZEOF(gcsTASK_STORAGE);
++
++ /* Initialize the task buffer. */
++ buffer->referenceCount = 0;
++ buffer->allocPrev = gcvNULL;
++ buffer->allocNext = gcvNULL;
++ buffer->freePrev = gcvNULL;
++ buffer->freeNext = gcvNULL;
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++static void
++_FreeTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++ gcsTASK_CONTAINER_PTR merged;
++
++ gctUINT32 mergedSize;
++
++ /* Verify arguments. */
++ gcmkASSERT(Buffer != gcvNULL);
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* Get shortcuts to the previous and next path data buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Is the previous path data buffer already free? */
++ if (prev && prev->freeNext)
++ {
++ /* The previous path data buffer is the one that remains. */
++ merged = prev;
++
++ /* Is the next path data buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge all three path data buffers into the previous. */
++ mergedSize = prev->size + Buffer->size + next->size;
++
++ /* Remove the next path data buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++ }
++ else
++ {
++ /* Merge the current path data buffer into the previous. */
++ mergedSize = prev->size + Buffer->size;
++ }
++
++ /* Delete the current path data buffer. */
++ _RemoveTaskBuffer(Buffer);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++ else
++ {
++ /* The current path data buffer is the one that remains. */
++ merged = Buffer;
++
++ /* Is the next buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge the next into the current. */
++ mergedSize = Buffer->size + next->size;
++
++ /* Remove the next buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++
++ /* Add the current buffer into the free list. */
++ _AppendToFreeList(Command, merged);
++ }
++}
++
++gceSTATUS
++_RemoveRecordFromProcesDB(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_HEADER_PTR Task
++ )
++{
++ gceSTATUS status;
++ gcsTASK_PTR task = (gcsTASK_PTR)((gctUINT8_PTR)Task - sizeof(gcsTASK));
++ gcsTASK_FREE_VIDEO_MEMORY_PTR freeVideoMemory;
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR unlockVideoMemory;
++ gctINT pid;
++ gctUINT32 size;
++ gctUINT32 handle;
++ gckKERNEL kernel = Command->kernel->kernel;
++ gckVIDMEM_NODE unlockNode = gcvNULL;
++ gckVIDMEM_NODE nodeObject = gcvNULL;
++ gceDATABASE_TYPE type;
++
++ /* Get the total size of all tasks. */
++ size = task->size;
++
++ gcmkVERIFY_OK(gckOS_GetProcessID((gctUINT32_PTR)&pid));
++
++ do
++ {
++ switch (Task->id)
++ {
++ case gcvTASK_FREE_VIDEO_MEMORY:
++ freeVideoMemory = (gcsTASK_FREE_VIDEO_MEMORY_PTR)Task;
++
++ handle = (gctUINT32)freeVideoMemory->node;
++
++ status = gckVIDMEM_HANDLE_Lookup(
++ Command->kernel->kernel,
++ pid,
++ handle,
++ &nodeObject);
++
++ if (gcmIS_ERROR(status))
++ {
++ return status;
++ }
++
++ gckVIDMEM_HANDLE_Dereference(kernel, pid, handle);
++ freeVideoMemory->node = gcmALL_TO_UINT32(nodeObject);
++
++ type = gcvDB_VIDEO_MEMORY
++ | (nodeObject->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++ | (nodeObject->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ type,
++ gcmINT2PTR(handle)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_FREE_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(freeVideoMemory + 1);
++
++ break;
++ case gcvTASK_UNLOCK_VIDEO_MEMORY:
++ unlockVideoMemory = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(unlockVideoMemory->node)));
++
++ handle = (gctUINT32)unlockVideoMemory->node;
++
++ status = gckVIDMEM_HANDLE_Lookup(
++ Command->kernel->kernel,
++ pid,
++ handle,
++ &unlockNode);
++
++ if (gcmIS_ERROR(status))
++ {
++ return status;
++ }
++
++ gckVIDMEM_HANDLE_Dereference(kernel, pid, handle);
++ unlockVideoMemory->node = gcmPTR_TO_UINT64(unlockNode);
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_UNLOCK_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(unlockVideoMemory + 1);
++
++ break;
++ default:
++ /* Skip the whole task. */
++ size = 0;
++ break;
++ }
++ }
++ while(size);
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++********************************* Task Scheduling ******************************
++\******************************************************************************/
++
++static gceSTATUS
++_ScheduleTasks(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable,
++ IN gctUINT8_PTR PreviousEnd
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gctINT block;
++ gcsTASK_CONTAINER_PTR container;
++ gcsTASK_MASTER_ENTRY_PTR userTaskEntry;
++ gcsBLOCK_TASK_ENTRY_PTR kernelTaskEntry;
++ gcsTASK_PTR userTask;
++ gctUINT8_PTR kernelTask;
++ gctINT32 interrupt;
++ gctUINT8_PTR eventCommand;
++
++#ifdef __QNXNTO__
++ gcsTASK_PTR oldUserTask = gcvNULL;
++ gctPOINTER pointer;
++#endif
++
++ /* Nothing to schedule? */
++ if (TaskTable->size == 0)
++ {
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->taskMutex,
++ gcvINFINITE
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ do
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " number of tasks scheduled = %d\n"
++ " size of event data in bytes = %d\n",
++ TaskTable->count,
++ TaskTable->size
++ );
++
++ /* Allocate task buffer. */
++ gcmkERR_BREAK(_AllocateTaskContainer(
++ Command,
++ TaskTable->size,
++ &container
++ ));
++
++ /* Determine the task data pointer. */
++ kernelTask = (gctUINT8_PTR) (container + 1);
++
++ /* Initialize the reference count. */
++ container->referenceCount = TaskTable->count;
++
++ /* Process tasks. */
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " processing tasks for block %d\n",
++ block
++ );
++
++ /* Get the current kernel table entry. */
++ kernelTaskEntry = &Command->taskTable[block];
++
++ /* Are there tasks for the current block scheduled? */
++ if (kernelTaskEntry->container == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " first task container for the block added\n",
++ block
++ );
++
++ /* Nothing yet, set the container buffer pointer. */
++ kernelTaskEntry->container = container;
++ kernelTaskEntry->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Yes, append to the end. */
++ else
++ {
++ kernelTaskEntry->link->cotainer = container;
++ kernelTaskEntry->link->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Set initial task. */
++ userTask = userTaskEntry->head;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " copying user tasks over to the kernel\n"
++ );
++
++ /* Copy tasks. */
++ do
++ {
++ gcsTASK_HEADER_PTR taskHeader;
++
++#ifdef __QNXNTO__
++ oldUserTask = userTask;
++
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ oldUserTask,
++ 0,
++ &pointer));
++
++ userTask = pointer;
++#endif
++
++ taskHeader = (gcsTASK_HEADER_PTR) (userTask + 1);
++
++ gcmkVERIFY_OK(_RemoveRecordFromProcesDB(Command, taskHeader));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " task ID = %d, size = %d\n",
++ ((gcsTASK_HEADER_PTR) (userTask + 1))->id,
++ userTask->size
++ );
++
++#ifdef __QNXNTO__
++ if (taskHeader->id == gcvTASK_SIGNAL)
++ {
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->coid = TaskTable->coid;
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->rcvid = TaskTable->rcvid;
++ }
++#endif
++
++ /* Copy the task data. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ kernelTask, taskHeader, userTask->size
++ ));
++
++ /* Advance to the next task. */
++ kernelTask += userTask->size;
++ userTask = userTask->next;
++
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ oldUserTask,
++ 0,
++ pointer));
++#endif
++ }
++ while (userTask != gcvNULL);
++
++ /* Update link pointer in the header. */
++ kernelTaskEntry->link = (gcsTASK_LINK_PTR) kernelTask;
++
++ /* Initialize link task. */
++ kernelTaskEntry->link->id = gcvTASK_LINK;
++ kernelTaskEntry->link->cotainer = gcvNULL;
++ kernelTaskEntry->link->task = gcvNULL;
++
++ /* Advance the task data pointer. */
++ kernelTask += gcmSIZEOF(gcsTASK_LINK);
++ }
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->taskMutex
++ ));
++
++ /* Assign interrupts to the blocks. */
++ eventCommand = PreviousEnd;
++
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ /* Get the interrupt number. */
++ interrupt = _GetNextInterrupt(Command, block);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): block = %d interrupt = %d\n",
++ __FUNCTION__, __LINE__,
++ block, interrupt
++ );
++
++ /* Determine the command position. */
++ eventCommand -= Command->info.eventCommandSize;
++
++ /* Append an EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, eventCommand, block, interrupt, gcvNULL
++ ));
++ }
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************** Memory Management *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_HardwareToKernel(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM memory;
++ gctUINT32 offset;
++ gctUINT32 nodePhysical;
++ gctPOINTER *logical;
++ gctSIZE_T bytes;
++ status = gcvSTATUS_OK;
++
++ memory = Node->VidMem.memory;
++
++ if (memory->object.type == gcvOBJ_VIDMEM)
++ {
++ nodePhysical = memory->baseAddress
++ + (gctUINT32)Node->VidMem.offset
++ + Node->VidMem.alignment;
++ bytes = Node->VidMem.bytes;
++ logical = &Node->VidMem.kernelVirtual;
++ }
++ else
++ {
++ nodePhysical = Node->Virtual.physicalAddress;
++ bytes = Node->Virtual.bytes;
++ logical = &Node->Virtual.kernelVirtual;
++ }
++
++ if (*logical == gcvNULL)
++ {
++ status = gckOS_MapPhysical(Os, nodePhysical, bytes, logical);
++
++ if (gcmkIS_ERROR(status))
++ {
++ return status;
++ }
++ }
++
++ offset = Address - nodePhysical;
++ *KernelPointer = (gctPOINTER)((gctUINT8_PTR)(*logical) + offset);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_ConvertUserCommandBufferPointer(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR UserCommandBuffer,
++ OUT gcsCMDBUFFER_PTR * KernelCommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcsCMDBUFFER_PTR mappedUserCommandBuffer = gcvNULL;
++ gckKERNEL kernel = Command->kernel->kernel;
++ gctUINT32 pid;
++ gckVIDMEM_NODE node;
++
++ gckOS_GetProcessID(&pid);
++
++ do
++ {
++ gctUINT32 headerAddress;
++
++ /* Map the command buffer structure into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ (gctPOINTER *) &mappedUserCommandBuffer
++ ));
++
++ /* Determine the address of the header. */
++ headerAddress
++ = mappedUserCommandBuffer->address
++ - mappedUserCommandBuffer->bufferOffset;
++
++ gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(
++ kernel,
++ pid,
++ gcmPTR2INT32(mappedUserCommandBuffer->node),
++ &node));
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ node->node,
++ headerAddress,
++ (gctPOINTER *) KernelCommandBuffer
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Unmap the user command buffer. */
++ if (mappedUserCommandBuffer != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_UnmapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ mappedUserCommandBuffer
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_AllocateLinear(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gcuVIDMEM_NODE_PTR * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status, last;
++ gctPOINTER logical;
++ gctPHYS_ADDR physical;
++ gctUINT32 address;
++ gctSIZE_T size = Size;
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Command->os,
++ gcvFALSE,
++ &size,
++ &physical,
++ &logical
++ ));
++
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(Command->os, logical, &address));
++
++ /* Set return values. */
++ * Node = physical;
++ * Address = address;
++ * Logical = logical;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (physical != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(gckOS_FreeContiguous(Command->os, physical, logical, size));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeLinear(
++ IN gckVGKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_FreeContiguous(Kernel->os, Node, Logical, 1));
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++_AllocateCommandBuffer(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++
++ do
++ {
++ gctUINT alignedHeaderSize;
++ gctUINT requestedSize;
++ gctUINT allocationSize;
++ gctUINT32 address = 0;
++ gctUINT8_PTR endCommand;
++
++ /* Determine the aligned header size. */
++ alignedHeaderSize
++ = (gctUINT32)gcmALIGN(gcmSIZEOF(gcsCMDBUFFER), Command->info.addressAlignment);
++
++ /* Align the requested size. */
++ requestedSize
++ = (gctUINT32)gcmALIGN(Size, Command->info.commandAlignment);
++
++ /* Determine the size of the buffer to allocate. */
++ allocationSize
++ = alignedHeaderSize
++ + requestedSize
++ + (gctUINT32)Command->info.staticTailSize;
++
++ /* Allocate the command buffer. */
++ gcmkERR_BREAK(_AllocateLinear(
++ Command,
++ allocationSize,
++ Command->info.addressAlignment,
++ &node,
++ &address,
++ (gctPOINTER *) &commandBuffer
++ ));
++
++ /* Initialize the structure. */
++ commandBuffer->completion = gcvVACANT_BUFFER;
++ commandBuffer->node = node;
++ commandBuffer->address = address + alignedHeaderSize;
++ commandBuffer->bufferOffset = alignedHeaderSize;
++ commandBuffer->size = requestedSize;
++ commandBuffer->offset = requestedSize;
++ commandBuffer->nextAllocated = gcvNULL;
++ commandBuffer->nextSubBuffer = gcvNULL;
++
++ /* Determine the data count. */
++ commandBuffer->dataCount
++ = (requestedSize + Command->info.staticTailSize)
++ / Command->info.commandAlignment;
++
++ /* Determine the location of the END command. */
++ endCommand
++ = (gctUINT8_PTR) commandBuffer
++ + alignedHeaderSize
++ + requestedSize;
++
++ /* Append an END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ endCommand,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++
++ /* Set the return pointer. */
++ * CommandBuffer = commandBuffer;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(_FreeLinear(Command->kernel, node, commandBuffer));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ /* Free the buffer. */
++ status = _FreeLinear(Kernel, CommandBuffer->node, CommandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++****************************** TS Overflow Handler *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_TSOverflow(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** TS OVERFLOW ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++
++/******************************************************************************\
++****************************** Bus Error Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_BusError(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** BUS ERROR ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++****************************** Power Stall Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_PowerStall(
++ IN gckVGKERNEL Kernel
++ )
++{
++ /* Signal. */
++ return gckOS_Signal(
++ Kernel->os,
++ Kernel->command->powerStallSignal,
++ gcvTRUE);
++}
++
++/******************************************************************************\
++******************************** Task Routines *********************************
++\******************************************************************************/
++
++typedef gceSTATUS (* gctTASKROUTINE) (
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gctTASKROUTINE _taskRoutine[] =
++{
++ _TaskLink, /* gcvTASK_LINK */
++ _TaskCluster, /* gcvTASK_CLUSTER */
++ _TaskIncrement, /* gcvTASK_INCREMENT */
++ _TaskDecrement, /* gcvTASK_DECREMENT */
++ _TaskSignal, /* gcvTASK_SIGNAL */
++ _TaskLockdown, /* gcvTASK_LOCKDOWN */
++ _TaskUnlockVideoMemory, /* gcvTASK_UNLOCK_VIDEO_MEMORY */
++ _TaskFreeVideoMemory, /* gcvTASK_FREE_VIDEO_MEMORY */
++ _TaskFreeContiguousMemory, /* gcvTASK_FREE_CONTIGUOUS_MEMORY */
++ _TaskUnmapUserMemory, /* gcvTASK_UNMAP_USER_MEMORY */
++};
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ /* Cast the task pointer. */
++ gcsTASK_LINK_PTR task = (gcsTASK_LINK_PTR) TaskHeader->task;
++
++ /* Save the pointer to the container. */
++ gcsTASK_CONTAINER_PTR container = TaskHeader->container;
++
++ /* No more tasks in the list? */
++ if (task->task == gcvNULL)
++ {
++ /* Reset the entry. */
++ TaskHeader->container = gcvNULL;
++ TaskHeader->task = gcvNULL;
++ TaskHeader->link = gcvNULL;
++ }
++ else
++ {
++ /* Update the entry. */
++ TaskHeader->container = task->cotainer;
++ TaskHeader->task = task->task;
++ }
++
++ /* The task handlers have decremented the reference count; free the container once it reaches zero. */
++ gcmkASSERT(container->referenceCount >= 0);
++ if (container->referenceCount == 0)
++ {
++ /* Free the container. */
++ _FreeTaskContainer(Command, container);
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ /* Cast the task pointer. */
++ gcsTASK_CLUSTER_PTR cluster = (gcsTASK_CLUSTER_PTR) TaskHeader->task;
++
++ /* Get the number of tasks. */
++ gctUINT taskCount = cluster->taskCount;
++
++ /* Advance to the next task. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (cluster + 1);
++
++ /* Perform all tasks in the cluster. */
++ while (taskCount)
++ {
++ /* Perform the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ Command,
++ TaskHeader
++ ));
++
++ /* Update the task count. */
++ taskCount -= 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_INCREMENT_PTR task = (gcsTASK_INCREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Increment data. */
++ (* logical) += 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_DECREMENT_PTR task = (gcsTASK_DECREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Decrement data. */
++ (* logical) -= 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_SIGNAL_PTR task = (gcsTASK_SIGNAL_PTR) TaskHeader->task;
++
++
++ /* Send the signal to the user process. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->rcvid, task->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->process
++ ));
++#endif /* __QNXNTO__ */
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR userCounter = gcvNULL;
++ gctUINT32_PTR kernelCounter = gcvNULL;
++ gctSIGNAL signal = gcvNULL;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_LOCKDOWN_PTR task = (gcsTASK_LOCKDOWN_PTR) TaskHeader->task;
++
++ /* Convert physical addresses into logical. */
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->userCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &userCounter
++ ));
++
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->kernelCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &kernelCounter
++ ));
++
++ /* Update the kernel counter. */
++ (* kernelCounter) += 1;
++
++ /* Are the counters equal? */
++ if ((* userCounter) == (* kernelCounter))
++ {
++ /* Map the signal into kernel space. */
++ gcmkERR_BREAK(gckOS_MapSignal(
++ Command->os, task->signal, task->process, &signal
++ ));
++
++ if (signal == gcvNULL)
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, task->signal, gcvTRUE
++ ));
++ }
++ else
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, signal, gcvTRUE
++ ));
++ }
++ }
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Destroy the mapped signal. */
++ if (signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, signal
++ ));
++ }
++
++ /* Unmap the physical memory. */
++ if (kernelCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ kernelCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ if (userCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ userCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR task
++ = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(
++ Command->kernel->kernel,
++ (gckVIDMEM_NODE)gcmUINT64_TO_PTR(task->node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++
++ gcmkERR_BREAK(gckVIDMEM_NODE_Dereference(
++ Command->kernel->kernel,
++ gcmUINT64_TO_PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_VIDEO_MEMORY_PTR task
++ = (gcsTASK_FREE_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_NODE_Dereference(
++ Command->kernel->kernel,
++ gcmINT2PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR task
++ = (gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR) TaskHeader->task;
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Command->os, task->physical, task->logical, task->bytes
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctPOINTER info;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNMAP_USER_MEMORY_PTR task
++ = (gcsTASK_UNMAP_USER_MEMORY_PTR) TaskHeader->task;
++
++ info = gckKERNEL_QueryPointerFromName(
++ Command->kernel->kernel, gcmALL_TO_UINT32(task->info));
++
++ /* Unmap the user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Command->os, gcvCORE_VG, task->memory, task->size, info, task->address
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++************ Hardware Block Interrupt Handlers For Scheduled Events ************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_Block(
++ IN gckVGKERNEL Kernel,
++ IN gcsBLOCK_TASK_ENTRY_PTR TaskHeader,
++ IN gctBOOL ProcessAll
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskHeader=0x%x ProcessAll=0x%x", Kernel, TaskHeader, ProcessAll);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (TaskHeader->task == gcvNULL)
++ {
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ gckVGCOMMAND command;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Increment the interrupt usage semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, TaskHeader->interruptSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->taskMutex,
++ gcvINFINITE
++ ));
++
++ /* Verify inputs. */
++ gcmkASSERT(TaskHeader != gcvNULL);
++ gcmkASSERT(TaskHeader->container != gcvNULL);
++ gcmkASSERT(TaskHeader->task != gcvNULL);
++ gcmkASSERT(TaskHeader->link != gcvNULL);
++
++ /* Process tasks. */
++ do
++ {
++ /* Process the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Is the next task a LINK? */
++ if (TaskHeader->task->id == gcvTASK_LINK)
++ {
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Done. */
++ break;
++ }
++ }
++ while (ProcessAll);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->taskMutex
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gcmDECLARE_INTERRUPT_HANDLER(COMMAND, 0)
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x ", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++
++ do
++ {
++ gckVGCOMMAND command;
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT entryCount;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Get the current queue. */
++ queueTail = command->queueTail;
++
++ /* Get the current queue entry. */
++ entry = queueTail->currentEntry;
++
++ /* Get the number of entries in the queue. */
++ entryCount = queueTail->pending;
++
++ /* Process all entries. */
++ while (gcvTRUE)
++ {
++ /* Call post-execution function. */
++ status = entry->handler(Kernel, entry);
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: post action failed.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Was the next buffer executed? */
++ if (status == gcvSTATUS_EXECUTED)
++ {
++ /* Update the queue. */
++ queueTail->pending = entryCount;
++ queueTail->currentEntry = entry;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++
++ /* Break out of the loop. */
++ break;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ entryCount -= 1;
++
++ /* Last entry? */
++ if (entryCount == 0)
++ {
++ /* Reset the queue to idle. */
++ queueTail->pending = 0;
++
++ /* Get a shortcut to the queue to merge with. */
++ mergeQueue = command->mergeQueue;
++
++ /* Merge the queues if necessary. */
++ if (mergeQueue != queueTail)
++ {
++ gcmkASSERT(mergeQueue < queueTail);
++ gcmkASSERT(mergeQueue->next == queueTail);
++
++ mergeQueue->size
++ += gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + queueTail->size;
++
++ mergeQueue->next = queueTail->next;
++ }
++
++ /* Advance to the next queue. */
++ queueTail = queueTail->next;
++
++ /* Did it wrap around? */
++ if (command->queue == queueTail)
++ {
++ /* Reset merge queue. */
++ command->mergeQueue = queueTail;
++ }
++
++ /* Set new queue. */
++ command->queueTail = queueTail;
++
++ /* Is the next queue scheduled? */
++ if (queueTail->pending > 0)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ status = gckVGHARDWARE_Execute(
++ command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: failed to start the next queue.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++ else
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(
++ Kernel->command->hardware, gcvPOWER_IDLE_BROADCAST
++ );
++ }
++
++ /* Break out of the loop. */
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->queueMutex
++ ));
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/* Define standard block interrupt handlers. */
++gcmDEFINE_INTERRUPT_HANDLER(TESSELLATOR, 0)
++gcmDEFINE_INTERRUPT_HANDLER(VG, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 1)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 2)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 3)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 4)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 5)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 6)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 7)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 8)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 9)
++
++/* The entries in the array are arranged by event priority. */
++static gcsBLOCK_INTERRUPT_HANDLER _blockHandlers[] =
++{
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(TESSELLATOR, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(VG, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 1),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 2),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 3),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 4),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 5),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 6),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 7),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 8),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 9),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(COMMAND, 0),
++};
++
++
++/******************************************************************************\
++************************* Static Command Buffer Handlers ***********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Cast the command buffer header. */
++ commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++************************* Dynamic Command Buffer Handlers **********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++********************************* Other Handlers *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_FreeKernelCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ /* Free the command buffer. */
++ status = _FreeCommandBuffer(Kernel, Entry->commandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************* Queue Management *******************************
++\******************************************************************************/
++
++#if gcvDUMP_COMMAND_BUFFER
++static void
++_DumpCommandQueue(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR QueueHeader,
++ IN gctUINT EntryCount
++ )
++{
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT queueIndex;
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ static gctUINT arrayCount = 0;
++#endif
++
++    /* Is dumping enabled? */
++    if (!Command->enableDumping)
++ {
++ return;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "COMMAND QUEUE DUMP: %d entries\n", EntryCount
++ );
++#endif
++
++ /* Get the pointer to the first entry. */
++ entry = QueueHeader->currentEntry;
++
++ /* Iterate through the queue. */
++ for (queueIndex = 0; queueIndex < EntryCount; queueIndex += 1)
++ {
++ gcsCMDBUFFER_PTR buffer;
++ gctUINT bufferCount;
++ gctUINT bufferIndex;
++ gctUINT i, count;
++ gctUINT size;
++ gctUINT32_PTR data;
++
++#if gcvDUMP_COMMAND_LINES
++ gctUINT lineNumber;
++#endif
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "ENTRY %d\n", queueIndex
++ );
++#endif
++
++ /* Reset the count. */
++ bufferCount = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Update the count. */
++ bufferCount += 1;
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ if (bufferCount > 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER SET: %d buffers.\n",
++ bufferCount
++ );
++ }
++#endif
++
++ /* Reset the buffer index. */
++ bufferIndex = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Determine the size of the buffer. */
++ size = buffer->dataCount * Command->info.commandAlignment;
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ /* A single buffer? */
++ if (bufferCount == 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER: count=%d (0x%X), size=%d bytes @ %08X.\n",
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER %d: count=%d (0x%X), size=%d bytes @ %08X\n",
++ bufferIndex,
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++#endif
++
++ /* Determine the number of double words to print. */
++ count = size / 4;
++
++ /* Determine the buffer location. */
++ data = (gctUINT32_PTR)
++ (
++ (gctUINT8_PTR) buffer + buffer->bufferOffset
++ );
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "unsigned int _" gcvCOMMAND_BUFFER_NAME "_%d[] =\n",
++ arrayCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "{\n"
++ );
++
++ arrayCount += 1;
++#endif
++
++#if gcvDUMP_COMMAND_LINES
++ /* Reset the line number. */
++ lineNumber = 0;
++#endif
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ count -= 2;
++#endif
++
++ for (i = 0; i < count; i += 1)
++ {
++ if ((i % 8) == 0)
++ {
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\t");
++#else
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " ");
++#endif
++ }
++
++#if gcvDUMP_COMMAND_LINES
++ if (lineNumber == gcvDUMP_COMMAND_LINES)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " . . . . . . . . .\n");
++ break;
++ }
++#endif
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "0x%08X", data[i]);
++
++ if (i + 1 == count)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ if (((i + 1) % 8) == 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ",\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ", ");
++ }
++ }
++ }
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "};\n\n"
++ );
++#endif
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ bufferIndex += 1;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ }
++}
++#endif
++
++static gceSTATUS
++_LockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ OUT gcsKERNEL_CMDQUEUE_PTR * Entries,
++ OUT gctUINT_PTR EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* Get a shortcut to the head of the queue. */
++ queueHead = Command->queueHead;
++
++ /* Is the head buffer still being worked on? */
++ if (queueHead->pending)
++ {
++ /* Increment overflow count. */
++ Command->queueOverflow += 1;
++
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, queueHead));
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Determine the first queue entry. */
++ queueHead->currentEntry = (gcsKERNEL_CMDQUEUE_PTR)
++ (
++ (gctUINT8_PTR) queueHead + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ );
++
++ /* Set the pointer to the first entry. */
++ * Entries = queueHead->currentEntry;
++
++ /* Determine the number of available entries. */
++ * EntryCount = queueHead->size / gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UnlockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ IN gctUINT EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++#if !gcdENABLE_INFINITE_SPEED_HW
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++ gcsKERNEL_QUEUE_HEADER_PTR queueNext;
++ gctUINT queueSize;
++ gctUINT newSize;
++ gctUINT unusedSize;
++
++ /* Get shortcut to the head and to the tail of the queue. */
++ queueTail = Command->queueTail;
++ queueHead = Command->queueHead;
++
++ /* Dump the command buffer. */
++#if gcvDUMP_COMMAND_BUFFER
++ _DumpCommandQueue(Command, queueHead, EntryCount);
++#endif
++
++ /* Get a shortcut to the current queue size. */
++ queueSize = queueHead->size;
++
++ /* Determine the new queue size. */
++ newSize = EntryCount * gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++ gcmkASSERT(newSize <= queueSize);
++
++ /* Determine the size of the unused area. */
++ unusedSize = queueSize - newSize;
++
++ /* Is the unused area big enough to become a buffer? */
++ if (unusedSize >= gcvMINUMUM_BUFFER)
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR nextHead;
++
++ /* Place the new header. */
++ nextHead = (gcsKERNEL_QUEUE_HEADER_PTR)
++ (
++ (gctUINT8_PTR) queueHead
++ + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + newSize
++ );
++
++ /* Initialize the buffer. */
++ nextHead->size = unusedSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ nextHead->pending = 0;
++
++ /* Link the buffer in. */
++ nextHead->next = queueHead->next;
++ queueHead->next = nextHead;
++ queueNext = nextHead;
++
++ /* Update the size of the current buffer. */
++ queueHead->size = newSize;
++ }
++
++ /* Not big enough. */
++ else
++ {
++ /* Determine the next queue. */
++ queueNext = queueHead->next;
++ }
++
++ /* Mark the buffer as busy. */
++ queueHead->pending = EntryCount;
++
++ /* Advance to the next buffer. */
++ Command->queueHead = queueNext;
++
++ /* Start the command processor if the queue was empty. */
++ if (queueTail == queueHead)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++ }
++
++ /* The queue was not empty. */
++ else
++ {
++ /* Advance the merge buffer if needed. */
++ if (queueHead == Command->mergeQueue)
++ {
++ Command->mergeQueue = queueNext;
++ }
++ }
++#endif
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API Code *****************************
++\******************************************************************************/
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ )
++{
++ gceSTATUS status, last;
++ gckVGCOMMAND command = gcvNULL;
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++ gctUINT i, j;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskGranularity=0x%x QueueSize=0x%x Command=0x%x",
++ Kernel, TaskGranularity, QueueSize, Command);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(QueueSize >= gcvMINUMUM_BUFFER);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ do
++ {
++ /***********************************************************************
++ ** Generic object initialization.
++ */
++
++ /* Allocate the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGCOMMAND),
++ (gctPOINTER *) &command
++ ));
++
++ /* Initialize the object. */
++ command->object.type = gcvOBJ_COMMAND;
++
++ /* Set the object pointers. */
++ command->kernel = Kernel;
++ command->os = Kernel->os;
++ command->hardware = Kernel->hardware;
++
++ /* Reset pointers. */
++ command->queue = gcvNULL;
++ command->queueMutex = gcvNULL;
++ command->taskMutex = gcvNULL;
++ command->commitMutex = gcvNULL;
++
++ command->powerStallBuffer = gcvNULL;
++ command->powerStallSignal = gcvNULL;
++ command->powerSemaphore = gcvNULL;
++
++ /* Reset context states. */
++ command->contextCounter = 0;
++ command->currentContext = 0;
++
++ /* Enable command buffer dumping. */
++ command->enableDumping = gcvTRUE;
++
++ /* Set features. */
++ command->fe20 = Kernel->hardware->fe20;
++ command->vg20 = Kernel->hardware->vg20;
++ command->vg21 = Kernel->hardware->vg21;
++
++        /* Reset task table. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ command->taskTable, gcmSIZEOF(command->taskTable)
++ ));
++
++ /* Query command buffer attributes. */
++ gcmkERR_BREAK(gckVGCOMMAND_InitializeInfo(command));
++
++ /* Create the control mutexes. */
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->queueMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->taskMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->commitMutex));
++
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphore(Kernel->os,
++ &command->powerSemaphore));
++
++ gcmkERR_BREAK(gckOS_CreateSignal(Kernel->os,
++ gcvFALSE, &command->powerStallSignal));
++
++ /***********************************************************************
++ ** Command queue initialization.
++ */
++
++ /* Allocate the command queue. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ QueueSize,
++ (gctPOINTER *) &command->queue
++ ));
++
++ /* Initialize the command queue. */
++ queue = command->queue;
++
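++        /* The command queue starts out as a single header that links back to
++           itself; _UnlockCurrentQueue carves additional headers out of the
++           unused space as entries are committed. */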
++ queue->size = QueueSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ queue->pending = 0;
++ queue->next = queue;
++
++ command->queueHead =
++ command->queueTail =
++ command->mergeQueue = command->queue;
++
++ command->queueOverflow = 0;
++
++
++ /***********************************************************************
++ ** Enable TS overflow interrupt.
++ */
++
++ command->info.tsOverflowInt = 0;
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->info.tsOverflowInt,
++ _EventHandler_TSOverflow
++ ));
++
++ /* Mask out the interrupt. */
++ Kernel->hardware->eventMask &= ~(1 << command->info.tsOverflowInt);
++
++
++ /***********************************************************************
++ ** Enable Bus Error interrupt.
++ */
++
++ /* Hardwired to bit 31. */
++ command->busErrorInt = 31;
++
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->busErrorInt,
++ _EventHandler_BusError
++ ));
++
++
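++        /* The power stall interrupt is hardwired to bit 30. */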
++ command->powerStallInt = 30;
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->powerStallInt,
++ _EventHandler_PowerStall
++ ));
++
++ /***********************************************************************
++ ** Task management initialization.
++ */
++
++ command->taskStorage = gcvNULL;
++ command->taskStorageGranularity = TaskGranularity;
++ command->taskStorageUsable = TaskGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++
++ command->taskFreeHead = gcvNULL;
++ command->taskFreeTail = gcvNULL;
++
++ /* Enable block handlers. */
++ for (i = 0; i < gcmCOUNTOF(_blockHandlers); i += 1)
++ {
++ /* Get the target hardware block. */
++ gceBLOCK block = _blockHandlers[i].block;
++
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[block];
++
++ /* Determine the interrupt value index. */
++ gctUINT index = entry->interruptCount;
++
++ /* Create the block semaphore. */
++ if (entry->interruptSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ command->os, &entry->interruptSemaphore
++ ));
++ }
++
++ /* Enable auto-detection. */
++ entry->interruptArray[index] = -1;
++
++ /* Enable interrupt for the block. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &entry->interruptArray[index],
++ _blockHandlers[i].handler
++ ));
++
++ /* Update the number of registered interrupts. */
++ entry->interruptCount += 1;
++
++            /* Increment the semaphore to allow the usage of the registered
++ interrupt. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Get the FE interrupt. */
++ command->info.feBufferInt
++ = command->taskTable[gcvBLOCK_COMMAND].interruptArray[0];
++
++ /* Return gckVGCOMMAND object pointer. */
++ *Command = command;
++
++ gcmkFOOTER_ARG("*Command=0x%x",*Command);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the task table entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[i];
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ for (j = 0; j < entry->interruptCount; j += 1)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[j] >= 0);
++ gcmkASSERT(entry->interruptArray[j] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ entry->interruptArray[j]
++ ));
++ }
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (command->info.tsOverflowInt != -1)
++ {
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->info.tsOverflowInt
++ ));
++ }
++
++ /* Delete the commit mutex. */
++ if (command->commitMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->commitMutex
++ ));
++ }
++
++        /* Delete the task mutex. */
++ if (command->taskMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->taskMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->queueMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->queueMutex
++ ));
++ }
++
++ /* Delete the command queue. */
++ if (command->queue != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command->queue
++ ));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(
++ Kernel->os, command->powerSemaphore));
++ }
++
++ if (command->powerStallSignal != gcvNULL)
++ {
++            /* Destroy the power stall signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Kernel->os,
++ command->powerStallSignal));
++ }
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command
++ ));
++ }
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ OUT gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ gctUINT i;
++ gcsTASK_STORAGE_PTR nextStorage;
++
++ if (Command->queueHead != gcvNULL)
++ {
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, Command->queueHead));
++ }
++
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &Command->taskTable[i];
++
++ /* Determine the index of the last interrupt in the array. */
++ gctINT index = entry->interruptCount - 1;
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ while (index >= 0)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[index] >= 0);
++ gcmkASSERT(entry->interruptArray[index] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ entry->interruptArray[index]
++ ));
++
++ /* Update to the next interrupt. */
++ index -= 1;
++ entry->interruptCount -= 1;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (Command->info.tsOverflowInt != -1)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->info.tsOverflowInt
++ ));
++
++ Command->info.tsOverflowInt = -1;
++ }
++
++ /* Delete the commit mutex. */
++ if (Command->commitMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->commitMutex
++ ));
++
++ Command->commitMutex = gcvNULL;
++ }
++
++        /* Delete the task mutex. */
++ if (Command->taskMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->taskMutex
++ ));
++
++ Command->taskMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->queueMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->queueMutex
++ ));
++
++ Command->queueMutex = gcvNULL;
++ }
++
++ if (Command->powerSemaphore != gcvNULL)
++ {
++ /* Destroy the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++
++ if (Command->powerStallSignal != gcvNULL)
++ {
++            /* Destroy the power stall signal. */
++ gcmkERR_BREAK(gckOS_DestroySignal(
++ Command->os,
++ Command->powerStallSignal));
++ }
++
++ if (Command->queue != gcvNULL)
++ {
++ /* Delete the command queue. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->queue
++ ));
++ }
++
++ /* Destroy all allocated buffers. */
++ while (Command->taskStorage)
++ {
++ /* Copy the buffer pointer. */
++ nextStorage = Command->taskStorage->next;
++
++ /* Free the current container. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->taskStorage
++ ));
++
++ /* Advance to the next one. */
++ Command->taskStorage = nextStorage;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Mark the object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Free(Command->os, Command));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Restore the object type if failed. */
++ Command->object.type = gcvOBJ_COMMAND;
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Information=0x%x", Command, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Information != gcvNULL);
++
++ /* Copy the information. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ Information, &Command->info, sizeof(gcsCOMMAND_BUFFER_INFO)
++ ));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x CommandBuffer=0x%x Data=0x%x",
++ Command, Size, CommandBuffer, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ do
++ {
++ /* Allocate the buffer. */
++ gcmkERR_BREAK(_AllocateCommandBuffer(Command, Size, CommandBuffer));
++
++ /* Determine the data pointer. */
++ * Data = (gctUINT8_PTR) (*CommandBuffer) + (* CommandBuffer)->bufferOffset;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ /* Free command buffer. */
++ status = _FreeCommandBuffer(Command->kernel, CommandBuffer);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ do
++ {
++ gctUINT queueLength;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Set the buffer. */
++ kernelEntry->commandBuffer = CommandBuffer;
++ kernelEntry->handler = _FreeKernelCommandBuffer;
++
++        /* Unlock the current queue. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, 1
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ )
++{
++ /*
++ The first buffer is executed through a direct gckVGHARDWARE_Execute call,
++ therefore only an update is needed after the execution is over. All
++        subsequent buffers need to be executed upon the first update call from
++ the FE interrupt handler.
++ */
++
++ static gcsQUEUE_UPDATE_CONTROL _dynamicBuffer[] =
++ {
++ {
++ _UpdateDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ },
++ {
++ _ExecuteDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _ExecuteLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ }
++ };
++
++ static gcsQUEUE_UPDATE_CONTROL _staticBuffer[] =
++ {
++ {
++ _UpdateStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ },
++ {
++ _ExecuteStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _ExecuteLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ }
++ };
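++
++    /* Row [0] of each table contains only update handlers, row [1] mixes
++       execute and update handlers; controlIndex below selects the row. Per
++       the note above, the first buffer is started directly by the kernel and
++       therefore only needs an update handler. */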
++
++ gceSTATUS status, last;
++
++#ifdef __QNXNTO__
++ gcsVGCONTEXT_PTR userContext = gcvNULL;
++ gctBOOL userContextMapped = gcvFALSE;
++ gcsTASK_MASTER_TABLE_PTR userTaskTable = gcvNULL;
++ gctBOOL userTaskTableMapped = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x Queue=0x%x EntryCount=0x%x TaskTable=0x%x",
++ Command, Context, Queue, EntryCount, TaskTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++ gcmkVERIFY_ARGUMENT(EntryCount > 1);
++
++ do
++ {
++ gctBOOL haveFETasks;
++ gctUINT queueSize;
++ gcsVGCMDQUEUE_PTR mappedQueue;
++ gcsVGCMDQUEUE_PTR userEntry;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++ gcsQUEUE_UPDATE_CONTROL_PTR queueControl;
++ gctUINT currentLength;
++ gctUINT queueLength;
++ gctUINT entriesQueued;
++ gctUINT8_PTR previousEnd;
++ gctBOOL previousDynamic;
++ gctBOOL previousExecuted;
++ gctUINT controlIndex;
++
++#ifdef __QNXNTO__
++ /* Map the context into the kernel space. */
++ userContext = Context;
++
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ userContext,
++ gcmSIZEOF(*userContext),
++ &pointer));
++
++ Context = pointer;
++
++ userContextMapped = gcvTRUE;
++
++ /* Map the taskTable into the kernel space. */
++ userTaskTable = TaskTable;
++
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ userTaskTable,
++ gcmSIZEOF(*userTaskTable),
++ &pointer));
++
++ TaskTable = pointer;
++
++ userTaskTableMapped = gcvTRUE;
++
++ /* Update the signal info. */
++ TaskTable->coid = Context->coid;
++ TaskTable->rcvid = Context->rcvid;
++#endif
++
++ gcmkERR_BREAK(gckVGHARDWARE_SetPowerManagementState(
++ Command->hardware, gcvPOWER_ON_AUTO
++ ));
++
++ /* Acquire the power semaphore. */
++ gcmkERR_BREAK(gckOS_AcquireSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ status = gckOS_AcquireMutex(
++ Command->os,
++ Command->commitMutex,
++ gcvINFINITE
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ break;
++ }
++
++ do
++ {
++ gcmkERR_BREAK(_FlushMMU(Command));
++
++ /* Assign a context ID if not yet assigned. */
++ if (Context->id == 0)
++ {
++ /* Assign the next context number. */
++ Context->id = ++ Command->contextCounter;
++
++ /* See if we overflowed. */
++ if (Command->contextCounter == 0)
++ {
++ /* We actually did overflow, wow... */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++ }
++
++ /* The first entry in the queue is always the context buffer.
++ Verify whether the user context is the same as the current
++ context and if that's the case, skip the first entry. */
++ if (Context->id == Command->currentContext)
++ {
++ /* Same context as before, skip the first entry. */
++ EntryCount -= 1;
++ Queue += 1;
++
++ /* Set the signal to avoid user waiting. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->rcvid, Context->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->process
++ ));
++
++#endif /* __QNXNTO__ */
++
++ }
++ else
++ {
++ /* Different user context - keep the first entry.
++ Set the user context as the current one. */
++ Command->currentContext = Context->id;
++ }
++
++ /* Reset pointers. */
++ queueControl = gcvNULL;
++ previousEnd = gcvNULL;
++
++ /* Determine whether there are FE tasks to be performed. */
++ haveFETasks = (TaskTable->table[gcvBLOCK_COMMAND].head != gcvNULL);
++
++ /* Determine the size of the queue. */
++ queueSize = EntryCount * gcmSIZEOF(gcsVGCMDQUEUE);
++
++ /* Map the command queue into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ (gctPOINTER *) &mappedQueue
++ ));
++
++ /* Set the first entry. */
++ userEntry = mappedQueue;
++
++ /* Process the command queue. */
++ while (EntryCount)
++ {
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Determine the number of entries to process. */
++ currentLength = (queueLength < EntryCount)
++ ? queueLength
++ : EntryCount;
++
++ /* Update the number of the entries left to process. */
++ EntryCount -= currentLength;
++
++ /* Reset previous flags. */
++ previousDynamic = gcvFALSE;
++ previousExecuted = gcvFALSE;
++
++ /* Set the initial control index. */
++ controlIndex = 0;
++
++ /* Process entries. */
++ for (entriesQueued = 0; entriesQueued < currentLength; entriesQueued += 1)
++ {
++ /* Get the kernel pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++ gcmkERR_BREAK(_ConvertUserCommandBufferPointer(
++ Command,
++ userEntry->commandBuffer,
++ &commandBuffer
++ ));
++
++ /* Is it a dynamic command buffer? */
++ if (userEntry->dynamic)
++ {
++ /* Select dynamic buffer control functions. */
++ queueControl = &_dynamicBuffer[controlIndex];
++ }
++
++ /* No, a static command buffer. */
++ else
++ {
++ /* Select static buffer control functions. */
++ queueControl = &_staticBuffer[controlIndex];
++ }
++
++ /* Set the command buffer pointer to the entry. */
++ kernelEntry->commandBuffer = commandBuffer;
++
++ /* If the previous entry was a dynamic command buffer,
++ link it to the current. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command,
++ previousEnd,
++ commandBuffer->address,
++ commandBuffer->dataCount,
++ gcvNULL
++ ));
++
++ /* The buffer will be auto-executed, only need to
++ update it after it has been executed. */
++ kernelEntry->handler = queueControl->update;
++
++ /* The buffer is only being updated. */
++ previousExecuted = gcvFALSE;
++ }
++ else
++ {
++ /* Set the buffer up for execution. */
++ kernelEntry->handler = queueControl->execute;
++
++                        /* The buffer is being executed. */
++ previousExecuted = gcvTRUE;
++ }
++
++ /* The current buffer's END command becomes the last END. */
++ previousEnd
++ = ((gctUINT8_PTR) commandBuffer)
++ + commandBuffer->bufferOffset
++ + commandBuffer->dataCount * Command->info.commandAlignment
++ - Command->info.staticTailSize;
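++
++                    /* For a dynamic buffer, previousEnd is where the FETCH
++                       linking in the next buffer, or the terminating END,
++                       gets written. */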
++
++ /* Update the last entry info. */
++ previousDynamic = userEntry->dynamic;
++
++ /* Advance entries. */
++ userEntry += 1;
++ kernelEntry += 1;
++
++ /* Update the control index. */
++ controlIndex = 1;
++ }
++
++ /* If the previous entry was a dynamic command buffer,
++ terminate it with an END. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ previousEnd,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++ }
++
++ /* Last buffer? */
++ if (EntryCount == 0)
++ {
++ /* Modify the last command buffer's routines to handle
++                       tasks if any. */
++ if (haveFETasks)
++ {
++ if (previousExecuted)
++ {
++ kernelEntry[-1].handler = queueControl->lastExecute;
++ }
++ else
++ {
++ kernelEntry[-1].handler = queueControl->lastUpdate;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++ /* Schedule tasks. */
++ gcmkERR_BREAK(_ScheduleTasks(Command, TaskTable, previousEnd));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++ }
++
++                /* Unlock and schedule the current queue for execution. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, currentLength
++ ));
++ }
++
++
++ /* Unmap the user command buffer. */
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ mappedQueue
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ Command->os,
++ Command->commitMutex
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++ while (gcvFALSE);
++
++#ifdef __QNXNTO__
++ if (userContextMapped)
++ {
++ /* Unmap the user context. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ userContext,
++ gcmSIZEOF(*userContext),
++ Context));
++ }
++
++ if (userTaskTableMapped)
++ {
++ /* Unmap the user taskTable. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ userTaskTable,
++ gcmSIZEOF(*userTaskTable),
++ TaskTable));
++ }
++#endif
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_db.c 2015-05-01 14:57:59.571427001 -0500
+@@ -0,0 +1,1861 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_DATABASE
++
++/*******************************************************************************
++***** Private functions *******************************************************/
++
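++/* Hash a record's data pointer into a slot of the per-process record list. */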
++#define _GetSlot(database, x) \
++ (gctUINT32)(gcmPTR_TO_UINT64(x) % gcmCOUNTOF(database->list))
++
++/*******************************************************************************
++** gckKERNEL_NewDatabase
++**
++** Create a new database structure and insert it to the head of the hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_NewDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++ gctSIZE_T slot;
++ gcsDATABASE_PTR existingDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Walk the hash list. */
++ for (existingDatabase = Kernel->db->db[slot];
++ existingDatabase != gcvNULL;
++ existingDatabase = existingDatabase->next)
++ {
++ if (existingDatabase->processID == ProcessID)
++ {
++ /* One process can't be added twice. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++ }
++
++ if (Kernel->db->freeDatabase != gcvNULL)
++ {
++ /* Allocate a database from the free list. */
++ database = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = database->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate a new database from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE),
++ &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsDATABASE));
++
++ database = pointer;
++
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->counterMutex));
++ }
++
++ /* Insert the database into the hash. */
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++
++ /* Save the hash slot. */
++ database->slot = slot;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindDatabase
++**
++** Find a database identified by a process ID and move it to the head of the
++** hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database, previous;
++ gctSIZE_T slot;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d",
++ Kernel, ProcessID, LastProcessID);
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check whether we are getting the last known database. */
++ if (LastProcessID)
++ {
++ /* Use last database. */
++ database = Kernel->db->lastDatabase;
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ else
++ {
++ /* Walk the hash list. */
++ for (previous = gcvNULL, database = Kernel->db->db[slot];
++ database != gcvNULL;
++ database = database->next)
++ {
++ if (database->processID == ProcessID)
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = database;
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (previous != gcvNULL)
++ {
++ /* Move database to the head of the hash list. */
++ previous->next = database->next;
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteDatabase
++**
++** Remove a database from the hash list and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to the database structure to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++static gceSTATUS
++gckKERNEL_DeleteDatabase(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Check slot value. */
++ gcmkVERIFY_ARGUMENT(Database->slot < gcmCOUNTOF(Kernel->db->db));
++
++ if (Database->slot < gcmCOUNTOF(Kernel->db->db))
++ {
++        /* Check if the database is the head of the hash list. */
++ if (Kernel->db->db[Database->slot] == Database)
++ {
++ /* Remove the database from the hash list. */
++ Kernel->db->db[Database->slot] = Database->next;
++ }
++ else
++ {
++            /* Walk the hash list to find the database. */
++ for (database = Kernel->db->db[Database->slot];
++ database != gcvNULL;
++ database = database->next
++ )
++ {
++ /* Check if the next list entry is this database. */
++ if (database->next == Database)
++ {
++ /* Remove the database from the hash list. */
++ database->next = Database->next;
++ break;
++ }
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Ouch! Something got corrupted. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ /* Insert database to the free list. */
++ Kernel->db->lastDatabase->next = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = Kernel->db->lastDatabase;
++ }
++
++ /* Keep database as the last database. */
++ Kernel->db->lastDatabase = Database;
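++
++    /* The deleted database stays cached in lastDatabase so that
++       gckKERNEL_FindDatabase() can still return it through its LastProcessID
++       path; the previously cached database is recycled onto the free list. */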
++
++    /* Destroy handle db. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Database->handleDatabase));
++ Database->handleDatabase = gcvNULL;
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Database->handleDatabaseMutex));
++ Database->handleDatabaseMutex = gcvNULL;
++
++#if gcdPROCESS_ADDRESS_SPACE
++    /* Destroy process MMU. */
++ gcmkVERIFY_OK(gckEVENT_DestroyMmu(Kernel->eventObj, Database->mmu, gcvKERNEL_PIXEL));
++ Database->mmu = gcvNULL;
++#endif
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_NewRecord
++**
++** Create a new database record structure and insert it to the head of the
++** database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR * Record
++** Pointer to a variable receiving the database record structure
++** pointer on success.
++*/
++static gceSTATUS
++gckKERNEL_NewRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gctUINT32 Slot,
++ OUT gcsDATABASE_RECORD_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (Kernel->db->freeRecord != gcvNULL)
++ {
++ /* Allocate the record from the free list. */
++ record = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate the record from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE_RECORD),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Insert the record in the database. */
++ record->next = Database->list[Slot];
++ Database->list[Slot] = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the record. */
++ *Record = record;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", *Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteRecord
++**
++** Remove a database record from the database and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_DeleteRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T_PTR Bytes OPTIONAL
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record, previous;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot], previous = gcvNULL;
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = record;
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return size of record. */
++ *Bytes = record->bytes;
++ }
++
++ /* Remove record from database. */
++ if (previous == gcvNULL)
++ {
++ Database->list[slot] = record->next;
++ }
++ else
++ {
++ previous->next = record->next;
++ }
++
++ /* Insert record in free list. */
++ record->next = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindRecord
++**
++** Find a database record from the database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_FindRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsDATABASE_RECORD_PTR record;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot];
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Record != gcvNULL)
++ {
++ /* Return information of record. */
++ gcmkONERROR(
++ gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD)));
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("Record=0x%x", Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++***** Public API **************************************************************/
++
++/*******************************************************************************
++** gckKERNEL_CreateProcessDB
++**
++** Create a new process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database = gcvNULL;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Create a new database. */
++ gcmkONERROR(gckKERNEL_NewDatabase(Kernel, ProcessID, &database));
++
++ /* Initialize the database. */
++ database->processID = ProcessID;
++ database->vidMem.bytes = 0;
++ database->vidMem.maxBytes = 0;
++ database->vidMem.totalBytes = 0;
++ database->nonPaged.bytes = 0;
++ database->nonPaged.maxBytes = 0;
++ database->nonPaged.totalBytes = 0;
++ database->contiguous.bytes = 0;
++ database->contiguous.maxBytes = 0;
++ database->contiguous.totalBytes = 0;
++ database->mapMemory.bytes = 0;
++ database->mapMemory.maxBytes = 0;
++ database->mapMemory.totalBytes = 0;
++ database->mapUserMemory.bytes = 0;
++ database->mapUserMemory.maxBytes = 0;
++ database->mapUserMemory.totalBytes = 0;
++ database->virtualCommandBuffer.bytes = 0;
++ database->virtualCommandBuffer.maxBytes = 0;
++ database->virtualCommandBuffer.totalBytes = 0;
++
++ for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++ database->list[i] = gcvNULL;
++ }
++
++ for (i = 0; i < gcvSURF_NUM_TYPES; i++)
++ {
++ database->vidMemType[i].bytes = 0;
++ database->vidMemType[i].maxBytes = 0;
++ database->vidMemType[i].totalBytes = 0;
++ }
++
++ for (i = 0; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ database->vidMemPool[i].bytes = 0;
++ database->vidMemPool[i].maxBytes = 0;
++ database->vidMemPool[i].totalBytes = 0;
++ }
++
++ gcmkASSERT(database->handleDatabase == gcvNULL);
++ gcmkONERROR(
++ gckKERNEL_CreateIntegerDatabase(Kernel, &database->handleDatabase));
++
++ gcmkASSERT(database->handleDatabaseMutex == gcvNULL);
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &database->handleDatabaseMutex));
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkASSERT(database->mmu == gcvNULL);
++ gcmkONERROR(
++ gckMMU_Construct(Kernel, gcdMMU_SIZE, &database->mmu));
++#endif
++
++#if gcdSECURE_USER
++ {
++ gctINT slot;
++ gcskSECURE_CACHE * cache = &database->cache;
++
++ /* Setup the linked list of cache nodes. */
++ for (slot = 1; slot <= gcdSECURE_CACHE_SLOTS; ++slot)
++ {
++ cache->cache[slot].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ cache->cache[slot].prev = &cache->cache[slot - 1];
++ cache->cache[slot].next = &cache->cache[slot + 1];
++# endif
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ cache->cache[slot].nextHash = gcvNULL;
++ cache->cache[slot].prevHash = gcvNULL;
++# endif
++ }
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Setup the head and tail of the cache. */
++ cache->cache[0].next = &cache->cache[1];
++ cache->cache[0].prev = &cache->cache[gcdSECURE_CACHE_SLOTS];
++ cache->cache[0].logical = gcvNULL;
++
++ /* Fix up the head and tail pointers. */
++ cache->cache[0].next->prev = &cache->cache[0];
++ cache->cache[0].prev->next = &cache->cache[0];
++# endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero out the hash table. */
++ for (slot = 0; slot < gcmCOUNTOF(cache->hash); ++slot)
++ {
++ cache->hash[slot].logical = gcvNULL;
++ cache->hash[slot].nextHash = gcvNULL;
++ }
++# endif
++
++ /* Initialize cache index. */
++ cache->cacheIndex = gcvNULL;
++ cache->cacheFree = 1;
++ cache->cacheStamp = 0;
++ }
++#endif
++
++ /* Reset idle timer. */
++ Kernel->db->lastIdle = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AddProcessDB
++**
++** Add a record to a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to add.
++**
++** gctPOINTER Pointer
++** Data of the record to add.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the record to add.
++**
++** gctSIZE_T Size
++** Size of the record to add.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
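++/* Editor's note: illustrative usage, not part of the original driver.
++** For video memory the surface type and pool are packed into the Type
++** argument with the shift constants decoded near the top of the function
++** body below:
++**
++**     gceDATABASE_TYPE type = gcvDB_VIDEO_MEMORY
++**         | (surfaceType << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
++**         | (pool        << gcdDB_VIDEO_MEMORY_POOL_SHIFT);
++**
++**     gcmkONERROR(gckKERNEL_AddProcessDB(Kernel, processID, type,
++**                                        handle, gcvNULL, bytes));
++**
++** surfaceType, pool, handle and bytes stand in for the caller's values.
++*/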
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++ gcsDATABASE_COUNTERS * count;
++ gctUINT32 vidMemType;
++ gcePOOL vidMemPool;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x "
++ "Physical=0x%x Size=%lu",
++ Kernel, ProcessID, Type, Pointer, Physical, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Decode type. */
++ vidMemType = (Type & gcdDB_VIDEO_MEMORY_TYPE_MASK) >> gcdDB_VIDEO_MEMORY_TYPE_SHIFT;
++ vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT;
++
++ Type &= gcdDATABASE_TYPE_MASK;
++
++ /* Special case the idle record. */
++ if (Type == gcvDB_IDLE)
++ {
++ gctUINT64 time;
++
++ /* Get the current profile time. */
++ gcmkONERROR(gckOS_GetProfileTick(&time));
++
++ if ((ProcessID == 0) && (Kernel->db->lastIdle != 0))
++ {
++ /* Out of idle, adjust time it was idle. */
++ Kernel->db->idleTime += time - Kernel->db->lastIdle;
++ Kernel->db->lastIdle = 0;
++ }
++ else if (ProcessID == 1)
++ {
++ /* Save current idle time. */
++ Kernel->db->lastIdle = time;
++ }
++
++#if gcdDYNAMIC_SPEED
++ {
++ /* Test for first call. */
++ if (Kernel->db->lastSlowdown == 0)
++ {
++ /* Save milliseconds. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ else
++ {
++            /* Compute elapsed time in milliseconds. */
++ gctUINT delta = gckOS_ProfileToMS(time - Kernel->db->lastSlowdown);
++
++ /* Test for end of period. */
++ if (delta >= gcdDYNAMIC_SPEED)
++ {
++ /* Compute number of idle milliseconds. */
++ gctUINT idle = gckOS_ProfileToMS(
++ Kernel->db->idleTime - Kernel->db->lastSlowdownIdle);
++
++ /* Broadcast to slow down the GPU. */
++ gcmkONERROR(gckOS_BroadcastCalibrateSpeed(Kernel->os,
++ Kernel->hardware,
++ idle,
++ delta));
++
++ /* Save current time. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ }
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Create a new record in the database. */
++ gcmkONERROR(gckKERNEL_NewRecord(Kernel, database, _GetSlot(database, Pointer), &record));
++
++ /* Initialize the record. */
++ record->kernel = Kernel;
++ record->type = Type;
++ record->data = Pointer;
++ record->physical = Physical;
++ record->bytes = Size;
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ count = &database->vidMem;
++ break;
++
++ case gcvDB_NON_PAGED:
++ count = &database->nonPaged;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ count = &database->contiguous;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ count = &database->mapMemory;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ count = &database->mapUserMemory;
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ count = &database->virtualCommandBuffer;
++ break;
++
++ default:
++ count = gcvNULL;
++ break;
++ }
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE));
++
++ if (count != gcvNULL)
++ {
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ if (Type == gcvDB_VIDEO_MEMORY)
++ {
++ count = &database->vidMemType[vidMemType];
++
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++
++ count = &database->vidMemPool[vidMemPool];
++
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_RemoveProcessDB
++**
++** Remove a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
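++/* Editor's note: illustrative usage, not part of the original driver.
++** A free path removes the record it added earlier, using the same type
++** and pointer:
++**
++**     gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Kernel, processID,
++**                                             gcvDB_NON_PAGED, logical));
++**
++** processID and logical stand in for the caller's values.
++*/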
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctSIZE_T bytes = 0;
++ gctUINT32 vidMemType;
++ gcePOOL vidMempool;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Decode type. */
++ vidMemType = (Type & gcdDB_VIDEO_MEMORY_TYPE_MASK) >> gcdDB_VIDEO_MEMORY_TYPE_SHIFT;
++ vidMempool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT;
++
++ Type &= gcdDATABASE_TYPE_MASK;
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Delete the record. */
++ gcmkONERROR(
++ gckKERNEL_DeleteRecord(Kernel, database, Type, Pointer, &bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE));
++
++ /* Update counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ database->vidMem.bytes -= bytes;
++ database->vidMemType[vidMemType].bytes -= bytes;
++ database->vidMemPool[vidMempool].bytes -= bytes;
++ break;
++
++ case gcvDB_NON_PAGED:
++ database->nonPaged.bytes -= bytes;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ database->contiguous.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ database->mapMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ database->mapUserMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ database->virtualCommandBuffer.bytes -= bytes;
++ break;
++
++ default:
++ break;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindProcessDB
++**
++** Find a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++**      gctUINT32 ThreadID
++**          Thread ID of the caller (not used for the lookup itself).
++**
++**      gceDATABASE_TYPE Type
++**          Type of the record to find.
++**
++**      gctPOINTER Pointer
++**          Data of the record to find.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR Record
++** Copy of record.
++*/
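++/* Editor's note: illustrative usage, not part of the original driver.
++** A caller can check that a handle really belongs to the calling process
++** before operating on it:
++**
++**     gcsDATABASE_RECORD record;
++**     gcmkONERROR(gckKERNEL_FindProcessDB(Kernel, processID, 0,
++**                                         gcvDB_VIDEO_MEMORY, handle,
++**                                         &record));
++**
++** processID and handle stand in for the caller's values.
++*/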
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++    gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d ThreadID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, ThreadID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Find the record. */
++ gcmkONERROR(
++ gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DestroyProcessDB
++**
++** Destroy a process database. If the database contains any records, the data
++** inside those records will be deleted as well. This aids in the cleanup if
++** a process has died unexpectedly or has memory leaks.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
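++/* Editor's note: illustrative usage, not part of the original driver.
++** This is normally the last database call made on behalf of a process,
++** issued from the detach/cleanup path once the process has exited:
++**
++**     gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, processID));
++*/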
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record, next;
++ gctBOOL asynchronous = gcvTRUE;
++ gckVIDMEM_NODE nodeObject;
++ gctPHYS_ADDR physical;
++ gckKERNEL kernel = Kernel;
++ gctUINT32 handle;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): VidMem: total=%lu max=%lu",
++ ProcessID, database->vidMem.totalBytes,
++ database->vidMem.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): NonPaged: total=%lu max=%lu",
++ ProcessID, database->nonPaged.totalBytes,
++ database->nonPaged.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Contiguous: total=%lu max=%lu",
++ ProcessID, database->contiguous.totalBytes,
++ database->contiguous.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Idle time=%llu",
++ ProcessID, Kernel->db->idleTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapMemory.totalBytes,
++ database->mapMemory.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapUserMemory.totalBytes,
++ database->mapUserMemory.maxBytes);
++
++ if (database->list != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "Process %d has entries in its database:",
++ ProcessID);
++ }
++
++    for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++
++ /* Walk all records. */
++ for (record = database->list[i]; record != gcvNULL; record = next)
++ {
++            /* Get the next record. */
++ next = record->next;
++
++ /* Dispatch on record type. */
++ switch (record->type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(record->kernel,
++ ProcessID,
++ gcmPTR2INT32(record->data),
++ &nodeObject));
++
++ /* Free the video memory. */
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel,
++ ProcessID,
++ gcmPTR2INT32(record->data)));
++
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel,
++ nodeObject));
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_NON_PAGED:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the non paged memory. */
++ status = gckEVENT_FreeNonPagedMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: NON_PAGED 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ /* Free the command buffer. */
++ status = gckEVENT_DestroyVirtualCommandBuffer(record->kernel->eventObj,
++ record->bytes,
++ gcmNAME_TO_PTR(record->physical),
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: COMMAND_BUFFER 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the contiguous memory. */
++ status = gckEVENT_FreeContiguousMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTIGUOUS 0x%x bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_SIGNAL:
++#if USE_NEW_LINUX_SIGNAL
++ status = gcvSTATUS_NOT_SUPPORTED;
++#else
++ /* Free the user signal. */
++ status = gckOS_DestroyUserSignal(Kernel->os,
++ gcmPTR2INT32(record->data));
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SIGNAL %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++
++ case gcvDB_VIDEO_MEMORY_LOCKED:
++ handle = gcmPTR2INT32(record->data);
++
++ gcmkERR_BREAK(gckVIDMEM_HANDLE_Lookup(record->kernel,
++ ProcessID,
++ handle,
++ &nodeObject));
++
++ /* Unlock what we still locked */
++ status = gckVIDMEM_Unlock(record->kernel,
++ nodeObject,
++ nodeObject->type,
++ &asynchronous);
++
++#if gcdENABLE_VG
++ if (record->kernel->core == gcvCORE_VG)
++ {
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++                    /* TODO: we may need to schedule an event here */
++ status = gckVIDMEM_Unlock(record->kernel,
++ nodeObject,
++ nodeObject->type,
++ gcvNULL);
++ }
++
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel,
++ ProcessID,
++ handle));
++
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel,
++ nodeObject));
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(record->kernel,
++ ProcessID,
++ handle));
++
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++ status = gckEVENT_Unlock(record->kernel->eventObj,
++ gcvKERNEL_PIXEL,
++ nodeObject,
++ nodeObject->type);
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(record->kernel,
++ nodeObject));
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY_LOCKED 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_CONTEXT:
++ /* TODO: Free the context */
++ status = gckCOMMAND_Detach(Kernel->command, gcmNAME_TO_PTR(record->data));
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTEXT 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ /* Unmap memory. */
++ status = gckKERNEL_UnmapMemory(Kernel,
++ record->physical,
++ record->bytes,
++ record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP MEMORY %d (status=%d)",
++ gcmPTR2INT32(record->data), status);
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ /* TODO: Unmap user memory. */
++ status = gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ record->physical,
++ record->bytes,
++ gcmNAME_TO_PTR(record->data),
++ 0);
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP USER MEMORY %d (status=%d)",
++ gcmPTR2INT32(record->data), status);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvDB_SYNC_POINT:
++ /* Free the user signal. */
++ status = gckOS_DestroySyncPoint(Kernel->os,
++ (gctSYNC_POINT) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SYNC POINT %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++#endif
++
++ case gcvDB_SHBUF:
++ /* Free shared buffer. */
++ status = gckKERNEL_DestroyShBuffer(Kernel,
++ (gctSHBUF) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SHBUF %u (status=%d)",
++ (gctUINT32)(gctUINTPTR_T) record->data, status);
++ break;
++
++ default:
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DATABASE,
++                           "DB: Corrupted record=0x%08x type=%d",
++ record, record->type);
++ break;
++ }
++
++ /* Delete the record. */
++ gcmkONERROR(gckKERNEL_DeleteRecord(Kernel,
++ database,
++ record->type,
++ record->data,
++ gcvNULL));
++ }
++
++ }
++
++ /* Delete the database. */
++ gcmkONERROR(gckKERNEL_DeleteDatabase(Kernel, database));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_QueryProcessDB
++**
++** Query a process database for the current usage of a particular record type.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to query.
++**
++** OUTPUT:
++**
++** gcuDATABASE_INFO * Info
++** Pointer to a variable that receives the requested information.
++*/
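++/* Editor's note: illustrative usage, not part of the original driver.
++** Querying the current video memory counters of a process:
++**
++**     gcuDATABASE_INFO info;
++**     gcmkONERROR(gckKERNEL_QueryProcessDB(Kernel, processID, gcvFALSE,
++**                                          gcvDB_VIDEO_MEMORY, &info));
++**
++** info.counters.bytes then holds the currently allocated size and
++** info.counters.maxBytes the peak, as maintained by gckKERNEL_AddProcessDB
++** and gckKERNEL_RemoveProcessDB above.
++*/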
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcePOOL vidMemPool;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Info=0x%x",
++ Kernel, ProcessID, Type, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++    /* Decode pool. */
++ vidMemPool = (Type & gcdDB_VIDEO_MEMORY_POOL_MASK) >> gcdDB_VIDEO_MEMORY_POOL_SHIFT;
++
++ Type &= gcdDATABASE_TYPE_MASK;
++
++ /* Find the database. */
++    if (Type != gcvDB_IDLE)
++ {
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, LastProcessID, &database));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, database->counterMutex, gcvINFINITE));
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ if (vidMemPool != gcvPOOL_UNKNOWN)
++ {
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemPool[vidMemPool],
++ gcmSIZEOF(database->vidMemPool[vidMemPool]));
++ }
++ else
++ {
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMem,
++ gcmSIZEOF(database->vidMem));
++ }
++ break;
++
++ case gcvDB_NON_PAGED:
++ gckOS_MemCopy(&Info->counters,
++ &database->nonPaged,
++                          gcmSIZEOF(database->nonPaged));
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->contiguous,
++                          gcmSIZEOF(database->contiguous));
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapMemory,
++ gcmSIZEOF(database->mapMemory));
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapUserMemory,
++ gcmSIZEOF(database->mapUserMemory));
++ break;
++
++ case gcvDB_COMMAND_BUFFER:
++ gckOS_MemCopy(&Info->counters,
++ &database->virtualCommandBuffer,
++ gcmSIZEOF(database->virtualCommandBuffer));
++ break;
++
++ default:
++ break;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, database->counterMutex));
++ }
++ else
++ {
++ Info->time = Kernel->db->idleTime;
++ Kernel->db->idleTime = 0;
++ }
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FindHandleDatbase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gctPOINTER * HandleDatabase,
++ OUT gctPOINTER * HandleDatabaseMutex
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d",
++ Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ *HandleDatabase = database->handleDatabase;
++ *HandleDatabaseMutex = database->handleDatabaseMutex;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckKERNEL_GetProcessMMU(
++ IN gckKERNEL Kernel,
++ OUT gckMMU * Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctUINT32 processID;
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, processID, gcvFALSE, &database));
++
++ *Mmu = database->mmu;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#endif
++
++#if gcdSECURE_USER
++/*******************************************************************************
++** gckKERNEL_GetProcessDBCache
++**
++**  Get the secure cache from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** gcskSECURE_CACHE_PTR * Cache
++** Pointer to a variable that receives the secure cache pointer.
++*/
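++/* Editor's note: illustrative usage, not part of the original driver
++** (only meaningful when gcdSECURE_USER is enabled):
++**
++**     gcskSECURE_CACHE_PTR cache;
++**     gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++*/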
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Cache != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Return the pointer to the cache. */
++ *Cache = &database->cache;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Cache=0x%x", *Cache);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** PROCESS DB DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME");
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ gcmkPRINT_N(8, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_DumpCounter(
++ IN gcsDATABASE_COUNTERS * Counter,
++ IN gctCONST_STRING Name
++ )
++{
++ gcmkPRINT("%s:", Name);
++ gcmkPRINT(" Currently allocated : %10lld", Counter->bytes);
++ gcmkPRINT(" Maximum allocated : %10lld", Counter->maxBytes);
++ gcmkPRINT(" Total allocated : %10lld", Counter->totalBytes);
++}
++
++gceSTATUS
++gckKERNEL_DumpVidMemUsage(
++ IN gckKERNEL Kernel,
++ IN gctINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_COUNTERS * counter;
++ gctUINT32 i = 0;
++
++ static gctCONST_STRING surfaceTypes[] = {
++ "UNKNOWN",
++ "INDEX",
++ "VERTEX",
++ "TEXTURE",
++ "RENDER_TARGET",
++ "DEPTH",
++ "BITMAP",
++ "TILE_STATUS",
++ "IMAGE",
++ "MASK",
++ "SCISSOR",
++ "HIERARCHICAL_DEPTH",
++ };
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d",
++ Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkPRINT("VidMem Usage (Process %d):", ProcessID);
++
++ /* Get pointer to counters. */
++ counter = &database->vidMem;
++
++ _DumpCounter(counter, "Total Video Memory");
++
++ for (i = 0; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ _DumpCounter(counter, surfaceTypes[i]);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_debug.c 2015-05-01 14:57:59.575427001 -0500
+@@ -0,0 +1,2785 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include <gc_hal_kernel_debug.h>
++
++/******************************************************************************\
++******************************** Debug Variables *******************************
++\******************************************************************************/
++
++static gceSTATUS _lastError = gcvSTATUS_OK;
++static gctUINT32 _debugLevel = gcvLEVEL_ERROR;
++/*
++_debugZones configuration value.
++Please refer to the zone defines in gc_hal_base.h.
++*/
++static gctUINT32 _debugZones = gcvZONE_NONE;
++
++/******************************************************************************\
++********************************* Debug Switches *******************************
++\******************************************************************************/
++
++/*
++ gcdBUFFERED_OUTPUT
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console. gcdBUFFERED_SIZE determines the size of the buffer.
++*/
++#define gcdBUFFERED_OUTPUT 0
++
++/*
++ gcdBUFFERED_SIZE
++
++    Determines the size, in bytes, of the buffer used when gcdBUFFERED_OUTPUT
++    is enabled. Once the buffer gets full, the accumulated messages are
++    printed to the console.
++*/
++#define gcdBUFFERED_SIZE (1024 * 1024 * 2)
++
++/*
++ gcdDMA_BUFFER_COUNT
++
++    If greater than zero, the debugger will attempt to find the command buffer
++ where DMA is currently executing and then print this buffer and
++ (gcdDMA_BUFFER_COUNT - 1) buffers before the current one. If set to zero
++ or the current buffer is not found, all buffers are printed.
++*/
++#define gcdDMA_BUFFER_COUNT 0
++
++/*
++ gcdTHREAD_BUFFERS
++
++    When greater than one, messages from the specified number of threads
++    are accumulated in separate output buffers.
++*/
++#define gcdTHREAD_BUFFERS 1
++
++/*
++ gcdENABLE_OVERFLOW
++
++    When set to non-zero and the output buffer gets full, instead of being
++    printed the buffer is allowed to overflow, discarding the oldest messages.
++*/
++#define gcdENABLE_OVERFLOW 1
++
++/*
++ gcdSHOW_LINE_NUMBER
++
++    When enabled, each print statement will be preceded by the current
++ line number.
++*/
++#define gcdSHOW_LINE_NUMBER 0
++
++/*
++ gcdSHOW_PROCESS_ID
++
++    When enabled, each print statement will be preceded by the current
++ process ID.
++*/
++#define gcdSHOW_PROCESS_ID 0
++
++/*
++ gcdSHOW_THREAD_ID
++
++    When enabled, each print statement will be preceded by the current
++ thread ID.
++*/
++#define gcdSHOW_THREAD_ID 0
++
++/*
++ gcdSHOW_TIME
++
++    When enabled, each print statement will be preceded by the current
++ high-resolution time.
++*/
++#define gcdSHOW_TIME 0
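++
++/*
++    Editor's note: example configuration, not part of the original driver.
++    To time-stamp every message and tag it with the calling thread, the
++    switches above would be set to
++
++        #define gcdSHOW_TIME      1
++        #define gcdSHOW_THREAD_ID 1
++
++    which in turn enables the prefix machinery below (gcdHAVEPREFIX).
++*/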
++
++
++/******************************************************************************\
++****************************** Miscellaneous Macros ****************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmDBGASSERT(Expression, Format, Value) \
++ if (!(Expression)) \
++ { \
++ _DirectPrint( \
++ "*** gcmDBGASSERT ***************************\n" \
++ " function : %s\n" \
++ " line : %d\n" \
++ " expression : " #Expression "\n" \
++ " actual value : " Format "\n", \
++ __FUNCTION__, __LINE__, Value \
++ ); \
++ }
++#else
++# define gcmDBGASSERT(Expression, Format, Value)
++#endif
++
++#define gcmPTRALIGNMENT(Pointer, Alignment) \
++( \
++    gcmALIGN(gcmPTR2INT32(Pointer), Alignment) - gcmPTR2INT32(Pointer) \
++)
++
++#if gcdALIGNBYSIZE
++# define gcmISALIGNED(Offset, Alignment) \
++ (((Offset) & ((Alignment) - 1)) == 0)
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment) \
++ Pointer = (Type) gcmINT2PTR(gcmALIGN(gcmPTR2INT32(Pointer), Alignment))
++#else
++# define gcmISALIGNED(Offset, Alignment) \
++ gcvTRUE
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment)
++#endif
++
++#define gcmALIGNSIZE(Offset, Size) \
++ ((Size - Offset) + Size)
++
++#define gcdHAVEPREFIX \
++( \
++ gcdSHOW_TIME \
++ || gcdSHOW_LINE_NUMBER \
++ || gcdSHOW_PROCESS_ID \
++ || gcdSHOW_THREAD_ID \
++)
++
++#if gcdHAVEPREFIX
++
++# define gcdOFFSET 0
++
++#if gcdSHOW_TIME
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdTIMESIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdTIMESIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdTIMEFORMAT "0x%016llX"
++# else
++# define gcdTIMEFORMAT ", 0x%016llX"
++# endif
++# else
++# define gcdTIMESIZE 0
++# define gcdTIMEFORMAT
++# endif
++
++#if gcdSHOW_LINE_NUMBER
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdNUMSIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdNUMSIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdNUMFORMAT "%8llu"
++# else
++# define gcdNUMFORMAT ", %8llu"
++# endif
++# else
++# define gcdNUMSIZE 0
++# define gcdNUMFORMAT
++# endif
++
++#if gcdSHOW_PROCESS_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdPIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPIDFORMAT "pid=%5d"
++# else
++# define gcdPIDFORMAT ", pid=%5d"
++# endif
++# else
++# define gcdPIDSIZE 0
++# define gcdPIDFORMAT
++# endif
++
++#if gcdSHOW_THREAD_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdTIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdTIDFORMAT "tid=%5d"
++# else
++# define gcdTIDFORMAT ", tid=%5d"
++# endif
++# else
++# define gcdTIDSIZE 0
++# define gcdTIDFORMAT
++# endif
++
++# define gcdPREFIX_SIZE \
++ ( \
++ gcdTIMESIZE \
++ + gcdNUMSIZE \
++ + gcdPIDSIZE \
++ + gcdTIDSIZE \
++ )
++
++ static const char * _prefixFormat =
++ "["
++ gcdTIMEFORMAT
++ gcdNUMFORMAT
++ gcdPIDFORMAT
++ gcdTIDFORMAT
++ "] ";
++
++#else
++
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPREFIX_SIZE 0
++
++#endif
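++
++/* Editor's note: illustrative example, not part of the original driver.
++** With only gcdSHOW_PROCESS_ID and gcdSHOW_THREAD_ID enabled, _prefixFormat
++** evaluates to "[pid=%5d, tid=%5d] ", so a message is emitted as, e.g.
++**
++**     [pid=  123, tid=  456] message text
++*/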
++
++/* Assumed largest variable argument leader size. */
++#define gcdVARARG_LEADER gcmSIZEOF(gctUINT64)
++
++/* Alignments. */
++#if gcdALIGNBYSIZE
++# define gcdPREFIX_ALIGNMENT gcdPREFIX_LEADER
++# define gcdVARARG_ALIGNMENT gcdVARARG_LEADER
++#else
++# define gcdPREFIX_ALIGNMENT 0
++# define gcdVARARG_ALIGNMENT 0
++#endif
++
++#if gcdBUFFERED_OUTPUT
++# define gcdOUTPUTPREFIX _AppendPrefix
++# define gcdOUTPUTSTRING _AppendString
++# define gcdOUTPUTCOPY _AppendCopy
++# define gcdOUTPUTBUFFER _AppendBuffer
++#else
++# define gcdOUTPUTPREFIX _PrintPrefix
++# define gcdOUTPUTSTRING _PrintString
++# define gcdOUTPUTCOPY _PrintString
++# define gcdOUTPUTBUFFER _PrintBuffer
++#endif
++
++/******************************************************************************\
++****************************** Private Structures ******************************
++\******************************************************************************/
++
++typedef enum _gceBUFITEM
++{
++ gceBUFITEM_NONE,
++ gcvBUFITEM_PREFIX,
++ gcvBUFITEM_STRING,
++ gcvBUFITEM_COPY,
++ gcvBUFITEM_BUFFER
++}
++gceBUFITEM;
++
++/* Common item head/buffer terminator. */
++typedef struct _gcsBUFITEM_HEAD * gcsBUFITEM_HEAD_PTR;
++typedef struct _gcsBUFITEM_HEAD
++{
++ gceBUFITEM type;
++}
++gcsBUFITEM_HEAD;
++
++/* String prefix (for ex. [ 1,tid=0x019A]) */
++typedef struct _gcsBUFITEM_PREFIX * gcsBUFITEM_PREFIX_PTR;
++typedef struct _gcsBUFITEM_PREFIX
++{
++ gceBUFITEM type;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_PREFIX;
++
++/* Buffered string. */
++typedef struct _gcsBUFITEM_STRING * gcsBUFITEM_STRING_PTR;
++typedef struct _gcsBUFITEM_STRING
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctCONST_STRING message;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_STRING;
++
++/* Buffered string (copy of the string is included with the record). */
++typedef struct _gcsBUFITEM_COPY * gcsBUFITEM_COPY_PTR;
++typedef struct _gcsBUFITEM_COPY
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_COPY;
++
++/* Memory buffer. */
++typedef struct _gcsBUFITEM_BUFFER * gcsBUFITEM_BUFFER_PTR;
++typedef struct _gcsBUFITEM_BUFFER
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gceDUMP_BUFFER bufferType;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ gctUINT32 dmaAddress;
++#endif
++
++ gctUINT dataSize;
++ gctUINT32 address;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_BUFFER;
++
++typedef struct _gcsBUFFERED_OUTPUT * gcsBUFFERED_OUTPUT_PTR;
++typedef struct _gcsBUFFERED_OUTPUT
++{
++#if gcdTHREAD_BUFFERS > 1
++ gctUINT32 threadID;
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ gctUINT64 lineNumber;
++#endif
++
++ gctINT indent;
++
++#if gcdBUFFERED_OUTPUT
++ gctINT start;
++ gctINT index;
++ gctINT count;
++ gctUINT8 buffer[gcdBUFFERED_SIZE];
++#endif
++
++ gcsBUFFERED_OUTPUT_PTR prev;
++ gcsBUFFERED_OUTPUT_PTR next;
++}
++gcsBUFFERED_OUTPUT;
++
++typedef gctUINT (* gcfPRINTSTRING) (
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++typedef gctINT (* gcfGETITEMSIZE) (
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++/******************************************************************************\
++******************************* Private Variables ******************************
++\******************************************************************************/
++
++static gcsBUFFERED_OUTPUT _outputBuffer[gcdTHREAD_BUFFERS];
++static gcsBUFFERED_OUTPUT_PTR _outputBufferHead = gcvNULL;
++static gcsBUFFERED_OUTPUT_PTR _outputBufferTail = gcvNULL;
++
++/******************************************************************************\
++****************************** Item Size Functions *****************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++static gctINT
++_GetTerminatorItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctINT
++_GetPrefixItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item = (gcsBUFITEM_PREFIX_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctINT
++_GetStringItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item = (gcsBUFITEM_STRING_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetCopyItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item = (gcsBUFITEM_COPY_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetBufferItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfGETITEMSIZE _itemSize[] =
++{
++ _GetTerminatorItemSize,
++ _GetPrefixItemSize,
++ _GetStringItemSize,
++ _GetCopyItemSize,
++ _GetBufferItemSize
++};
++#endif
++
++/******************************************************************************\
++******************************* Printing Functions *****************************
++\******************************************************************************/
++
++#if gcdDEBUG || gcdBUFFERED_OUTPUT
++static void
++_DirectPrint(
++ gctCONST_STRING Message,
++ ...
++ )
++{
++ gctINT len;
++ char buffer[768];
++ gctARGUMENTS arguments;
++
++ gcmkARGUMENTS_START(arguments, Message);
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), Message, &arguments);
++ gcmkARGUMENTS_END(arguments);
++
++ buffer[len] = '\0';
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static int
++_AppendIndent(
++ IN gctINT Indent,
++ IN char * Buffer,
++ IN int BufferSize
++ )
++{
++ gctINT i;
++
++ gctINT len = 0;
++ gctINT indent = Indent % 40;
++
++ for (i = 0; i < indent; i += 1)
++ {
++ Buffer[len++] = ' ';
++ }
++
++ if (indent != Indent)
++ {
++ len += gcmkSPRINTF(
++ Buffer + len, BufferSize - len, " <%d> ", Indent
++ );
++
++ Buffer[len] = '\0';
++ }
++
++ return len;
++}
++
++#if gcdHAVEPREFIX
++static void
++_PrintPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Format the string. */
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, Data);
++ buffer[len] = '\0';
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static void
++_PrintString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Append the indent string. */
++ len = _AppendIndent(Indent, buffer, gcmSIZEOF(buffer));
++
++ /* Format the string. */
++ len += gcmkVSPRINTF(buffer + len, gcmSIZEOF(buffer) - len, Message, Data);
++ buffer[len] = '\0';
++
++ /* Add end-of-line if missing. */
++ if (buffer[len - 1] != '\n')
++ {
++ buffer[len++] = '\n';
++ buffer[len] = '\0';
++ }
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++
++static void
++_PrintBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++ static gctCONST_STRING _titleString[] =
++ {
++ "CONTEXT BUFFER",
++ "USER COMMAND BUFFER",
++ "KERNEL COMMAND BUFFER",
++ "LINK BUFFER",
++ "WAIT LINK BUFFER",
++ ""
++ };
++
++ static const gctINT COLUMN_COUNT = 8;
++
++ gctUINT i, count, column, address;
++ gctUINT32_PTR data;
++ gctCHAR buffer[768];
++ gctUINT indent, len;
++ gctBOOL command;
++
++ /* Append space for the prefix. */
++#if gcdHAVEPREFIX
++ indent = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, PrefixData);
++ buffer[indent] = '\0';
++#else
++ indent = 0;
++#endif
++
++ /* Append the indent string. */
++ indent += _AppendIndent(
++ Indent, buffer + indent, gcmSIZEOF(buffer) - indent
++ );
++
++ switch (Type)
++ {
++ case gceDUMP_BUFFER_CONTEXT:
++ case gceDUMP_BUFFER_USER:
++ case gceDUMP_BUFFER_KERNEL:
++ case gceDUMP_BUFFER_LINK:
++ case gceDUMP_BUFFER_WAITLINK:
++ /* Form and print the title string. */
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "%s%s\n", _titleString[Type],
++ ((DmaAddress >= Address) && (DmaAddress < Address + DataSize))
++ ? " (CURRENT)" : ""
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++
++ /* This is a command buffer. */
++ command = gcvTRUE;
++ break;
++
++ case gceDUMP_BUFFER_FROM_USER:
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++
++ /* No title. */
++ break;
++
++ default:
++ gcmDBGASSERT(gcvFALSE, "%s", "invalid buffer type");
++
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++ }
++
++ /* Overwrite the prefix with spaces. */
++ for (i = 0; i < indent; i += 1)
++ {
++ buffer[i] = ' ';
++ }
++
++ /* Form and print the opening string. */
++ if (command)
++ {
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "@[kernel.command %08X %08X\n", Address, DataSize
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++ }
++
++ /* Get initial address. */
++ address = Address;
++
++ /* Cast the data pointer. */
++ data = (gctUINT32_PTR) Data;
++
++ /* Compute the number of double words. */
++ count = DataSize / gcmSIZEOF(gctUINT32);
++
++ /* Print the buffer. */
++ for (i = 0, len = indent, column = 0; i < count; i += 1)
++ {
++ /* Append the address. */
++ if (column == 0)
++ {
++ len += gcmkSPRINTF(
++ buffer + len, gcmSIZEOF(buffer) - len, "0x%08X:", address
++ );
++ }
++
++ /* Append the data value. */
++ len += gcmkSPRINTF2(
++ buffer + len, gcmSIZEOF(buffer) - len, "%c%08X",
++ (address == DmaAddress)? '>' : ' ', data[i]
++ );
++
++ buffer[len] = '\0';
++
++ /* Update the address. */
++ address += gcmSIZEOF(gctUINT32);
++
++ /* Advance column count. */
++ column += 1;
++
++ /* End of line? */
++ if ((column % COLUMN_COUNT) == 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Reset. */
++ len = indent;
++ column = 0;
++ }
++ }
++
++ /* Print the last partial string. */
++ if (column != 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++ }
++
++    /* Form and print the closing string. */
++ if (command)
++ {
++ buffer[indent] = '\0';
++ gcmkSTRCAT(buffer, gcmSIZEOF(buffer), "] -- command\n");
++ gcmkOUTPUT_STRING(buffer);
++ }
++}
++
++#if gcdBUFFERED_OUTPUT
++static gctUINT
++_PrintNone(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctUINT
++_PrintPrefixWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_PREFIX_PTR) Item;
++
++ /* Print the message. */
++ _PrintPrefix(OutputBuffer, item->prefixData);
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctUINT
++_PrintStringWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_STRING_PTR) Item;
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, item->message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintCopyWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item;
++ gctCONST_STRING message;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_COPY_PTR) Item;
++
++ /* Determine the string pointer. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintBufferWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctPOINTER data;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Compute the data address. */
++ data = ((gctUINT8_PTR) item->prefixData) + gcdPREFIX_SIZE;
++
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, item->prefixData,
++ data, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, gcvNULL,
++ item + 1, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfPRINTSTRING _printArray[] =
++{
++ _PrintNone,
++ _PrintPrefixWrapper,
++ _PrintStringWrapper,
++ _PrintCopyWrapper,
++ _PrintBufferWrapper
++};
++#endif
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++static gcsBUFITEM_BUFFER_PTR
++_FindCurrentDMABuffer(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR dmaCurrent;
++
++ /* Reset the current buffer. */
++ dmaCurrent = gcvNULL;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ if ((DmaAddress >= buffer->address) &&
++ (DmaAddress < buffer->address + buffer->dataSize))
++ {
++ dmaCurrent = buffer;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Return result. */
++ return dmaCurrent;
++}
++
++static void
++_EnableAllDMABuffers(
++ void
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Enable the buffer. */
++ buffer->dmaAddress = ~0U;
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++}
++
++static void
++_EnableDMABuffers(
++ gctUINT32 DmaAddress,
++ gcsBUFITEM_BUFFER_PTR CurrentDMABuffer
++ )
++{
++ gctINT i, skip, index;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR buffers[gcdDMA_BUFFER_COUNT];
++
++ /* Reset buffer pointers. */
++ gckOS_ZeroMemory(buffers, gcmSIZEOF(buffers));
++
++ /* Set the current buffer index. */
++ index = -1;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items until the current DMA buffer is found. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ /* Advance the index. */
++ index = (index + 1) % gcdDMA_BUFFER_COUNT;
++
++ /* Add to the buffer array. */
++ buffers[index] = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Stop if this is the current DMA buffer. */
++ if ((gcsBUFITEM_BUFFER_PTR) item == CurrentDMABuffer)
++ {
++ break;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Enable the found buffers. */
++ gcmDBGASSERT(index != -1, "%d", index);
++
++ for (i = 0; i < gcdDMA_BUFFER_COUNT; i += 1)
++ {
++ if (buffers[index] == gcvNULL)
++ {
++ break;
++ }
++
++ buffers[index]->dmaAddress = DmaAddress;
++
++ index -= 1;
++
++ if (index == -1)
++ {
++ index = gcdDMA_BUFFER_COUNT - 1;
++ }
++ }
++}
++#endif
++
++static void
++_Flush(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = _outputBufferHead;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ if ((outputBuffer != gcvNULL) && (outputBuffer->count != 0))
++ {
++ /* Find the current DMA buffer. */
++ gcsBUFITEM_BUFFER_PTR dmaCurrent = _FindCurrentDMABuffer(DmaAddress);
++
++ /* Was the current buffer found? */
++ if (dmaCurrent == gcvNULL)
++ {
++ /* No, print all buffers. */
++ _EnableAllDMABuffers();
++ }
++ else
++ {
++ /* Yes, enable only specified number of buffers. */
++ _EnableDMABuffers(DmaAddress, dmaCurrent);
++ }
++ }
++#endif
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->count != 0)
++ {
++ _DirectPrint("********************************************************************************\n");
++ _DirectPrint("FLUSHING DEBUG OUTPUT BUFFER (%d elements).\n", outputBuffer->count);
++ _DirectPrint("********************************************************************************\n");
++
++ item = (gcsBUFITEM_HEAD_PTR) &outputBuffer->buffer[outputBuffer->start];
++
++ for (i = 0; i < outputBuffer->count; i += 1)
++ {
++ skip = (* _printArray[item->type]) (outputBuffer, item);
++
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) outputBuffer->buffer;
++ }
++ }
++
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++}
++
++static gcsBUFITEM_HEAD_PTR
++_AllocateItem(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Size
++ )
++{
++ gctINT skip;
++ gcsBUFITEM_HEAD_PTR item, next;
++
++#if gcdENABLE_OVERFLOW
++ if (
++ (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ ||
++ (
++ (OutputBuffer->index < OutputBuffer->start) &&
++ (OutputBuffer->index + Size >= OutputBuffer->start)
++ )
++ )
++ {
++ if (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ if (OutputBuffer->index < OutputBuffer->start)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (item->type != gceBUFITEM_NONE)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++ }
++
++ OutputBuffer->start = 0;
++ }
++
++ OutputBuffer->index = 0;
++ }
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (OutputBuffer->start - OutputBuffer->index <= Size)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ OutputBuffer->start = 0;
++ break;
++ }
++ }
++ }
++#else
++ if (OutputBuffer->index + Size > gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ _DirectPrint("\nMessage buffer full; forcing message flush.\n\n");
++ _Flush(~0U);
++ }
++#endif
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->index];
++
++ OutputBuffer->index += Size;
++ OutputBuffer->count += 1;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + Size);
++ next->type = gceBUFITEM_NONE;
++
++ return item;
++}
++
++#if gcdALIGNBYSIZE
++static void
++_FreeExtraSpace(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Item,
++ IN gctINT ItemSize,
++ IN gctINT FreeSize
++ )
++{
++ gcsBUFITEM_HEAD_PTR next;
++
++ OutputBuffer->index -= FreeSize;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) Item + ItemSize);
++ next->type = gceBUFITEM_NONE;
++}
++#endif
++
++#if gcdHAVEPREFIX
++static void
++_AppendPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_PREFIX_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_PREFIX)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_PREFIX_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_PREFIX;
++ item->prefixData = prefixData;
++
++ /* Copy argument value. */
++ memcpy(prefixData, Data, gcdPREFIX_SIZE);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_PREFIX) + gcdPREFIX_SIZE + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++#endif
++
++static void
++_AppendString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_STRING_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_STRING)
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++    /* Allocate string item. */
++ item = (gcsBUFITEM_STRING_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_STRING;
++ item->indent = Indent;
++ item->message = Message;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_STRING) + ArgumentSize + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendCopy(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_COPY_PTR item;
++ gctINT allocSize;
++ gctINT messageLength;
++ gctCONST_STRING message;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Get the length of the string. */
++ messageLength = strlen(Message) + 1;
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++    /* Allocate copy item. */
++ item = (gcsBUFITEM_COPY_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Determine the message placement. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) message + messageLength;
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ if (ArgumentSize == 0)
++ {
++ alignment = 0;
++ }
++ else
++ {
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++ }
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_COPY;
++ item->indent = Indent;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy the message. */
++ memcpy((gctPOINTER) message, Message, messageLength);
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT allocSize;
++ gctPOINTER data;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT
++ + DataSize;
++
++    /* Allocate buffer item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++#if gcdALIGNBYSIZE
++ /* Align the data pointer as necessary. */
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->bufferType = Type;
++ item->dataSize = DataSize;
++ item->address = Address;
++ item->prefixData = prefixData;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ item->dmaAddress = DmaAddress;
++#endif
++
++ /* Copy prefix data. */
++ memcpy(prefixData, PrefixData, gcdPREFIX_SIZE);
++
++ /* Compute the data pointer. */
++ data = prefixData + gcdPREFIX_SIZE;
++
++ /* Copy argument value. */
++ memcpy(data, Data, DataSize);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + alignment
++ + DataSize;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++#else
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT size;
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ size = gcmSIZEOF(gcsBUFITEM_BUFFER) + DataSize;
++
++    /* Allocate buffer item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, size);
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->dataSize = DataSize;
++ item->address = Address;
++
++ /* Copy argument value. */
++ memcpy(item + 1, Data, DataSize);
++#endif
++}
++#endif
++
++static gcmINLINE void
++_InitBuffers(
++ void
++ )
++{
++ int i;
++
++ if (_outputBufferHead == gcvNULL)
++ {
++ for (i = 0; i < gcdTHREAD_BUFFERS; i += 1)
++ {
++ if (_outputBufferTail == gcvNULL)
++ {
++ _outputBufferHead = &_outputBuffer[i];
++ }
++ else
++ {
++ _outputBufferTail->next = &_outputBuffer[i];
++ }
++
++#if gcdTHREAD_BUFFERS > 1
++ _outputBuffer[i].threadID = ~0U;
++#endif
++
++ _outputBuffer[i].prev = _outputBufferTail;
++ _outputBuffer[i].next = gcvNULL;
++
++ _outputBufferTail = &_outputBuffer[i];
++ }
++ }
++}
++
++static gcmINLINE gcsBUFFERED_OUTPUT_PTR
++_GetOutputBuffer(
++ void
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++
++#if gcdTHREAD_BUFFERS > 1
++ /* Get the current thread ID. */
++ gctUINT32 ThreadID = gcmkGETTHREADID();
++
++ /* Locate the output buffer for the thread. */
++ outputBuffer = _outputBufferHead;
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->threadID == ThreadID)
++ {
++ break;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++
++ /* No matching buffer found? */
++ if (outputBuffer == gcvNULL)
++ {
++ /* Get the tail for the buffer. */
++ outputBuffer = _outputBufferTail;
++
++ /* Move it to the head. */
++ _outputBufferTail = _outputBufferTail->prev;
++ _outputBufferTail->next = gcvNULL;
++
++ outputBuffer->prev = gcvNULL;
++ outputBuffer->next = _outputBufferHead;
++
++ _outputBufferHead->prev = outputBuffer;
++ _outputBufferHead = outputBuffer;
++
++ /* Reset the buffer. */
++ outputBuffer->threadID = ThreadID;
++#if gcdBUFFERED_OUTPUT
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++#endif
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber = 0;
++#endif
++ }
++#else
++ outputBuffer = _outputBufferHead;
++#endif
++
++ return outputBuffer;
++}
++
++static gcmINLINE int
++_GetArgumentSize(
++ IN gctCONST_STRING Message
++ )
++{
++ int i, count;
++
++ gcmDBGASSERT(Message != gcvNULL, "%p", Message);
++
++ for (i = 0, count = 0; Message[i]; i += 1)
++ {
++ if (Message[i] == '%')
++ {
++ count += 1;
++ }
++ }
++
++ return count * gcmSIZEOF(gctUINT32);
++}
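++
++/* Illustrative note (not part of the original code): _GetArgumentSize is a
++   simple heuristic that reserves one 32-bit slot per '%' character in the
++   format string, so a hypothetical call such as
++
++       gckOS_PrintN(_GetArgumentSize("x=%d y=%d\n"), "x=%d y=%d\n", x, y);
++
++   reserves 2 * gcmSIZEOF(gctUINT32) = 8 bytes; "%%" and 64-bit arguments are
++   therefore not sized exactly by this helper. */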
++
++#if gcdHAVEPREFIX
++static void
++_InitPrefixData(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR data = (gctUINT8_PTR) Data;
++
++#if gcdSHOW_TIME
++ {
++ gctUINT64 time;
++ gckOS_GetProfileTick(&time);
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = time;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = OutputBuffer->lineNumber;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_PROCESS_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETPROCESSID();
++ data += gcmSIZEOF(gctUINT32);
++ }
++#endif
++
++#if gcdSHOW_THREAD_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETTHREADID();
++ }
++#endif
++}
++#endif
++
++static void
++_Print(
++ IN gctUINT ArgumentSize,
++ IN gctBOOL CopyMessage,
++ IN gctCONST_STRING Message,
++ IN gctARGUMENTS * Arguments
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ gcmkDECLARE_LOCK(lockHandle);
++
++ gcmkLOCKSECTION(lockHandle);
++
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Print prefix. */
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print the prefix. */
++ gcdOUTPUTPREFIX(outputBuffer, alignedPrefixData);
++ }
++#endif
++
++ /* Form the indent string. */
++ if (strncmp(Message, "--", 2) == 0)
++ {
++ outputBuffer->indent -= 2;
++ }
++
++ /* Print the message. */
++ if (CopyMessage)
++ {
++ gcdOUTPUTCOPY(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, (gctPOINTER) Arguments
++ );
++ }
++ else
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, ((gctPOINTER) Arguments)
++ );
++ }
++
++ /* Check increasing indent. */
++ if (strncmp(Message, "++", 2) == 0)
++ {
++ outputBuffer->indent += 2;
++ }
++
++ gcmkUNLOCKSECTION(lockHandle);
++}
++
++
++/******************************************************************************\
++********************************* Debug Macros *********************************
++\******************************************************************************/
++
++#ifdef __QNXNTO__
++
++extern volatile unsigned g_nQnxInIsrs;
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ if (atomic_add_value(&g_nQnxInIsrs, 1) == 0) \
++ { \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, &__arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++ } \
++ atomic_sub(&g_nQnxInIsrs, 1); \
++}
++
++#else
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, &__arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++}
++
++#endif
++
++/******************************************************************************\
++********************************** Debug Code **********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Print
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
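++
++/* Example usage (illustrative, not part of the original code):
++
++       gckOS_Print("core %d: idle=%d\n", core, idle);
++
++   When buffered output is enabled the format string pointer itself is stored,
++   so the string must remain valid until the buffer is flushed; use
++   gckOS_CopyPrint below when that cannot be guaranteed. */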
++
++/*******************************************************************************
++**
++** gckOS_PrintN
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyPrint
++**
++** Send a message to the debugger. If in buffered output mode, the entire
++** message will be copied into the buffer instead of using the pointer to
++** the string.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvTRUE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DumpBuffer
++**
++** Print the contents of the specified buffer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Buffer
++** Pointer to the buffer to print.
++**
++** gctUINT Size
++** Size of the buffer.
++**
++** gceDUMP_BUFFER Type
++** Buffer type.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ )
++{
++ gctUINT32 address = 0;
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = gcvNULL;
++ static gctBOOL userLocked;
++ gctCHAR *buffer = (gctCHAR*)Buffer;
++
++ gcmkDECLARE_LOCK(lockHandle);
++
++    /* Take the lock when the dump does not come from user space, or when it
++       comes from user space and marks the beginning of a user dump, i.e. the
++       message starts with "@[". */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ if ((Size > 2)
++ && (buffer[0] == '@')
++ && (buffer[1] == '['))
++ {
++ /* Beginning of a user dump. */
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvTRUE;
++ }
++ /* Else, let it pass through. */
++ }
++ else
++ {
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++
++ if (Buffer != gcvNULL)
++ {
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Get the physical address of the buffer. */
++ if (Type != gceDUMP_BUFFER_FROM_USER)
++ {
++ gcmkVERIFY_OK(gckOS_GetPhysicalAddress(Os, Buffer, &address));
++ }
++ else
++ {
++ address = 0;
++ }
++
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print/schedule the buffer. */
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ alignedPrefixData, Buffer, address, Size, Type, 0
++ );
++ }
++#else
++ /* Print/schedule the buffer. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Buffer, 0, gcvNULL
++ );
++ }
++ else
++ {
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ gcvNULL, Buffer, address, Size, Type, 0
++ );
++ }
++#endif
++ }
++
++    /* Release the lock when the dump did not come from user space, or when
++       a user dump is in progress and this message ends it ("] --"). */
++ if (userLocked)
++ {
++ if ((Size > 4)
++ && (buffer[0] == ']')
++ && (buffer[1] == ' ')
++ && (buffer[2] == '-')
++ && (buffer[3] == '-'))
++ {
++ /* End of a user dump. */
++ gcmkUNLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++ /* Else, let it pass through, don't unlock. */
++ }
++ else
++ {
++ gcmkUNLOCKSECTION(lockHandle);
++ }
++}
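++
++/* Illustrative note (not part of the original code): dumps coming from user
++   space are serialized by markers embedded in the data itself.  A buffer that
++   starts with "@[" takes the lock and a later buffer that starts with "] --"
++   releases it, so a hypothetical sequence such as
++
++       gckOS_DumpBuffer(Os, "@[...", size1, gceDUMP_BUFFER_FROM_USER, gcvTRUE);
++       gckOS_DumpBuffer(Os, "] --...", size2, gceDUMP_BUFFER_FROM_USER, gcvTRUE);
++
++   prints everything in between as one uninterrupted block. */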
++
++/*******************************************************************************
++**
++** gckOS_DebugTrace
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceN
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZone
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZoneN
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugBreak
++**
++** Break into the debugger.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugBreak(
++ void
++ )
++{
++ gckOS_DebugTrace(gcvLEVEL_ERROR, "%s(%d)", __FUNCTION__, __LINE__);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFatal
++**
++** Send a message to the debugger and break into the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmkPRINT_VERSION();
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++
++ /* Break into the debugger. */
++ gckOS_DebugBreak();
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevel
++**
++** Set the debug level.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ )
++{
++ _debugLevel = Level;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZone
++**
++** Set the debug zone.
++**
++** INPUT:
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ )
++{
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevelZone
++**
++** Set the debug level and zone.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ )
++{
++ _debugLevel = Level;
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZones
++**
++** Enable or disable debug zones.
++**
++** INPUT:
++**
++** gctUINT32 Zones
++** Debug zones to enable or disable.
++**
++** gctBOOL Enable
++**          Set to gcvTRUE to enable the zones (OR the Zones into the current
++**          zones) or gcvFALSE to disable the specified Zones.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ )
++{
++ if (Enable)
++ {
++ /* Enable the zones. */
++ _debugZones |= Zones;
++ }
++ else
++ {
++ /* Disable the zones. */
++ _debugZones &= ~Zones;
++ }
++}
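++
++/* Example usage (illustrative, not part of the original code): zones form a
++   bitmask, so individual zones can be toggled without touching the rest.
++   A hypothetical caller could enable and later disable the event zone with
++
++       gckOS_SetDebugZones(gcvZONE_EVENT, gcvTRUE);
++       gckOS_SetDebugZones(gcvZONE_EVENT, gcvFALSE);
++
++   which ORs the gcvZONE_EVENT bit into _debugZones and then clears it. */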
++
++/*******************************************************************************
++**
++** gckOS_Verify
++**
++** Called to verify the result of a function call.
++**
++** INPUT:
++**
++** gceSTATUS Status
++** Function call result.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ )
++{
++ _lastError = status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFlush
++**
++** Force messages to be flushed out.
++**
++** INPUT:
++**
++** gctCONST_STRING CallerName
++** Name of the caller function.
++**
++** gctUINT LineNumber
++** Line number of the caller.
++**
++** gctUINT32 DmaAddress
++** The current DMA address or ~0U to ignore.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ )
++{
++#if gcdBUFFERED_OUTPUT
++ _DirectPrint("\nFlush requested by %s(%d).\n\n", CallerName, LineNumber);
++ _Flush(DmaAddress);
++#endif
++}
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ )
++{
++ switch (status)
++ {
++ case gcvSTATUS_OK:
++ return "gcvSTATUS_OK";
++ case gcvSTATUS_TRUE:
++ return "gcvSTATUS_TRUE";
++ case gcvSTATUS_NO_MORE_DATA:
++ return "gcvSTATUS_NO_MORE_DATA";
++ case gcvSTATUS_CACHED:
++ return "gcvSTATUS_CACHED";
++ case gcvSTATUS_MIPMAP_TOO_LARGE:
++ return "gcvSTATUS_MIPMAP_TOO_LARGE";
++ case gcvSTATUS_NAME_NOT_FOUND:
++ return "gcvSTATUS_NAME_NOT_FOUND";
++ case gcvSTATUS_NOT_OUR_INTERRUPT:
++ return "gcvSTATUS_NOT_OUR_INTERRUPT";
++ case gcvSTATUS_MISMATCH:
++ return "gcvSTATUS_MISMATCH";
++ case gcvSTATUS_MIPMAP_TOO_SMALL:
++ return "gcvSTATUS_MIPMAP_TOO_SMALL";
++ case gcvSTATUS_LARGER:
++ return "gcvSTATUS_LARGER";
++ case gcvSTATUS_SMALLER:
++ return "gcvSTATUS_SMALLER";
++ case gcvSTATUS_CHIP_NOT_READY:
++ return "gcvSTATUS_CHIP_NOT_READY";
++ case gcvSTATUS_NEED_CONVERSION:
++ return "gcvSTATUS_NEED_CONVERSION";
++ case gcvSTATUS_SKIP:
++ return "gcvSTATUS_SKIP";
++ case gcvSTATUS_DATA_TOO_LARGE:
++ return "gcvSTATUS_DATA_TOO_LARGE";
++ case gcvSTATUS_INVALID_CONFIG:
++ return "gcvSTATUS_INVALID_CONFIG";
++ case gcvSTATUS_CHANGED:
++ return "gcvSTATUS_CHANGED";
++ case gcvSTATUS_NOT_SUPPORT_DITHER:
++ return "gcvSTATUS_NOT_SUPPORT_DITHER";
++
++ case gcvSTATUS_INVALID_ARGUMENT:
++ return "gcvSTATUS_INVALID_ARGUMENT";
++ case gcvSTATUS_INVALID_OBJECT:
++ return "gcvSTATUS_INVALID_OBJECT";
++ case gcvSTATUS_OUT_OF_MEMORY:
++ return "gcvSTATUS_OUT_OF_MEMORY";
++ case gcvSTATUS_MEMORY_LOCKED:
++ return "gcvSTATUS_MEMORY_LOCKED";
++ case gcvSTATUS_MEMORY_UNLOCKED:
++ return "gcvSTATUS_MEMORY_UNLOCKED";
++ case gcvSTATUS_HEAP_CORRUPTED:
++ return "gcvSTATUS_HEAP_CORRUPTED";
++ case gcvSTATUS_GENERIC_IO:
++ return "gcvSTATUS_GENERIC_IO";
++ case gcvSTATUS_INVALID_ADDRESS:
++ return "gcvSTATUS_INVALID_ADDRESS";
++ case gcvSTATUS_CONTEXT_LOSSED:
++ return "gcvSTATUS_CONTEXT_LOSSED";
++ case gcvSTATUS_TOO_COMPLEX:
++ return "gcvSTATUS_TOO_COMPLEX";
++ case gcvSTATUS_BUFFER_TOO_SMALL:
++ return "gcvSTATUS_BUFFER_TOO_SMALL";
++ case gcvSTATUS_INTERFACE_ERROR:
++ return "gcvSTATUS_INTERFACE_ERROR";
++ case gcvSTATUS_NOT_SUPPORTED:
++ return "gcvSTATUS_NOT_SUPPORTED";
++ case gcvSTATUS_MORE_DATA:
++ return "gcvSTATUS_MORE_DATA";
++ case gcvSTATUS_TIMEOUT:
++ return "gcvSTATUS_TIMEOUT";
++ case gcvSTATUS_OUT_OF_RESOURCES:
++ return "gcvSTATUS_OUT_OF_RESOURCES";
++ case gcvSTATUS_INVALID_DATA:
++ return "gcvSTATUS_INVALID_DATA";
++ case gcvSTATUS_INVALID_MIPMAP:
++ return "gcvSTATUS_INVALID_MIPMAP";
++ case gcvSTATUS_NOT_FOUND:
++ return "gcvSTATUS_NOT_FOUND";
++ case gcvSTATUS_NOT_ALIGNED:
++ return "gcvSTATUS_NOT_ALIGNED";
++ case gcvSTATUS_INVALID_REQUEST:
++ return "gcvSTATUS_INVALID_REQUEST";
++ case gcvSTATUS_GPU_NOT_RESPONDING:
++ return "gcvSTATUS_GPU_NOT_RESPONDING";
++ case gcvSTATUS_TIMER_OVERFLOW:
++ return "gcvSTATUS_TIMER_OVERFLOW";
++ case gcvSTATUS_VERSION_MISMATCH:
++ return "gcvSTATUS_VERSION_MISMATCH";
++ case gcvSTATUS_LOCKED:
++ return "gcvSTATUS_LOCKED";
++ case gcvSTATUS_INTERRUPTED:
++ return "gcvSTATUS_INTERRUPTED";
++ case gcvSTATUS_DEVICE:
++ return "gcvSTATUS_DEVICE";
++ case gcvSTATUS_NOT_MULTI_PIPE_ALIGNED:
++ return "gcvSTATUS_NOT_MULTI_PIPE_ALIGNED";
++
++ /* Linker errors. */
++ case gcvSTATUS_GLOBAL_TYPE_MISMATCH:
++ return "gcvSTATUS_GLOBAL_TYPE_MISMATCH";
++ case gcvSTATUS_TOO_MANY_ATTRIBUTES:
++ return "gcvSTATUS_TOO_MANY_ATTRIBUTES";
++ case gcvSTATUS_TOO_MANY_UNIFORMS:
++ return "gcvSTATUS_TOO_MANY_UNIFORMS";
++ case gcvSTATUS_TOO_MANY_SAMPLER:
++ return "gcvSTATUS_TOO_MANY_SAMPLER";
++ case gcvSTATUS_TOO_MANY_VARYINGS:
++ return "gcvSTATUS_TOO_MANY_VARYINGS";
++ case gcvSTATUS_UNDECLARED_VARYING:
++ return "gcvSTATUS_UNDECLARED_VARYING";
++ case gcvSTATUS_VARYING_TYPE_MISMATCH:
++ return "gcvSTATUS_VARYING_TYPE_MISMATCH";
++ case gcvSTATUS_MISSING_MAIN:
++ return "gcvSTATUS_MISSING_MAIN";
++ case gcvSTATUS_NAME_MISMATCH:
++ return "gcvSTATUS_NAME_MISMATCH";
++ case gcvSTATUS_INVALID_INDEX:
++ return "gcvSTATUS_INVALID_INDEX";
++ case gcvSTATUS_UNIFORM_MISMATCH:
++ return "gcvSTATUS_UNIFORM_MISMATCH";
++ case gcvSTATUS_UNSAT_LIB_SYMBOL:
++ return "gcvSTATUS_UNSAT_LIB_SYMBOL";
++ case gcvSTATUS_TOO_MANY_SHADERS:
++ return "gcvSTATUS_TOO_MANY_SHADERS";
++ case gcvSTATUS_LINK_INVALID_SHADERS:
++ return "gcvSTATUS_LINK_INVALID_SHADERS";
++ case gcvSTATUS_CS_NO_WORKGROUP_SIZE:
++ return "gcvSTATUS_CS_NO_WORKGROUP_SIZE";
++ case gcvSTATUS_LINK_LIB_ERROR:
++ return "gcvSTATUS_LINK_LIB_ERROR";
++ case gcvSTATUS_SHADER_VERSION_MISMATCH:
++ return "gcvSTATUS_SHADER_VERSION_MISMATCH";
++ case gcvSTATUS_TOO_MANY_INSTRUCTION:
++ return "gcvSTATUS_TOO_MANY_INSTRUCTION";
++ case gcvSTATUS_SSBO_MISMATCH:
++ return "gcvSTATUS_SSBO_MISMATCH";
++ case gcvSTATUS_TOO_MANY_OUTPUT:
++ return "gcvSTATUS_TOO_MANY_OUTPUT";
++ case gcvSTATUS_TOO_MANY_INPUT:
++ return "gcvSTATUS_TOO_MANY_INPUT";
++ case gcvSTATUS_NOT_SUPPORT_CL:
++ return "gcvSTATUS_NOT_SUPPORT_CL";
++ case gcvSTATUS_NOT_SUPPORT_INTEGER:
++ return "gcvSTATUS_NOT_SUPPORT_INTEGER";
++
++ /* Compiler errors. */
++ case gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR:
++ return "gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR";
++ case gcvSTATUS_COMPILER_FE_PARSER_ERROR:
++ return "gcvSTATUS_COMPILER_FE_PARSER_ERROR";
++
++ default:
++ return "nil";
++ }
++}
++
++/*******************************************************************************
++***** Binary Trace *************************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++** _VerifyMessage
++**
++** Verify a binary trace message, decode it to human readable string and print
++** it.
++**
++** ARGUMENTS:
++**
++** gctCONST_STRING Buffer
++**          Pointer to the buffer that holds the binary trace message.
++**
++** gctSIZE_T Bytes
++** Buffer length.
++*/
++void
++_VerifyMessage(
++ IN gctCONST_STRING Buffer,
++ IN gctSIZE_T Bytes
++ )
++{
++ char arguments[150] = {0};
++ char format[100] = {0};
++
++ gctSTRING function;
++ gctPOINTER args;
++ gctUINT32 numArguments;
++ int i = 0;
++ gctUINT32 functionBytes;
++
++ gcsBINARY_TRACE_MESSAGE_PTR message = (gcsBINARY_TRACE_MESSAGE_PTR)Buffer;
++
++ /* Check signature. */
++ if (message->signature != 0x7FFFFFFF)
++ {
++ gcmkPRINT("Signature error");
++ return;
++ }
++
++ /* Get function name. */
++ function = (gctSTRING)&message->payload;
++ functionBytes = (gctUINT32)strlen(function) + 1;
++
++ /* Get arguments number. */
++ numArguments = message->numArguments;
++
++ /* Get arguments . */
++ args = function + functionBytes;
++
++ /* Prepare format string. */
++ while (numArguments--)
++ {
++ format[i++] = '%';
++ format[i++] = 'x';
++ format[i++] = ' ';
++ }
++
++ format[i] = '\0';
++
++    /* The loop above consumed numArguments, so test the format length instead. */
++    if (i > 0)
++ {
++ gcmkVSPRINTF(arguments, 150, format, (gctARGUMENTS *) &args);
++ }
++
++ gcmkPRINT("[%d](%d): %s(%d) %s",
++ message->pid,
++ message->tid,
++ function,
++ message->line,
++ arguments);
++}
++
++
++/*******************************************************************************
++** gckOS_WriteToRingBuffer
++**
++** Store a buffer to ring buffer.
++**
++** ARGUMENTS:
++**
++** gctCONST_STRING Buffer
++** Pointer to buffer to store.
++**
++** gctSIZE_T Bytes
++** Buffer length.
++*/
++void
++gckOS_WriteToRingBuffer(
++ IN gctCONST_STRING Buffer,
++ IN gctSIZE_T Bytes
++ )
++{
++
++}
++
++/*******************************************************************************
++** gckOS_BinaryTrace
++**
++** Output a binary trace message.
++**
++** ARGUMENTS:
++**
++** gctCONST_STRING Function
++** Pointer to function name.
++**
++** gctINT Line
++** Line number.
++**
++** gctCONST_STRING Text OPTIONAL
++** Optional pointer to a descriptive text.
++**
++** ...
++** Optional arguments to the descriptive text.
++*/
++void
++gckOS_BinaryTrace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text OPTIONAL,
++ ...
++ )
++{
++ static gctUINT32 messageSignature = 0x7FFFFFFF;
++ char buffer[gcdBINARY_TRACE_MESSAGE_SIZE];
++ gctUINT32 numArguments = 0;
++ gctUINT32 functionBytes;
++ gctUINT32 i = 0;
++ gctSTRING payload;
++ gcsBINARY_TRACE_MESSAGE_PTR message = (gcsBINARY_TRACE_MESSAGE_PTR)buffer;
++
++ /* Calculate arguments number. */
++ if (Text)
++ {
++ while (Text[i] != '\0')
++ {
++ if (Text[i] == '%')
++ {
++ numArguments++;
++ }
++ i++;
++ }
++ }
++
++ message->signature = messageSignature;
++ message->pid = gcmkGETPROCESSID();
++ message->tid = gcmkGETTHREADID();
++ message->line = Line;
++ message->numArguments = numArguments;
++
++ payload = (gctSTRING)&message->payload;
++
++ /* Function name. */
++ functionBytes = (gctUINT32)gcmkSTRLEN(Function) + 1;
++ gcmkMEMCPY(payload, Function, functionBytes);
++
++ /* Advance to next payload. */
++ payload += functionBytes;
++
++ /* Arguments value. */
++ if (numArguments)
++ {
++ gctARGUMENTS p;
++ gcmkARGUMENTS_START(p, Text);
++
++ for (i = 0; i < numArguments; ++i)
++ {
++ gctPOINTER value = gcmkARGUMENTS_ARG(p, gctPOINTER);
++ gcmkMEMCPY(payload, &value, gcmSIZEOF(gctPOINTER));
++ payload += gcmSIZEOF(gctPOINTER);
++ }
++
++ gcmkARGUMENTS_END(p);
++ }
++
++ gcmkASSERT(payload - buffer <= gcdBINARY_TRACE_MESSAGE_SIZE);
++
++ /* Send buffer to ring buffer. */
++ gckOS_WriteToRingBuffer(buffer, (gctUINT32)(payload - buffer));
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_event.c 2015-05-01 14:57:59.575427001 -0500
+@@ -0,0 +1,3459 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __QNXNTO__
++#include <atomic.h>
++#include "gc_hal_kernel_qnx.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_EVENT
++
++#define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE))
++#define gcdEVENT_MIN_THRESHOLD 4
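++
++/* Illustrative note (not part of the original code): event records are
++   allocated in batches sized to roughly one page, i.e.
++   4096 / gcmSIZEOF(gcsHAL_INTERFACE) records per batch (64 records for a
++   hypothetical 64-byte interface structure).  gckEVENT_AllocateRecord refills
++   the free list when allocation is allowed and fewer than
++   gcdEVENT_MIN_THRESHOLD (4) free records remain, or whenever the list is
++   empty. */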
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++static gceSTATUS
++gckEVENT_AllocateQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR * Queue
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Do we have free queues? */
++ if (Event->freeList == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Move one free queue from the free list. */
++ * Queue = Event->freeList;
++ Event->freeList = Event->freeList->next;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++    /* Push the queue back onto the free list. */
++ Queue->next = Event->freeList;
++ Event->freeList = Queue;
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeRecord(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->freeEventMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Push the record on the free list. */
++ Record->next = Event->freeEventList;
++ Event->freeEventList = Record;
++ Event->freeEventCount += 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++gckEVENT_IsEmpty(
++ IN gckEVENT Event,
++ OUT gctBOOL_PTR IsEmpty
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL);
++
++ /* Assume the event queue is empty. */
++ *IsEmpty = gcvTRUE;
++
++ /* Walk the event queue. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ /* Check whether this event is in use. */
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* The event is in use, hence the queue is not empty. */
++ *IsEmpty = gcvFALSE;
++ break;
++ }
++ }
++
++ /* Try acquiring the mutex. */
++ status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Timeout - queue is no longer empty. */
++ *IsEmpty = gcvFALSE;
++ }
++ else
++ {
++ /* Bail out on error. */
++ gcmkONERROR(status);
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_TryToIdleGPU(
++ IN gckEVENT Event
++)
++{
++ gceSTATUS status;
++ gctBOOL empty = gcvFALSE, idle = gcvFALSE;
++ gctBOOL powerLocked = gcvFALSE;
++ gckHARDWARE hardware;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Event->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Check whether the event queue is empty. */
++ gcmkONERROR(gckEVENT_IsEmpty(Event, &empty));
++
++ if (empty)
++ {
++ status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ powerLocked = gcvTRUE;
++
++ /* Query whether the hardware is idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle));
++
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++
++ if (idle)
++ {
++ /* Inform the system of idle GPU. */
++ gcmkONERROR(gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_IDLE));
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerLocked)
++ {
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++__RemoveRecordFromProcessDB(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ if (Record->info.command == gcvHAL_SIGNAL)
++ {
++ /* TODO: Find a better place to bind signal to hardware.*/
++ gcmkVERIFY_OK(gckOS_SignalSetHardware(Event->os,
++ gcmUINT64_TO_PTR(Record->info.u.Signal.signal),
++ Event->kernel->hardware));
++ }
++
++ if (Record->fromKernel)
++ {
++ /* No need to check db if event is from kernel. */
++ Record = Record->next;
++ continue;
++ }
++
++ switch (Record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node)));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Record->info.u.UnmapUserMemory.info)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical)));
++ break;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_ReleaseVideoMemoryHandle(
++ IN gckKERNEL Kernel,
++ IN OUT gcsEVENT_PTR Record,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE nodeObject;
++ gctUINT32 handle;
++
++ switch(Interface->command)
++ {
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ handle = (gctUINT32)Interface->u.UnlockVideoMemory.node;
++
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
++ Kernel, Record->processID, handle, &nodeObject));
++
++ Record->info.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(nodeObject);
++
++ gckVIDMEM_HANDLE_Dereference(Kernel, Record->processID, handle);
++ break;
++
++ default:
++ break;
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++/*******************************************************************************
++**
++** _QueryFlush
++**
++** Check the type of surfaces which will be released by current event and
++**  Check the types of the surfaces that will be released by the current event
++**  and determine which caches need to be flushed.
++*/
++static gceSTATUS
++_QueryFlush(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record,
++ OUT gceKERNEL_FLUSH *Flush
++ )
++{
++ gceKERNEL_FLUSH flush = 0;
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ switch (Record->info.command)
++ {
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ switch(Record->info.u.UnlockVideoMemory.type)
++ {
++ case gcvSURF_TILE_STATUS:
++ flush |= gcvFLUSH_TILE_STATUS;
++ break;
++ case gcvSURF_RENDER_TARGET:
++ flush |= gcvFLUSH_COLOR;
++ break;
++ case gcvSURF_DEPTH:
++ flush |= gcvFLUSH_DEPTH;
++ break;
++ case gcvSURF_TEXTURE:
++ flush |= gcvFLUSH_TEXTURE;
++ break;
++ case gcvSURF_TYPE_UNKNOWN:
++ gcmkASSERT(0);
++ break;
++ default:
++ break;
++ }
++ break;
++ case gcvHAL_UNMAP_USER_MEMORY:
++ *Flush = gcvFLUSH_ALL;
++ return gcvSTATUS_OK;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++
++ *Flush = flush;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_SubmitTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckEVENT event = (gckEVENT)Data;
++#if gcdMULTI_GPU
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE));
++#endif
++}
++
++/******************************************************************************\
++******************************* gckEVENT API Code *******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckEVENT_Construct
++**
++** Construct a new gckEVENT object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckEVENT * Event
++** Pointer to a variable that receives the gckEVENT object pointer.
++*/
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gckEVENT eventObj = gcvNULL;
++ int i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Event != gcvNULL);
++
++ /* Extract the pointer to the gckOS object. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate the gckEVENT object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer));
++
++ eventObj = pointer;
++
++ /* Reset the object. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT)));
++
++ /* Initialize the gckEVENT object. */
++ eventObj->object.type = gcvOBJ_EVENT;
++ eventObj->kernel = Kernel;
++ eventObj->os = os;
++
++ /* Create the mutexes. */
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex));
++
++    /* Create a bunch of event records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = eventObj->freeEventList;
++ eventObj->freeEventList = record;
++ eventObj->freeEventCount += 1;
++ }
++
++ /* Initialize the free list of event queues. */
++ for (i = 0; i < gcdREPO_LIST_COUNT; i += 1)
++ {
++ eventObj->repoList[i].next = eventObj->freeList;
++ eventObj->freeList = &eventObj->repoList[i];
++ }
++
++ /* Construct the atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->freeAtom));
++ gcmkONERROR(gckOS_AtomSet(os,
++ eventObj->freeAtom,
++ gcmCOUNTOF(eventObj->queues)));
++
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending));
++
++#if gcdMULTI_GPU
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending3D[i]));
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending3DMask[i]));
++ }
++
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pendingMask));
++#endif
++
++#endif
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(os,
++ _SubmitTimerFunction,
++ (gctPOINTER)eventObj,
++ &eventObj->submitTimer));
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->interruptCount));
++ gcmkONERROR(gckOS_AtomSet(os,eventObj->interruptCount, 0));
++#endif
++
++ /* Return pointer to the gckEVENT object. */
++ *Event = eventObj;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Event=0x%x", *Event);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (eventObj != gcvNULL)
++ {
++ if (eventObj->eventQueueMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex));
++ }
++
++ if (eventObj->freeEventMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex));
++ }
++
++ if (eventObj->eventListMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex));
++ }
++
++ while (eventObj->freeEventList != gcvNULL)
++ {
++ record = eventObj->freeEventList;
++ eventObj->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record));
++ }
++
++ if (eventObj->freeAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->freeAtom));
++ }
++
++#if gcdSMP
++ if (eventObj->pending != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending));
++ }
++
++#if gcdMULTI_GPU
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ if (eventObj->pending3D[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending3D[i]));
++ }
++
++ if (eventObj->pending3DMask[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending3DMask[i]));
++ }
++ }
++#endif
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ if (eventObj->interruptCount)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->interruptCount));
++ }
++#endif
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Destroy
++**
++**  Destroy a gckEVENT object.
++**
++** INPUT:
++**
++** gckEVENT Event
++**          Pointer to a gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_PTR record;
++ gcsEVENT_QUEUE_PTR queue;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Event->submitTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer));
++ }
++
++ /* Delete the queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex));
++
++ /* Free all free events. */
++ while (Event->freeEventList != gcvNULL)
++ {
++ record = Event->freeEventList;
++ Event->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Delete the free mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex));
++
++ /* Free all pending queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Free all pending events. */
++ while (queue->head != gcvNULL)
++ {
++ record = queue->head;
++ queue->head = record->next;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(record) + gcmSIZEOF(queue->source),
++ "Event record 0x%x is still pending for %d.",
++ record, queue->source
++ );
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead =
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue));
++ }
++
++ /* Delete the list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex));
++
++ /* Delete the atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->freeAtom));
++
++#if gcdSMP
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending));
++
++#if gcdMULTI_GPU
++ {
++ gctINT i;
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending3D[i]));
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending3DMask[i]));
++ }
++ }
++#endif
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->interruptCount));
++#endif
++
++ /* Mark the gckEVENT object as unknown. */
++ Event->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckEVENT object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_GetEvent
++**
++** Reserve the next available hardware event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**          Pointer to a gckEVENT object.
++** gctBOOL Wait
++** Set to gcvTRUE to force the function to wait if no events are
++** immediately available.
++**
++** gceKERNEL_WHERE Source
++** Source of the event.
++**
++** OUTPUT:
++**
++** gctUINT8 * EventID
++** Reserved event ID.
++*/
++#define gcdINVALID_EVENT_PTR ((gcsEVENT_PTR)gcvMAXUINTPTR_T)
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source
++ )
++#endif
++{
++ gctINT i, id;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 free;
++#if gcdMULTI_GPU
++ gctINT j;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Source=%d", Event, Source);
++
++ while (gcvTRUE)
++ {
++ /* Grab the queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Walk through all events. */
++ id = Event->lastID;
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ gctINT nextID = gckMATH_ModuloInt((id + 1),
++ gcmCOUNTOF(Event->queues));
++
++ if (Event->queues[id].head == gcvNULL)
++ {
++ *EventID = (gctUINT8) id;
++
++ Event->lastID = (gctUINT8) nextID;
++
++ /* Save time stamp of event. */
++ Event->queues[id].head = gcdINVALID_EVENT_PTR;
++ Event->queues[id].stamp = ++(Event->stamp);
++ Event->queues[id].source = Source;
++
++#if gcdMULTI_GPU
++ Event->queues[id].chipEnable = ChipEnable;
++
++ if (ChipEnable == gcvCORE_3D_ALL_MASK)
++ {
++ gckOS_AtomSetMask(Event->pendingMask, (1 << id));
++
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ gckOS_AtomSetMask(Event->pending3DMask[j], (1 << id));
++ }
++ }
++ else
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (ChipEnable & (1 << j))
++ {
++ gckOS_AtomSetMask(Event->pending3DMask[j], (1 << id));
++ }
++ }
++ }
++#endif
++
++ gcmkONERROR(gckOS_AtomDecrement(Event->os,
++ Event->freeAtom,
++ &free));
++#if gcdDYNAMIC_SPEED
++ if (free <= gcdDYNAMIC_EVENT_THRESHOLD)
++ {
++ gcmkONERROR(gckOS_BroadcastHurry(
++ Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD - free));
++ }
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os,
++ Event->eventQueueMutex));
++
++ /* Success. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(id),
++ "Using id=%d",
++ id
++ );
++
++ gcmkFOOTER_ARG("*EventID=%u", *EventID);
++ return gcvSTATUS_OK;
++ }
++
++ id = nextID;
++ }
++
++#if gcdDYNAMIC_SPEED
++ /* No free events, speed up the GPU right now! */
++ gcmkONERROR(gckOS_BroadcastHurry(Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD));
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Fail if wait is not requested. */
++ if (!Wait)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Delay a while. */
++ gcmkONERROR(gckOS_Delay(Event->os, 1));
++ }
++
++OnError:
++ if (acquired)
++ {
++ /* Release the queue mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
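++
++/* Example usage (illustrative, not part of the original code), shown for the
++   single-GPU signature:
++
++       gctUINT8 id;
++       gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
++
++   gcvKERNEL_PIXEL and gcvKERNEL_COMMAND are the gceKERNEL_WHERE sources
++   accepted by gckEVENT_AddList further below; passing gcvTRUE for Wait makes
++   the call retry, with a short gckOS_Delay between attempts, until an event
++   id becomes free. */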
++
++/*******************************************************************************
++**
++** gckEVENT_AllocateRecord
++**
++** Allocate a record for the new event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**          Pointer to a gckEVENT object.
++** gctBOOL AllocateAllowed
++**          Whether new records may be allocated when the free list runs low.
++**
++** OUTPUT:
++**
++** gcsEVENT_PTR * Record
++** Allocated event record.
++*/
++gceSTATUS
++gckEVENT_AllocateRecord(
++ IN gckEVENT Event,
++ IN gctBOOL AllocateAllowed,
++ OUT gcsEVENT_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Test if we are below the allocation threshold. */
++ if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
++ (Event->freeEventCount == 0) )
++ {
++ /* Allocate a bunch of records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(Event->os,
++ gcmSIZEOF(gcsEVENT),
++ &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = Event->freeEventList;
++ Event->freeEventList = record;
++ Event->freeEventCount += 1;
++ }
++ }
++
++ *Record = Event->freeEventList;
++ Event->freeEventList = Event->freeEventList->next;
++ Event->freeEventCount -= 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AddList
++**
++** Add a new event to the list of events.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_INTERFACE_PTR Interface
++** Pointer to the interface for the event to be added.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gctBOOL AllocateAllowed
++**      Whether new event records may be allocated if the free list runs out.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record = gcvNULL;
++ gcsEVENT_QUEUE_PTR queue;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Interface=0x%x",
++ Event, Interface);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE,
++ "FromWhere=%d AllocateAllowed=%d",
++ FromWhere, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ /* Verify the event command. */
++ gcmkASSERT
++ ( (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY)
++ || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY)
++ || (Interface->command == gcvHAL_WRITE_DATA)
++ || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_SIGNAL)
++ || (Interface->command == gcvHAL_UNMAP_USER_MEMORY)
++ || (Interface->command == gcvHAL_TIMESTAMP)
++ || (Interface->command == gcvHAL_COMMIT_DONE)
++ || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER)
++ || (Interface->command == gcvHAL_SYNC_POINT)
++ || (Interface->command == gcvHAL_DESTROY_MMU)
++ );
++
++ /* Validate the source. */
++ if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL))
++ {
++ /* Invalid argument. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Allocate a free record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record));
++
++    /* Terminate the record. */
++ record->next = gcvNULL;
++
++ /* Record the committer. */
++ record->fromKernel = FromKernel;
++
++ /* Copy the event interface into the record. */
++ gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&record->processID));
++
++ gcmkONERROR(__RemoveRecordFromProcessDB(Event, record));
++
++    /* The handle belongs to the current process, so it must be released now. */
++ if (FromKernel == gcvFALSE)
++ {
++ status = _ReleaseVideoMemoryHandle(Event->kernel, record, Interface);
++
++ if (gcmIS_ERROR(status))
++ {
++            /* Ignore the error because there are other events in the queue. */
++ status = gcvSTATUS_OK;
++ goto OnError;
++ }
++ }
++
++#ifdef __QNXNTO__
++ record->kernel = Event->kernel;
++#endif
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Do we need to allocate a new queue? */
++ if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere))
++ {
++ /* Allocate a new queue. */
++ gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue));
++
++ /* Initialize the queue. */
++ queue->source = FromWhere;
++ queue->head = gcvNULL;
++ queue->next = gcvNULL;
++
++ /* Attach it to the list of allocated queues. */
++ if (Event->queueTail == gcvNULL)
++ {
++ Event->queueHead =
++ Event->queueTail = queue;
++ }
++ else
++ {
++ Event->queueTail->next = queue;
++ Event->queueTail = queue;
++ }
++ }
++ else
++ {
++ queue = Event->queueTail;
++ }
++
++ /* Attach the record to the queue. */
++ if (queue->head == gcvNULL)
++ {
++ queue->head = record;
++ queue->tail = record;
++ }
++ else
++ {
++ queue->tail->next = record;
++ queue->tail = record;
++ }
++
++    /* Unmap the user space logical address.
++     * Since 3.5 the Linux kernel no longer supports unmapping another process's memory,
++     * so unmap the current process's memory before submitting the event to the GPU.
++     */
++ switch(Interface->command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++ break;
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)gcmNAME_TO_PTR(Interface->u.FreeVirtualCommandBuffer.physical);
++ if (buffer->userLogical)
++ {
++ gcmkONERROR(gckOS_DestroyUserVirtualMapping(
++ Event->os,
++ buffer->physical,
++ (gctSIZE_T) Interface->u.FreeVirtualCommandBuffer.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeVirtualCommandBuffer.logical)));
++ }
++ break;
++
++ default:
++ break;
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
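++/* Editor's illustrative sketch -- not part of the original driver sources.
++ * Kernel-side users of gckEVENT_AddList fill in a gcsHAL_INTERFACE and hand
++ * it over, exactly like the gckEVENT_Signal/gckEVENT_CommitDone wrappers
++ * further below.  This hypothetical helper queues a gcvHAL_WRITE_DATA event
++ * that writes Data to the physical address Address once the GPU reaches the
++ * event; it assumes Event is a valid gckEVENT object.
++ */
++static gceSTATUS
++_ExampleScheduleWriteData(
++    IN gckEVENT Event,
++    IN gctUINT32 Address,
++    IN gctUINT32 Data
++    )
++{
++    gceSTATUS status;
++    gcsHAL_INTERFACE iface;
++
++    /* Describe the deferred operation. */
++    iface.command             = gcvHAL_WRITE_DATA;
++    iface.u.WriteData.address = Address;
++    iface.u.WriteData.data    = Data;
++
++    /* Queue it from the pixel pipe, mirroring the wrappers in this file. */
++    gcmkONERROR(gckEVENT_AddList(Event, &iface, gcvKERNEL_PIXEL, gcvFALSE, gcvTRUE));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++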
++/*******************************************************************************
++**
++** gckEVENT_Unlock
++**
++** Schedule an event to unlock virtual memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory
++** to unlock.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctPOINTER Node,
++ IN gceSURF_TYPE Type
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
++ Event, FromWhere, Node, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Mark the event as an unlock. */
++ iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
++ iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
++ iface.u.UnlockVideoMemory.type = Type;
++ iface.u.UnlockVideoMemory.asynchroneous = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeNonPagedMemory
++**
++** Schedule an event to free non-paged memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of non-paged memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of non-paged memory to free.
++**
++** gctPOINTER Logical
++** Logical address of non-paged memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
++ iface.u.FreeNonPagedMemory.bytes = Bytes;
++ iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeNonPagedMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
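++/* Editor's illustrative sketch -- not part of the original driver sources.
++ * The Free*Memory helpers above only schedule the release; the memory is
++ * actually returned in gckEVENT_Notify once the GPU signals the event, so a
++ * buffer can safely be "freed" while the hardware may still be reading it.
++ * gckEVENT_FreeContiguousMemory and gckEVENT_DestroyVirtualCommandBuffer
++ * follow the same pattern.  The hypothetical caller below assumes a valid
++ * gckEVENT object and a non-paged buffer it no longer needs.
++ */
++static gceSTATUS
++_ExampleDeferredFree(
++    IN gckEVENT Event,
++    IN gctSIZE_T Bytes,
++    IN gctPHYS_ADDR Physical,
++    IN gctPOINTER Logical
++    )
++{
++    gceSTATUS status;
++
++    /* Queue the free; it executes after the current GPU work has passed. */
++    gcmkONERROR(gckEVENT_FreeNonPagedMemory(Event,
++                                            Bytes,
++                                            Physical,
++                                            Logical,
++                                            gcvKERNEL_PIXEL));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++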
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
++ iface.u.FreeVirtualCommandBuffer.bytes = Bytes;
++ iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckEVENT_FreeContiguousMemory
++**
++** Schedule an event to free contiguous memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of contiguous memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of contiguous memory to free.
++**
++** gctPOINTER Logical
++** Logical address of contiguous memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
++ iface.u.FreeContiguousMemory.bytes = Bytes;
++ iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeContiguousMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Signal
++**
++** Schedule an event to trigger a signal.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
++ Event, Signal, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Mark the event as a signal. */
++ iface.command = gcvHAL_SIGNAL;
++ iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ iface.u.Signal.coid = 0;
++ iface.u.Signal.rcvid = 0;
++#endif
++ iface.u.Signal.auxSignal = 0;
++ iface.u.Signal.process = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
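++/* Editor's illustrative sketch -- not part of the original driver sources.
++ * A common kernel-side pattern is to schedule a signal behind the submitted
++ * work and then block on it, which is essentially what gckEVENT_Stop below
++ * does.  The sketch assumes a single-GPU build (gcdMULTI_GPU disabled, so
++ * gckEVENT_Submit takes three arguments) and a gctSIGNAL created elsewhere.
++ */
++static gceSTATUS
++_ExampleWaitForGpu(
++    IN gckEVENT Event,
++    IN gctSIGNAL Signal
++    )
++{
++    gceSTATUS status;
++
++    /* Queue a SIGNAL event behind everything already scheduled. */
++    gcmkONERROR(gckEVENT_Signal(Event, Signal, gcvKERNEL_PIXEL));
++
++    /* Push the queued events to the hardware, waiting for a free event ID. */
++    gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++    /* Block until the GPU has reached the event and the signal fires. */
++    gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++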
++/*******************************************************************************
++**
++** gckEVENT_CommitDone
++**
++**  Schedule an event to wake up the work thread when a commit is done by the GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_COMMIT_DONE;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckEVENT_DestroyMmu(
++ IN gckEVENT Event,
++ IN gckMMU Mmu,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_DESTROY_MMU;
++ iface.u.DestroyMmu.mmu = gcmPTR_TO_UINT64(Mmu);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckEVENT_Submit
++**
++** Submit the current event queue to the GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++**      Submit requires one vacant event; if Wait is nonzero and there
++**      are no vacant events at this time, the function will wait until
++**      an event becomes vacant so that the queue can be submitted
++**      successfully.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT8 id = 0xFF;
++ gcsEVENT_QUEUE_PTR queue;
++ gctBOOL acquired = gcvFALSE;
++ gckCOMMAND command = gcvNULL;
++ gctBOOL commitEntered = gcvFALSE;
++#if !gcdNULL_DRIVER
++ gctUINT32 bytes;
++ gctPOINTER buffer;
++#endif
++
++#if gcdMULTI_GPU
++ gctSIZE_T chipEnableBytes;
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 oldValue;
++#endif
++
++#if gcdSECURITY
++ gctPOINTER reservedBuffer;
++#endif
++
++ gctUINT32 flushBytes;
++ gctUINT32 executeBytes;
++ gckHARDWARE hardware;
++
++ gceKERNEL_FLUSH flush = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait);
++
++ /* Get gckCOMMAND object. */
++ command = Event->kernel->command;
++ hardware = Event->kernel->hardware;
++
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ gckOS_GetTicks(&Event->lastCommitStamp);
++
++ /* Are there event queues? */
++ if (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ /* Process all queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the list mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventListMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Allocate an event ID. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source, ChipEnable));
++#else
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->source));
++#endif
++
++ /* Copy event list to event ID queue. */
++ Event->queues[id].head = queue->head;
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead = gcvNULL;
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkONERROR(gckEVENT_FreeQueue(Event, queue));
++
++ /* Release the list mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ acquired = gcvFALSE;
++
++ /* Determine cache needed to flush. */
++ gcmkVERIFY_OK(_QueryFlush(Event, Event->queues[id].head, &flush));
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkVERIFY_OK(gckOS_AtomIncrement(
++ Event->os,
++ Event->interruptCount,
++ &oldValue
++ ));
++#endif
++
++#if gcdNULL_DRIVER
++ /* Notify immediately on infinite hardware. */
++ gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id));
++
++ gcmkONERROR(gckEVENT_Notify(Event, 0));
++#else
++ /* Get the size of the hardware event. */
++ gcmkONERROR(gckHARDWARE_Event(
++ hardware,
++ gcvNULL,
++ id,
++ Event->queues[id].source,
++ &bytes
++ ));
++
++ /* Get the size of flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware,
++ flush,
++ gcvNULL,
++ &flushBytes
++ ));
++
++ bytes += flushBytes;
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ gcvNULL,
++ 0,
++ &chipEnableBytes
++ ));
++
++ bytes += chipEnableBytes * 2;
++#endif
++
++ /* Total bytes need to execute. */
++ executeBytes = bytes;
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &buffer, &bytes));
++#if gcdSECURITY
++ reservedBuffer = buffer;
++#endif
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ buffer,
++ ChipEnable,
++ &chipEnableBytes
++ ));
++
++ buffer = (gctUINT8_PTR)buffer + chipEnableBytes;
++#endif
++
++ /* Set the flush in the command queue. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware,
++ flush,
++ buffer,
++ &flushBytes
++ ));
++
++ /* Advance to next command. */
++ buffer = (gctUINT8_PTR)buffer + flushBytes;
++
++ /* Set the hardware event in the command queue. */
++ gcmkONERROR(gckHARDWARE_Event(
++ hardware,
++ buffer,
++ id,
++ Event->queues[id].source,
++ &bytes
++ ));
++
++ /* Advance to next command. */
++ buffer = (gctUINT8_PTR)buffer + bytes;
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckHARDWARE_ChipEnable(
++ hardware,
++ buffer,
++ gcvCORE_3D_ALL_MASK,
++ &chipEnableBytes
++ ));
++#endif
++
++#if gcdSECURITY
++ gckKERNEL_SecurityExecute(
++ Event->kernel,
++ reservedBuffer,
++ executeBytes
++ );
++#else
++ /* Execute the hardware event. */
++ gcmkONERROR(gckCOMMAND_Execute(command, executeBytes));
++#endif
++#endif
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++
++#if !gcdNULL_DRIVER
++ gcmkVERIFY_OK(_TryToIdleGPU(Event));
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Need to unroll the mutex acquire. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower));
++ }
++
++ if (id != 0xFF)
++ {
++ /* Need to unroll the event allocation. */
++ Event->queues[id].head = gcvNULL;
++ }
++
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Broadcast GPU stuck. */
++ status = gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_STUCK);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
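++/* Editor's note (illustrative, not from the original sources): Wait and
++ * FromPower select the flavour of submission.  In a single-GPU build a
++ * normal commit path typically blocks until an event ID is free, while a
++ * caller that must not sleep can pass Wait = gcvFALSE and handle the
++ * gcvSTATUS_OUT_OF_RESOURCES error that gckEVENT_GetEvent then returns:
++ *
++ *     gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++ *     status = gckEVENT_Submit(Event, gcvFALSE, gcvTRUE);
++ */
++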
++/*******************************************************************************
++**
++** gckEVENT_Commit
++**
++** Commit an event queue from the user.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsQUEUE_PTR Queue
++** User event queue.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue,
++ IN gceCORE_3D_MASK ChipEnable
++ )
++#else
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ )
++#endif
++{
++ gceSTATUS status;
++ gcsQUEUE_PTR record = gcvNULL, next;
++ gctUINT32 processID;
++ gctBOOL needCopy = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Query if we need to copy the client data. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy));
++
++ /* Loop while there are records in the queue. */
++ while (Queue != gcvNULL)
++ {
++ gcsQUEUE queue;
++
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ record = &queue;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(Event->os,
++ record,
++ Queue,
++ gcmSIZEOF(gcsQUEUE)));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(
++ gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE));
++
++ /* Next record in the queue. */
++ next = gcmUINT64_TO_PTR(record->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ record = gcvNULL;
++ }
++
++ Queue = next;
++ }
++
++ /* Submit the event list. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE, ChipEnable));
++#else
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++#endif
++
++ /* Success */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((record != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
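++/* Editor's illustrative sketch -- not part of the original driver sources.
++ * The Queue argument is a user-space singly linked list of gcsQUEUE records;
++ * each record carries one gcsHAL_INTERFACE in `iface` and the address of the
++ * next record in `next` (stored as a 64-bit value).  Conceptually the
++ * user-side driver builds something like the fragment below before issuing
++ * the commit ioctl; `signal` is a hypothetical user-created gctSIGNAL, and
++ * this is only meant to show the layout, not a real kernel calling sequence.
++ */
++#if 0
++    gcsQUEUE eventQueue[2];
++
++    /* First record: signal the application when the GPU gets here. */
++    eventQueue[0].iface.command         = gcvHAL_SIGNAL;
++    eventQueue[0].iface.u.Signal.signal = gcmPTR_TO_UINT64(signal);
++    eventQueue[0].next                  = gcmPTR_TO_UINT64(&eventQueue[1]);
++
++    /* Second (last) record: notify that the commit is done. */
++    eventQueue[1].iface.command         = gcvHAL_COMMIT_DONE;
++    eventQueue[1].next                  = 0;
++
++    /* The kernel then walks the chain exactly as gckEVENT_Commit does above. */
++#endif
++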
++/*******************************************************************************
++**
++** gckEVENT_Compose
++**
++** Schedule a composition event and start a composition.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_COMPOSE_PTR Info
++** Pointer to the composition structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ )
++{
++ gceSTATUS status;
++ gcsEVENT_PTR headRecord;
++ gcsEVENT_PTR tailRecord;
++ gcsEVENT_PTR tempRecord;
++ gctUINT8 id = 0xFF;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Event=0x%x Info=0x%x", Event, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Allocate an event ID. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
++#endif
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ headRecord = tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->process;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->signal;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++
++ /* Allocate another record for user signal #1. */
++ if (gcmUINT64_TO_PTR(Info->userSignal1) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal1;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate another record for user signal #2. */
++ if (gcmUINT64_TO_PTR(Info->userSignal2) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal2;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Set the event list. */
++ Event->queues[id].head = headRecord;
++
++ /* Start composition. */
++ gcmkONERROR(gckHARDWARE_Compose(
++ Event->kernel->hardware, processID,
++ gcmUINT64_TO_PTR(Info->physical), gcmUINT64_TO_PTR(Info->logical), Info->offset, Info->size, id
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Interrupt
++**
++** Called by the interrupt service routine to store the triggered interrupt
++** mask to be later processed by gckEVENT_Notify.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 Data
++** Mask for the 32 interrupts.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctUINT32 Data
++ )
++{
++#if gcdMULTI_GPU
++#if defined(WIN32)
++ gctUINT32 i;
++#endif
++#endif
++ gcmkHEADER_ARG("Event=0x%x Data=0x%x", Event, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Data & 0x20000000)
++ {
++ gckENTRYDATA data;
++ gctUINT32 idle;
++ Data &= ~0x20000000;
++
++#if gcdMULTI_GPU
++ if (Event->kernel->core == gcvCORE_MAJOR)
++#endif
++ {
++ /* Get first entry information. */
++ gcmkVERIFY_OK(
++ gckENTRYQUEUE_Dequeue(&Event->kernel->command->queue, &data));
++
++ /* Make sure FE is idle. */
++ do
++ {
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Event->os,
++ Event->kernel->core,
++ 0x4,
++ &idle));
++ }
++ while (idle != 0x7FFFFFFF);
++
++ /* Start Command Parser. */
++ gcmkVERIFY_OK(gckHARDWARE_Execute(
++ Event->kernel->hardware,
++ data->physical,
++ data->bytes
++ ));
++ }
++ }
++
++ /* Combine current interrupt status with pending flags. */
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ gckOS_AtomSetMask(Event->pending3D[CoreId], Data);
++ }
++ else
++#endif
++ {
++ gckOS_AtomSetMask(Event->pending, Data);
++ }
++#elif defined(__QNXNTO__)
++#if gcdMULTI_GPU
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ atomic_set(&Event->pending3D[CoreId], Data);
++ }
++ else
++#endif
++ {
++ atomic_set(&Event->pending, Data);
++ }
++#else
++#if gcdMULTI_GPU
++#if defined(WIN32)
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ Event->pending3D[i] |= Data;
++ }
++ }
++ else
++#else
++ if (Event->kernel->core == gcvCORE_MAJOR)
++ {
++ Event->pending3D[CoreId] |= Data;
++ }
++ else
++#endif
++#endif
++ {
++ Event->pending |= Data;
++ }
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ {
++ gctINT j = 0;
++ gctINT32 oldValue;
++
++ for (j = 0; j < gcmCOUNTOF(Event->queues); j++)
++ {
++ if ((Data & (1 << j)))
++ {
++ gcmkVERIFY_OK(gckOS_AtomDecrement(Event->os,
++ Event->interruptCount,
++ &oldValue));
++ }
++ }
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Notify
++**
++** Process all triggered interrupts.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctINT i;
++ gcsEVENT_QUEUE * queue;
++ gctUINT mask = 0;
++ gctBOOL acquired = gcvFALSE;
++ gctPOINTER info;
++ gctSIGNAL signal;
++ gctUINT pending = 0;
++ gckKERNEL kernel = Event->kernel;
++#if gcdMULTI_GPU
++ gceCORE core = Event->kernel->core;
++ gctUINT32 busy;
++ gctUINT32 oldValue;
++ gctUINT pendingMask;
++#endif
++#if !gcdSMP
++ gctBOOL suspended = gcvFALSE;
++#endif
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctINT eventNumber = 0;
++#endif
++ gctINT32 free;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gckVIDMEM_NODE nodeObject;
++ gcuVIDMEM_NODE_PTR node;
++
++ gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ gcmDEBUG_ONLY(
++ if (IDs != 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++#if gcdMULTI_GPU
++ /* Set busy flag. */
++ gckOS_AtomicExchange(Event->os, &Event->busy, 1, &busy);
++ if (busy)
++ {
++ /* Another thread is already busy - abort. */
++ goto OnSuccess;
++ }
++#endif
++
++ for (;;)
++ {
++ gcsEVENT_PTR record;
++#if gcdMULTI_GPU
++ gctUINT32 pend[gcdMULTI_GPU];
++ gctUINT32 pendMask[gcdMULTI_GPU];
++#endif
++
++        /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ /* Get current interrupts. */
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gckOS_AtomGet(Event->os, Event->pending3D[i], (gctINT32_PTR)&pend[i]);
++ gckOS_AtomGet(Event->os, Event->pending3DMask[i], (gctINT32_PTR)&pendMask[i]);
++ }
++
++ gckOS_AtomGet(Event->os, Event->pendingMask, (gctINT32_PTR)&pendingMask);
++ }
++ else
++#endif
++ {
++ gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending);
++ }
++#else
++ /* Suspend interrupts. */
++ gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvTRUE;
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Get current interrupts. */
++ pend[i] = Event->pending3D[i];
++ pendMask[i] = Event->pending3DMask[i];
++ }
++
++ pendingMask = Event->pendingMask;
++ }
++ else
++#endif
++ {
++ pending = Event->pending;
++ }
++
++ /* Resume interrupts. */
++ gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvFALSE;
++#endif
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gctUINT32 bad_pend = (pend[i] & ~pendMask[i]);
++
++ if (bad_pend != 0)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(bad_pend) + gcmSIZEOF(i),
++                        "Interrupts 0x%x are not expected for Core%d.",
++ bad_pend, i
++ );
++
++ gckOS_AtomClearMask(Event->pending3D[i], bad_pend);
++
++ pend[i] &= pendMask[i];
++ }
++ }
++
++ pending = (pend[0] & pend[1] & pendingMask) /* Check combined events on both GPUs */
++ | (pend[0] & ~pendingMask) /* Check individual events on GPU 0 */
++ | (pend[1] & ~pendingMask); /* Check individual events on GPU 1 */
++ }
++#endif
++
++ if (pending == 0)
++ {
++            /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* No more pending interrupts - done. */
++ break;
++ }
++
++ if (pending & 0x80000000)
++ {
++ gctUINT32 AQAxiStatus = 0;
++ gckOS_ReadRegisterEx(Event->os, Event->kernel->hardware->core, 0xC, &AQAxiStatus);
++
++ gcmkPRINT("GPU[%d]: AXI BUS ERROR, AQAxiStatus=0x%x\n", Event->kernel->hardware->core, AQAxiStatus);
++ pending &= 0x7FFFFFFF;
++ }
++
++ if (pending & 0x40000000)
++ {
++ gckHARDWARE_DumpMMUException(Event->kernel->hardware);
++
++ pending &= 0xBFFFFFFF;
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Pending interrupts 0x%x",
++ pending
++ );
++
++ queue = gcvNULL;
++
++ gcmDEBUG_ONLY(
++ if (IDs == 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ /* Find the oldest pending interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (pending & (1 << i))
++ )
++ {
++ if ((queue == gcvNULL)
++ || (Event->queues[i].stamp < queue->stamp)
++ )
++ {
++ queue = &Event->queues[i];
++ mask = 1 << i;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ eventNumber = i;
++#endif
++ }
++ }
++ }
++
++ if (queue == gcvNULL)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Interrupts 0x%x are not pending.",
++ pending
++ );
++
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ /* Mark pending interrupts as handled. */
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ gckOS_AtomClearMask(Event->pending3D[i], pending);
++ gckOS_AtomClearMask(Event->pending3DMask[i], pending);
++ }
++
++ gckOS_AtomClearMask(Event->pendingMask, pending);
++ }
++ else
++#endif
++ {
++ gckOS_AtomClearMask(Event->pending, pending);
++ }
++
++#elif defined(__QNXNTO__)
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ atomic_clr((gctUINT32_PTR)&Event->pending3D[i], pending);
++ atomic_clr((gctUINT32_PTR)&Event->pending3DMask[i], pending);
++ }
++
++ atomic_clr((gctUINT32_PTR)&Event->pendingMask, pending);
++ }
++ else
++#endif
++ {
++ atomic_clr((gctUINT32_PTR)&Event->pending, pending);
++ }
++#else
++ /* Suspend interrupts. */
++ gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvTRUE;
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Mark pending interrupts as handled. */
++ Event->pending3D[i] &= ~pending;
++ Event->pending3DMask[i] &= ~pending;
++ }
++ }
++ else
++#endif
++ {
++ Event->pending &= ~pending;
++ }
++
++ /* Resume interrupts. */
++ gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvFALSE;
++#endif
++
++            /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++ break;
++ }
++
++ /* Check whether there is a missed interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (Event->queues[i].stamp < queue->stamp)
++ && (Event->queues[i].source <= queue->source)
++#if gcdMULTI_GPU
++ && (Event->queues[i].chipEnable == queue->chipEnable)
++#endif
++ )
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp),
++ "Event %d lost (stamp %llu)",
++ i, Event->queues[i].stamp
++ );
++
++ /* Use this event instead. */
++ queue = &Event->queues[i];
++ mask = 0;
++ }
++ }
++
++ if (mask != 0)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(eventNumber),
++ "Processing interrupt %d",
++ eventNumber
++ );
++#endif
++ }
++
++#if gcdSMP
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Mark pending interrupt as handled. */
++ gckOS_AtomClearMask(Event->pending3D[i], mask);
++ gckOS_AtomClearMask(Event->pending3DMask[i], mask);
++ }
++
++ gckOS_AtomClearMask(Event->pendingMask, mask);
++ }
++ else
++#endif
++ {
++ gckOS_AtomClearMask(Event->pending, mask);
++ }
++
++#elif defined(__QNXNTO__)
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ atomic_clr(&Event->pending3D[i], mask);
++ atomic_clr(&Event->pending3DMask[i], mask);
++ }
++
++ atomic_clr(&Event->pendingMask, mask);
++ }
++ else
++#endif
++ {
++ atomic_clr(&Event->pending, mask);
++ }
++#else
++ /* Suspend interrupts. */
++ gcmkONERROR(gckOS_SuspendInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvTRUE;
++
++#if gcdMULTI_GPU
++ if (core == gcvCORE_MAJOR)
++ {
++ for (i = 0; i < gcdMULTI_GPU; i++)
++ {
++ /* Mark pending interrupt as handled. */
++ Event->pending3D[i] &= ~mask;
++ Event->pending3DMask[i] &= ~mask;
++ }
++
++ Event->pendingMask &= ~mask;
++ }
++ else
++#endif
++ {
++ Event->pending &= ~mask;
++ }
++
++ /* Resume interrupts. */
++ gcmkONERROR(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ suspended = gcvFALSE;
++#endif
++
++ /* Grab the event head. */
++ record = queue->head;
++
++ /* Now quickly clear its event list. */
++ queue->head = gcvNULL;
++
++        /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Increase the number of free events. */
++ gcmkONERROR(gckOS_AtomIncrement(Event->os, Event->freeAtom, &free));
++
++ /* Walk all events for this interrupt. */
++ while (record != gcvNULL)
++ {
++ gcsEVENT_PTR recordNext;
++#ifndef __QNXNTO__
++ gctPOINTER logical;
++#endif
++#if gcdSECURE_USER
++ gctSIZE_T bytes;
++#endif
++
++ /* Grab next record. */
++ recordNext = record->next;
++
++#ifdef __QNXNTO__
++ /* Assign record->processID as the pid for this galcore thread.
++ * Used in OS calls like gckOS_UnlockMemory() which do not take a pid.
++ */
++ drv_thread_specific_key_assign(record->processID, 0, Event->kernel->core);
++#endif
++
++#if gcdSECURE_USER
++ /* Get the cache that belongs to this process. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel,
++ record->processID,
++ &cache));
++#endif
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Processing event type: %d",
++ record->info.command
++ );
++
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical));
++
++ /* Free non-paged memory. */
++ status = gckOS_FreeNonPagedMemory(
++ Event->os,
++ (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical));
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++                        gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical),
++                        (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical));
++
++ /* Unmap the user memory. */
++ status = gckOS_FreeContiguous(
++ Event->os,
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes);
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++                        gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
++                        (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_WRITE_DATA:
++#ifndef __QNXNTO__
++ /* Convert physical into logical address. */
++ gcmkERR_BREAK(
++ gckOS_MapPhysical(Event->os,
++ record->info.u.WriteData.address,
++ gcmSIZEOF(gctUINT32),
++ &logical));
++
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ logical,
++ record->info.u.WriteData.data));
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(
++ gckOS_UnmapPhysical(Event->os,
++ logical,
++ gcmSIZEOF(gctUINT32)));
++#else
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ (gctPOINTER)
++ record->info.u.WriteData.address,
++ record->info.u.WriteData.data));
++#endif
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x",
++ record->info.u.UnlockVideoMemory.node);
++
++ nodeObject = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node);
++
++ node = nodeObject->node;
++
++ /* Save node information before it disappears. */
++#if gcdSECURE_USER
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock. */
++ status = gckVIDMEM_Unlock(
++ Event->kernel,
++ nodeObject,
++ record->info.u.UnlockVideoMemory.type,
++ gcvNULL);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status) && (logical != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Unlock(
++ Event->kernel,
++ nodeObject,
++ record->processID
++ ));
++#endif
++
++ status = gckVIDMEM_NODE_Dereference(Event->kernel, nodeObject);
++ break;
++
++ case gcvHAL_SIGNAL:
++ signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_SIGNAL: 0x%x",
++ signal);
++
++#ifdef __QNXNTO__
++ if ((record->info.u.Signal.coid == 0)
++ && (record->info.u.Signal.rcvid == 0)
++ )
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ record->info.u.Signal.rcvid,
++ record->info.u.Signal.coid));
++ }
++#else
++ /* Set signal. */
++ if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL)
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ gcmUINT64_TO_PTR(record->info.u.Signal.process)));
++ }
++
++ gcmkASSERT(record->info.u.Signal.auxSignal == 0);
++#endif
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ info = gcmNAME_TO_PTR(record->info.u.UnmapUserMemory.info);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNMAP_USER_MEMORY: 0x%x",
++ info);
++
++ /* Unmap the user memory. */
++ status = gckOS_UnmapUserMemory(
++ Event->os,
++ Event->kernel->core,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size,
++ info,
++ record->info.u.UnmapUserMemory.address);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size));
++ }
++#endif
++ gcmRELEASE_NAME(record->info.u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_TIMESTAMP: %d %d",
++ record->info.u.TimeStamp.timer,
++ record->info.u.TimeStamp.request);
++
++ /* Process the timestamp. */
++ switch (record->info.u.TimeStamp.request)
++ {
++ case 0:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ stopTime);
++ break;
++
++ case 1:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ startTime);
++ break;
++
++ default:
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.u.TimeStamp.request),
++ "Invalid timestamp request: %d",
++ record->info.u.TimeStamp.request
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel,
++ (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical)
++ ));
++ gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ syncPoint = gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint);
++ status = gckOS_SignalSyncPoint(Event->os, syncPoint);
++ }
++ break;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ case gcvHAL_DESTROY_MMU:
++ status = gckMMU_Destroy(gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu));
++ break;
++#endif
++
++ case gcvHAL_COMMIT_DONE:
++ break;
++
++ default:
++ /* Invalid argument. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Unknown event type: %d",
++ record->info.command
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Make sure there are no errors generated. */
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(status),
++ "Event produced status: %d(%s)",
++ status, gckOS_DebugStatus2Name(status));
++ }
++
++ /* Free the event. */
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++
++ /* Advance to next record. */
++ record = recordNext;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Handled interrupt 0x%x", mask);
++ }
++
++#if gcdMULTI_GPU
++ /* Clear busy flag. */
++ gckOS_AtomicExchange(Event->os, &Event->busy, 0, &oldValue);
++#endif
++
++ if (IDs == 0)
++ {
++ gcmkONERROR(_TryToIdleGPU(Event));
++ }
++
++#if gcdMULTI_GPU
++OnSuccess:
++#endif
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++#if !gcdSMP
++ if (suspended)
++ {
++ /* Resume interrupts. */
++ gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ }
++#endif
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
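++/* Editor's illustrative sketch -- not part of the original driver sources.
++ * gckEVENT_Interrupt and gckEVENT_Notify form a classic top-half/bottom-half
++ * pair: the interrupt handler only latches the triggered bits, and a worker
++ * thread later walks the per-ID queues.  The fragment below assumes a
++ * single-GPU build (no CoreId argument), a `data` value read from the
++ * hardware acknowledge register by platform code, and some platform-specific
++ * way of waking the galcore worker thread.
++ */
++#if 0
++    /* Top half (interrupt context): latch the acknowledged interrupt bits. */
++    gcmkVERIFY_OK(gckEVENT_Interrupt(Event, data));
++    /* ...wake the galcore worker thread here... */
++
++    /* Bottom half (thread context): process every latched event ID. */
++    gcmkVERIFY_OK(gckEVENT_Notify(Event, 0));
++#endif
++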
++/*******************************************************************************
++** gckEVENT_FreeProcess
++**
++** Free all events owned by a particular process ID.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID of the process to be freed up.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ )
++{
++ gctSIZE_T i;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record, next;
++ gceSTATUS status;
++ gcsEVENT_PTR deleteHead, deleteTail;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Walk through all queues. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Grab the mutex head. */
++ record = Event->queues[i].head;
++ Event->queues[i].head = gcvNULL;
++ Event->queues[i].tail = gcvNULL;
++ deleteHead = gcvNULL;
++ deleteTail = gcvNULL;
++
++ while (record != gcvNULL)
++ {
++ next = record->next;
++ if (record->processID == ProcessID)
++ {
++ if (deleteHead == gcvNULL)
++ {
++ deleteHead = record;
++ }
++ else
++ {
++ deleteTail->next = record;
++ }
++
++ deleteTail = record;
++ }
++ else
++ {
++ if (Event->queues[i].head == gcvNULL)
++ {
++ Event->queues[i].head = record;
++ }
++ else
++ {
++ Event->queues[i].tail->next = record;
++ }
++
++ Event->queues[i].tail = record;
++ }
++
++ record->next = gcvNULL;
++ record = next;
++ }
++
++            /* Release the event queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Loop through the entire list of events. */
++ for (record = deleteHead; record != gcvNULL; record = next)
++ {
++ /* Get the next event record. */
++ next = record->next;
++
++ /* Free the event record. */
++ gcmkONERROR(gckEVENT_FreeRecord(Event, record));
++ }
++ }
++ }
++
++ gcmkONERROR(_TryToIdleGPU(Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release the event queue mutex. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_Stop
++**
++** Stop the hardware using the End event mechanism.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 ProcessID
++**      Process ID to which Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctUINT32 * waitSize
++ )
++{
++ gceSTATUS status;
++ /* gctSIZE_T waitSize;*/
++ gcsEVENT_PTR record;
++ gctUINT8 id = 0xFF;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x "
++ "Signal=0x%x",
++ Event, ProcessID, Handle, Logical, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Submit the current event queue. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++#endif
++#if gcdMULTI_GPU
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, gcvKERNEL_PIXEL));
++#endif
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record));
++
++ /* Initialize the record. */
++ record->next = gcvNULL;
++ record->processID = ProcessID;
++ record->info.command = gcvHAL_SIGNAL;
++ record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ record->info.u.Signal.coid = 0;
++ record->info.u.Signal.rcvid = 0;
++#endif
++ record->info.u.Signal.auxSignal = 0;
++ record->info.u.Signal.process = 0;
++
++ /* Append the record. */
++ Event->queues[id].head = record;
++
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ Event->kernel->hardware, Logical, waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Event->os,
++ ProcessID,
++ gcvNULL,
++ (gctUINT32)Handle,
++ Logical,
++ *waitSize
++ ));
++#endif
++
++ /* Wait for the signal. */
++ gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_PrintRecord(
++ gcsEVENT_PTR record
++ )
++{
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY");
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY");
++ break;
++
++ case gcvHAL_WRITE_DATA:
++ gcmkPRINT(" gcvHAL_WRITE_DATA");
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_SIGNAL:
++ gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x",
++ record->info.u.Signal.process,
++ record->info.u.Signal.signal);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY");
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkPRINT(" gcvHAL_TIMESTAMP");
++ break;
++
++ case gcvHAL_COMMIT_DONE:
++ gcmkPRINT(" gcvHAL_COMMIT_DONE");
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x",
++ record->info.u.FreeVirtualCommandBuffer.logical);
++ break;
++
++ case gcvHAL_SYNC_POINT:
++ gcmkPRINT(" gcvHAL_SYNC_POINT syncPoint=0x%08x",
++ gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint));
++
++ break;
++
++ case gcvHAL_DESTROY_MMU:
++        gcmkPRINT("      gcvHAL_DESTROY_MMU mmu=0x%08x",
++ gcmUINT64_TO_PTR(record->info.u.DestroyMmu.mmu));
++
++ break;
++ default:
++ gcmkPRINT(" Illegal Event %d", record->info.command);
++ break;
++ }
++}
++
++/*******************************************************************************
++** gckEVENT_Dump
++**
++**  Dump the records in the event queues when the GPU gets stuck.
++**  The event queues are not protected by a lock here.
++**/
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_QUEUE_PTR queueHead = Event->queueHead;
++ gcsEVENT_QUEUE_PTR queue;
++ gcsEVENT_PTR record = gcvNULL;
++ gctINT i;
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 pendingInterrupt;
++ gctUINT32 intrAcknowledge;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** EVENT STATE DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++    gcmkPRINT("  Unsubmitted Event:");
++ while(queueHead)
++ {
++ queue = queueHead;
++ record = queueHead->head;
++
++ gcmkPRINT(" [%x]:", queue);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++
++ if (queueHead == Event->queueTail)
++ {
++ queueHead = gcvNULL;
++ }
++ else
++ {
++ queueHead = queueHead->next;
++ }
++ }
++
++ gcmkPRINT(" Untriggered Event:");
++ for (i = 0; i < gcmCOUNTOF(Event->queues); i++)
++ {
++ queue = &Event->queues[i];
++ record = queue->head;
++
++ gcmkPRINT(" [%d]:", i);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ gckOS_AtomGet(Event->os, Event->interruptCount, &pendingInterrupt);
++ gcmkPRINT(" Number of Pending Interrupt: %d", pendingInterrupt);
++
++ if (Event->kernel->recovery == 0)
++ {
++ gckOS_ReadRegisterEx(
++ Event->os,
++ Event->kernel->core,
++ 0x10,
++ &intrAcknowledge
++ );
++
++ gcmkPRINT(" INTR_ACKNOWLEDGE=0x%x", intrAcknowledge);
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel.h 2015-05-01 14:57:59.575427001 -0500
+@@ -0,0 +1,1489 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_h_
++#define __gc_hal_kernel_h_
++
++#include "gc_hal.h"
++#include "gc_hal_kernel_hardware.h"
++#include "gc_hal_driver.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_vg.h"
++#endif
++
++#if gcdSECURITY
++#include "gc_hal_security_interface.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/*******************************************************************************
++***** New MMU Definition ******************************************************/
++#define gcdMMU_MTLB_SHIFT 22
++#define gcdMMU_STLB_4K_SHIFT 12
++#define gcdMMU_STLB_64K_SHIFT 16
++
++#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT)
++#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT
++#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS)
++#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT
++#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS)
++
++#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS)
++#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2)
++#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS)
++#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT)
++#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS)
++#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT)
++
++#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1))
++#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1)
++#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1)
++
++/* Page offset definitions. */
++#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS)
++#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1)
++#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS)
++#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1)
++
++#define gcdMMU_MTLB_PRESENT 0x00000001
++#define gcdMMU_MTLB_EXCEPTION 0x00000002
++#define gcdMMU_MTLB_4K_PAGE 0x00000000
++
++#define gcdMMU_STLB_PRESENT 0x00000001
++#define gcdMMU_STLB_EXCEPTION 0x00000002
++#define gcdMMU_STLB_4K_PAGE 0x00000000
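++
++/*
++** For illustration, a 32-bit GPU virtual address decomposes as follows under
++** the layout defined above:
++**   4K pages : MTLB index = bits [31:22], STLB index = bits [21:12],
++**              page offset = bits [11:0].
++**   64K pages: MTLB index = bits [31:22], STLB index = bits [21:16],
++**              page offset = bits [15:0].
++*/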
++
++/*******************************************************************************
++***** Stuck Dump Level ********************************************************/
++
++#define gcdSTUCK_DUMP_MINIMAL 1
++#define gcdSTUCK_DUMP_MIDDLE 2
++#define gcdSTUCK_DUMP_MAXIMAL 3
++
++/*******************************************************************************
++***** Process Secure Cache ****************************************************/
++
++#define gcdSECURE_CACHE_LRU 1
++#define gcdSECURE_CACHE_LINEAR 2
++#define gcdSECURE_CACHE_HASH 3
++#define gcdSECURE_CACHE_TABLE 4
++
++#define gcvPAGE_TABLE_DIRTY_BIT_OTHER (1 << 0)
++#define gcvPAGE_TABLE_DIRTY_BIT_FE (1 << 1)
++
++typedef struct _gcskLOGICAL_CACHE * gcskLOGICAL_CACHE_PTR;
++typedef struct _gcskLOGICAL_CACHE gcskLOGICAL_CACHE;
++struct _gcskLOGICAL_CACHE
++{
++ /* Logical address. */
++ gctPOINTER logical;
++
++ /* DMAable address. */
++ gctUINT32 dma;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Pointer to the previous and next hash tables. */
++ gcskLOGICAL_CACHE_PTR nextHash;
++ gcskLOGICAL_CACHE_PTR prevHash;
++#endif
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Pointer to the previous and next slot. */
++ gcskLOGICAL_CACHE_PTR next;
++ gcskLOGICAL_CACHE_PTR prev;
++#endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ /* Time stamp. */
++ gctUINT64 stamp;
++#endif
++};
++
++typedef struct _gcskSECURE_CACHE * gcskSECURE_CACHE_PTR;
++typedef struct _gcskSECURE_CACHE
++{
++ /* Cache memory. */
++ gcskLOGICAL_CACHE cache[1 + gcdSECURE_CACHE_SLOTS];
++
++ /* Last known index for LINEAR mode. */
++ gcskLOGICAL_CACHE_PTR cacheIndex;
++
++ /* Current free slot for LINEAR mode. */
++ gctUINT32 cacheFree;
++
++ /* Time stamp for LINEAR mode. */
++ gctUINT64 cacheStamp;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Hash table for HASH mode. */
++ gcskLOGICAL_CACHE hash[256];
++#endif
++}
++gcskSECURE_CACHE;
++
++/*******************************************************************************
++***** Process Database Management *********************************************/
++
++typedef enum _gceDATABASE_TYPE
++{
++ gcvDB_VIDEO_MEMORY = 1, /* Video memory created. */
++ gcvDB_COMMAND_BUFFER, /* Command Buffer. */
++ gcvDB_NON_PAGED, /* Non paged memory. */
++ gcvDB_CONTIGUOUS, /* Contiguous memory. */
++ gcvDB_SIGNAL, /* Signal. */
++ gcvDB_VIDEO_MEMORY_LOCKED, /* Video memory locked. */
++ gcvDB_CONTEXT, /* Context */
++ gcvDB_IDLE, /* GPU idle. */
++ gcvDB_MAP_MEMORY, /* Map memory */
++ gcvDB_MAP_USER_MEMORY, /* Map user memory */
++ gcvDB_SYNC_POINT, /* Sync point. */
++ gcvDB_SHBUF, /* Shared buffer. */
++}
++gceDATABASE_TYPE;
++
++#define gcdDATABASE_TYPE_MASK 0x000000FF
++#define gcdDB_VIDEO_MEMORY_TYPE_MASK 0x0000FF00
++#define gcdDB_VIDEO_MEMORY_TYPE_SHIFT 8
++
++#define gcdDB_VIDEO_MEMORY_POOL_MASK 0x00FF0000
++#define gcdDB_VIDEO_MEMORY_POOL_SHIFT 16
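++
++/*
++** The masks above suggest the packed encoding used for video memory records:
++** the base gceDATABASE_TYPE stays in the low byte (gcdDATABASE_TYPE_MASK),
++** the surface type is stored at gcdDB_VIDEO_MEMORY_TYPE_SHIFT and the pool
++** at gcdDB_VIDEO_MEMORY_POOL_SHIFT of the same 32-bit type value.
++*/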
++
++typedef struct _gcsDATABASE_RECORD * gcsDATABASE_RECORD_PTR;
++typedef struct _gcsDATABASE_RECORD
++{
++ /* Pointer to kernel. */
++ gckKERNEL kernel;
++
++ /* Pointer to next database record. */
++ gcsDATABASE_RECORD_PTR next;
++
++ /* Type of record. */
++ gceDATABASE_TYPE type;
++
++ /* Data for record. */
++ gctPOINTER data;
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++}
++gcsDATABASE_RECORD;
++
++typedef struct _gcsDATABASE * gcsDATABASE_PTR;
++typedef struct _gcsDATABASE
++{
++ /* Pointer to next entry in hash list. */
++ gcsDATABASE_PTR next;
++ gctSIZE_T slot;
++
++ /* Process ID. */
++ gctUINT32 processID;
++
++ /* Sizes to query. */
++ gcsDATABASE_COUNTERS vidMem;
++ gcsDATABASE_COUNTERS nonPaged;
++ gcsDATABASE_COUNTERS contiguous;
++ gcsDATABASE_COUNTERS mapUserMemory;
++ gcsDATABASE_COUNTERS mapMemory;
++ gcsDATABASE_COUNTERS virtualCommandBuffer;
++
++ gcsDATABASE_COUNTERS vidMemType[gcvSURF_NUM_TYPES];
++ /* Counter for each video memory pool. */
++ gcsDATABASE_COUNTERS vidMemPool[gcvPOOL_NUMBER_OF_POOLS];
++ gctPOINTER counterMutex;
++
++ /* Idle time management. */
++ gctUINT64 lastIdle;
++ gctUINT64 idle;
++
++ /* Pointer to database. */
++ gcsDATABASE_RECORD_PTR list[48];
++
++#if gcdSECURE_USER
++ /* Secure cache. */
++ gcskSECURE_CACHE cache;
++#endif
++
++ gctPOINTER handleDatabase;
++ gctPOINTER handleDatabaseMutex;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++}
++gcsDATABASE;
++
++typedef struct _gcsRECORDER * gckRECORDER;
++
++typedef struct _gcsFDPRIVATE * gcsFDPRIVATE_PTR;
++typedef struct _gcsFDPRIVATE
++{
++ gctINT (* release) (gcsFDPRIVATE_PTR Private);
++}
++gcsFDPRIVATE;
++
++/* Create a process database that will contain all its allocations. */
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Add a record to the process database. */
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ );
++
++/* Remove a record from the process database. */
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ );
++
++/* Destroy the process database. */
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Find a record in the process database. */
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ );
++
++/* Query the process database. */
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ );
++
++/* Dump the process database. */
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ );
++
++/* Dump the video memory usage for process specified. */
++gceSTATUS
++gckKERNEL_DumpVidMemUsage(
++ IN gckKERNEL Kernel,
++ IN gctINT32 ProcessID
++ );
++
++gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ );
++
++gceSTATUS
++gckKERNEL_FindHandleDatbase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gctPOINTER * HandleDatabase,
++ OUT gctPOINTER * HandleDatabaseMutex
++ );
++
++gceSTATUS
++gckKERNEL_GetProcessMMU(
++ IN gckKERNEL Kernel,
++ OUT gckMMU * Mmu
++ );
++
++gceSTATUS
++gckKERNEL_SetRecovery(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Recovery,
++ IN gctUINT32 StuckDump
++ );
++
++gceSTATUS
++gckMMU_FlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 Physical
++ );
++
++gceSTATUS
++gckMMU_GetPageEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctUINT32_PTR *PageTable
++ );
++
++gceSTATUS
++gckMMU_FreePagesEx(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctSIZE_T PageCount
++ );
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ );
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ );
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ );
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ );
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ );
++
++/* Pointer rename */
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ );
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++#if gcdSECURE_USER
++/* Get secure cache from the process database. */
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ );
++#endif
++
++/*******************************************************************************
++********* Timer Management ****************************************************/
++typedef struct _gcsTIMER * gcsTIMER_PTR;
++typedef struct _gcsTIMER
++{
++ /* Start and Stop time holders. */
++ gctUINT64 startTime;
++ gctUINT64 stopTime;
++}
++gcsTIMER;
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckDB object. */
++struct _gckDB
++{
++ /* Database management. */
++ gcsDATABASE_PTR db[16];
++ gctPOINTER dbMutex;
++ gcsDATABASE_PTR freeDatabase;
++ gcsDATABASE_RECORD_PTR freeRecord;
++ gcsDATABASE_PTR lastDatabase;
++ gctUINT32 lastProcessID;
++ gctUINT64 lastIdle;
++ gctUINT64 idleTime;
++ gctUINT64 lastSlowdown;
++ gctUINT64 lastSlowdownIdle;
++ gctPOINTER nameDatabase;
++ gctPOINTER nameDatabaseMutex;
++
++ gctPOINTER pointerDatabase;
++ gctPOINTER pointerDatabaseMutex;
++};
++
++typedef struct _gckVIRTUAL_COMMAND_BUFFER * gckVIRTUAL_COMMAND_BUFFER_PTR;
++typedef struct _gckVIRTUAL_COMMAND_BUFFER
++{
++ gctPHYS_ADDR physical;
++ gctPOINTER userLogical;
++ gctPOINTER kernelLogical;
++ gctSIZE_T bytes;
++ gctSIZE_T pageCount;
++ gctPOINTER pageTable;
++ gctUINT32 gpuAddress;
++ gctUINT pid;
++ gckVIRTUAL_COMMAND_BUFFER_PTR next;
++ gckVIRTUAL_COMMAND_BUFFER_PTR prev;
++ gckKERNEL kernel;
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++}
++gckVIRTUAL_COMMAND_BUFFER;
++
++/* gckKERNEL object. */
++struct _gckKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Pointer to gckCOMMAND object. */
++ gckCOMMAND command;
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckMMU mmu;
++
++ /* Atom holding the number of clients. */
++ gctPOINTER atomClients;
++
++#if VIVANTE_PROFILER
++ /* Enable profiling */
++ gctBOOL profileEnable;
++ /* Clear profile register or not*/
++ gctBOOL profileCleanRegister;
++#endif
++
++#ifdef QNX_SINGLE_THREADED_DEBUGGING
++ gctPOINTER debugMutex;
++#endif
++
++ /* Database management. */
++ gckDB db;
++ gctBOOL dbCreated;
++
++ gctUINT64 resetTimeStamp;
++
++ /* Timers. */
++ gcsTIMER timers[8];
++ gctUINT32 timeOut;
++
++#if gcdENABLE_VG
++ gckVGKERNEL vg;
++#endif
++
++ /* Virtual command buffer list. */
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferHead;
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferTail;
++ gctPOINTER virtualBufferLock;
++
++ /* Enable virtual command buffer. */
++ gctBOOL virtualCommandBuffer;
++
++#if gcdDVFS
++ gckDVFS dvfs;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gctHANDLE timeline;
++#endif
++
++ /* Enable recovery. */
++ gctBOOL recovery;
++
++ /* Level of dump information after stuck. */
++ gctUINT stuckDump;
++
++#if gcdSECURITY
++ gctUINT32 securityChannel;
++#endif
++
++ /* Timer to monitor GPU stuck. */
++ gctPOINTER monitorTimer;
++
++ /* Flag to quit monitor timer. */
++ gctBOOL monitorTimerStop;
++
++ /* Monitor states. */
++ gctBOOL monitoring;
++ gctUINT32 lastCommitStamp;
++ gctUINT32 timer;
++ gctUINT32 restoreAddress;
++ gctUINT32 restoreMask;
++};
++
++struct _FrequencyHistory
++{
++ gctUINT32 frequency;
++ gctUINT32 count;
++};
++
++/* gckDVFS object. */
++struct _gckDVFS
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gctPOINTER timer;
++ gctUINT32 pollingTime;
++ gctBOOL stop;
++ gctUINT32 totalConfig;
++ gctUINT32 loads[8];
++ gctUINT8 currentScale;
++ struct _FrequencyHistory frequencyHistory[16];
++};
++
++/* gckCOMMAND object. */
++struct _gckCOMMAND
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to required object. */
++ gckKERNEL kernel;
++ gckOS os;
++
++ /* Number of bytes per page. */
++ gctUINT32 pageSize;
++
++ /* Current pipe select. */
++ gcePIPE_SELECT pipeSelect;
++
++ /* Command queue running flag. */
++ gctBOOL running;
++
++ /* Idle flag and commit stamp. */
++ gctBOOL idle;
++ gctUINT64 commitStamp;
++
++ /* Command queue mutex. */
++ gctPOINTER mutexQueue;
++
++ /* Context switching mutex. */
++ gctPOINTER mutexContext;
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context sequence mutex. */
++ gctPOINTER mutexContextSeq;
++#endif
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++
++ /* Current command queue. */
++ struct _gcskCOMMAND_QUEUE
++ {
++ gctSIGNAL signal;
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 address;
++ }
++ queues[gcdCOMMAND_QUEUES];
++
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 address;
++ gctUINT32 offset;
++ gctINT index;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctUINT wrapCount;
++#endif
++
++ /* The command queue is new. */
++ gctBOOL newQueue;
++
++ /* Context management. */
++ gckCONTEXT currContext;
++
++ /* Pointer to last WAIT command. */
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitSize;
++
++ /* Command buffer alignment. */
++ gctUINT32 alignment;
++ gctUINT32 reservedHead;
++ gctUINT32 reservedTail;
++
++ /* Commit counter. */
++ gctPOINTER atomCommit;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* End Event signal. */
++ gctSIGNAL endEventSignal;
++
++#if gcdSECURE_USER
++ /* Hint array copy buffer. */
++ gctBOOL hintArrayAllocated;
++ gctUINT hintArraySize;
++ gctUINT32_PTR hintArray;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU currentMmu;
++#endif
++ struct _gckENTRYQUEUE queue;
++};
++
++typedef struct _gcsEVENT * gcsEVENT_PTR;
++
++/* Structure holding one event to be processed. */
++typedef struct _gcsEVENT
++{
++ /* Pointer to next event in queue. */
++ gcsEVENT_PTR next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE info;
++
++ /* Process ID owning the event. */
++ gctUINT32 processID;
++
++#ifdef __QNXNTO__
++ /* Kernel. */
++ gckKERNEL kernel;
++#endif
++
++ gctBOOL fromKernel;
++}
++gcsEVENT;
++
++/* Structure holding a list of events to be processed by an interrupt. */
++typedef struct _gcsEVENT_QUEUE * gcsEVENT_QUEUE_PTR;
++typedef struct _gcsEVENT_QUEUE
++{
++ /* Time stamp. */
++ gctUINT64 stamp;
++
++ /* Source of the event. */
++ gceKERNEL_WHERE source;
++
++#if gcdMULTI_GPU
++ /* Which chip(s) of the event */
++ gceCORE_3D_MASK chipEnable;
++#endif
++
++ /* Pointer to head of event queue. */
++ gcsEVENT_PTR head;
++
++ /* Pointer to tail of event queue. */
++ gcsEVENT_PTR tail;
++
++ /* Next list of events. */
++ gcsEVENT_QUEUE_PTR next;
++}
++gcsEVENT_QUEUE;
++
++/*
++ gcdREPO_LIST_COUNT defines the maximum number of event queues with different
++ hardware module sources that may coexist at the same time. Only two sources
++ are supported - gcvKERNEL_COMMAND and gcvKERNEL_PIXEL. gcvKERNEL_COMMAND
++ source is used only for managing the kernel command queue and is only issued
++ when the current command queue gets full. Since we commit event queues every
++ time we commit command buffers, in the worst case we can have up to three
++ pending event queues:
++ - gcvKERNEL_PIXEL
++ - gcvKERNEL_COMMAND (queue overflow)
++ - gcvKERNEL_PIXEL
++*/
++#define gcdREPO_LIST_COUNT 3
++
++/* gckEVENT object. */
++struct _gckEVENT
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to required objects. */
++ gckOS os;
++ gckKERNEL kernel;
++
++ /* Time stamp. */
++ gctUINT64 stamp;
++ gctUINT32 lastCommitStamp;
++
++ /* Queue mutex. */
++ gctPOINTER eventQueueMutex;
++
++ /* Array of event queues. */
++ gcsEVENT_QUEUE queues[29];
++ gctUINT8 lastID;
++ gctPOINTER freeAtom;
++
++ /* Pending events. */
++#if gcdSMP
++#if gcdMULTI_GPU
++ gctPOINTER pending3D[gcdMULTI_GPU];
++ gctPOINTER pending3DMask[gcdMULTI_GPU];
++ gctPOINTER pendingMask;
++#endif
++ gctPOINTER pending;
++#else
++#if gcdMULTI_GPU
++ volatile gctUINT pending3D[gcdMULTI_GPU];
++ volatile gctUINT pending3DMask[gcdMULTI_GPU];
++ volatile gctUINT pendingMask;
++#endif
++ volatile gctUINT pending;
++#endif
++#if gcdMULTI_GPU
++ gctUINT32 busy;
++#endif
++
++ /* List of free event structures and its mutex. */
++ gcsEVENT_PTR freeEventList;
++ gctSIZE_T freeEventCount;
++ gctPOINTER freeEventMutex;
++
++ /* Event queues. */
++ gcsEVENT_QUEUE_PTR queueHead;
++ gcsEVENT_QUEUE_PTR queueTail;
++ gcsEVENT_QUEUE_PTR freeList;
++ gcsEVENT_QUEUE repoList[gcdREPO_LIST_COUNT];
++ gctPOINTER eventListMutex;
++
++ gctPOINTER submitTimer;
++
++#if gcdINTERRUPT_STATISTIC
++ gctPOINTER interruptCount;
++#endif
++
++#if gcdRECORD_COMMAND
++ gckRECORDER recorder;
++#endif
++};
++
++/* Free all events belonging to a process. */
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctUINT32 * waitSize
++ );
++
++typedef struct _gcsLOCK_INFO * gcsLOCK_INFO_PTR;
++typedef struct _gcsLOCK_INFO
++{
++ gctUINT32 GPUAddresses[gcdMAX_GPU_COUNT];
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ gctUINT32 lockeds[gcdMAX_GPU_COUNT];
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ gckMMU lockMmus[gcdMAX_GPU_COUNT];
++}
++gcsLOCK_INFO;
++
++typedef struct _gcsGPU_MAP * gcsGPU_MAP_PTR;
++typedef struct _gcsGPU_MAP
++{
++ gctINT pid;
++ gcsLOCK_INFO lockInfo;
++ gcsGPU_MAP_PTR prev;
++ gcsGPU_MAP_PTR next;
++}
++gcsGPU_MAP;
++
++/* gcuVIDMEM_NODE structure. */
++typedef union _gcuVIDMEM_NODE
++{
++ /* Allocated from gckVIDMEM. */
++ struct _gcsVIDMEM_NODE_VIDMEM
++ {
++ /* Owner of this node. */
++ gckVIDMEM memory;
++
++ /* Dual-linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++ gcuVIDMEM_NODE_PTR prev;
++
++ /* Dual linked list of free nodes. */
++ gcuVIDMEM_NODE_PTR nextFree;
++ gcuVIDMEM_NODE_PTR prevFree;
++
++ /* Information for this node. */
++ gctSIZE_T offset;
++ gctSIZE_T bytes;
++ gctUINT32 alignment;
++
++#ifdef __QNXNTO__
++ /* Client virtual address. */
++ gctPOINTER logical;
++#endif
++
++ /* Locked counter. */
++ gctINT32 locked;
++
++ /* Memory pool. */
++ gcePOOL pool;
++ gctUINT32 physical;
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++#if gcdENABLE_VG
++ gctPOINTER kernelVirtual;
++#endif
++ }
++ VidMem;
++
++ /* Allocated from gckOS. */
++ struct _gcsVIDMEM_NODE_VIRTUAL
++ {
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Information for this node. */
++ /* Contiguously allocated? */
++ gctBOOL contiguous;
++ /* mdl record pointer... a kmalloc address. Process agnostic. */
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++ /* do_mmap_pgoff address... mapped per-process. */
++ gctPOINTER logical;
++
++#if gcdENABLE_VG
++ /* Physical address of this node, only meaningful when it is contiguous. */
++ gctUINT32 physicalAddress;
++
++ /* Kernel logical of this node. */
++ gctPOINTER kernelVirtual;
++#endif
++
++ /* Customer private handle */
++ gctUINT32 gid;
++
++ /* Page table information. */
++ /* Used only when node is not contiguous */
++ gctSIZE_T pageCount;
++
++ /* Used only when node is not contiguous */
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ /* Pointer to gckKERNEL object who lock this. */
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ /* Actual physical address */
++ gctUINT32 addresses[gcdMAX_GPU_COUNT];
++
++ /* Locked counter. */
++ gctINT32 lockeds[gcdMAX_GPU_COUNT];
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ Virtual;
++}
++gcuVIDMEM_NODE;
++
++/* gckVIDMEM object. */
++struct _gckVIDMEM
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Information for this video memory heap. */
++ gctUINT32 baseAddress;
++ gctSIZE_T bytes;
++ gctSIZE_T freeBytes;
++
++ /* Mapping for each type of surface. */
++ gctINT mapping[gcvSURF_NUM_TYPES];
++
++ /* Sentinel nodes for up to 8 banks. */
++ gcuVIDMEM_NODE sentinel[8];
++
++ /* Allocation threshold. */
++ gctSIZE_T threshold;
++
++ /* The heap mutex. */
++ gctPOINTER mutex;
++};
++
++typedef struct _gcsVIDMEM_NODE
++{
++ /* Pointer to gcuVIDMEM_NODE. */
++ gcuVIDMEM_NODE_PTR node;
++
++ /* Mutex to protect node. */
++ gctPOINTER mutex;
++
++ /* Reference count. */
++ gctPOINTER reference;
++
++ /* Name for client to import. */
++ gctUINT32 name;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Head of mapping list. */
++ gcsGPU_MAP_PTR mapHead;
++
++ /* Tail of mapping list. */
++ gcsGPU_MAP_PTR mapTail;
++
++ gctPOINTER mapMutex;
++#endif
++
++ /* Surface Type. */
++ gceSURF_TYPE type;
++
++ /* Pool from which node is allocated. */
++ gcePOOL pool;
++}
++gcsVIDMEM_NODE;
++
++typedef struct _gcsVIDMEM_HANDLE * gckVIDMEM_HANDLE;
++typedef struct _gcsVIDMEM_HANDLE
++{
++ /* Pointer to gckVIDMEM_NODE. */
++ gckVIDMEM_NODE node;
++
++ /* Handle for current process. */
++ gctUINT32 handle;
++
++ /* Reference count for this handle. */
++ gctPOINTER reference;
++}
++gcsVIDMEM_HANDLE;
++
++typedef struct _gcsSHBUF * gcsSHBUF_PTR;
++typedef struct _gcsSHBUF
++{
++ /* ID. */
++ gctUINT32 id;
++
++ /* Reference count. */
++ gctPOINTER reference;
++
++ /* Data size. */
++ gctUINT32 size;
++
++ /* Data. */
++ gctPOINTER data;
++}
++gcsSHBUF;
++
++gceSTATUS
++gckVIDMEM_HANDLE_Reference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ );
++
++gceSTATUS
++gckVIDMEM_HANDLE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Allocate(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR VideoNode,
++ IN gceSURF_TYPE Type,
++ IN gcePOOL Pool,
++ IN gctUINT32 * Handle
++ );
++
++gceSTATUS
++gckVIDMEM_Node_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ OUT gctUINT32 *Address
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Name(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ IN gctUINT32 * Name
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_Import(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name,
++ IN gctUINT32 * Handle
++ );
++
++gceSTATUS
++gckVIDMEM_HANDLE_LookupAndReference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ );
++
++gceSTATUS
++gckVIDMEM_HANDLE_Lookup(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ );
++
++gceSTATUS
++gckVIDMEM_NODE_GetFd(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gctINT * Fd
++ );
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckEVENT_DestroyMmu(
++ IN gckEVENT Event,
++ IN gckMMU Mmu,
++ IN gceKERNEL_WHERE FromWhere
++ );
++#endif
++
++/* gckMMU object. */
++struct _gckMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER pageTableMutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctUINT32_PTR pageTableLogical;
++ gctUINT32 pageTableEntries;
++
++ /* Master TLB information. */
++ gctSIZE_T mtlbSize;
++ gctPHYS_ADDR mtlbPhysical;
++ gctUINT32_PTR mtlbLogical;
++ gctUINT32 mtlbEntries;
++
++ /* Free entries. */
++ gctUINT32 heapList;
++ gctBOOL freeNodes;
++
++ gctPOINTER staticSTLB;
++ gctBOOL enabled;
++
++ gctUINT32 dynamicMappingStart;
++
++ gctUINT32_PTR mapLogical;
++#if gcdPROCESS_ADDRESS_SPACE
++ gctPOINTER pageTableDirty[gcdMAX_GPU_COUNT];
++ gctPOINTER stlbs;
++#endif
++};
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckOS_CreateUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++gceSTATUS
++gckOS_DestroyUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckOS_GetFd(
++ IN gctSTRING Name,
++ IN gcsFDPRIVATE_PTR Private,
++ OUT gctINT *Fd
++ );
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ );
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ );
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++
++#if gcdSECURITY
++gceSTATUS
++gckKERNEL_SecurityOpen(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPU,
++ OUT gctUINT32 *Channel
++ );
++
++/*
++** Close a security service channel
++*/
++gceSTATUS
++gckKERNEL_SecurityClose(
++ IN gctUINT32 Channel
++ );
++
++/*
++** Security service interface.
++*/
++gceSTATUS
++gckKERNEL_SecurityCallService(
++ IN gctUINT32 Channel,
++ IN OUT gcsTA_INTERFACE * Interface
++ );
++
++gceSTATUS
++gckKERNEL_SecurityStartCommand(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckKERNEL_SecurityAllocateSecurityMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Bytes,
++ OUT gctUINT32 * Handle
++ );
++
++gceSTATUS
++gckKERNEL_SecurityExecute(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Buffer,
++ IN gctUINT32 Bytes
++ );
++
++gceSTATUS
++gckKERNEL_SecurityMapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 *PhysicalArray,
++ IN gctUINT32 PageCount,
++ OUT gctUINT32 * GPUAddress
++ );
++
++gceSTATUS
++gckKERNEL_SecurityUnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPUAddress,
++ IN gctUINT32 PageCount
++ );
++
++#endif
++
++gceSTATUS
++gckKERNEL_CreateShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Size,
++ OUT gctSHBUF * ShBuf
++ );
++
++gceSTATUS
++gckKERNEL_DestroyShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ );
++
++gceSTATUS
++gckKERNEL_MapShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf
++ );
++
++gceSTATUS
++gckKERNEL_WriteShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount
++ );
++
++gceSTATUS
++gckKERNEL_ReadShBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER UserData,
++ IN gctUINT32 ByteCount,
++ OUT gctUINT32 * BytesRead
++ );
++
++
++/******************************************************************************\
++******************************* gckCONTEXT Object *******************************
++\******************************************************************************/
++
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ );
++
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ );
++
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ );
++
++gceSTATUS
++gckCONTEXT_MapBuffer(
++ IN gckCONTEXT Context,
++ OUT gctUINT32 *Physicals,
++ OUT gctUINT64 *Logicals,
++ OUT gctUINT32 *Bytes
++ );
++
++#if gcdLINK_QUEUE_SIZE
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ );
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ );
++#endif
++
++gceSTATUS
++gckENTRYQUEUE_Enqueue(
++ IN gckKERNEL Kernel,
++ IN gckENTRYQUEUE Queue,
++ IN gctUINT32 physical,
++ IN gctUINT32 bytes
++ );
++
++gceSTATUS
++gckENTRYQUEUE_Dequeue(
++ IN gckENTRYQUEUE Queue,
++ OUT gckENTRYDATA * Data
++ );
++
++/******************************************************************************\
++****************************** gckRECORDER Object ******************************
++\******************************************************************************/
++gceSTATUS
++gckRECORDER_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ OUT gckRECORDER * Recorder
++ );
++
++gceSTATUS
++gckRECORDER_Destory(
++ IN gckOS Os,
++ IN gckRECORDER Recorder
++ );
++
++void
++gckRECORDER_AdvanceIndex(
++ gckRECORDER Recorder,
++ gctUINT64 CommitStamp
++ );
++
++void
++gckRECORDER_Record(
++ gckRECORDER Recorder,
++ gctUINT8_PTR CommandBuffer,
++ gctUINT32 CommandBytes,
++ gctUINT8_PTR ContextBuffer,
++ gctUINT32 ContextBytes
++ );
++
++void
++gckRECORDER_Dump(
++ gckRECORDER Recorder
++ );
++
++gceSTATUS
++gckRECORDER_UpdateMirror(
++ gckRECORDER Recorder,
++ gctUINT32 State,
++ gctUINT32 Data
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_heap.c 2015-05-01 14:57:59.575427001 -0500
+@@ -0,0 +1,858 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/**
++** @file
++** gckHEAP object for the kernel HAL layer. The heap implemented here is an
++** arena-based memory allocator. An arena-based memory heap allocates data
++** quickly from specified arenas and reduces memory fragmentation.
++**
++*/
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_HEAP
++
++/*******************************************************************************
++***** Structures ***************************************************************
++*******************************************************************************/
++#define gcdIN_USE ((gcskNODE_PTR)gcvMAXUINTPTR_T)
++
++typedef struct _gcskNODE * gcskNODE_PTR;
++typedef struct _gcskNODE
++{
++ /* Number of bytes in node. */
++ gctSIZE_T bytes;
++
++ /* Pointer to next free node, or gcvNULL to mark the node as freed, or
++ ** gcdIN_USE to mark the node as used. */
++ gcskNODE_PTR next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Time stamp of allocation. */
++ gctUINT64 timeStamp;
++#endif
++}
++gcskNODE;
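++
++/*
++** Each allocation carries a gcskNODE header immediately before the returned
++** pointer: gckHEAP_Allocate hands back (node + 1) and gckHEAP_Free recovers
++** the header with ((gcskNODE_PTR) Memory - 1).
++*/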
++
++typedef struct _gcskHEAP * gcskHEAP_PTR;
++typedef struct _gcskHEAP
++{
++ /* Linked list. */
++ gcskHEAP_PTR next;
++ gcskHEAP_PTR prev;
++
++ /* Heap size. */
++ gctSIZE_T size;
++
++ /* Free list. */
++ gcskNODE_PTR freeList;
++}
++gcskHEAP;
++
++struct _gckHEAP
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to a gckOS object. */
++ gckOS os;
++
++ /* Locking mutex. */
++ gctPOINTER mutex;
++
++ /* Allocation parameters. */
++ gctSIZE_T allocationSize;
++
++ /* Heap list. */
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT64 timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Profile information. */
++ gctUINT32 allocCount;
++ gctUINT64 allocBytes;
++ gctUINT64 allocBytesMax;
++ gctUINT64 allocBytesTotal;
++ gctUINT32 heapCount;
++ gctUINT32 heapCountMax;
++ gctUINT64 heapMemory;
++ gctUINT64 heapMemoryMax;
++#endif
++};
++
++/*******************************************************************************
++***** Static Support Functions *************************************************
++*******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++static gctSIZE_T
++_DumpHeap(
++ IN gcskHEAP_PTR Heap
++ )
++{
++ gctPOINTER p;
++ gctSIZE_T leaked = 0;
++
++ /* Start at first node. */
++ for (p = Heap + 1;;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ /* Check if this is a used node. */
++ if (node->next == gcdIN_USE)
++ {
++ /* Print the leaking node. */
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP,
++ "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu "
++ "(%08X %c%c%c%c)",
++ node, node->bytes, node->timeStamp,
++ ((gctUINT32_PTR) (node + 1))[0],
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3]));
++
++ /* Add leaking byte count. */
++ leaked += node->bytes;
++ }
++
++ /* Test for end of heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ else
++ {
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++ }
++
++ /* Return the number of leaked bytes. */
++ return leaked;
++}
++#endif
++
++static gceSTATUS
++_CompactKernelHeap(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap, next;
++ gctPOINTER p;
++ gcskHEAP_PTR freeList = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = next)
++ {
++ gcskNODE_PTR lastFree = gcvNULL;
++
++ /* Zero out the free list. */
++ heap->freeList = gcvNULL;
++
++ /* Start at the first node. */
++ for (p = (gctUINT8_PTR) (heap + 1);;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));
++
++ /* Test if this node is not used. */
++ if (node->next != gcdIN_USE)
++ {
++ /* Test if this is the end of the heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ /* Test if this is the first free node. */
++ else if (lastFree == gcvNULL)
++ {
++ /* Initialize the free list. */
++ heap->freeList = node;
++ lastFree = node;
++ }
++
++ else
++ {
++ /* Test if this free node is contiguous with the previous
++ ** free node. */
++ if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
++ {
++ /* Just increase the size of the previous free node. */
++ lastFree->bytes += node->bytes;
++ }
++ else
++ {
++ /* Add to linked list. */
++ lastFree->next = node;
++ lastFree = node;
++ }
++ }
++ }
++
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++
++ /* Mark the end of the chain. */
++ if (lastFree != gcvNULL)
++ {
++ lastFree->next = gcvNULL;
++ }
++
++ /* Get next heap. */
++ next = heap->next;
++
++ /* Check if the entire heap is free. */
++ if ((heap->freeList != gcvNULL)
++ && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
++ )
++ {
++ /* Remove the heap from the linked list. */
++ if (heap->prev == gcvNULL)
++ {
++ Heap->heap = next;
++ }
++ else
++ {
++ heap->prev->next = next;
++ }
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount -= 1;
++ Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
++#endif
++
++ /* Add this heap to the list of heaps that need to be freed. */
++ heap->next = freeList;
++ freeList = heap;
++ }
++ }
++
++ if (freeList != gcvNULL)
++ {
++ /* Release the mutex to remove any chance of a deadlock. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Free all heaps in the free list. */
++ for (heap = freeList; heap != gcvNULL; heap = next)
++ {
++ /* Get pointer to the next heap. */
++ next = heap->next;
++
++ /* Free the heap. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Freeing heap 0x%x (%lu bytes)",
++ heap, heap->size + gcmSIZEOF(gcskHEAP));
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Acquire the mutex again. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++***** gckHEAP API Code *********************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHEAP_Construct
++**
++** Construct a new gckHEAP object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIZE_T AllocationSize
++** Minimum size per arena.
++**
++** OUTPUT:
++**
++** gckHEAP * Heap
++** Pointer to a variable that will hold the pointer to the gckHEAP
++** object.
++*/
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ )
++{
++ gceSTATUS status;
++ gckHEAP heap = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Heap != gcvNULL);
++
++ /* Allocate the gckHEAP object. */
++ gcmkONERROR(gckOS_AllocateMemory(Os,
++ gcmSIZEOF(struct _gckHEAP),
++ &pointer));
++
++ heap = pointer;
++
++ /* Initialize the gckHEAP object. */
++ heap->object.type = gcvOBJ_HEAP;
++ heap->os = Os;
++ heap->allocationSize = AllocationSize;
++ heap->heap = gcvNULL;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ heap->timeStamp = 0;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Zero the counters. */
++ heap->allocCount = 0;
++ heap->allocBytes = 0;
++ heap->allocBytesMax = 0;
++ heap->allocBytesTotal = 0;
++ heap->heapCount = 0;
++ heap->heapCountMax = 0;
++ heap->heapMemory = 0;
++ heap->heapMemoryMax = 0;
++#endif
++
++ /* Create the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex));
++
++ /* Return the pointer to the gckHEAP object. */
++ *Heap = heap;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Heap=0x%x", *Heap);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (heap != gcvNULL)
++ {
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Destroy
++**
++** Destroy a gckHEAP object.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctSIZE_T leaked = 0;
++#endif
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap)
++ {
++ /* Unlink heap from linked list. */
++ Heap->heap = heap->next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Check for leaked memory. */
++ leaked += _DumpHeap(heap);
++#endif
++
++ /* Free the heap. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex));
++
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap));
++
++ /* Success. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkFOOTER_ARG("leaked=%lu", leaked);
++#else
++ gcmkFOOTER_NO();
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Allocate
++**
++** Allocate data from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the address of the allocated
++** memory.
++*/
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctBOOL acquired = gcvFALSE;
++ gcskHEAP_PTR heap;
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gcskNODE_PTR node, used, prevFree = gcvNULL;
++ gctPOINTER memory = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Determine number of bytes required for a node. */
++ bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Check if this allocation is bigger than the default allocation size. */
++ if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
++ {
++ /* Adjust allocation size. */
++ Heap->allocationSize = bytes * 2;
++ }
++
++ else if (Heap->heap != gcvNULL)
++ {
++ gctINT i;
++
++ /* 2 retries, since we might need to compact. */
++ for (i = 0; i < 2; ++i)
++ {
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
++ {
++ /* Check if this heap has enough bytes to hold the request. */
++ if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
++ {
++ prevFree = gcvNULL;
++
++ /* Walk the chain of free nodes. */
++ for (node = heap->freeList;
++ node != gcvNULL;
++ node = node->next
++ )
++ {
++ gcmkASSERT(node->next != gcdIN_USE);
++
++ /* Check if this free node has enough bytes. */
++ if (node->bytes >= bytes)
++ {
++ /* Use the node. */
++ goto UseNode;
++ }
++
++ /* Save current free node for linked list management. */
++ prevFree = node;
++ }
++ }
++ }
++
++ if (i == 0)
++ {
++ /* Compact the heap. */
++ gcmkVERIFY_OK(_CompactKernelHeap(Heap));
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "===== KERNEL HEAP =====");
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of allocations : %12u",
++ Heap->allocCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of bytes allocated : %12llu",
++ Heap->allocBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum allocation size : %12llu",
++ Heap->allocBytesMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Total number of bytes allocated : %12llu",
++ Heap->allocBytesTotal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of heaps : %12u",
++ Heap->heapCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Heap memory in bytes : %12llu",
++ Heap->heapMemory);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum number of heaps : %12u",
++ Heap->heapCountMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum heap memory in bytes : %12llu",
++ Heap->heapMemoryMax);
++#endif
++ }
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ acquired = gcvFALSE;
++
++ /* Allocate a new heap. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Heap->os,
++ Heap->allocationSize,
++ &memory));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Allocated heap 0x%x (%lu bytes)",
++ memory, Heap->allocationSize);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Use the allocated memory as the heap. */
++ heap = (gcskHEAP_PTR) memory;
++
++ /* Insert this heap to the head of the chain. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap;
++ }
++ Heap->heap = heap;
++
++ /* Mark the end of the heap. */
++ node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
++ + Heap->allocationSize
++ - gcmSIZEOF(gcskNODE)
++ );
++ node->bytes = 0;
++ node->next = gcvNULL;
++
++ /* Create a free list. */
++ node = (gcskNODE_PTR) (heap + 1);
++ heap->freeList = node;
++
++ /* Initialize the free list. */
++ node->bytes = heap->size - gcmSIZEOF(gcskNODE);
++ node->next = gcvNULL;
++
++ /* No previous free. */
++ prevFree = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount += 1;
++ Heap->heapMemory += Heap->allocationSize;
++
++ if (Heap->heapCount > Heap->heapCountMax)
++ {
++ Heap->heapCountMax = Heap->heapCount;
++ }
++ if (Heap->heapMemory > Heap->heapMemoryMax)
++ {
++ Heap->heapMemoryMax = Heap->heapMemory;
++ }
++#endif
++
++UseNode:
++ /* Verify some stuff. */
++ gcmkASSERT(heap != gcvNULL);
++ gcmkASSERT(node != gcvNULL);
++ gcmkASSERT(node->bytes >= bytes);
++
++ if (heap->prev != gcvNULL)
++ {
++ /* Unlink the heap from the linked list. */
++ heap->prev->next = heap->next;
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++ /* Move the heap to the front of the list. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ Heap->heap = heap;
++ heap->next->prev = heap;
++ }
++
++ /* Check if there is enough free space left after usage for another free
++ ** node. */
++ if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
++ {
++ /* Allocate the used space from the back of the free node. */
++ used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);
++
++ /* Adjust the number of free bytes. */
++ node->bytes -= bytes;
++ gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
++ }
++ else
++ {
++ /* Remove this free list from the chain. */
++ if (prevFree == gcvNULL)
++ {
++ heap->freeList = node->next;
++ }
++ else
++ {
++ prevFree->next = node->next;
++ }
++
++ /* Consume the entire free node. */
++ used = (gcskNODE_PTR) node;
++ bytes = node->bytes;
++ }
++
++ /* Mark node as used. */
++ used->bytes = bytes;
++ used->next = gcdIN_USE;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ used->timeStamp = ++Heap->timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocCount += 1;
++ Heap->allocBytes += bytes;
++ Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
++ Heap->allocBytesTotal += bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Return pointer to memory. */
++ *Memory = used + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++ }
++
++ if (memory != gcvNULL)
++ {
++ /* Free the heap memory. */
++ gckOS_FreeMemory(Heap->os, memory);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Free
++**
++** Free allocated memory from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctPOINTER Memory
++** Pointer to memory to free.
++**
++** OUTPUT:
++**
++** NOTHING.
++*/
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Memory
++ )
++{
++ gcskNODE_PTR node;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ /* Pointer to structure. */
++ node = (gcskNODE_PTR) Memory - 1;
++
++ /* Mark the node as freed. */
++ node->next = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocBytes -= node->bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++
++ /* Zero the counters. */
++ Heap->allocCount = 0;
++ Heap->allocBytes = 0;
++ Heap->allocBytesMax = 0;
++ Heap->allocBytesTotal = 0;
++ Heap->heapCount = 0;
++ Heap->heapCountMax = 0;
++ Heap->heapMemory = 0;
++ Heap->heapMemoryMax = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x Title=0x%x", Heap, Title);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Title != gcvNULL);
++
++ gcmkPRINT("");
++ gcmkPRINT("=====[ HEAP - %s ]=====", Title);
++ gcmkPRINT("Number of allocations : %12u", Heap->allocCount);
++ gcmkPRINT("Number of bytes allocated : %12llu", Heap->allocBytes);
++ gcmkPRINT("Maximum allocation size : %12llu", Heap->allocBytesMax);
++ gcmkPRINT("Total number of bytes allocated : %12llu", Heap->allocBytesTotal);
++ gcmkPRINT("Number of heaps : %12u", Heap->heapCount);
++ gcmkPRINT("Heap memory in bytes : %12llu", Heap->heapMemory);
++ gcmkPRINT("Maximum number of heaps : %12u", Heap->heapCountMax);
++ gcmkPRINT("Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax);
++ gcmkPRINT("==============================================");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif /* VIVANTE_PROFILER */
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_interrupt_vg.c 2015-05-01 14:57:59.575427001 -0500
+@@ -0,0 +1,877 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++/* Interrupt statistics will be accumulated if not zero. */
++#define gcmENABLE_INTERRUPT_STATISTICS 0
++
++#define _GC_OBJ_ZONE gcvZONE_INTERRUPT
++
++/* Object structure. */
++struct _gckVGINTERRUPT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* gckVGKERNEL pointer. */
++ gckVGKERNEL kernel;
++
++ /* gckOS pointer. */
++ gckOS os;
++
++ /* Interrupt handlers. */
++ gctINTERRUPT_HANDLER handlers[32];
++
++ /* Main interrupt handler thread. */
++ gctTHREAD handler;
++ gctBOOL terminate;
++
++ /* Interrupt FIFO. */
++ gctSEMAPHORE fifoValid;
++ gctUINT32 fifo[256];
++ gctUINT fifoItems;
++ gctUINT8 head;
++ gctUINT8 tail;
++
++ /* Interrupt statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT maxFifoItems;
++ gctUINT fifoOverflow;
++ gctUINT maxSimultaneous;
++ gctUINT multipleCount;
++#endif
++};
++
++
++/*******************************************************************************
++**
++** _ProcessInterrupt
++**
++** The interrupt processor.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt,
++ gctUINT_PTR TriggeredCount
++ )
++#else
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++ gctUINT i;
++
++ /* Advance to the next entry. */
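++    /* tail is a gctUINT8, so the increment wraps at 256, matching the
++       256-entry interrupt FIFO. */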
++ Interrupt->tail += 1;
++ Interrupt->fifoItems -= 1;
++
++ /* Get the interrupt value. */
++ triggered = Interrupt->fifo[Interrupt->tail];
++ gcmkASSERT(triggered != 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: triggered=0x%08X\n",
++ __FUNCTION__,
++ triggered
++ );
++
++ /* Walk through all possible interrupts. */
++ for (i = 0; i < gcmSIZEOF(Interrupt->handlers); i += 1)
++ {
++ /* Test if interrupt happened. */
++ if ((triggered & 1) == 1)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (TriggeredCount != gcvNULL)
++ {
++ (* TriggeredCount) += 1;
++ }
++#endif
++
++ /* Make sure we have valid handler. */
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Interrupt %d isn't registered.\n",
++ __FUNCTION__, i
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: interrupt=%d\n",
++ __FUNCTION__,
++ i
++ );
++
++ /* Call the handler. */
++ status = Interrupt->handlers[i] (Interrupt->kernel);
++
++ if (gcmkIS_ERROR(status))
++ {
++ /* Failed to signal the semaphore. */
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Error %d incrementing the semaphore #%d.\n",
++ __FUNCTION__, status, i
++ );
++ }
++ }
++ }
++
++ /* Next interrupt. */
++ triggered >>= 1;
++
++ /* No more interrupts to handle? */
++ if (triggered == 0)
++ {
++ break;
++ }
++ }
++}
++
++
++/*******************************************************************************
++**
++** _MainInterruptHandler
++**
++** The main interrupt thread serves the interrupt FIFO and calls registered
++** handlers for the interrupts that occurred. The handlers are called in the
++** sequence the interrupts occurred, except when multiple interrupts
++** occurred at the same time. In that case the handler calls are "sorted" by
++** the interrupt number, giving the interrupts with lower numbers
++** higher priority.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gctTHREADFUNCRESULT gctTHREADFUNCTYPE
++_MainInterruptHandler(
++ gctTHREADFUNCPARAMETER ThreadParameter
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT count;
++#endif
++
++ /* Cast the object. */
++ interrupt = (gckVGINTERRUPT) ThreadParameter;
++
++ /* Enter the loop. */
++ while (gcvTRUE)
++ {
++ /* Wait for an interrupt. */
++ status = gckOS_DecrementSemaphore(interrupt->os, interrupt->fifoValid);
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* System termination request? */
++ if (status == gcvSTATUS_TERMINATE)
++ {
++ break;
++ }
++
++ /* Driver is shutting down? */
++ if (interrupt->terminate)
++ {
++ break;
++ }
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Reset triggered count. */
++ count = 0;
++
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt, &count);
++
++        /* Update counters. */
++ if (count > interrupt->maxSimultaneous)
++ {
++ interrupt->maxSimultaneous = count;
++ }
++
++ if (count > 1)
++ {
++ interrupt->multipleCount += 1;
++ }
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt);
++#endif
++ }
++
++ return 0;
++}
++
++
++/*******************************************************************************
++**
++** _StartInterruptHandler / _StopInterruptHandler
++**
++** Main interrupt handler routine control.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gceSTATUS
++_StartInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status, last;
++
++ do
++ {
++ /* Objects must not be already created. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++ gcmkASSERT(Interrupt->handler == gcvNULL);
++
++ /* Reset the termination request. */
++ Interrupt->terminate = gcvFALSE;
++
++#if !gcdENABLE_INFINITE_SPEED_HW
++ /* Construct the fifo semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ Interrupt->os, &Interrupt->fifoValid
++ ));
++
++ /* Start the interrupt handler thread. */
++ gcmkERR_BREAK(gckOS_StartThread(
++ Interrupt->os,
++ _MainInterruptHandler,
++ Interrupt,
++ &Interrupt->handler
++ ));
++#endif
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (Interrupt->fifoValid != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ Interrupt->fifoValid = gcvNULL;
++ }
++
++ /* Return the status. */
++ return status;
++}
++
++static gceSTATUS
++_StopInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Does the thread exist? */
++ if (Interrupt->handler == gcvNULL)
++ {
++ /* The semaphore must be NULL as well. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* The semaphore must exist as well. */
++ gcmkASSERT(Interrupt->fifoValid != gcvNULL);
++
++ /* Set the termination request. */
++ Interrupt->terminate = gcvTRUE;
++
++ /* Unlock the thread. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Wait until the thread quits. */
++ gcmkERR_BREAK(gckOS_StopThread(
++ Interrupt->os,
++ Interrupt->handler
++ ));
++
++ /* Destroy the semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Reset handles. */
++ Interrupt->handler = gcvNULL;
++ Interrupt->fifoValid = gcvNULL;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++
++/******************************************************************************\
++***************************** Interrupt Object API *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Construct
++**
++** Construct an interrupt object.
++**
++** INPUT:
++**
++** Kernel
++** Pointer to the gckVGKERNEL object.
++**
++** OUTPUT:
++**
++** Interrupt
++** Pointer to the new gckVGINTERRUPT object.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interrupt=0x%x", Kernel, Interrupt);
++
++    /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interrupt != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGINTERRUPT),
++ (gctPOINTER *) &interrupt
++ ));
++
++ /* Reset the object data. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ interrupt, gcmSIZEOF(struct _gckVGINTERRUPT)
++ ));
++
++ /* Initialize the object. */
++ interrupt->object.type = gcvOBJ_INTERRUPT;
++
++ /* Initialize the object pointers. */
++ interrupt->kernel = Kernel;
++ interrupt->os = Kernel->os;
++
++ /* Initialize the current FIFO position. */
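++        /* Both indices start at 255 so that the first gctUINT8 increment
++           wraps around to slot 0. */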
++ interrupt->head = (gctUINT8)~0;
++ interrupt->tail = (gctUINT8)~0;
++
++ /* Start the thread. */
++ gcmkERR_BREAK(_StartInterruptHandler(interrupt));
++
++ /* Return interrupt object. */
++ *Interrupt = interrupt;
++
++        gcmkFOOTER_ARG("*Interrupt=0x%x", *Interrupt);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (interrupt != gcvNULL)
++ {
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkVERIFY_OK(gckOS_Free(interrupt->os, interrupt));
++ }
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Destroy
++**
++** Destroy an interrupt object.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to the gckVGINTERRUPT object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ do
++ {
++ /* Stop the interrupt thread. */
++ gcmkERR_BREAK(_StopInterruptHandler(Interrupt));
++
++ /* Mark the object as unknown. */
++ Interrupt->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Free(Interrupt->os, Interrupt));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_DumpState
++**
++** Print the current state of the interrupt manager.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcvDEBUG
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ /* Print the header. */
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: INTERRUPT OBJECT STATUS\n",
++ __FUNCTION__
++ );
++
++ /* Print statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of FIFO items accumulated at a single time: %d\n",
++ Interrupt->maxFifoItems
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++        " Number of interrupt FIFO overflows: %d\n",
++ Interrupt->fifoOverflow
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of interrupts simultaneously generated: %d\n",
++ Interrupt->maxSimultaneous
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Number of times when there were multiple interrupts generated: %d\n",
++ Interrupt->multipleCount
++ );
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " The current number of entries in the FIFO: %d\n",
++ Interrupt->fifoItems
++ );
++
++ /* Print the FIFO contents. */
++ if (Interrupt->fifoItems != 0)
++ {
++ gctUINT8 index;
++ gctUINT8 last;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " FIFO current contents:\n"
++ );
++
++ /* Get the current pointers. */
++ index = Interrupt->tail;
++ last = Interrupt->head;
++
++ while (index != last)
++ {
++ /* Advance to the next entry. */
++ index += 1;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " %d: 0x%08X\n",
++ index, Interrupt->fifo[index]
++ );
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enable
++**
++** Enable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Pointer to the variable that holds the interrupt number to be
++** registered in range 0..31.
++** If the value is less than 0, gckVGINTERRUPT_Enable will attempt
++** to find an unused interrupt. If such an interrupt is found, the number
++** will be assigned to the variable if the function call succeeds.
++**
++** Handler
++** Pointer to the handler to register for the interrupt.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ )
++{
++ gceSTATUS status;
++ gctINT32 i;
++
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x Handler=0x%x", Interrupt, Id, Handler);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Handler != gcvNULL);
++
++ do
++ {
++ /* See if we need to allocate an ID. */
++ if (*Id < 0)
++ {
++ /* Find the first unused interrupt handler. */
++ for (i = 0; i < gcmCOUNTOF(Interrupt->handlers); ++i)
++ {
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ break;
++ }
++ }
++
++            /* No unused interrupts? */
++ if (i == gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++
++ /* Update the interrupt ID. */
++ *Id = i;
++ }
++
++ /* Make sure the ID is in range. */
++ else if (*Id >= gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Set interrupt handler. */
++ Interrupt->handlers[*Id] = Handler;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Disable
++**
++** Disable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Interrupt number to be disabled in range 0..31.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x", Interrupt, Id);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT((Id >= 0) && (Id < gcmCOUNTOF(Interrupt->handlers)));
++
++ /* Reset interrupt handler. */
++ Interrupt->handlers[Id] = gcvNULL;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enque
++**
++** Read the interrupt status register and put the value in the interrupt FIFO.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#ifndef __QNXNTO__
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ )
++#else
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++#ifdef __QNXNTO__
++ *Os = gcvNULL;
++ *Semaphore = gcvNULL;
++#endif
++
++ do
++ {
++ /* Read interrupt status register. */
++ gcmkERR_BREAK(gckVGHARDWARE_ReadInterrupt(
++ Interrupt->kernel->hardware, &triggered
++ ));
++
++ /* Mask out TS overflow interrupt */
++ triggered &= 0xfffffffe;
++
++ /* No interrupts to process? */
++ if (triggered == 0)
++ {
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ break;
++ }
++
++ /* FIFO overflow? */
++ if (Interrupt->fifoItems == gcmCOUNTOF(Interrupt->fifo))
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ Interrupt->fifoOverflow += 1;
++#endif
++
++ /* OR the interrupt with the last value in the FIFO. */
++ Interrupt->fifo[Interrupt->head] |= triggered;
++
++ /* Success (kind of). */
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ /* Advance to the next entry. */
++ Interrupt->head += 1;
++ Interrupt->fifoItems += 1;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (Interrupt->fifoItems > Interrupt->maxFifoItems)
++ {
++ Interrupt->maxFifoItems = Interrupt->fifoItems;
++ }
++#endif
++
++ /* Set the new value. */
++ Interrupt->fifo[Interrupt->head] = triggered;
++
++#ifndef __QNXNTO__
++ /* Increment the FIFO semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++#else
++ *Os = Interrupt->os;
++ *Semaphore = Interrupt->fifoValid;
++#endif
++
++            /* Windows kills our threads prematurely when the application
++               exits. Verify here that the thread is still alive. */
++ status = gckOS_VerifyThread(Interrupt->os, Interrupt->handler);
++
++ /* Has the thread been prematurely terminated? */
++ if (status != gcvSTATUS_OK)
++ {
++ /* Process all accumulated interrupts. */
++ while (Interrupt->head != Interrupt->tail)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt, gcvNULL);
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt);
++#endif
++ }
++
++ /* Set success. */
++ status = gcvSTATUS_OK;
++ }
++ }
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu.c 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,2260 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++typedef enum _gceMMU_TYPE
++{
++ gcvMMU_USED = (0 << 4),
++ gcvMMU_SINGLE = (1 << 4),
++ gcvMMU_FREE = (2 << 4),
++}
++gceMMU_TYPE;
++
++#define gcmENTRY_TYPE(x) (x & 0xF0)
++
++#define gcdMMU_TABLE_DUMP 0
++
++#define gcdUSE_MMU_EXCEPTION 1
++
++/*
++ gcdMMU_CLEAR_VALUE
++
++ The clear value for the entry of the old MMU.
++*/
++#ifndef gcdMMU_CLEAR_VALUE
++# define gcdMMU_CLEAR_VALUE 0x00000ABC
++#endif
++
++#define gcdVERTEX_START (128 << 10)
++
++typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR;
++
++typedef struct _gcsMMU_STLB
++{
++ gctPHYS_ADDR physical;
++ gctUINT32_PTR logical;
++ gctSIZE_T size;
++ gctUINT32 physBase;
++ gctSIZE_T pageCount;
++ gctUINT32 mtlbIndex;
++ gctUINT32 mtlbEntryNum;
++ gcsMMU_STLB_PTR next;
++} gcsMMU_STLB;
++
++#if gcdSHARED_PAGETABLE
++typedef struct _gcsSharedPageTable * gcsSharedPageTable_PTR;
++typedef struct _gcsSharedPageTable
++{
++ /* Shared gckMMU object. */
++ gckMMU mmu;
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsSharedPageTable;
++
++static gcsSharedPageTable_PTR sharedPageTable = gcvNULL;
++#endif
++
++#if gcdMIRROR_PAGETABLE
++typedef struct _gcsMirrorPageTable * gcsMirrorPageTable_PTR;
++typedef struct _gcsMirrorPageTable
++{
++ /* gckMMU objects. */
++ gckMMU mmus[gcdMAX_GPU_COUNT];
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsMirrorPageTable;
++
++static gcsMirrorPageTable_PTR mirrorPageTable = gcvNULL;
++static gctPOINTER mirrorPageTableMutex = gcvNULL;
++#endif
++
++typedef struct _gcsDynamicSpaceNode * gcsDynamicSpaceNode_PTR;
++typedef struct _gcsDynamicSpaceNode
++{
++ gctUINT32 start;
++ gctINT32 entries;
++}
++gcsDynamicSpaceNode;
++
++static void
++_WritePageEntry(
++ IN gctUINT32_PTR PageEntry,
++ IN gctUINT32 EntryValue
++ )
++{
++ static gctUINT16 data = 0xff00;
++
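++    /* Runtime endianness check: on a big-endian CPU the first byte of 0xff00
++       is 0xff, so the entry is byte-swapped before it is written. */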
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ *PageEntry = gcmSWAB32(EntryValue);
++ }
++ else
++ {
++ *PageEntry = EntryValue;
++ }
++}
++
++static gctUINT32
++_ReadPageEntry(
++ IN gctUINT32_PTR PageEntry
++ )
++{
++ static gctUINT16 data = 0xff00;
++ gctUINT32 entryValue;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ entryValue = *PageEntry;
++ return gcmSWAB32(entryValue);
++ }
++ else
++ {
++ return *PageEntry;
++ }
++}
++
++static gceSTATUS
++_FillPageTable(
++ IN gctUINT32_PTR PageTable,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 EntryValue
++)
++{
++ gctUINT i;
++
++ for (i = 0; i < PageCount; i++)
++ {
++ _WritePageEntry(PageTable + i, EntryValue);
++ }
++
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_Link(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Next
++ )
++{
++ if (Index >= Mmu->pageTableEntries)
++ {
++ /* Just move heap pointer. */
++ Mmu->heapList = Next;
++ }
++ else
++ {
++ /* Address page table. */
++ gctUINT32_PTR map = Mmu->mapLogical;
++
++ /* Dispatch on node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[Index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Set single index. */
++ _WritePageEntry(&map[Index], (Next << 8) | gcvMMU_SINGLE);
++ break;
++
++ case gcvMMU_FREE:
++ /* Set index. */
++ _WritePageEntry(&map[Index + 1], Next);
++ break;
++
++ default:
++            gcmkFATAL("MMU table corrupted at index %u!", Index);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_AddFree(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Node,
++ IN gctUINT32 Count
++ )
++{
++ gctUINT32_PTR map = Mmu->mapLogical;
++
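++    /* Map-entry encoding: bits 7:4 hold the node type, bits 31:8 hold the
++       next free index (single node) or the run length (free run); a free run
++       keeps its next free index in the following entry, ~0 marking the end. */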
++ if (Count == 1)
++ {
++ /* Initialize a single page node. */
++ _WritePageEntry(map + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Initialize the node. */
++ _WritePageEntry(map + Node + 0, (Count << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + Node + 1, ~0U);
++ }
++
++ /* Append the node. */
++ return _Link(Mmu, Index, Node);
++}
++
++static gceSTATUS
++_Collect(
++ IN gckMMU Mmu
++ )
++{
++ gctUINT32_PTR map = Mmu->mapLogical;
++ gceSTATUS status;
++ gctUINT32 i, previous, start = 0, count = 0;
++
++ previous = Mmu->heapList = ~0U;
++ Mmu->freeNodes = gcvFALSE;
++
++ /* Walk the entire page table. */
++ for (i = 0; i < Mmu->pageTableEntries; ++i)
++ {
++ /* Dispatch based on type of page. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
++ {
++ case gcvMMU_USED:
++ /* Used page, so close any open node. */
++ if (count > 0)
++ {
++ /* Add the node. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++
++ /* Reset the node. */
++ previous = start;
++ count = 0;
++ }
++ break;
++
++ case gcvMMU_SINGLE:
++ /* Single free node. */
++ if (count++ == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* A free node. */
++ if (count == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++
++ /* Advance the count. */
++ count += _ReadPageEntry(&map[i]) >> 8;
++
++ /* Advance the index into the page table. */
++ i += (_ReadPageEntry(&map[i]) >> 8) - 1;
++ break;
++
++ default:
++            gcmkFATAL("MMU page table corrupted at index %u!", i);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* See if we have an open node left. */
++ if (count > 0)
++ {
++ /* Add the node to the list. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_MMU,
++ "Performed a garbage collection of the MMU heap.");
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++    /* Return the status. */
++ return status;
++}
++
++static gctUINT32
++_SetPage(gctUINT32 PageAddress)
++{
++ return PageAddress
++ /* writable */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gctUINT32
++_AddressToIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32 mtlbOffset = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 stlbOffset = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++
++ return (mtlbOffset - Mmu->dynamicMappingStart) * gcdMMU_STLB_4K_ENTRY_NUM + stlbOffset;
++}
++
++gctUINT32
++_MtlbOffset(
++ gctUINT32 Address
++ )
++{
++ return (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++}
++
++gctUINT32
++_StlbOffset(
++ gctUINT32 Address
++ )
++{
++ return (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++}
++
++static gceSTATUS
++_AllocateStlb(
++ IN gckOS Os,
++ OUT gcsMMU_STLB_PTR *Stlb
++ )
++{
++ gceSTATUS status;
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer;
++
++ /* Allocate slave TLB record. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->size = gcdMMU_STLB_4K_SIZE;
++
++ /* Allocate slave TLB entries. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical
++ ));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Os, stlb->logical, &stlb->physBase));
++
++#if gcdUSE_MMU_EXCEPTION
++ _FillPageTable(stlb->logical, stlb->size / 4, gcdMMU_STLB_EXCEPTION);
++#else
++ gckOS_ZeroMemory(stlb->logical, stlb->size);
++#endif
++
++ *Stlb = stlb;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++gceSTATUS
++_SetupProcessAddressSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gctINT numEntries = 0;
++ gctUINT32_PTR map;
++
++ numEntries = gcdPROCESS_ADDRESS_SPACE_SIZE
++ /* Address space mapped by one MTLB entry. */
++ / (1 << gcdMMU_MTLB_SHIFT);
++
++ Mmu->dynamicMappingStart = 0;
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ Mmu->pageTableSize,
++ (void **)&Mmu->mapLogical));
++
++    /* Initialization. */
++ map = Mmu->mapLogical;
++ _WritePageEntry(map, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++#else
++static gceSTATUS
++_FillFlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBase,
++ OUT gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcsMMU_STLB_PTR head = gcvNULL, pre = gcvNULL;
++ gctUINT32 start = PhysBase & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 end = (PhysBase + Size - 1) & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 mStart = start >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ while (mStart <= mEnd)
++ {
++ gcmkASSERT(mStart < gcdMMU_MTLB_ENTRY_NUM);
++ if (*(Mmu->mtlbLogical + mStart) == 0)
++ {
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 last = (mStart == mEnd) ? sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1);
++ gctUINT32 mtlbEntry;
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os, sizeof(struct _gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->mtlbEntryNum = 0;
++ stlb->next = gcvNULL;
++ stlb->physical = gcvNULL;
++ stlb->logical = gcvNULL;
++ stlb->size = gcdMMU_STLB_64K_SIZE;
++ stlb->pageCount = 0;
++
++ if (pre == gcvNULL)
++ {
++ pre = head = stlb;
++ }
++ else
++ {
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = stlb;
++ pre = stlb;
++ }
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical));
++
++ gcmkONERROR(gckOS_ZeroMemory(stlb->logical, stlb->size));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Mmu->os,
++ stlb->logical,
++ &stlb->physBase));
++
++ if (stlb->physBase & (gcdMMU_STLB_64K_SIZE - 1))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ mtlbEntry = stlb->physBase
++ /* 64KB page size */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++
++ if (ace)
++ {
++ mtlbEntry = mtlbEntry
++ /* Secure */
++ | (1 << 4);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + mStart, mtlbEntry);
++
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ mStart,
++ _ReadPageEntry(Mmu->mtlbLogical + mStart));
++#endif
++
++ stlb->mtlbIndex = mStart;
++ stlb->mtlbEntryNum = 1;
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): STLB: logical:%08x -> physical:%08x\n",
++ __FUNCTION__, __LINE__,
++ stlb->logical,
++ stlb->physBase);
++#endif
++
++ while (sStart <= last)
++ {
++ gcmkASSERT(!(start & gcdMMU_PAGE_64K_MASK));
++ _WritePageEntry(stlb->logical + sStart, _SetPage(start));
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert STLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ sStart,
++ _ReadPageEntry(stlb->logical + sStart));
++#endif
++ /* next page. */
++ start += gcdMMU_PAGE_64K_SIZE;
++ sStart++;
++ stlb->pageCount++;
++ }
++
++ sStart = 0;
++ ++mStart;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++
++ /* Insert the stlb into staticSTLB. */
++ if (Mmu->staticSTLB == gcvNULL)
++ {
++ Mmu->staticSTLB = head;
++ }
++ else
++ {
++        gcmkASSERT(pre != gcvNULL);
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = Mmu->staticSTLB;
++ Mmu->staticSTLB = head;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ while (head != gcvNULL)
++ {
++ pre = head;
++ head = head->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_FindDynamicSpace(
++ IN gckMMU Mmu,
++ OUT gcsDynamicSpaceNode_PTR *Array,
++ OUT gctINT * Size
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer = gcvNULL;
++ gcsDynamicSpaceNode_PTR array = gcvNULL;
++ gctINT size = 0;
++ gctINT i = 0, nodeStart = -1, nodeEntries = 0;
++
++ /* Allocate memory for the array. */
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ gcmSIZEOF(*array) * (gcdMMU_MTLB_ENTRY_NUM / 2),
++ &pointer));
++
++ array = (gcsDynamicSpaceNode_PTR)pointer;
++
++ /* Loop all the entries. */
++ while (i < gcdMMU_MTLB_ENTRY_NUM)
++ {
++ if (!Mmu->mtlbLogical[i])
++ {
++ if (nodeStart < 0)
++ {
++ /* This is the first entry of the dynamic space. */
++ nodeStart = i;
++ nodeEntries = 1;
++ }
++ else
++ {
++ /* Other entries of the dynamic space. */
++ nodeEntries++;
++ }
++ }
++ else if (nodeStart >= 0)
++ {
++ /* Save the previous node. */
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++
++ /* Reset the start. */
++ nodeStart = -1;
++ nodeEntries = 0;
++ }
++
++ i++;
++ }
++
++ /* Save the previous node. */
++ if (nodeStart >= 0)
++ {
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++ }
++
++#if gcdMMU_TABLE_DUMP
++ for (i = 0; i < size; i++)
++ {
++ gckOS_Print("%s(%d): [%d]: start=%d, entries=%d.\n",
++ __FUNCTION__, __LINE__,
++ i,
++ array[i].start,
++ array[i].entries);
++ }
++#endif
++
++ *Array = array;
++ *Size = size;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (pointer != gcvNULL)
++ {
++ gckOS_Free(Mmu->os, pointer);
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_SetupDynamicSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDynamicSpaceNode_PTR nodeArray = gcvNULL;
++ gctINT i, nodeArraySize = 0;
++ gctUINT32 physical;
++ gctINT numEntries = 0;
++ gctUINT32_PTR map;
++ gctBOOL acquired = gcvFALSE;
++ gctUINT32 mtlbEntry;
++ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
++
++ /* Find all the dynamic address space. */
++ gcmkONERROR(_FindDynamicSpace(Mmu, &nodeArray, &nodeArraySize));
++
++ /* TODO: We only use the largest one for now. */
++ for (i = 0; i < nodeArraySize; i++)
++ {
++ if (nodeArray[i].entries > numEntries)
++ {
++ Mmu->dynamicMappingStart = nodeArray[i].start;
++ numEntries = nodeArray[i].entries;
++ }
++ }
++
++ gckOS_Free(Mmu->os, (gctPOINTER)nodeArray);
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ gcmkSAFECASTSIZET(Mmu->pageTableEntries, Mmu->pageTableSize / gcmSIZEOF(gctUINT32));
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ Mmu->pageTableSize,
++ (void **)&Mmu->mapLogical));
++
++ /* Construct Slave TLB. */
++ gcmkONERROR(gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &Mmu->pageTableSize,
++ &Mmu->pageTablePhysical,
++ (gctPOINTER)&Mmu->pageTableLogical));
++
++#if gcdUSE_MMU_EXCEPTION
++ gcmkONERROR(_FillPageTable(Mmu->pageTableLogical,
++ Mmu->pageTableEntries,
++ /* Enable exception */
++ 1 << 1));
++#else
++ /* Invalidate all entries. */
++ gcmkONERROR(gckOS_ZeroMemory(Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++#endif
++
++    /* Initialization. */
++ map = Mmu->mapLogical;
++ _WritePageEntry(map, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Mmu->os,
++ Mmu->pageTableLogical,
++ &physical));
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Map to Master TLB. */
++ for (i = (gctINT)Mmu->dynamicMappingStart;
++ i < (gctINT)Mmu->dynamicMappingStart + numEntries;
++ i++)
++ {
++ mtlbEntry = physical
++ /* 4KB page size */
++ | (0 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++
++ if (ace)
++ {
++ mtlbEntry = mtlbEntry
++ /* Secure */
++ | (1 << 4);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + i, mtlbEntry);
++
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ i,
++ _ReadPageEntry(Mmu->mtlbLogical + i));
++#endif
++ physical += gcdMMU_STLB_4K_SIZE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (Mmu->mapLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_Free(Mmu->os, (gctPOINTER) Mmu->mapLogical));
++
++
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** _Construct
++**
++** Construct a new gckMMU object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckMMU * Mmu
++** Pointer to a variable that receives the gckMMU object pointer.
++*/
++gceSTATUS
++_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gckMMU mmu = gcvNULL;
++ gctUINT32_PTR map;
++ gctPOINTER pointer = gcvNULL;
++#if gcdPROCESS_ADDRESS_SPACE
++ gctUINT32 i;
++ gctUINT32 physical;
++#endif
++ gctUINT32 physBase;
++ gctUINT32 physSize;
++ gctUINT32 gpuAddress;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckMMU object. */
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct _gckMMU), &pointer));
++
++ mmu = pointer;
++
++ /* Initialize the gckMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++ mmu->pageTableMutex = gcvNULL;
++ mmu->pageTableLogical = gcvNULL;
++ mmu->mtlbLogical = gcvNULL;
++ mmu->staticSTLB = gcvNULL;
++ mmu->enabled = gcvFALSE;
++ mmu->mapLogical = gcvNULL;
++
++ /* Create the page table mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex));
++
++ if (hardware->mmuVersion == 0)
++ {
++ mmu->pageTableSize = MmuSize;
++
++ /* Construct address space management table. */
++ gcmkONERROR(gckOS_Allocate(mmu->os,
++ mmu->pageTableSize,
++ &pointer));
++
++ mmu->mapLogical = pointer;
++
++ /* Construct page table read by GPU. */
++ gcmkONERROR(gckOS_AllocateContiguous(mmu->os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ (gctPOINTER)&mmu->pageTableLogical));
++
++
++ /* Compute number of entries in page table. */
++ gcmkSAFECASTSIZET(mmu->pageTableEntries, mmu->pageTableSize / sizeof(gctUINT32));
++
++ /* Mark all pages as free. */
++ map = mmu->mapLogical;
++
++#if gcdMMU_CLEAR_VALUE
++ _FillPageTable(mmu->pageTableLogical, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
++#endif
++
++ _WritePageEntry(map, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(map + 1, ~0U);
++ mmu->heapList = 0;
++ mmu->freeNodes = gcvFALSE;
++ }
++ else
++ {
++ /* Allocate the 4K mode MTLB table. */
++ mmu->mtlbSize = gcdMMU_MTLB_SIZE + 64;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->mtlbSize,
++ &mmu->mtlbPhysical,
++ &pointer));
++
++ mmu->mtlbLogical = pointer;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ _FillPageTable(pointer, mmu->mtlbSize / 4, gcdMMU_MTLB_EXCEPTION);
++
++        /* Allocate an array to store stlbs. */
++ gcmkONERROR(gckOS_Allocate(os, mmu->mtlbSize, &mmu->stlbs));
++
++ gckOS_ZeroMemory(mmu->stlbs, mmu->mtlbSize);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ gcmkONERROR(gckOS_AtomConstruct(os, &mmu->pageTableDirty[i]));
++ }
++
++ _SetupProcessAddressSpace(mmu);
++
++ /* Map kernel command buffer in MMU. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ mmu->os,
++ Kernel->command->queues[i].logical,
++ &physical
++ ));
++
++ gcmkONERROR(gckMMU_FlatMapping(mmu, physical));
++ }
++#else
++        /* Invalidate all the entries. */
++ gcmkONERROR(
++ gckOS_ZeroMemory(pointer, mmu->mtlbSize));
++
++ gcmkONERROR(
++ gckOS_QueryOption(mmu->os, "physBase", &physBase));
++
++ gcmkONERROR(
++ gckOS_QueryOption(mmu->os, "physSize", &physSize));
++
++ gcmkONERROR(
++ gckOS_CPUPhysicalToGPUPhysical(mmu->os, physBase, &gpuAddress));
++
++        /* Set up the [physBase, physBase + physSize) flat mapping. */
++ gcmkONERROR(_FillFlatMapping(
++ mmu,
++ gpuAddress,
++ physSize
++ ));
++
++ gcmkONERROR(_SetupDynamicSpace(mmu));
++#endif
++ }
++
++ /* Return the gckMMU object pointer. */
++ *Mmu = mmu;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Mmu=0x%x", *Mmu);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (mmu != gcvNULL)
++ {
++ if (mmu->mapLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_Free(os, (gctPOINTER) mmu->mapLogical));
++
++
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->pageTablePhysical,
++ (gctPOINTER) mmu->pageTableLogical,
++ mmu->pageTableSize));
++ }
++
++ if (mmu->mtlbLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->mtlbPhysical,
++ (gctPOINTER) mmu->mtlbLogical,
++ mmu->mtlbSize));
++ }
++
++ if (mmu->pageTableMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->pageTableMutex));
++ }
++
++ /* Mark the gckMMU object as unknown. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++
++        /* Free the allocated memory. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, mmu));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Destroy
++**
++** Destroy a gckMMU object.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdPROCESS_ADDRESS_SPACE
++ gctUINT32 i;
++#endif
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ while (Mmu->staticSTLB != gcvNULL)
++ {
++ gcsMMU_STLB_PTR pre = Mmu->staticSTLB;
++ Mmu->staticSTLB = pre->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): clean MTLB[%d]\n",
++ __FUNCTION__, __LINE__,
++ pre->mtlbIndex);
++#endif
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (Mmu->hardware->mmuVersion != 0)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->mtlbPhysical,
++ (gctPOINTER) Mmu->mtlbLogical,
++ Mmu->mtlbSize));
++ }
++
++ /* Free address space management table. */
++ if (Mmu->mapLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_Free(Mmu->os, (gctPOINTER) Mmu->mapLogical));
++ }
++
++ if (Mmu->pageTableLogical != gcvNULL)
++ {
++ /* Free page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ /* Delete the page table mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex));
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < Mmu->mtlbSize / 4; i++)
++ {
++ struct _gcsMMU_STLB *stlb = ((struct _gcsMMU_STLB **)Mmu->stlbs)[i];
++
++ if (stlb)
++ {
++ gcmkVERIFY_OK(gckOS_FreeContiguous(
++ Mmu->os,
++ stlb->physical,
++ stlb->logical,
++ stlb->size));
++
++ gcmkOS_SAFE_FREE(Mmu->os, stlb);
++ }
++ }
++
++ gcmkOS_SAFE_FREE(Mmu->os, Mmu->stlbs);
++#endif
++
++ /* Mark the gckMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckMMU object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, Mmu));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** _AdjustIndex
++**
++** Adjust the index from which we search for a usable node to make sure
++** index allocated is greater than Start.
++*/
++gceSTATUS
++_AdjustIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 Start,
++ OUT gctUINT32 * IndexAdjusted
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = Index;
++ gctUINT32_PTR map = Mmu->mapLogical;
++
++ gcmkHEADER();
++
++ for (; index < Mmu->pageTableEntries;)
++ {
++ gctUINT32 result = 0;
++ gctUINT32 nodeSize = 0;
++
++ if (index >= Start)
++ {
++ break;
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ nodeSize = 1;
++ break;
++
++ case gcvMMU_FREE:
++ nodeSize = map[index] >> 8;
++ break;
++
++ default:
++            gcmkFATAL("MMU table corrupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ if (nodeSize > PageCount)
++ {
++ result = index + (nodeSize - PageCount);
++
++ if (result >= Start)
++ {
++ break;
++ }
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ index = map[index] >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ index = map[index + 1];
++ break;
++
++ default:
++            gcmkFATAL("MMU table corrupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ *IndexAdjusted = index;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (sharedPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsSharedPageTable),
++ &pointer));
++ sharedPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(sharedPageTable,
++ sizeof(struct _gcsSharedPageTable)));
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu));
++ }
++
++ *Mmu = sharedPageTable->mmu;
++
++ sharedPageTable->hardwares[sharedPageTable->reference] = Kernel->hardware;
++
++ sharedPageTable->reference++;
++
++ gcmkFOOTER_ARG("sharedPageTable->reference=%lu", sharedPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (sharedPageTable)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(gckMMU_Destroy(sharedPageTable->mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, sharedPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#elif gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (mirrorPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsMirrorPageTable),
++ &pointer));
++ mirrorPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(mirrorPageTable,
++ sizeof(struct _gcsMirrorPageTable)));
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &mirrorPageTableMutex));
++ }
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, Mmu));
++
++ mirrorPageTable->mmus[mirrorPageTable->reference] = *Mmu;
++
++ mirrorPageTable->hardwares[mirrorPageTable->reference] = Kernel->hardware;
++
++ mirrorPageTable->reference++;
++
++ gcmkFOOTER_ARG("mirrorPageTable->reference=%lu", mirrorPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mirrorPageTable && mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, mirrorPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#else
++ return _Construct(Kernel, MmuSize, Mmu);
++#endif
++}
++
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gckOS os = Mmu->os;
++
++ sharedPageTable->reference--;
++
++ if (sharedPageTable->reference == 0)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(_Destroy(Mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, sharedPageTable));
++ }
++
++ return gcvSTATUS_OK;
++#elif gcdMIRROR_PAGETABLE
++ mirrorPageTable->reference--;
++
++ if (mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTable));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTableMutex));
++ }
++
++ return _Destroy(Mmu);
++#else
++ return _Destroy(Mmu);
++#endif
++}
++
++/*******************************************************************************
++**
++** gckMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS
++_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gctUINT32 index = 0, previous = ~0U, left;
++ gctUINT32_PTR map;
++ gctBOOL gotIt;
++ gctUINT32 address;
++ gctUINT32 pageCount;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ if (PageCount > Mmu->pageTableEntries)
++ {
++        /* Not enough pages available. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ gcmkSAFECASTSIZET(pageCount, PageCount);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Cast pointer to page table. */
++ for (map = Mmu->mapLogical, gotIt = gcvFALSE; !gotIt;)
++ {
++ index = Mmu->heapList;
++
++ if ((Mmu->hardware->mmuVersion == 0) && (Type == gcvSURF_VERTEX))
++ {
++ gcmkONERROR(_AdjustIndex(
++ Mmu,
++ index,
++ pageCount,
++ gcdVERTEX_START / gcmSIZEOF(gctUINT32),
++ &index
++ ));
++ }
++
++ /* Walk the heap list. */
++ for (; !gotIt && (index < Mmu->pageTableEntries);)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
++ {
++ case gcvMMU_SINGLE:
++                /* Single nodes are valid if we only need 1 page. */
++ if (pageCount == 1)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&map[index]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* Test if the node has enough space. */
++ if (pageCount <= (_ReadPageEntry(&map[index]) >> 8))
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&map[index + 1]);
++ }
++ break;
++
++ default:
++                gcmkFATAL("MMU table corrupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ /* Test if we are out of memory. */
++ if (index >= Mmu->pageTableEntries)
++ {
++ if (Mmu->freeNodes)
++ {
++ /* Time to move out the trash! */
++ gcmkONERROR(_Collect(Mmu));
++ }
++ else
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&map[index]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Check how many pages will be left. */
++ left = (_ReadPageEntry(&map[index]) >> 8) - pageCount;
++ switch (left)
++ {
++ case 0:
++ /* The entire node is consumed, just unlink it. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&map[index + 1])));
++ break;
++
++ case 1:
++ /* One page will remain. Convert the node to a single node and
++ ** advance the index. */
++ _WritePageEntry(&map[index], (_ReadPageEntry(&map[index + 1]) << 8) | gcvMMU_SINGLE);
++ index ++;
++ break;
++
++ default:
++ /* Enough pages remain for a new node. However, we will just adjust
++ ** the size of the current node and advance the index. */
++ _WritePageEntry(&map[index], (left << 8) | gcvMMU_FREE);
++ index += left;
++ break;
++ }
++ break;
++ }
++
++ /* Mark node as used. */
++ gcmkONERROR(_FillPageTable(&map[index], pageCount, gcvMMU_USED));
++
++ /* Return pointer to page table. */
++ *PageTable = &Mmu->pageTableLogical[index];
++
++ /* Build virtual address. */
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, &address));
++ }
++ else
++ {
++ gctUINT32 masterOffset = index / gcdMMU_STLB_4K_ENTRY_NUM
++ + Mmu->dynamicMappingStart;
++ gctUINT32 slaveOffset = index % gcdMMU_STLB_4K_ENTRY_NUM;
++
++ address = (masterOffset << gcdMMU_MTLB_SHIFT)
++ | (slaveOffset << gcdMMU_STLB_4K_SHIFT);
++ }
++
++ if (Address != gcvNULL)
++ {
++ *Address = address;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageTable=0x%x *Address=%08x",
++ *PageTable, gcmOPT_VALUE(Address));
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR node;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctUINT32 pageCount;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkSAFECASTSIZET(pageCount, PageCount);
++
++ /* Get the node by index. */
++ node = Mmu->mapLogical + ((gctUINT32_PTR)PageTable - Mmu->pageTableLogical);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdMMU_CLEAR_VALUE
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _FillPageTable(PageTable, pageCount, gcdMMU_CLEAR_VALUE);
++ }
++#endif
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ _WritePageEntry(PageTable, (1 << 1));
++#endif
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(node, (pageCount << 8) | gcvMMU_FREE);
++ _WritePageEntry(node + 1, ~0U);
++
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ gcmkVERIFY_OK(_FillPageTable(PageTable, pageCount, 1 << 1));
++#endif
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckMMU_AllocatePagesEx(
++ Mmu, PageCount, gcvSURF_TYPE_UNKNOWN, PageTable, Address);
++}
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pageTable;
++ gctUINT32 address;
++ gctINT i;
++ gckMMU mmu;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL allocated = gcvFALSE;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++ acquired = gcvTRUE;
++
++ /* Allocate page table for current MMU. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ if (Mmu == mirrorPageTable->mmus[i])
++ {
++ gcmkONERROR(_AllocatePages(Mmu, PageCount, Type, PageTable, Address));
++ allocated = gcvTRUE;
++ }
++ }
++
++ /* Allocate page table for other MMUs. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (Mmu != mmu)
++ {
++ gcmkONERROR(_AllocatePages(mmu, PageCount, Type, &pageTable, &address));
++ gcmkASSERT(address == *Address);
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ acquired = gcvFALSE;
++
++ return gcvSTATUS_OK;
++OnError:
++
++ if (allocated)
++ {
++        /* Page tables for multiple GPUs are always kept identical, so it is impossible
++         * that the first one allocates successfully but the others fail.
++ */
++ gcmkASSERT(0);
++ }
++
++ if (acquired)
++ {
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ }
++
++ return status;
++#else
++ return _AllocatePages(Mmu, PageCount, Type, PageTable, Address);
++#endif
++}
++
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctINT i;
++ gctUINT32 offset;
++ gckMMU mmu;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++
++ gcmkVERIFY_OK(_FreePages(Mmu, PageTable, PageCount));
++
++ offset = (gctUINT32)PageTable - (gctUINT32)Mmu->pageTableLogical;
++
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ gcmkVERIFY_OK(_FreePages(mmu, mmu->pageTableLogical + offset/4, PageCount));
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++
++ return gcvSTATUS_OK;
++#else
++ return _FreePages(Mmu, PageTable, PageCount);
++#endif
++}
++
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctUINT32_PTR pageEntry;
++ gctINT i;
++ gckMMU mmu;
++ gctUINT32 offset = (gctUINT32)PageEntry - (gctUINT32)Mmu->pageTableLogical;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(PageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(PageEntry, _SetPage(PageAddress));
++ }
++
++#if gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ pageEntry = mmu->pageTableLogical + offset / 4;
++
++ if (mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(pageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(pageEntry, _SetPage(PageAddress));
++ }
++ }
++
++ }
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckMMU_GetPageEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctUINT32_PTR *PageTable
++ )
++{
++ gceSTATUS status;
++ struct _gcsMMU_STLB *stlb;
++ struct _gcsMMU_STLB **stlbs = Mmu->stlbs;
++ gctUINT32 offset = _MtlbOffset(Address);
++ gctUINT32 mtlbEntry;
++ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT((Address & 0xFFF) == 0);
++
++ stlb = stlbs[offset];
++
++ if (stlb == gcvNULL)
++ {
++ gcmkONERROR(_AllocateStlb(Mmu->os, &stlb));
++
++ mtlbEntry = stlb->physBase
++ | gcdMMU_MTLB_4K_PAGE
++ | gcdMMU_MTLB_PRESENT
++ ;
++
++ if (ace)
++ {
++ mtlbEntry = mtlbEntry
++ /* Secure */
++ | (1 << 4);
++ }
++
++        /* Insert the Slave TLB address into the Master TLB entry. */
++ _WritePageEntry(Mmu->mtlbLogical + offset, mtlbEntry);
++
++ /* Record stlb. */
++ stlbs[offset] = stlb;
++ }
++
++ *PageTable = &stlb->logical[_StlbOffset(Address)];
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_CheckMap(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR map = Mmu->mapLogical;
++ gctUINT32 index;
++
++ for (index = Mmu->heapList; index < Mmu->pageTableEntries;)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Move to next node. */
++ index = _ReadPageEntry(&map[index]) >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ /* Move to next node. */
++ index = _ReadPageEntry(&map[index + 1]);
++ break;
++
++ default:
++            gcmkFATAL("MMU table corrupted at index [%u] = %x!", index, map[index]);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++gceSTATUS
++gckMMU_FlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 Physical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = _AddressToIndex(Mmu, Physical);
++ gctUINT32 i;
++ gctBOOL gotIt = gcvFALSE;
++ gctUINT32_PTR map = Mmu->mapLogical;
++ gctUINT32 previous = ~0U;
++ gctUINT32_PTR pageTable;
++
++ gckMMU_GetPageEntry(Mmu, Physical, &pageTable);
++
++ _WritePageEntry(pageTable, _SetPage(Physical));
++
++ if (map)
++ {
++ /* Find node which contains index. */
++ for (i = 0; !gotIt && (i < Mmu->pageTableEntries);)
++ {
++ gctUINT32 numPages;
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
++ {
++ case gcvMMU_SINGLE:
++ if (i == index)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ previous = i;
++ i = _ReadPageEntry(&map[i]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ numPages = _ReadPageEntry(&map[i]) >> 8;
++ if (index >= i && index < i + numPages)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ previous = i;
++ i = _ReadPageEntry(&map[i + 1]);
++ }
++ break;
++
++ default:
++                gcmkFATAL("MMU table corrupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&map[i]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Split the node. */
++ {
++ gctUINT32 start;
++ gctUINT32 next = _ReadPageEntry(&map[i+1]);
++ gctUINT32 total = _ReadPageEntry(&map[i]) >> 8;
++ gctUINT32 countLeft = index - i;
++ gctUINT32 countRight = total - countLeft - 1;
++
++ if (countLeft)
++ {
++ start = i;
++ _AddFree(Mmu, previous, start, countLeft);
++ previous = start;
++ }
++
++ if (countRight)
++ {
++ start = index + 1;
++ _AddFree(Mmu, previous, start, countRight);
++ previous = start;
++ }
++
++ _Link(Mmu, previous, next);
++ }
++ break;
++ }
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ return status;
++}
++
++
++
++gceSTATUS
++gckMMU_FreePagesEx(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR node;
++ gceSTATUS status;
++
++#if gcdUSE_MMU_EXCEPTION
++ gctUINT32 i;
++ struct _gcsMMU_STLB *stlb;
++ struct _gcsMMU_STLB **stlbs = Mmu->stlbs;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x Address=0x%x PageCount=%lu",
++ Mmu, Address, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ /* Get the node by index. */
++ node = Mmu->mapLogical + _AddressToIndex(Mmu, Address);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(node, (PageCount << 8) | gcvMMU_FREE);
++ _WritePageEntry(node + 1, ~0U);
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++#if gcdUSE_MMU_EXCEPTION
++ for (i = 0; i < PageCount; i++)
++ {
++        /* Get the STLB that maps this address. */
++ stlb = stlbs[_MtlbOffset(Address)];
++
++ /* Enable exception */
++ stlb->logical[_StlbOffset(Address)] = gcdMMU_STLB_EXCEPTION;
++ }
++#endif
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu,
++ IN gceSURF_TYPE Type
++ )
++{
++ gckHARDWARE hardware;
++ gctUINT32 mask;
++ gctINT i;
++
++ if (Type == gcvSURF_VERTEX || Type == gcvSURF_INDEX)
++ {
++ mask = gcvPAGE_TABLE_DIRTY_BIT_FE;
++ }
++ else
++ {
++ mask = gcvPAGE_TABLE_DIRTY_BIT_OTHER;
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ gcmkVERIFY_OK(
++ gckOS_AtomSetMask(Mmu->pageTableDirty[i], mask));
++ }
++#else
++#if gcdSHARED_PAGETABLE
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware)
++ {
++ gcmkVERIFY_OK(gckOS_AtomSetMask(hardware->pageTableDirty, mask));
++ }
++ }
++#elif gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ hardware = mirrorPageTable->hardwares[i];
++
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSetMask(hardware->pageTableDirty, mask));
++ }
++#else
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSetMask(hardware->pageTableDirty, mask));
++#endif
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++#if gcdPROCESS_ADDRESS_SPACE
++ gcsMMU_STLB_PTR *stlbs = Mmu->stlbs;
++ gcsMMU_STLB_PTR stlbDesc = stlbs[_MtlbOffset(Address)];
++#else
++ gctUINT32_PTR pageTable;
++ gctUINT32 index;
++ gctUINT32 mtlb, stlb;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkASSERT(Mmu->hardware->mmuVersion > 0);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ if (stlbDesc)
++ {
++ gcmkPRINT(" STLB entry = 0x%08X",
++ _ReadPageEntry(&stlbDesc->logical[_StlbOffset(Address)]));
++ }
++ else
++ {
++ gcmkPRINT(" MTLB entry is empty.");
++ }
++#else
++ mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++
++ if (mtlb >= Mmu->dynamicMappingStart)
++ {
++ stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++
++ pageTable = Mmu->pageTableLogical;
++
++ index = (mtlb - Mmu->dynamicMappingStart)
++ * gcdMMU_STLB_4K_ENTRY_NUM
++ + stlb;
++
++ gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index));
++ }
++ else
++ {
++ gcsMMU_STLB_PTR stlbObj = Mmu->staticSTLB;
++ gctUINT32 entry = Mmu->mtlbLogical[mtlb];
++
++ stlb = (Address & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++
++ entry &= 0xFFFFFFF0;
++
++ while (stlbObj)
++ {
++
++ if (entry == stlbObj->physBase)
++ {
++ gcmkPRINT(" Page table entry = 0x%08X", stlbObj->logical[stlb]);
++ break;
++ }
++
++ stlbObj = stlbObj->next;
++ }
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************
++****************************** T E S T C O D E ******************************
++******************************************************************************/
++
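The dynamic page-table code above keeps its free list inside the map itself: the low 8 bits of a map slot tag the node type, while the upper 24 bits hold either the next free index (single-page nodes) or the page count (multi-page nodes, whose next index lives in the following slot). This is the encoding that _FreePages writes and that _CheckMap and gckMMU_FlatMapping walk. A minimal stand-alone sketch of that layout, with assumed type codes (the real gcvMMU_SINGLE/gcvMMU_FREE values are defined in the driver headers), might look like this:

```c
#include <stdint.h>
#include <stdio.h>

#define MMU_SINGLE 0x0u                 /* assumed node-type codes, not the driver's */
#define MMU_FREE   0x1u
#define ENTRY_TYPE(x) ((x) & 0xFFu)     /* low 8 bits = node type */

/* Single-page free node: bits 8..31 carry the next free index. */
static uint32_t encode_single(uint32_t next)
{
    return (next << 8) | MMU_SINGLE;
}

/* Multi-page free node: bits 8..31 carry the page count,
 * the following map slot stores the next free index. */
static void encode_free(uint32_t *map, uint32_t i, uint32_t count, uint32_t next)
{
    map[i]     = (count << 8) | MMU_FREE;
    map[i + 1] = next;
}

int main(void)
{
    uint32_t map[8] = { 0 };

    map[0] = encode_single(4);          /* one free page, next node at index 4 */
    encode_free(map, 4, 3, ~0u);        /* three free pages, end of the list   */

    for (uint32_t i = 0; i < 8; ) {
        if (ENTRY_TYPE(map[i]) == MMU_SINGLE) {
            printf("index %u: 1 free page, next node %u\n", i, map[i] >> 8);
            i = map[i] >> 8;
        } else {
            printf("index %u: %u free pages, next node %u\n",
                   i, map[i] >> 8, map[i + 1]);
            i = map[i + 1];
        }
    }
    return 0;
}
```

Storing the list in the table costs no extra memory, at the price of the linear walks seen in _CheckMap and gckMMU_FlatMapping.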
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_mmu_vg.c 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,522 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++/*******************************************************************************
++**
++** gckVGMMU_Construct
++**
++** Construct a new gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGKERNEL Kernel
++** Pointer to an gckVGKERNEL object.
++**
++**      gctUINT32 MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckVGMMU * Mmu
++** Pointer to a variable that receives the gckVGMMU object pointer.
++*/
++gceSTATUS gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT32 MmuSize,
++ OUT gckVGMMU * Mmu
++ )
++{
++ gckOS os;
++ gckVGHARDWARE hardware;
++ gceSTATUS status;
++ gckVGMMU mmu;
++ gctUINT32 * pageTable;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckVGHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckVGMMU object. */
++ status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);
++
++ if (status < 0)
++ {
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate gckVGMMU object.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Initialize the gckVGMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++
++ /* Create the mutex. */
++ status = gckOS_CreateMutex(os, &mmu->mutex);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Allocate the page table. */
++ mmu->pageTableSize = (gctUINT32)MmuSize;
++ status = gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Compute number of entries in page table. */
++ mmu->entryCount = (gctUINT32)mmu->pageTableSize / sizeof(gctUINT32);
++ mmu->entry = 0;
++
++ /* Mark the entire page table as available. */
++ pageTable = (gctUINT32 *) mmu->pageTableLogical;
++ for (i = 0; i < mmu->entryCount; i++)
++ {
++ pageTable[i] = (gctUINT32)~0;
++ }
++
++ /* Set page table address. */
++ status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
++ mmu->pageTablePhysical,
++ mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not program page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Return the gckVGMMU object pointer. */
++ *Mmu = mmu;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u entries at %p.(0x%08X)\n",
++ __FUNCTION__, __LINE__,
++ mmu->entryCount,
++ mmu->pageTableLogical,
++ mmu->pageTablePhysical
++ );
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_Destroy
++**
++**  Destroy a gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex));
++
++ /* Mark the gckVGMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGMMU object. */
++ gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 tail, index, i;
++ gctUINT32 * table;
++ gctBOOL allocated = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x",
++ Mmu, PageCount, PageTable, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ if (PageCount > Mmu->entryCount)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): page table too small for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ gcmkFOOTER_NO();
++        /* Not enough pages available. */
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Grab the mutex. */
++ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE);
++
++ if (status < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): could not acquire mutex.\n"
++ ,__FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Compute the tail for this allocation. */
++ tail = Mmu->entryCount - (gctUINT32)PageCount;
++
++ /* Walk all entries until we find enough slots. */
++ for (index = Mmu->entry; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++
++ if (!allocated)
++ {
++ if (status >= 0)
++ {
++ /* Walk all entries until we find enough slots. */
++ for (index = 0; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++ }
++ }
++
++ if (!allocated && (status >= 0))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): not enough free pages for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ /* Not enough empty slots available. */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ if (status >= 0)
++ {
++ /* Build virtual address. */
++ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware,
++ index,
++ 0,
++ Address);
++
++ if (status >= 0)
++ {
++ /* Update current entry into page table. */
++ Mmu->entry = index + (gctUINT32)PageCount;
++
++ /* Return pointer to page table. */
++ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ index,
++ *Address,
++ *PageTable
++ );
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex));
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to an gckVGMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32 * table;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=0x%x",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): freeing %u pages at index %u @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ ((gctUINT32 *) PageTable - (gctUINT32 *) Mmu->pageTableLogical),
++ PageTable
++ );
++
++ /* Convert pointer. */
++ table = (gctUINT32 *) PageTable;
++
++ /* Mark the page table entries as available. */
++ while (PageCount-- > 0)
++ {
++ *table++ = (gctUINT32)~0;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ *PageEntry = PageAddress;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ )
++{
++ gckVGHARDWARE hardware;
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
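gckVGMMU_AllocatePages above is a plain first-fit allocator over a flat table in which a free entry holds all ones: it scans forward from the last allocation point (Mmu->entry) and, if that fails, retries once from the start of the table. A self-contained sketch of the scan (an illustration using plain C types, not the driver's) is shown below:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Return the start index of a run of `count` free (all-ones) slots
 * beginning at or after `start`, or -1 if no such run exists. */
static long first_fit(const uint32_t *table, size_t entries,
                      size_t start, size_t count)
{
    if (count == 0 || count > entries)
        return -1;

    for (size_t index = start; index + count <= entries; ) {
        size_t i;

        for (i = 0; i < count; i++) {
            if (table[index + i] != ~0u) {
                index += i + 1;          /* skip just past the used slot */
                break;
            }
        }

        if (i == count)
            return (long)index;          /* found a large enough run */
    }
    return -1;
}

/* Mirrors the two passes in gckVGMMU_AllocatePages: scan from the last
 * allocation point first, then retry from the beginning of the table. */
static long allocate(const uint32_t *table, size_t entries,
                     size_t last, size_t count)
{
    long index = first_fit(table, entries, last, count);
    return (index >= 0) ? index : first_fit(table, entries, 0, count);
}

int main(void)
{
    uint32_t table[8];
    for (size_t i = 0; i < 8; i++)
        table[i] = ~0u;                  /* everything free  */
    table[3] = 0;                        /* one slot in use  */

    printf("run of 4 starts at %ld\n", allocate(table, 8, 2, 4));
    return 0;
}
```

As in the original, a partial match only advances just past the used slot that blocked it rather than past the whole failed run, which keeps the loop simple but rescans some entries.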
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_power.c 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_POWER
++
++/******************************************************************************\
++********************** Dynamic Voltage and Frequency Scaling ********************
++\******************************************************************************/
++#if gcdDVFS
++static gctUINT32
++_GetLoadHistory(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Select,
++ IN gctUINT32 Index
++)
++{
++ return Dvfs->loads[Index];
++}
++
++static void
++_IncreaseScale(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ if (Dvfs->currentScale < 32)
++ {
++ *Scale = Dvfs->currentScale + 8;
++ }
++ else
++ {
++ *Scale = Dvfs->currentScale + 8;
++ *Scale = gcmMIN(64, *Scale);
++ }
++}
++
++static void
++_RecordFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory *history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ if (history->frequency == 0)
++ {
++ history->frequency = Frequency;
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ history->count++;
++ }
++}
++
++static gctUINT32
++_GetFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory * history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ return history->count;
++ }
++
++ return 0;
++}
++
++static void
++_Policy(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ gctUINT8 load[4], nextLoad;
++ gctUINT8 scale;
++
++ /* Last 4 history. */
++ load[0] = (Load & 0xFF);
++ load[1] = (Load & 0xFF00) >> 8;
++ load[2] = (Load & 0xFF0000) >> 16;
++ load[3] = (Load & 0xFF000000) >> 24;
++
++ /* Determine target scale. */
++ if (load[0] > 54)
++ {
++ _IncreaseScale(Dvfs, Load, &scale);
++ }
++ else
++ {
++ nextLoad = (load[0] + load[1] + load[2] + load[3])/4;
++
++ scale = Dvfs->currentScale * (nextLoad) / 54;
++
++ scale = gcmMAX(1, scale);
++ scale = gcmMIN(64, scale);
++ }
++
++ Dvfs->totalConfig++;
++
++ Dvfs->loads[(load[0]-1)/8]++;
++
++ *Scale = scale;
++
++
++ if (Dvfs->totalConfig % 100 == 0)
++ {
++ gcmkPRINT("=======================================================");
++ gcmkPRINT("GPU Load: %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ 8, 16, 24, 32, 40, 48, 56, 64);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ _GetLoadHistory(Dvfs,2, 0),
++ _GetLoadHistory(Dvfs,2, 1),
++ _GetLoadHistory(Dvfs,2, 2),
++ _GetLoadHistory(Dvfs,2, 3),
++ _GetLoadHistory(Dvfs,2, 4),
++ _GetLoadHistory(Dvfs,2, 5),
++ _GetLoadHistory(Dvfs,2, 6),
++ _GetLoadHistory(Dvfs,2, 7)
++ );
++
++ gcmkPRINT("Frequency(MHz) %-8d %-8d %-8d %-8d %-8d",
++ 58, 120, 240, 360, 480);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d",
++ _GetFrequencyHistory(Dvfs, 58),
++ _GetFrequencyHistory(Dvfs,120),
++ _GetFrequencyHistory(Dvfs,240),
++ _GetFrequencyHistory(Dvfs,360),
++ _GetFrequencyHistory(Dvfs,480)
++ );
++ }
++}
++
++static void
++_TimerFunction(
++ gctPOINTER Data
++ )
++{
++ gceSTATUS status;
++ gckDVFS dvfs = (gckDVFS) Data;
++ gckHARDWARE hardware = dvfs->hardware;
++ gctUINT32 value;
++ gctUINT32 frequency;
++ gctUINT8 scale;
++ gctUINT32 t1, t2, consumed;
++
++ gckOS_GetTicks(&t1);
++
++ gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));
++
++    /* Determine target scale. */
++ _Policy(dvfs, value, &scale);
++
++ /* Set frequency and voltage. */
++ gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));
++
++ /* Query real frequency. */
++ gcmkONERROR(
++ gckOS_QueryGPUFrequency(hardware->os,
++ hardware->core,
++ &frequency,
++ &dvfs->currentScale));
++
++ _RecordFrequencyHistory(dvfs, frequency);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
++ "Current frequency = %d",
++ frequency);
++
++ /* Set period. */
++ gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));
++
++OnError:
++ /* Determine next querying time. */
++ gckOS_GetTicks(&t2);
++
++ consumed = gcmMIN(((long)t2 - (long)t1), 5);
++
++ if (dvfs->stop == gcvFALSE)
++ {
++ gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
++ dvfs->timer,
++ dvfs->pollingTime - consumed));
++ }
++
++ return;
++}
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Dvfs
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer;
++ gckDVFS dvfs = gcvNULL;
++ gckOS os = Hardware->os;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ /* Allocate a gckDVFS manager. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS));
++
++ dvfs = pointer;
++
++ /* Initialization. */
++ dvfs->hardware = Hardware;
++ dvfs->pollingTime = gcdDVFS_POLLING_TIME;
++ dvfs->os = Hardware->os;
++ dvfs->currentScale = 64;
++
++ /* Create a polling timer. */
++ gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer));
++
++ /* Initialize frequency and voltage adjustment helper. */
++ gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core));
++
++ /* Return result. */
++ *Dvfs = dvfs;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (dvfs)
++ {
++ if (dvfs->timer)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer));
++ }
++
++ gcmkOS_SAFE_FREE(os, dvfs);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++    /* Deinitialize the helper function. */
++ gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core));
++
++ /* DestroyTimer. */
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer));
++
++ gcmkOS_SAFE_FREE(Dvfs->os, Dvfs);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ gckHARDWARE_InitDVFS(Dvfs->hardware);
++
++ Dvfs->stop = gcvFALSE;
++
++ gckOS_StartTimer(Dvfs->os, Dvfs->timer, Dvfs->pollingTime);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ Dvfs->stop = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
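The _Policy() routine above derives the next clock scale (1..64) from the packed four-sample load history: if the newest sample exceeds 54 (about 84% of full scale) it steps the scale up by 8, otherwise it scales proportionally toward the 54 target and clamps to [1, 64]. A simplified stand-alone sketch of that decision (the driver's _IncreaseScale only clamps in its upper branch; the constants are taken from the code above):

```c
#include <stdint.h>
#include <stdio.h>

#define TARGET_LOAD 54                   /* ~84% of the 0..64 load range */

static unsigned clamp(unsigned v, unsigned lo, unsigned hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* load[0] is the newest sample, load[1..3] are older ones (0..64 each). */
static uint8_t next_scale(uint8_t current, const uint8_t load[4])
{
    if (load[0] > TARGET_LOAD)                        /* busy: step up by 8 */
        return (uint8_t)clamp(current + 8u, 1, 64);

    unsigned avg = (load[0] + load[1] + load[2] + load[3]) / 4u;
    return (uint8_t)clamp(current * avg / TARGET_LOAD, 1, 64);
}

int main(void)
{
    const uint8_t busy[4] = { 60, 58, 55, 50 };
    const uint8_t idle[4] = { 20, 25, 22, 30 };

    printf("busy: 40 -> %u\n", next_scale(40, busy));  /* steps up    */
    printf("idle: 40 -> %u\n", next_scale(40, idle));  /* scales down */
    return 0;
}
```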
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_precomp.h 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,29 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_precomp_h_
++#define __gc_hal_kernel_precomp_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++
++#endif /* __gc_hal_kernel_precomp_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_security.c 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,239 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++
++
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++#if gcdSECURITY
++
++/*
++** Open a security service channel.
++*/
++gceSTATUS
++gckKERNEL_SecurityOpen(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPU,
++ OUT gctUINT32 *Channel
++ )
++{
++ gceSTATUS status;
++
++ gcmkONERROR(gckOS_OpenSecurityChannel(Kernel->os, Kernel->core, Channel));
++ gcmkONERROR(gckOS_InitSecurityChannel(*Channel));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/*
++** Close a security service channel
++*/
++gceSTATUS
++gckKERNEL_SecurityClose(
++ IN gctUINT32 Channel
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*
++** Security service interface.
++*/
++gceSTATUS
++gckKERNEL_SecurityCallService(
++ IN gctUINT32 Channel,
++ IN OUT gcsTA_INTERFACE * Interface
++)
++{
++ gceSTATUS status;
++ gcmkHEADER();
++
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gckOS_CallSecurityService(Channel, Interface);
++
++ status = Interface->result;
++
++ gcmkONERROR(status);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityStartCommand(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_START_COMMAND;
++ iface.u.StartCommand.gpu = Kernel->core;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityAllocateSecurityMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Bytes,
++ OUT gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_ALLOCATE_SECRUE_MEMORY;
++ iface.u.AllocateSecurityMemory.bytes = Bytes;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ *Handle = iface.u.AllocateSecurityMemory.memory_handle;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityExecute(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Buffer,
++ IN gctUINT32 Bytes
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_EXECUTE;
++ iface.u.Execute.command_buffer = (gctUINT32 *)Buffer;
++ iface.u.Execute.gpu = Kernel->core;
++ iface.u.Execute.command_buffer_length = Bytes;
++
++#if defined(LINUX)
++ gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, Buffer,
++ (gctUINT32 *)&iface.u.Execute.command_buffer));
++#endif
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Kernel->hardware, 0, 0
++ ));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityMapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 *PhysicalArray,
++ IN gctUINT32 PageCount,
++ OUT gctUINT32 * GPUAddress
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_MAP_MEMORY;
++
++#if defined(LINUX)
++ gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, PhysicalArray,
++ (gctUINT32 *)&iface.u.MapMemory.physicals));
++#endif
++
++ iface.u.MapMemory.pageCount = PageCount;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ *GPUAddress = iface.u.MapMemory.gpuAddress;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityUnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPUAddress,
++ IN gctUINT32 PageCount
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_UNMAP_MEMORY;
++
++ iface.u.UnmapMemory.gpuAddress = GPUAddress;
++ iface.u.UnmapMemory.pageCount = PageCount;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++#endif
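Each gckKERNEL_Security* helper above follows the same request/response shape: fill a gcsTA_INTERFACE with a command code and the per-command union members, hand it to gckKERNEL_SecurityCallService over the open channel, then read the results back out of the union. A rough, self-contained sketch of that pattern (all types, names, and constants below are hypothetical stand-ins, not the real trusted-OS interface):

```c
#include <stdio.h>

/* Hypothetical stand-in for the request/response record; the real
 * gcsTA_INTERFACE layout is defined by the secure-OS headers. */
typedef struct {
    int command;                                    /* which service to run      */
    int result;                                     /* status set by the service */
    union {
        struct { unsigned pageCount, gpuAddress; } MapMemory;
    } u;
} ta_interface;

/* Stub "secure world" that just echoes a fake GPU address back. */
static int call_service(ta_interface *iface)
{
    iface->u.MapMemory.gpuAddress = 0x80000000u + iface->u.MapMemory.pageCount;
    iface->result = 0;
    return iface->result;
}

int main(void)
{
    ta_interface iface = { .command = 1 };          /* stand-in for a MAP command */

    iface.u.MapMemory.pageCount = 16;               /* fill the request ...       */

    if (call_service(&iface) == 0)                  /* ... call the service ...   */
        printf("mapped at 0x%08x\n",                /* ... and read the reply     */
               iface.u.MapMemory.gpuAddress);

    return 0;
}
```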
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.c 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,833 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_VG
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++gceSTATUS gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ )
++{
++ gceSTATUS status;
++ gckVGKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Os,
++ sizeof(struct _gckVGKERNEL),
++ (gctPOINTER *) &kernel
++ ));
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->context = Context;
++ kernel->hardware = gcvNULL;
++ kernel->interrupt = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->mmu = gcvNULL;
++ kernel->kernel = inKernel;
++
++ /* Construct the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckVGHARDWARE_Construct(
++ Os, &kernel->hardware
++ ));
++
++ /* Set pointer to gckKERNEL object in gckVGHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Construct the gckVGINTERRUPT object. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Construct(
++ kernel, &kernel->interrupt
++ ));
++
++ /* Construct the gckVGCOMMAND object. */
++ gcmkERR_BREAK(gckVGCOMMAND_Construct(
++ kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command
++ ));
++
++ /* Construct the gckVGMMU object. */
++ gcmkERR_BREAK(gckVGMMU_Construct(
++ kernel, gcmKB2BYTES(32), &kernel->mmu
++ ));
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (kernel != gcvNULL)
++ {
++ if (kernel->mmu != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->interrupt != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, kernel));
++ }
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy an gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ /* Destroy the gckVGMMU object. */
++ if (Kernel->mmu != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGMMU_Destroy(Kernel->mmu));
++ Kernel->mmu = gcvNULL;
++ }
++
++ /* Destroy the gckVGCOMMAND object. */
++ if (Kernel->command != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_Destroy(Kernel->command));
++ Kernel->command = gcvNULL;
++ }
++
++ /* Destroy the gckVGINTERRUPT object. */
++ if (Kernel->interrupt != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Destroy(Kernel->interrupt));
++ Kernel->interrupt = gcvNULL;
++ }
++
++ /* Destroy the gckVGHARDWARE object. */
++ if (Kernel->hardware != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_Destroy(Kernel->hardware));
++ Kernel->hardware = gcvNULL;
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Free(Kernel->os, Kernel));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_AllocateLinearMemory
++**
++** Function walks all required memory pools and allocates the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL * Pool
++** Pointer the desired memory pool.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctSIZE_T Alignment
++** Required buffer alignment.
++**
++** gceSURF_TYPE Type
++** Surface type.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to the actual pool where the memory was allocated.
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Allocated node.
++*/
++gceSTATUS
++gckVGKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ break;
++
++ default:
++ break;
++ }
++
++ do
++ {
++ /* Verify the number of bytes to allocate. */
++ if (Bytes == 0)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkERR_BREAK(gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(Kernel,
++ videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ (*Pool == gcvPOOL_SYSTEM),
++ Node);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Memory allocated. */
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++ else if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++ else if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++ else
++ {
++ /* Out of pools. */
++ break;
++ }
++ }
++ /* Loop only for multiple selection pools. */
++ while ((*Pool == gcvPOOL_DEFAULT)
++ || (*Pool == gcvPOOL_LOCAL)
++ || (*Pool == gcvPOOL_UNIFIED)
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Return pool used for allocation. */
++ *Pool = pool;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE * kernelInterface = Interface;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++ gctPOINTER info = gcvNULL;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interface=0x%x ", Kernel, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkERR_BREAK(gckKERNEL_QueryVideoMemory(
++ Kernel, kernelInterface
++ ));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkERR_BREAK(gckVGHARDWARE_QueryChipIdentity(
++ Kernel->vg->hardware,
++ &kernelInterface->u.QueryChipIdentity.chipModel,
++ &kernelInterface->u.QueryChipIdentity.chipRevision,
++ &kernelInterface->u.QueryChipIdentity.chipFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures2
++ ));
++ break;
++
++ case gcvHAL_QUERY_COMMAND_BUFFER:
++ /* Query command buffer information. */
++ gcmkERR_BREAK(gckKERNEL_QueryCommandBuffer(
++ Kernel,
++ &kernelInterface->u.QueryCommandBuffer.information
++ ));
++ break;
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate non-paged memory. */
++ gcmkERR_BREAK(gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free non-paged memory. */
++ gcmkERR_BREAK(gckOS_FreeNonPagedMemory(
++ Kernel->os,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate contiguous memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical),
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++ gcmkERR_BREAK(gcvSTATUS_NOT_SUPPORTED);
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel, processID,
++ &kernelInterface->u.AllocateLinearVideoMemory.pool,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes,
++ kernelInterface->u.AllocateLinearVideoMemory.alignment,
++ kernelInterface->u.AllocateLinearVideoMemory.type,
++ kernelInterface->u.AllocateLinearVideoMemory.flag,
++ &kernelInterface->u.AllocateLinearVideoMemory.node
++ ));
++
++ break;
++
++ case gcvHAL_RELEASE_VIDEO_MEMORY:
++ /* Free video memory. */
++ gcmkERR_BREAK(gckKERNEL_ReleaseVideoMemory(
++ Kernel, processID,
++ (gctUINT32)kernelInterface->u.ReleaseVideoMemory.node
++ ));
++
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ /* Map memory. */
++ gcmkERR_BREAK(gckKERNEL_MapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ &logical
++ ));
++ kernelInterface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ /* Unmap memory. */
++ gcmkERR_BREAK(gckKERNEL_UnmapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapMemory.logical)
++ ));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkERR_BREAK(gckOS_MapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapUserMemory.memory),
++ kernelInterface->u.MapUserMemory.physical,
++ (gctSIZE_T) kernelInterface->u.MapUserMemory.size,
++ &info,
++ &kernelInterface->u.MapUserMemory.address
++ ));
++
++ kernelInterface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ /* Clear temp storage. */
++ info = gcvNULL;
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ /* Unmap user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) kernelInterface->u.UnmapUserMemory.size,
++ gcmNAME_TO_PTR(kernelInterface->u.UnmapUserMemory.info),
++ kernelInterface->u.UnmapUserMemory.address
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ gcmkONERROR(gckKERNEL_LockVideoMemory(Kernel, gcvCORE_VG, processID, FromUser, Interface));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkONERROR(gckKERNEL_UnlockVideoMemory(Kernel, processID, Interface));
++ break;
++
++ case gcvHAL_USER_SIGNAL:
++#if !USE_NEW_LINUX_SIGNAL
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkERR_BREAK(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++
++ /* Destroy the signal. */
++ gcmkERR_BREAK(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkERR_BREAK(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
++ }
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkERR_BREAK(gckVGCOMMAND_Commit(
++ Kernel->vg->command,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.context),
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.queue),
++ kernelInterface->u.VGCommit.entryCount,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.taskTable)
++ ));
++ break;
++ case gcvHAL_VERSION:
++ kernelInterface->u.Version.major = gcvVERSION_MAJOR;
++ kernelInterface->u.Version.minor = gcvVERSION_MINOR;
++ kernelInterface->u.Version.patch = gcvVERSION_PATCH;
++ kernelInterface->u.Version.build = gcvVERSION_BUILD;
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkERR_BREAK(
++ gckOS_GetBaseAddress(Kernel->os,
++ &kernelInterface->u.GetBaseAddress.baseAddress));
++ break;
++ case gcvHAL_IMPORT_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Import(Kernel,
++ Interface->u.ImportVideoMemory.name,
++ &Interface->u.ImportVideoMemory.handle));
++ gcmkONERROR(gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ gcmINT2PTR(Interface->u.ImportVideoMemory.handle),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_NAME_VIDEO_MEMORY:
++ gcmkONERROR(gckVIDMEM_NODE_Name(Kernel,
++ Interface->u.NameVideoMemory.handle,
++ &Interface->u.NameVideoMemory.name));
++ break;
++
++ case gcvHAL_DATABASE:
++ gcmkONERROR(gckKERNEL_QueryDatabase(Kernel, processID, Interface));
++ break;
++ case gcvHAL_SHBUF:
++ {
++ gctSHBUF shBuf;
++ gctPOINTER uData;
++ gctUINT32 bytes;
++
++ switch (Interface->u.ShBuf.command)
++ {
++ case gcvSHBUF_CREATE:
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Create. */
++ gcmkONERROR(gckKERNEL_CreateShBuffer(Kernel, bytes, &shBuf));
++
++ Interface->u.ShBuf.id = gcmPTR_TO_UINT64(shBuf);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_DESTROY:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Check db first to avoid illegal destroy in the process. */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf));
++
++ gcmkONERROR(gckKERNEL_DestroyShBuffer(Kernel, shBuf));
++ break;
++
++ case gcvSHBUF_MAP:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++
++ /* Map for current process access. */
++ gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHBUF,
++ shBuf,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSHBUF_WRITE:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Write. */
++ gcmkONERROR(
++ gckKERNEL_WriteShBuffer(Kernel, shBuf, uData, bytes));
++ break;
++
++ case gcvSHBUF_READ:
++ shBuf = gcmUINT64_TO_PTR(Interface->u.ShBuf.id);
++ uData = gcmUINT64_TO_PTR(Interface->u.ShBuf.data);
++ bytes = Interface->u.ShBuf.bytes;
++
++ /* Read. */
++ gcmkONERROR(
++ gckKERNEL_ReadShBuffer(Kernel,
++ shBuf,
++ uData,
++ bytes,
++ &bytes));
++
++ /* Return copied size. */
++ Interface->u.ShBuf.bytes = bytes;
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++ default:
++ /* Invalid command. */
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++OnError:
++ /* Save status. */
++ kernelInterface->status = status;
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryCommandBuffer
++**
++** Query command buffer attributes.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsCOMMAND_BUFFER_INFO_PTR Information
++** Pointer to the information structure to receive buffer attributes.
++*/
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=0x%x",
++ Kernel, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the information. */
++ status = gckVGCOMMAND_QueryCommandBuffer(Kernel->vg->command, Information);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_vg.h 2015-05-01 14:57:59.579427001 -0500
+@@ -0,0 +1,85 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_vg_h_
++#define __gc_hal_kernel_vg_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel_hardware.h"
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckKERNEL object. */
++struct _gckVGKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* Pointer to gckINTERRUPT object. */
++ gckVGINTERRUPT interrupt;
++
++ /* Pointer to gckCOMMAND object. */
++ gckVGCOMMAND command;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckVGMMU mmu;
++
++ gckKERNEL kernel;
++};
++
++/* gckMMU object. */
++struct _gckVGMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER mutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctPOINTER pageTableLogical;
++
++ /* Allocation index. */
++ gctUINT32 entryCount;
++ gctUINT32 entry;
++};
++
++#endif /* __gc_hal_kernel_vg_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/gc_hal_kernel_video_memory.c 2015-05-01 14:57:59.583427001 -0500
+@@ -0,0 +1,2807 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_VIDMEM
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _Split
++**
++** Split a node on the required byte boundary.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the node to split.
++**
++** gctSIZE_T Bytes
++** Number of bytes to keep in the node.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gctBOOL
++** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
++** error.
++**
++*/
++static gctBOOL
++_Split(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER pointer = gcvNULL;
++
++ /* Make sure the byte boundary makes sense. */
++ if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate a new gcuVIDMEM_NODE object. */
++ if (gcmIS_ERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ &pointer)))
++ {
++ /* Error. */
++ return gcvFALSE;
++ }
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE structure. */
++ node->VidMem.offset = Node->VidMem.offset + Bytes;
++ node->VidMem.bytes = Node->VidMem.bytes - Bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.locked = 0;
++ node->VidMem.memory = Node->VidMem.memory;
++ node->VidMem.pool = Node->VidMem.pool;
++ node->VidMem.physical = Node->VidMem.physical;
++#ifdef __QNXNTO__
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++
++ /* Insert node behind specified node. */
++ node->VidMem.next = Node->VidMem.next;
++ node->VidMem.prev = Node;
++ Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind specified node. */
++ node->VidMem.nextFree = Node->VidMem.nextFree;
++ node->VidMem.prevFree = Node;
++ Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ /* Adjust size of specified node. */
++ Node->VidMem.bytes = Bytes;
++
++ /* Success. */
++ return gcvTRUE;
++}
++
++/*******************************************************************************
++**
++** _Merge
++**
++** Merge two adjacent nodes together.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the first of the two nodes to merge.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++*/
++static gceSTATUS
++_Merge(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gceSTATUS status;
++
++ /* Save pointer to next node. */
++ node = Node->VidMem.next;
++
++ /* This is a good time to make sure the heap is not corrupted. */
++ if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
++ {
++ /* Corrupted heap. */
++ gcmkASSERT(
++ Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++
++ /* Adjust byte count. */
++ Node->VidMem.bytes += node->VidMem.bytes;
++
++ /* Unlink next node from linked list. */
++ Node->VidMem.next = node->VidMem.next;
++ Node->VidMem.nextFree = node->VidMem.nextFree;
++
++ Node->VidMem.next->VidMem.prev =
++ Node->VidMem.nextFree->VidMem.prevFree = Node;
++
++ /* Free next node. */
++ status = gcmkOS_SAFE_FREE(Os, node);
++ return status;
++}
++
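++/* A minimal worked example of how _Split and _Merge cooperate (illustrative
++** only; the sizes are hypothetical).  Starting from a single free node of
++** 0x100000 bytes at offset 0, _Split(Os, node, 0x40000) keeps 0x40000 bytes
++** in 'node' and inserts a new free node of 0xC0000 bytes at offset 0x40000
++** directly behind it.  Once both nodes are free and adjacent again,
++** _Merge(Os, node) folds the second node back into the first, restoring the
++** original 0x100000-byte node.
++*/
++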
++/******************************************************************************\
++******************************* gckVIDMEM API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVIDMEM_ConstructVirtual
++**
++** Construct a new gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T Bytes
++**          Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
++*/
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Kernel=0x%x Flag=%x Bytes=%lu", Kernel, Flag, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate an gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union for virtual memory. */
++ node->Virtual.kernel = Kernel;
++ node->Virtual.contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
++ node->Virtual.logical = gcvNULL;
++#if gcdENABLE_VG
++ node->Virtual.kernelVirtual = gcvNULL;
++#endif
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.lockeds[i] = 0;
++ node->Virtual.pageTables[i] = gcvNULL;
++ node->Virtual.lockKernels[i] = gcvNULL;
++ }
++
++ gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
++
++ /* Allocate the virtual memory. */
++ gcmkONERROR(
++ gckOS_AllocatePagedMemoryEx(os,
++ Flag,
++ node->Virtual.bytes = Bytes,
++ &node->Virtual.gid,
++ &node->Virtual.physical));
++
++ /* Return pointer to the gcuVIDMEM_NODE union. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Created virtual node 0x%x for %u bytes @ 0x%x",
++ node, Bytes, node->Virtual.physical);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the structure. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_DestroyVirtual
++**
++** Destroy an gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gckOS os;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++    /* Extract the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Delete the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Construct
++**
++** Construct a new gckVIDMEM object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 BaseAddress
++** Base address for the video memory heap.
++**
++** gctSIZE_T Bytes
++** Number of bytes in the video memory heap.
++**
++** gctSIZE_T Threshold
++**          Minimum number of bytes beyond an allocation before the node is
++** split. Can be used as a minimum alignment requirement.
++**
++** gctSIZE_T BankSize
++** Number of bytes per physical memory bank. Used by bank
++** optimization.
++**
++** OUTPUT:
++**
++** gckVIDMEM * Memory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object.
++*/
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T BankSize,
++ OUT gckVIDMEM * Memory
++ )
++{
++ gckVIDMEM memory = gcvNULL;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i, banks = 0;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 heapBytes;
++ gctUINT32 bankSize;
++
++ gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
++ "BankSize=%lu",
++ Os, BaseAddress, Bytes, Threshold, BankSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ gcmkSAFECASTSIZET(heapBytes, Bytes);
++ gcmkSAFECASTSIZET(bankSize, BankSize);
++
++ /* Allocate the gckVIDMEM object. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
++
++ memory = pointer;
++
++ /* Initialize the gckVIDMEM object. */
++ memory->object.type = gcvOBJ_VIDMEM;
++ memory->os = Os;
++
++ /* Set video memory heap information. */
++ memory->baseAddress = BaseAddress;
++ memory->bytes = heapBytes;
++ memory->freeBytes = heapBytes;
++ memory->threshold = Threshold;
++ memory->mutex = gcvNULL;
++
++ BaseAddress = 0;
++
++ /* Walk all possible banks. */
++ for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
++ {
++ gctUINT32 bytes;
++
++ if (BankSize == 0)
++ {
++ /* Use all bytes for the first bank. */
++ bytes = heapBytes;
++ }
++ else
++ {
++ /* Compute number of bytes for this bank. */
++ bytes = gcmALIGN(BaseAddress + 1, bankSize) - BaseAddress;
++
++ if (bytes > heapBytes)
++ {
++ /* Make sure we don't exceed the total number of bytes. */
++ bytes = heapBytes;
++ }
++ }
++
++ if (bytes == 0)
++ {
++            /* Mark the heap as unused. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = gcvNULL;
++ continue;
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = memory;
++
++ node->VidMem.next =
++ node->VidMem.prev =
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = &memory->sentinel[i];
++
++ node->VidMem.offset = BaseAddress;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = 0;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#ifdef __QNXNTO__
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++
++#if gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Initialize the linked list of nodes. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = node;
++
++ /* Mark sentinel. */
++ memory->sentinel[i].VidMem.bytes = 0;
++
++ /* Adjust address for next bank. */
++ BaseAddress += bytes;
++ heapBytes -= bytes;
++ banks ++;
++ }
++
++ /* Assign all the bank mappings. */
++ memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
++ memory->mapping[gcvSURF_BITMAP] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_DEPTH] = banks - 1;
++ memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TEXTURE] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_VERTEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_INDEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
++
++#if gcdENABLE_VG
++ memory->mapping[gcvSURF_IMAGE] = 0;
++ memory->mapping[gcvSURF_MASK] = 0;
++ memory->mapping[gcvSURF_SCISSOR] = 0;
++#endif
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] INDEX: bank %d",
++ memory->mapping[gcvSURF_INDEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] VERTEX: bank %d",
++ memory->mapping[gcvSURF_VERTEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TEXTURE: bank %d",
++ memory->mapping[gcvSURF_TEXTURE]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] RENDER_TARGET: bank %d",
++ memory->mapping[gcvSURF_RENDER_TARGET]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] DEPTH: bank %d",
++ memory->mapping[gcvSURF_DEPTH]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TILE_STATUS: bank %d",
++ memory->mapping[gcvSURF_TILE_STATUS]);
++
++ /* Allocate the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));
++
++ /* Return pointer to the gckVIDMEM object. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (memory != gcvNULL)
++ {
++ if (memory->mutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
++ }
++
++ for (i = 0; i < banks; ++i)
++ {
++ /* Free the heap. */
++ gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
++ }
++
++ /* Free the object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
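++/* Illustrative bank-mapping result (a sketch only; the four-bank layout is
++** hypothetical).  If the loop above carved the heap into four banks, the
++** assignments resolve to:
++**
++**     gcvSURF_RENDER_TARGET / gcvSURF_BITMAP               -> bank 3
++**     gcvSURF_DEPTH / gcvSURF_HIERARCHICAL_DEPTH           -> bank 2
++**     gcvSURF_TEXTURE                                      -> bank 1
++**     gcvSURF_VERTEX / gcvSURF_INDEX / gcvSURF_TILE_STATUS -> bank 0
++**
++** so render targets and depth buffers land in different banks than textures
++** and vertex/index data, which is what the bank optimization relies on.
++*/
++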
++/*******************************************************************************
++**
++** gckVIDMEM_Destroy
++**
++** Destroy an gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ )
++{
++ gcuVIDMEM_NODE_PTR node, next;
++ gctINT i;
++
++ gcmkHEADER_ARG("Memory=0x%x", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++        /* Bail out if the heap is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = next)
++ {
++ /* Save pointer to the next node. */
++ next = node->VidMem.next;
++
++ /* Free the node. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
++ }
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));
++
++ /* Mark the object as unknown. */
++ Memory->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVIDMEM object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_BANK_ALIGNMENT
++
++#if !gcdBANK_BIT_START
++#error gcdBANK_BIT_START not defined.
++#endif
++
++#if !gcdBANK_BIT_END
++#error gcdBANK_BIT_END not defined.
++#endif
++/*******************************************************************************
++** _GetSurfaceBankAlignment
++**
++**  Return the offset required to make the BaseAddress properly bank
++**  aligned.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gcoOS object.
++**
++** gceSURF_TYPE Type
++** Type of allocation.
++**
++** gctUINT32 BaseAddress
++** Base address of current video memory node.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR AlignmentOffset
++** Pointer to a variable that will hold the number of bytes to skip in
++**          the current video memory node in order to make the resulting
++**          address bank aligned.
++*/
++static gceSTATUS
++_GetSurfaceBankAlignment(
++ IN gckKERNEL Kernel,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 BaseAddress,
++ OUT gctUINT32_PTR AlignmentOffset
++ )
++{
++ gctUINT32 bank;
++ /* To retrieve the bank. */
++ static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
++ ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ /* To retrieve the bank and all the lower bytes. */
++ static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
++
++ switch (Type)
++ {
++ case gcvSURF_RENDER_TARGET:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the first bank. */
++ *AlignmentOffset = (bank == 0) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
++ break;
++
++ case gcvSURF_DEPTH:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the third bank. */
++ *AlignmentOffset = (bank == 2) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
++
++ /* Minimum 256 byte alignment needed for fast_msaa. */
++ if ((gcdBANK_CHANNEL_BIT > 7) ||
++ ((gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_FAST_MSAA) != gcvSTATUS_TRUE) &&
++ (gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_SMALL_MSAA) != gcvSTATUS_TRUE)))
++ {
++ /* Add a channel offset at the channel bit. */
++ *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
++ }
++ break;
++
++ default:
++ /* no alignment needed. */
++ *AlignmentOffset = 0;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
++ return gcvSTATUS_OK;
++}
++#endif
++
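++/* A worked example of the bank-mask arithmetic in _GetSurfaceBankAlignment
++** above (illustrative only; the bit positions are hypothetical).  Assuming
++** gcdBANK_BIT_START == 12 and gcdBANK_BIT_END == 14:
++**
++**     bankMask = (0xFFFFFFFF << 12) ^ (0xFFFFFFFF << 15) = 0x00007000
++**     byteMask = ~(0xFFFFFFFF << 15)                     = 0x00007FFF
++**
++** For BaseAddress = 0x00012345 the bank is (0x12345 & 0x7000) >> 12 == 2.
++** A render target wants bank 0, so the returned offset is
++** (1 << 15) - (0x12345 & 0x7FFF) = 0x8000 - 0x2345 = 0x5CBB, which places the
++** allocation at 0x00018000, where the bank bits are 0 again.
++*/
++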
++static gcuVIDMEM_NODE_PTR
++_FindNode(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctINT Bank,
++ IN gctSIZE_T Bytes,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Alignment
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++
++#if gcdENABLE_BANK_ALIGNMENT
++ gctUINT32 bankAlignment;
++ gceSTATUS status;
++#endif
++
++ if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
++ {
++ /* No free nodes left. */
++ return gcvNULL;
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++ /* Walk all free nodes until we have one that is big enough or we have
++ ** reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ if (node->VidMem.bytes < Bytes)
++ {
++ continue;
++ }
++
++ gcmkONERROR(_GetSurfaceBankAlignment(
++ Kernel,
++ Type,
++ node->VidMem.memory->baseAddress + node->VidMem.offset,
++ &bankAlignment));
++
++ bankAlignment = gcmALIGN(bankAlignment, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0)
++ ? 0
++ : (*Alignment - (node->VidMem.offset % *Alignment));
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment + bankAlignment;
++ return node;
++ }
++ }
++#endif
++
++ /* Walk all free nodes until we have one that is big enough or we have
++ reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ gctUINT offset;
++
++ gctINT modulo;
++
++ gcmkSAFECASTSIZET(offset, node->VidMem.offset);
++
++ modulo = gckMATH_ModuloInt(offset, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment;
++ return node;
++ }
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++OnError:
++#endif
++ /* Not enough memory. */
++ return gcvNULL;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_AllocateLinear
++**
++** Allocate linear memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++** Type of surface to allocate (use by bank optimization).
++**
++** gctBOOL Specified
++**          If the user must use this pool, set Specified to gcvTRUE; otherwise
++**          the allocator may reserve some memory for other uses, such as
++**          small block size allocation requests.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctBOOL Specified,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++ gctINT bank, i;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
++ Memory, Bytes, Alignment, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ if (Bytes > Memory->freeBytes)
++ {
++ /* Not enough memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++
++#if gcdSMALL_BLOCK_SIZE
++ if ((Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
++ && (Bytes >= gcdSMALL_BLOCK_SIZE)
++ && (Specified == gcvFALSE)
++ )
++ {
++        /* The remaining memory is reserved for small allocations. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++ /* Find the default bank for this surface type. */
++ gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
++ bank = Memory->mapping[Type];
++ alignment = Alignment;
++
++ /* Find a free node in the default bank. */
++ node = _FindNode(Kernel, Memory, bank, Bytes, Type, &alignment);
++
++ /* Out of memory? */
++ if (node == gcvNULL)
++ {
++ /* Walk all lower banks. */
++ for (i = bank - 1; i >= 0; --i)
++ {
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Kernel, Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Walk all upper banks. */
++ for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
++ {
++ /* Abort when we reach unused banks. */
++ break;
++ }
++
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Kernel, Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Out of memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++
++ /* Do we have an alignment? */
++ if (alignment > 0)
++ {
++ /* Split the node so it is aligned. */
++ if (_Split(Memory->os, node, alignment))
++ {
++ /* Successful split, move to aligned node. */
++ node = node->VidMem.next;
++
++ /* Remove alignment. */
++ alignment = 0;
++ }
++ }
++
++ /* Do we have enough memory after the allocation to split it? */
++ if (node->VidMem.bytes - Bytes > Memory->threshold)
++ {
++ /* Adjust the node size. */
++ _Split(Memory->os, node, Bytes);
++ }
++
++ /* Remove the node from the free list. */
++ node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
++ node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = gcvNULL;
++
++ /* Fill in the information. */
++ node->VidMem.alignment = alignment;
++ node->VidMem.memory = Memory;
++#ifdef __QNXNTO__
++ node->VidMem.logical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++#endif
++
++ /* Adjust the number of free bytes. */
++ Memory->freeBytes -= node->VidMem.bytes;
++
++#if gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++
++ /* Return the pointer to the node. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Allocated %u bytes @ 0x%x [0x%08X]",
++ node->VidMem.bytes, node, node->VidMem.offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Free
++**
++** Free an allocated video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Free(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gckVIDMEM memory = gcvNULL;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Extract pointer to gckVIDMEM object owning the node. */
++ memory = Node->VidMem.memory;
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
++
++ mutexAcquired = gcvTRUE;
++
++#ifdef __QNXNTO__
++ /* Unmap the video memory. */
++ if (Node->VidMem.logical != gcvNULL)
++ {
++ gckKERNEL_UnmapVideoMemory(
++ Kernel,
++ Node->VidMem.logical,
++ Node->VidMem.processID,
++ Node->VidMem.bytes);
++ Node->VidMem.logical = gcvNULL;
++ }
++
++ /* Reset. */
++ Node->VidMem.processID = 0;
++
++ /* Don't try to re-free an already freed node. */
++ if ((Node->VidMem.nextFree == gcvNULL)
++ && (Node->VidMem.prevFree == gcvNULL)
++ )
++#endif
++ {
++#if gcdENABLE_VG
++ if (Node->VidMem.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->VidMem.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(memory->os,
++ Node->VidMem.kernelVirtual,
++ Node->VidMem.bytes));
++
++ Node->VidMem.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Check if Node is already freed. */
++ if (Node->VidMem.nextFree)
++ {
++            /* Node is already freed. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ /* Update the number of free bytes. */
++ memory->freeBytes += Node->VidMem.bytes;
++
++ /* Find the next free node. */
++ for (node = Node->VidMem.next;
++ node != gcvNULL && node->VidMem.nextFree == gcvNULL;
++ node = node->VidMem.next) ;
++
++ /* Insert this node in the free list. */
++ Node->VidMem.nextFree = node;
++ Node->VidMem.prevFree = node->VidMem.prevFree;
++
++ Node->VidMem.prevFree->VidMem.nextFree =
++ node->VidMem.prevFree = Node;
++
++ /* Is the next node a free node and not the sentinel? */
++ if ((Node->VidMem.next == Node->VidMem.nextFree)
++ && (Node->VidMem.next->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the next node. */
++ gcmkONERROR(_Merge(memory->os, node = Node));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++
++ /* Is the previous node a free node and not the sentinel? */
++ if ((Node->VidMem.prev == Node->VidMem.prevFree)
++ && (Node->VidMem.prev->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the previous node. */
++ gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is freed.",
++ Node);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ /* Get gckKERNEL object. */
++ kernel = Node->Virtual.kernel;
++
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
++
++#if gcdENABLE_VG
++ if (Node->Virtual.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->Virtual.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(kernel->os,
++ Node->Virtual.kernelVirtual,
++ Node->Virtual.bytes));
++
++ Node->Virtual.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutexAcquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ memory->os, memory->mutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if !gcdPROCESS_ADDRESS_SPACE
++/*******************************************************************************
++**
++** _NeedVirtualMapping
++**
++**  Determine whether a GPU page table mapping is needed for the video node.
++**
++** INPUT:
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** gceCORE Core
++** Id of current GPU.
++**
++** OUTPUT:
++** gctBOOL * NeedMapping
++**          Pointer to a variable that receives whether the Node needs to be mapped.
++*/
++static gceSTATUS
++_NeedVirtualMapping(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gcuVIDMEM_NODE_PTR Node,
++ OUT gctBOOL * NeedMapping
++)
++{
++ gceSTATUS status;
++ gctUINT32 phys;
++ gctUINT32 end;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctUINT32 baseAddress;
++ gctUINT32 bytes;
++
++ gcmkHEADER_ARG("Node=0x%X", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
++
++ if (Node->Virtual.contiguous)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ *NeedMapping = gcvFALSE;
++ }
++ else
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(gckOS_UserLogicalToPhysical(
++ Kernel->os, Node->Virtual.logical, &phys
++ ));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(phys >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ phys -= baseAddress;
++
++            /* If part of the region belongs to gcvPOOL_VIRTUAL,
++            ** the whole region has to be mapped. */
++ gcmkSAFECASTSIZET(bytes, Node->Virtual.bytes);
++ end = phys + bytes - 1;
++
++ gcmkONERROR(gckHARDWARE_SplitMemory(
++ Kernel->hardware, end, &pool, &offset
++ ));
++
++ *NeedMapping = (pool == gcvPOOL_VIRTUAL);
++ }
++ }
++ else
++ {
++ *NeedMapping = gcvTRUE;
++ }
++
++ gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++gcsGPU_MAP_PTR
++_FindGPUMap(
++ IN gcsGPU_MAP_PTR Head,
++ IN gctINT ProcessID
++ )
++{
++ gcsGPU_MAP_PTR map = Head;
++
++ while (map)
++ {
++ if (map->pid == ProcessID)
++ {
++ return map;
++ }
++
++ map = map->next;
++ }
++
++ return gcvNULL;
++}
++
++gcsGPU_MAP_PTR
++_CreateGPUMap(
++ IN gckOS Os,
++ IN gcsGPU_MAP_PTR *Head,
++ IN gcsGPU_MAP_PTR *Tail,
++ IN gctINT ProcessID
++ )
++{
++ gcsGPU_MAP_PTR gpuMap;
++ gctPOINTER pointer = gcvNULL;
++
++ gckOS_Allocate(Os, sizeof(gcsGPU_MAP), &pointer);
++
++ if (pointer == gcvNULL)
++ {
++ return gcvNULL;
++ }
++
++ gpuMap = pointer;
++
++ gckOS_ZeroMemory(pointer, sizeof(gcsGPU_MAP));
++
++ gpuMap->pid = ProcessID;
++
++ if (!*Head)
++ {
++ *Head = *Tail = gpuMap;
++ }
++ else
++ {
++ gpuMap->prev = *Tail;
++ (*Tail)->next = gpuMap;
++ *Tail = gpuMap;
++ }
++
++ return gpuMap;
++}
++
++void
++_DestroyGPUMap(
++ IN gckOS Os,
++ IN gcsGPU_MAP_PTR *Head,
++ IN gcsGPU_MAP_PTR *Tail,
++ IN gcsGPU_MAP_PTR gpuMap
++ )
++{
++
++ if (gpuMap == *Head)
++ {
++ if ((*Head = gpuMap->next) == gcvNULL)
++ {
++ *Tail = gcvNULL;
++ }
++ }
++ else
++ {
++ gpuMap->prev->next = gpuMap->next;
++ if (gpuMap == *Tail)
++ {
++ *Tail = gpuMap->prev;
++ }
++ else
++ {
++ gpuMap->next->prev = gpuMap->prev;
++ }
++ }
++
++ gcmkOS_SAFE_FREE(Os, gpuMap);
++}
++#endif
++
++/*******************************************************************************
++**
++** gckVIDMEM_Lock
++**
++** Lock a video memory node and return its hardware specific address.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++**      gckVIDMEM_NODE Node
++**          Pointer to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that will hold the hardware specific address.
++**
++**      gctUINT64 * PhysicalAddress
++** Pointer to a variable that will hold the bus address of a contiguous
++** video node.
++*/
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address,
++ OUT gctUINT32 * Gid,
++ OUT gctUINT64 * PhysicalAddress
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL locked = gcvFALSE;
++ gckOS os = gcvNULL;
++#if !gcdPROCESS_ADDRESS_SPACE
++ gctBOOL needMapping = gcvFALSE;
++#endif
++ gctUINT32 baseAddress;
++ gctUINT32 physicalAddress;
++ gcuVIDMEM_NODE_PTR node = Node->node;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ if ((node == gcvNULL)
++ || (node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /**************************** Video Memory ********************************/
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gctUINT32 offset;
++
++ if (Cacheable == gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Increment the lock count. */
++ node->VidMem.locked ++;
++
++ /* Return the physical address of the node. */
++ gcmkSAFECASTSIZET(offset, node->VidMem.offset);
++
++ *Address = node->VidMem.memory->baseAddress
++ + offset
++ + node->VidMem.alignment;
++
++ physicalAddress = *Address;
++
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Convert physical to GPU address for old mmu. */
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ gcmkASSERT(*Address > baseAddress);
++ *Address -= baseAddress;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(
++ Kernel->os,
++ *Address,
++ Address
++ ));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Locked node 0x%x (%d) @ 0x%08X",
++ node,
++ node->VidMem.locked,
++ *Address);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++
++ *Gid = node->Virtual.gid;
++
++#if gcdPAGED_MEMORY_CACHEABLE
++ /* Force video memory cacheable. */
++ Cacheable = gcvTRUE;
++#endif
++
++ gcmkONERROR(
++ gckOS_LockPages(os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ Cacheable,
++ &node->Virtual.logical,
++ &node->Virtual.pageCount));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ os,
++ node->Virtual.logical,
++ &physicalAddress
++ ));
++
++#if gcdENABLE_VG
++ node->Virtual.physicalAddress = physicalAddress;
++#endif
++
++#if !gcdPROCESS_ADDRESS_SPACE
++ /* Increment the lock count. */
++ if (node->Virtual.lockeds[Kernel->core] ++ == 0)
++ {
++ locked = gcvTRUE;
++
++ gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, node, &needMapping));
++
++ if (needMapping == gcvFALSE)
++ {
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(gckVGHARDWARE_ConvertLogical(
++ Kernel->vg->hardware,
++ node->Virtual.logical,
++ gcvTRUE,
++ &node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ node->Virtual.logical,
++ gcvTRUE,
++ &node->Virtual.addresses[Kernel->core]));
++ }
++ }
++ else
++ {
++#if gcdSECURITY
++ gctPHYS_ADDR physicalArrayPhysical;
++ gctPOINTER physicalArrayLogical;
++
++ gcmkONERROR(gckOS_AllocatePageArray(
++ os,
++ node->Virtual.physical,
++ node->Virtual.pageCount,
++ &physicalArrayLogical,
++ &physicalArrayPhysical
++ ));
++
++ gcmkONERROR(gckKERNEL_SecurityMapMemory(
++ Kernel,
++ physicalArrayLogical,
++ node->Virtual.pageCount,
++ &node->Virtual.addresses[Kernel->core]
++ ));
++
++ gcmkONERROR(gckOS_FreeNonPagedMemory(
++ os,
++ 1,
++ physicalArrayPhysical,
++ physicalArrayLogical
++ ));
++#else
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckVGMMU_AllocatePages(Kernel->vg->mmu,
++ node->Virtual.pageCount,
++ &node->Virtual.pageTables[Kernel->core],
++ &node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckMMU_AllocatePagesEx(Kernel->mmu,
++ node->Virtual.pageCount,
++ node->Virtual.type,
++ &node->Virtual.pageTables[Kernel->core],
++ &node->Virtual.addresses[Kernel->core]));
++ }
++
++ node->Virtual.lockKernels[Kernel->core] = Kernel;
++
++ /* Map the pages. */
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ node->Virtual.physical,
++ node->Virtual.pageCount,
++ node->Virtual.addresses[Kernel->core],
++ node->Virtual.pageTables[Kernel->core]));
++
++#if gcdENABLE_VG
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu, node->Virtual.type));
++ }
++#endif
++ }
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Mapped virtual node 0x%x to 0x%08X",
++ node,
++ node->Virtual.addresses[Kernel->core]);
++ }
++
++ /* Return hardware address. */
++ *Address = node->Virtual.addresses[Kernel->core];
++#endif
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++
++ *PhysicalAddress = (gctUINT64)physicalAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ if (node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++ node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++ /* Unlock the pages. */
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical
++ ));
++
++ node->Virtual.lockeds[Kernel->core]--;
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Unlock
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++**      gckVIDMEM_NODE Node
++**          Pointer to a locked gckVIDMEM_NODE object.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable specifying whether the surface should be
++**          unlocked asynchronously or not.
++**
++** OUTPUT:
++**
++** gctBOOL * Asynchroneous
++**          Pointer to a variable that receives gcvTRUE when the unlock has to
++**          be completed asynchronously through a scheduled event.
++*/
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ )
++{
++ gceSTATUS status;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node = Node->node;
++
++ gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
++ Node, Type, gcmOPT_VALUE(Asynchroneous));
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Verify the arguments. */
++ if ((node == gcvNULL)
++ || (node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /**************************** Video Memory ********************************/
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (node->VidMem.locked <= 0)
++ {
++ /* The surface was not locked. */
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ if (Asynchroneous != gcvNULL)
++ {
++ /* Schedule an event to sync with GPU. */
++ *Asynchroneous = gcvTRUE;
++ }
++ else
++ {
++ /* Decrement the lock count. */
++ node->VidMem.locked --;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unlocked node 0x%x (%d)",
++ node,
++ node->VidMem.locked);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++
++
++ if (Asynchroneous == gcvNULL)
++ {
++#if !gcdPROCESS_ADDRESS_SPACE
++ if (node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement lock count. */
++ -- node->Virtual.lockeds[Kernel->core];
++
++ /* See if we can unlock the resources. */
++ if (node->Virtual.lockeds[Kernel->core] == 0)
++ {
++#if gcdSECURITY
++ if (node->Virtual.addresses[Kernel->core] > 0x80000000)
++ {
++ gcmkONERROR(gckKERNEL_SecurityUnmapMemory(
++ Kernel,
++ node->Virtual.addresses[Kernel->core],
++ node->Virtual.pageCount
++ ));
++ }
++#else
++ /* Free the page table. */
++ if (node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_FreePages(Kernel->mmu,
++ node->Virtual.pageTables[Kernel->core],
++ node->Virtual.pageCount));
++ }
++
++ gcmkONERROR(gckOS_UnmapPages(
++ Kernel->os,
++ node->Virtual.pageCount,
++ node->Virtual.addresses[Kernel->core]
++ ));
++
++ /* Mark page table as freed. */
++ node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++#endif
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unmapped virtual node 0x%x from 0x%08X",
++ node, node->Virtual.addresses[Kernel->core]);
++#endif
++
++ }
++
++ else
++ {
++ gcmkONERROR(
++ gckOS_UnlockPages(os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Scheduled unlock for virtual node 0x%x",
++ node);
++
++ /* Schedule the surface to be unlocked. */
++ *Asynchroneous = gcvTRUE;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++gceSTATUS
++gckVIDMEM_Node_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ OUT gctUINT32 *Address
++ )
++{
++ gceSTATUS status;
++ gckOS os;
++ gcuVIDMEM_NODE_PTR node = Node->node;
++ gcsGPU_MAP_PTR gpuMap;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctUINT32 phys = gcvINVALID_ADDRESS;
++ gctUINT32 processID;
++ gcsLOCK_INFO_PTR lockInfo;
++ gctUINT32 pageCount;
++ gckMMU mmu;
++ gctUINT32 i;
++ gctUINT32_PTR pageTableEntry;
++ gctUINT32 offset = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Node = %x", Node);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Kernel, &mmu));
++
++ gcmkONERROR(gckOS_AcquireMutex(os, Node->mapMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get map information for current process. */
++ gpuMap = _FindGPUMap(Node->mapHead, processID);
++
++ if (gpuMap == gcvNULL)
++ {
++ gpuMap = _CreateGPUMap(os, &Node->mapHead, &Node->mapTail, processID);
++
++ if (gpuMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ lockInfo = &gpuMap->lockInfo;
++
++ if (lockInfo->lockeds[Kernel->core] ++ == 0)
++ {
++ /* Get necessary information. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ phys = node->VidMem.memory->baseAddress
++ + node->VidMem.offset
++ + node->VidMem.alignment;
++
++            /* The GPU page table uses 4K pages. */
++ pageCount = ((phys + node->VidMem.bytes + 4096 - 1) >> 12)
++ - (phys >> 12);
++
++ offset = phys & 0xFFF;
++ }
++ else
++ {
++ pageCount = node->Virtual.pageCount;
++ physical = node->Virtual.physical;
++ }
++
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(gckMMU_AllocatePages(
++ mmu,
++ pageCount,
++ &lockInfo->pageTables[Kernel->core],
++ &lockInfo->GPUAddresses[Kernel->core]));
++
++ /* Record MMU from which pages are allocated. */
++ lockInfo->lockMmus[Kernel->core] = mmu;
++
++ pageTableEntry = lockInfo->pageTables[Kernel->core];
++
++ /* Fill page table entries. */
++ if (phys != gcvINVALID_ADDRESS)
++ {
++ gctUINT32 address = lockInfo->GPUAddresses[Kernel->core];
++ for (i = 0; i < pageCount; i++)
++ {
++ gckMMU_GetPageEntry(mmu, address, &pageTableEntry);
++ gckMMU_SetPage(mmu, phys & 0xFFFFF000, pageTableEntry);
++ phys += 4096;
++ address += 4096;
++ pageTableEntry += 1;
++ }
++ }
++ else
++ {
++ gctUINT32 address = lockInfo->GPUAddresses[Kernel->core];
++ gcmkASSERT(physical != gcvNULL);
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ physical,
++ pageCount,
++ address,
++ pageTableEntry));
++ }
++
++ gcmkONERROR(gckMMU_Flush(mmu));
++ }
++
++ *Address = lockInfo->GPUAddresses[Kernel->core] + offset;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mapMutex));
++ acquired = gcvFALSE;
++
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->mapMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
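++/* A worked example of the 4K page-count computation used in
++** gckVIDMEM_Node_Lock above (illustrative; the addresses are hypothetical).
++** For phys = 0x00101100 and node->VidMem.bytes = 0x2100:
++**
++**     pageCount = ((0x101100 + 0x2100 + 0xFFF) >> 12) - (0x101100 >> 12)
++**               = 0x104 - 0x101 = 3
++**     offset    = 0x101100 & 0xFFF = 0x100
++**
++** so three 4K page table entries are allocated and the returned GPU address
++** is the start of the mapping plus the 0x100 byte offset into the first page.
++*/
++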
++gceSTATUS
++gckVIDMEM_NODE_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsGPU_MAP_PTR gpuMap;
++ gcsLOCK_INFO_PTR lockInfo;
++ gckMMU mmu;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 pageCount;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%08X, Node = %x, ProcessID=%d",
++ Kernel, Node, ProcessID);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Node->mapMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get map information for current process. */
++ gpuMap = _FindGPUMap(Node->mapHead, ProcessID);
++
++ if (gpuMap == gcvNULL)
++ {
++ /* No mapping for this process. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ lockInfo = &gpuMap->lockInfo;
++
++ if (--lockInfo->lockeds[Kernel->core] == 0)
++ {
++ node = Node->node;
++
++ /* Get necessary information. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gctUINT32 phys = node->VidMem.memory->baseAddress
++ + node->VidMem.offset
++ + node->VidMem.alignment;
++
++            /* The GPU page table uses 4K pages. */
++ pageCount = ((phys + node->VidMem.bytes + 4096 - 1) >> 12)
++ - (phys >> 12);
++ }
++ else
++ {
++ pageCount = node->Virtual.pageCount;
++ }
++
++ /* Get MMU which allocates pages. */
++ mmu = lockInfo->lockMmus[Kernel->core];
++
++ /* Free virtual spaces in page table. */
++ gcmkVERIFY_OK(gckMMU_FreePagesEx(
++ mmu,
++ lockInfo->GPUAddresses[Kernel->core],
++ pageCount
++ ));
++
++ _DestroyGPUMap(Kernel->os, &Node->mapHead, &Node->mapTail, gpuMap);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Node->mapMutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Node->mapMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckVIDMEM_HANDLE_Allocate
++**
++** Allocate a handle for a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gckVIDMEM_NODE Node
++** Pointer to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Handle
++**          Pointer to a variable receiving a handle that represents this
++** gckVIDMEM_NODE in userspace.
++*/
++static gceSTATUS
++gckVIDMEM_HANDLE_Allocate(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ OUT gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID = 0;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER handleDatabase = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctUINT32 handle = 0;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gckOS os = Kernel->os;
++
++ gcmkHEADER_ARG("Kernel=0x%X, Node=0x%X", Kernel, Node);
++
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate a gckVIDMEM_HANDLE object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsVIDMEM_HANDLE), &pointer));
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsVIDMEM_HANDLE)));
++
++ handleObject = pointer;
++
++ gcmkONERROR(gckOS_AtomConstruct(os, &handleObject->reference));
++
++ /* Set default reference count to 1. */
++ gckOS_AtomSet(os, handleObject->reference, 1);
++
++ gcmkVERIFY_OK(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel,
++ processID,
++ &handleDatabase,
++ &mutex));
++
++ /* Allocate a handle for this object. */
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(handleDatabase, handleObject, &handle));
++
++ handleObject->node = Node;
++ handleObject->handle = handle;
++
++ *Handle = handle;
++
++ gcmkFOOTER_ARG("*Handle=%d", *Handle);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (handleObject != gcvNULL)
++ {
++ if (handleObject->reference != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, handleObject->reference));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, handleObject));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckVIDMEM_NODE_Reference(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node
++ )
++{
++ gctINT32 oldValue;
++ gcmkHEADER_ARG("Kernel=0x%X Node=0x%X", Kernel, Node);
++
++ gckOS_AtomIncrement(Kernel->os, Node->reference, &oldValue);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_Reference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gctPOINTER database = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctINT32 oldValue = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Handle=%d PrcoessID=%d", Handle, ProcessID);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel, ProcessID, &database, &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Translate handle to gckVIDMEM_HANDLE object. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject));
++
++ /* Increase the reference count. */
++ gckOS_AtomIncrement(Kernel->os, handleObject->reference, &oldValue);
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ )
++{
++ gceSTATUS status;
++ gctPOINTER handleDatabase = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctINT32 oldValue = 0;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Handle=%d PrcoessID=%d", Handle, ProcessID);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel,
++ ProcessID,
++ &handleDatabase,
++ &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Translate handle to gckVIDMEM_HANDLE. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(handleDatabase, Handle, (gctPOINTER *)&handleObject));
++
++ gckOS_AtomDecrement(Kernel->os, handleObject->reference, &oldValue);
++
++ if (oldValue == 1)
++ {
++ /* Remove handle from database if this is the last reference. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(handleDatabase, Handle));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ if (oldValue == 1)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, handleObject->reference));
++ gcmkOS_SAFE_FREE(Kernel->os, handleObject);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_LookupAndReference(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER database = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctUINT32 processID = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle);
++
++ gckOS_GetProcessID(&processID);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel, processID, &database, &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Translate handle to gckVIDMEM_HANDLE object. */
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject));
++
++ /* Get gckVIDMEM_NODE object. */
++ node = handleObject->node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ /* Reference this gckVIDMEM_NODE object. */
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Reference(Kernel, node));
++
++ /* Return result. */
++ *Node = node;
++
++ gcmkFOOTER_ARG("*Node=%d", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_HANDLE_Lookup(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle,
++ OUT gckVIDMEM_NODE * Node
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_HANDLE handleObject = gcvNULL;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER database = gcvNULL;
++ gctPOINTER mutex = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X ProcessID=%d Handle=%d",
++ Kernel, ProcessID, Handle);
++
++ gcmkONERROR(
++ gckKERNEL_FindHandleDatbase(Kernel, ProcessID, &database, &mutex));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ gckKERNEL_QueryIntegerId(database, Handle, (gctPOINTER *)&handleObject));
++
++ node = handleObject->node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ *Node = node;
++
++ gcmkFOOTER_ARG("*Node=%d", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_Allocate
++**
++** Allocate a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++**      gcuVIDMEM_NODE_PTR VideoNode
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** gctUINT32 * Handle
++**          Pointer to a variable receiving a handle representing this
++** gckVIDMEM_NODE in userspace.
++*/
++gceSTATUS
++gckVIDMEM_NODE_Allocate(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR VideoNode,
++ IN gceSURF_TYPE Type,
++ IN gcePOOL Pool,
++ IN gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 handle = 0;
++ gckOS os = Kernel->os;
++
++ gcmkHEADER_ARG("Kernel=0x%X VideoNode=0x%X", Kernel, VideoNode);
++
++ /* Construct a node. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsVIDMEM_NODE), &pointer));
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(pointer, gcmSIZEOF(gcsVIDMEM_NODE)));
++
++ node = pointer;
++
++ node->node = VideoNode;
++ node->type = Type;
++ node->pool = Pool;
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckOS_CreateMutex(os, &node->mapMutex));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(os, &node->reference));
++
++ gcmkONERROR(gckOS_CreateMutex(os, &node->mutex));
++
++    /* Reference count is 1 by default. */
++ gckVIDMEM_NODE_Reference(Kernel, node);
++
++ /* Create a handle to represent this node. */
++ gcmkONERROR(gckVIDMEM_HANDLE_Allocate(Kernel, node, &handle));
++
++ *Handle = handle;
++
++ gcmkFOOTER_ARG("*Handle=%d", *Handle);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (node != gcvNULL)
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ if (node->mapMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->mapMutex));
++ }
++#endif
++
++ if (node->mutex)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->mutex));
++ }
++
++ if (node->reference != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, node->reference));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVIDMEM_NODE_Dereference(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node
++ )
++{
++ gctINT32 oldValue = 0;
++ gctPOINTER database = Kernel->db->nameDatabase;
++ gctPOINTER mutex = Kernel->db->nameDatabaseMutex;
++
++ gcmkHEADER_ARG("Kernel=0x%X Node=0x%X", Kernel, Node);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++
++ gcmkVERIFY_OK(gckOS_AtomDecrement(Kernel->os, Node->reference, &oldValue));
++
++ if (oldValue == 1 && Node->name)
++ {
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Node->name));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++
++ if (oldValue == 1)
++ {
++ /* Free gcuVIDMEM_NODE. */
++ gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, Node->node));
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Node->reference));
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Node->mapMutex));
++#endif
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Node->mutex));
++ gcmkOS_SAFE_FREE(Kernel->os, Node);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_Name
++**
++**  Name a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctUINT32 Handle
++** Handle to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Name
++**          Pointer to a variable receiving a name which can be passed to another
++** process.
++*/
++gceSTATUS
++gckVIDMEM_NODE_Name(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ IN gctUINT32 * Name
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctUINT32 name = 0;
++ gctUINT32 processID = 0;
++ gctPOINTER database = Kernel->db->nameDatabase;
++ gctPOINTER mutex = Kernel->db->nameDatabaseMutex;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL referenced = gcvFALSE;
++ gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node));
++ referenced = gcvTRUE;
++
++ if (node->name == 0)
++ {
++ /* Name this node. */
++ gcmkONERROR(gckKERNEL_AllocateIntegerId(database, node, &name));
++ node->name = name;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++    if (node)
++    {
++        *Name = node->name;
++    }
++
++    gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++
++ gcmkFOOTER_ARG("*Name=%d", *Name);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (referenced)
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_Import
++**
++** Import a gckVIDMEM_NODE object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctUINT32 Name
++** Name of a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Handle
++**          Pointer to a variable receiving a handle representing this
++** gckVIDMEM_NODE in userspace.
++*/
++gceSTATUS
++gckVIDMEM_NODE_Import(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name,
++ IN gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctPOINTER database = Kernel->db->nameDatabase;
++ gctPOINTER mutex = Kernel->db->nameDatabaseMutex;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL referenced = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Lookup in database to get the node. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, (gctPOINTER *)&node));
++
++ /* Reference the node. */
++ gcmkONERROR(gckVIDMEM_NODE_Reference(Kernel, node));
++ referenced = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ acquired = gcvFALSE;
++
++ /* Allocate a handle for current process. */
++ gcmkONERROR(gckVIDMEM_HANDLE_Allocate(Kernel, node, Handle));
++
++ gcmkFOOTER_ARG("*Handle=%d", *Handle);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (referenced)
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++typedef struct _gcsVIDMEM_NODE_FDPRIVATE
++{
++ gcsFDPRIVATE base;
++ gckKERNEL kernel;
++ gckVIDMEM_NODE node;
++}
++gcsVIDMEM_NODE_FDPRIVATE;
++
++
++static gctINT
++_ReleaseFdPrivate(
++ gcsFDPRIVATE_PTR FdPrivate
++ )
++{
++ /* Cast private info. */
++ gcsVIDMEM_NODE_FDPRIVATE * private = (gcsVIDMEM_NODE_FDPRIVATE *) FdPrivate;
++
++ gckVIDMEM_NODE_Dereference(private->kernel, private->node);
++ gckOS_Free(private->kernel->os, private);
++
++ return 0;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_NODE_GetFd
++**
++** Attach a gckVIDMEM_NODE object to a native fd.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctUINT32 Handle
++** Handle to a gckVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++**      gctINT * Fd
++**          Pointer to a variable receiving a native fd from the OS.
++*/
++gceSTATUS
++gckVIDMEM_NODE_GetFd(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Handle,
++ OUT gctINT * Fd
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM_NODE node = gcvNULL;
++ gctBOOL referenced = gcvFALSE;
++ gcsVIDMEM_NODE_FDPRIVATE * fdPrivate = gcvNULL;
++ gcmkHEADER_ARG("Kernel=0x%X Handle=%d", Kernel, Handle);
++
++ /* Query and reference handle. */
++ gcmkONERROR(gckVIDMEM_HANDLE_LookupAndReference(Kernel, Handle, &node));
++ referenced = gcvTRUE;
++
++ /* Allocate memory for private info. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(gcsVIDMEM_NODE_FDPRIVATE),
++ (gctPOINTER *)&fdPrivate
++ ));
++
++ fdPrivate->base.release = _ReleaseFdPrivate;
++ fdPrivate->kernel = Kernel;
++ fdPrivate->node = node;
++
++ /* Allocated fd owns a reference. */
++ gcmkONERROR(gckOS_GetFd("vidmem", &fdPrivate->base, Fd));
++
++ gcmkFOOTER_ARG("*Fd=%d", *Fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (referenced)
++ {
++ gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, node));
++ }
++
++ if (fdPrivate)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, fdPrivate));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_base.h 2015-05-01 14:57:59.583427001 -0500
+@@ -0,0 +1,5520 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_base_h_
++#define __gc_hal_base_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++#include "gc_hal_dump.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckOS * gckOS;
++typedef struct _gcoHAL * gcoHAL;
++typedef struct _gcoOS * gcoOS;
++typedef struct _gco2D * gco2D;
++typedef struct gcsATOM * gcsATOM_PTR;
++
++#if gcdENABLE_3D
++typedef struct _gco3D * gco3D;
++typedef struct _gcoCL * gcoCL;
++typedef struct _gcsFAST_FLUSH * gcsFAST_FLUSH_PTR;
++#endif
++
++typedef struct _gcoSURF * gcoSURF;
++typedef struct _gcsSURF_INFO * gcsSURF_INFO_PTR;
++typedef struct _gcsSURF_NODE * gcsSURF_NODE_PTR;
++typedef struct _gcsSURF_FORMAT_INFO * gcsSURF_FORMAT_INFO_PTR;
++typedef struct _gcsPOINT * gcsPOINT_PTR;
++typedef struct _gcsSIZE * gcsSIZE_PTR;
++typedef struct _gcsRECT * gcsRECT_PTR;
++typedef struct _gcsBOUNDARY * gcsBOUNDARY_PTR;
++typedef struct _gcoDUMP * gcoDUMP;
++typedef struct _gcoHARDWARE * gcoHARDWARE;
++typedef union _gcuVIDMEM_NODE * gcuVIDMEM_NODE_PTR;
++typedef struct _gcsVIDMEM_NODE * gckVIDMEM_NODE;
++
++#if gcdENABLE_VG
++typedef struct _gcoVG * gcoVG;
++typedef struct _gcsCOMPLETION_SIGNAL * gcsCOMPLETION_SIGNAL_PTR;
++typedef struct _gcsCONTEXT_MAP * gcsCONTEXT_MAP_PTR;
++#else
++typedef void * gcoVG;
++#endif
++
++#if gcdSYNC
++typedef struct _gcoFENCE * gcoFENCE;
++typedef struct _gcsSYNC_CONTEXT * gcsSYNC_CONTEXT_PTR;
++#endif
++
++#if defined(ANDROID)
++typedef struct _gcoOS_SymbolsList gcoOS_SymbolsList;
++#endif
++
++/******************************************************************************\
++******************************* Process local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsPLS * gcsPLS_PTR;
++
++#if gcdENABLE_3D
++/******************************************************************************
++**
++** Patch defines which should be moved to a dedicated file later
++**
++** !!! ALWAYS ADD new IDs at the TAIL, otherwise it will break existing TRACE FILEs
++*******************************************************************************/
++typedef enum _gcePATCH_ID
++{
++ gcvPATCH_NOTINIT = -1,
++ gcvPATCH_INVALID = 0,
++
++#if gcdDEBUG_OPTION
++ gcvPATCH_DEBUG,
++#endif
++
++ gcvPATCH_GTFES30,
++ gcvPATCH_CTGL11,
++ gcvPATCH_CTGL20,
++ gcvPATCH_GLBM11,
++ gcvPATCH_GLBM21,
++ gcvPATCH_GLBM25,
++ gcvPATCH_GLBM27,
++ gcvPATCH_GLBMGUI,
++ gcvPATCH_GFXBENCH,
++ gcvPATCH_ANTUTU, /* Antutu 3.x */
++ gcvPATCH_ANTUTU4X, /* Antutu 4.x */
++ gcvPATCH_QUADRANT,
++ gcvPATCH_GPUBENCH,
++ gcvPATCH_DUOKAN,
++ gcvPATCH_GLOFTSXHM,
++ gcvPATCH_XRUNNER,
++ gcvPATCH_BUSPARKING3D,
++ gcvPATCH_SIEGECRAFT,
++ gcvPATCH_PREMIUM,
++ gcvPATCH_RACEILLEGAL,
++ gcvPATCH_MEGARUN,
++ gcvPATCH_BMGUI,
++ gcvPATCH_NENAMARK,
++ gcvPATCH_NENAMARK2,
++ gcvPATCH_FISHNOODLE,
++ gcvPATCH_MM06,
++ gcvPATCH_MM07,
++ gcvPATCH_BM21,
++ gcvPATCH_SMARTBENCH,
++ gcvPATCH_JPCT,
++ gcvPATCH_NEOCORE,
++ gcvPATCH_RTESTVA,
++ gcvPATCH_NBA2013,
++ gcvPATCH_BARDTALE,
++ gcvPATCH_F18,
++ gcvPATCH_CARPARK,
++ gcvPATCH_CARCHALLENGE,
++ gcvPATCH_HEROESCALL,
++ gcvPATCH_GLOFTF3HM,
++ gcvPATCH_CRAZYRACING,
++ gcvPATCH_FIREFOX,
++ gcvPATCH_CHROME,
++ gcvPATCH_MONOPOLY,
++ gcvPATCH_SNOWCOLD,
++ gcvPATCH_BM3,
++ gcvPATCH_BASEMARKX,
++ gcvPATCH_DEQP,
++ gcvPATCH_SF4,
++ gcePATCH_MGOHEAVEN2,
++ gcePATCH_SILIBILI,
++ gcePATCH_ELEMENTSDEF,
++ gcePATCH_GLOFTKRHM,
++ gcvPATCH_OCLCTS,
++ gcvPATCH_A8HP,
++ gcvPATCH_A8CN,
++ gcvPATCH_WISTONESG,
++ gcvPATCH_SPEEDRACE,
++ gcvPATCH_FSBHAWAIIF,
++ gcvPATCH_AIRNAVY,
++ gcvPATCH_F18NEW,
++ gcvPATCH_CKZOMBIES2,
++ gcvPATCH_EADGKEEPER,
++ gcvPATCH_BASEMARK2V2,
++ gcvPATCH_RIPTIDEGP2,
++ gcvPATCH_OESCTS,
++ gcvPATCH_GANGSTAR,
++ gcvPATCH_WHRKYZIXOVAN,
++ gcvPATCH_NAMESGAS,
++ gcvPATCH_AFTERBURNER,
++ gcvPATCH_UIMARK,
++ gcvPATCH_FM_OES_PLAYER,
++ gcvPATCH_SUMSUNG_BENCH,
++ gcvPATCH_ROCKSTAR_MAXPAYNE,
++ gcvPATCH_TITANPACKING,
++ gcvPATCH_BASEMARKOSIICN,
++ gcvPATCH_FRUITNINJA,
++#if defined(ANDROID)
++ gcePATCH_ANDROID_CTS_MEDIA_PRESENTATIONTIME,
++#endif
++ gcvPATCH_ANDROID_COMPOSITOR,
++ gcvPATCH_CTS_TEXTUREVIEW,
++ gcvPATCH_WATER2_CHUKONG,
++
++ gcvPATCH_COUNT
++} gcePATCH_ID;
++#endif /* gcdENABLE_3D */
++
++typedef void (* gctPLS_DESTRUCTOR) (
++ gcsPLS_PTR
++ );
++
++typedef struct _gcsPLS
++{
++ /* Global objects. */
++ gcoOS os;
++ gcoHAL hal;
++
++ /* Internal memory pool. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctPOINTER internalLogical;
++
++ /* External memory pool. */
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctPOINTER externalLogical;
++
++ /* Contiguous memory pool. */
++ gctSIZE_T contiguousSize;
++ gctPHYS_ADDR contiguousPhysical;
++ gctPOINTER contiguousLogical;
++
++ /* EGL-specific process-wide objects. */
++ gctPOINTER eglDisplayInfo;
++ gctPOINTER eglSurfaceInfo;
++ gceSURF_FORMAT eglConfigFormat;
++
++ /* PLS reference count */
++ gcsATOM_PTR reference;
++
++    /* ProcessID of the constructor process. */
++ gctUINT32 processID;
++
++    /* ThreadID of the constructor process. */
++ gctSIZE_T threadID;
++ /* Flag for calling module destructor. */
++ gctBOOL exiting;
++
++ gctBOOL bNeedSupportNP2Texture;
++
++ gctPLS_DESTRUCTOR destructor;
++    /* Mutex to guard PLS access. Currently it's for EGL.
++ ** We can use this mutex for every PLS access.
++ */
++ gctPOINTER accessLock;
++#if gcdENABLE_3D
++ /* Global patchID to overwrite the detection */
++ gcePATCH_ID patchID;
++#endif
++}
++gcsPLS;
++
++extern gcsPLS gcPLS;
++
++#if gcdENABLE_3D
++#define gcPLS_INITIALIZER \
++{ \
++ gcvNULL, /* gcoOS object. */ \
++ gcvNULL, /* gcoHAL object. */ \
++ 0, /* internalSize */ \
++ gcvNULL, /* internalPhysical */ \
++ gcvNULL, /* internalLogical */ \
++ 0, /* externalSize */ \
++ gcvNULL, /* externalPhysical */ \
++ gcvNULL, /* externalLogical */ \
++ 0, /* contiguousSize */ \
++ gcvNULL, /* contiguousPhysical */ \
++ gcvNULL, /* contiguousLogical */ \
++ gcvNULL, /* eglDisplayInfo */ \
++ gcvNULL, /* eglSurfaceInfo */ \
++ gcvSURF_A8R8G8B8,/* eglConfigFormat */ \
++ gcvNULL, /* reference */ \
++ 0, /* processID */ \
++ 0, /* threadID */ \
++ gcvFALSE, /* exiting */ \
++ gcvFALSE, /* Special flag for NP2 texture. */ \
++ gcvNULL, /* destructor */ \
++ gcvNULL, /* accessLock */ \
++ gcvPATCH_NOTINIT,/* global patchID */ \
++}
++#else
++#define gcPLS_INITIALIZER \
++{ \
++ gcvNULL, /* gcoOS object. */ \
++ gcvNULL, /* gcoHAL object. */ \
++ 0, /* internalSize */ \
++ gcvNULL, /* internalPhysical */ \
++ gcvNULL, /* internalLogical */ \
++ 0, /* externalSize */ \
++ gcvNULL, /* externalPhysical */ \
++ gcvNULL, /* externalLogical */ \
++ 0, /* contiguousSize */ \
++ gcvNULL, /* contiguousPhysical */ \
++ gcvNULL, /* contiguousLogical */ \
++ gcvNULL, /* eglDisplayInfo */ \
++ gcvNULL, /* eglSurfaceInfo */ \
++ gcvSURF_A8R8G8B8,/* eglConfigFormat */ \
++ gcvNULL, /* reference */ \
++ 0, /* processID */ \
++ 0, /* threadID */ \
++ gcvFALSE, /* exiting */ \
++ gcvFALSE, /* Special flag for NP2 texture. */ \
++ gcvNULL, /* destructor */ \
++ gcvNULL, /* accessLock */ \
++}
++#endif
++
++/******************************************************************************\
++******************************* Thread local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsTLS * gcsTLS_PTR;
++
++typedef void (* gctTLS_DESTRUCTOR) (
++ gcsTLS_PTR
++ );
++
++typedef struct _gcsTLS
++{
++ gceHARDWARE_TYPE currentType;
++
++    /* Current 3D hardware of this thread */
++ gcoHARDWARE currentHardware;
++
++ /* Default 3D hardware of this thread */
++ gcoHARDWARE defaultHardware;
++
++ /* Only for separated 3D and 2D */
++ gcoHARDWARE hardware2D;
++#if gcdENABLE_VG
++ gcoVGHARDWARE vg;
++ gcoVG engineVG;
++#endif /* gcdENABLE_VG */
++#if gcdENABLE_3D
++ gco3D engine3D;
++#endif
++#if gcdENABLE_2D
++ gco2D engine2D;
++#endif
++
++    /* Thread data. */
++ gctPOINTER context;
++    /* ES (including ES1 and ES2) client driver context which is the current state */
++ gctPOINTER esClientCtx;
++ gctTLS_DESTRUCTOR destructor;
++
++ gctBOOL copied;
++
++ /* libGAL.so handle */
++ gctHANDLE handle;
++
++    /* If true, do not release 2D engine and hardware in HAL layer */
++ gctBOOL release2DUpper;
++}
++gcsTLS;
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++typedef enum _gcePLS_VALUE
++{
++ gcePLS_VALUE_EGL_DISPLAY_INFO,
++ gcePLS_VALUE_EGL_SURFACE_INFO,
++ gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO,
++ gcePLS_VALUE_EGL_DESTRUCTOR_INFO,
++}
++gcePLS_VALUE;
++
++/* Video memory pool type. */
++typedef enum _gcePOOL
++{
++ gcvPOOL_UNKNOWN = 0,
++ gcvPOOL_DEFAULT,
++ gcvPOOL_LOCAL,
++ gcvPOOL_LOCAL_INTERNAL,
++ gcvPOOL_LOCAL_EXTERNAL,
++ gcvPOOL_UNIFIED,
++ gcvPOOL_SYSTEM,
++ gcvPOOL_VIRTUAL,
++ gcvPOOL_USER,
++ gcvPOOL_CONTIGUOUS,
++
++ gcvPOOL_NUMBER_OF_POOLS
++}
++gcePOOL;
++
++#if gcdENABLE_3D
++/* Blending functions. */
++typedef enum _gceBLEND_FUNCTION
++{
++ gcvBLEND_ZERO,
++ gcvBLEND_ONE,
++ gcvBLEND_SOURCE_COLOR,
++ gcvBLEND_INV_SOURCE_COLOR,
++ gcvBLEND_SOURCE_ALPHA,
++ gcvBLEND_INV_SOURCE_ALPHA,
++ gcvBLEND_TARGET_COLOR,
++ gcvBLEND_INV_TARGET_COLOR,
++ gcvBLEND_TARGET_ALPHA,
++ gcvBLEND_INV_TARGET_ALPHA,
++ gcvBLEND_SOURCE_ALPHA_SATURATE,
++ gcvBLEND_CONST_COLOR,
++ gcvBLEND_INV_CONST_COLOR,
++ gcvBLEND_CONST_ALPHA,
++ gcvBLEND_INV_CONST_ALPHA,
++}
++gceBLEND_FUNCTION;
++
++/* Blending modes. */
++typedef enum _gceBLEND_MODE
++{
++ gcvBLEND_ADD,
++ gcvBLEND_SUBTRACT,
++ gcvBLEND_REVERSE_SUBTRACT,
++ gcvBLEND_MIN,
++ gcvBLEND_MAX,
++}
++gceBLEND_MODE;
++
++/* Depth modes. */
++typedef enum _gceDEPTH_MODE
++{
++ gcvDEPTH_NONE,
++ gcvDEPTH_Z,
++ gcvDEPTH_W,
++}
++gceDEPTH_MODE;
++#endif /* gcdENABLE_3D */
++
++#if (gcdENABLE_3D || gcdENABLE_VG)
++/* API flags. */
++typedef enum _gceAPI
++{
++ gcvAPI_D3D = 1,
++ gcvAPI_OPENGL_ES11,
++ gcvAPI_OPENGL_ES20,
++ gcvAPI_OPENGL_ES30,
++ gcvAPI_OPENGL,
++ gcvAPI_OPENVG,
++ gcvAPI_OPENCL,
++}
++gceAPI;
++#endif
++
++
++typedef enum _gceWHERE
++{
++ gcvWHERE_COMMAND,
++ gcvWHERE_RASTER,
++ gcvWHERE_PIXEL,
++}
++gceWHERE;
++
++typedef enum _gceHOW
++{
++ gcvHOW_SEMAPHORE = 0x1,
++ gcvHOW_STALL = 0x2,
++ gcvHOW_SEMAPHORE_STALL = 0x3,
++}
++gceHOW;
++
++typedef enum _gceSignalHandlerType
++{
++ gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1,
++}
++gceSignalHandlerType;
++
++/* gcsHAL_Limits*/
++typedef struct _gcsHAL_LIMITS
++{
++ /* chip info */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 featureCount;
++ gctUINT32 *chipFeatures;
++
++ /* target caps */
++ gctUINT32 maxWidth;
++ gctUINT32 maxHeight;
++ gctUINT32 multiTargetCount;
++ gctUINT32 maxSamples;
++
++}gcsHAL_LIMITS;
++
++/******************************************************************************\
++*********** Generic Memory Allocation Optimization Using Containers ************
++\******************************************************************************/
++
++/* Generic container definition. */
++typedef struct _gcsCONTAINER_LINK * gcsCONTAINER_LINK_PTR;
++typedef struct _gcsCONTAINER_LINK
++{
++ /* Points to the next container. */
++ gcsCONTAINER_LINK_PTR next;
++}
++gcsCONTAINER_LINK;
++
++typedef struct _gcsCONTAINER_RECORD * gcsCONTAINER_RECORD_PTR;
++typedef struct _gcsCONTAINER_RECORD
++{
++ gcsCONTAINER_RECORD_PTR prev;
++ gcsCONTAINER_RECORD_PTR next;
++}
++gcsCONTAINER_RECORD;
++
++typedef struct _gcsCONTAINER * gcsCONTAINER_PTR;
++typedef struct _gcsCONTAINER
++{
++ gctUINT containerSize;
++ gctUINT recordSize;
++ gctUINT recordCount;
++ gcsCONTAINER_LINK_PTR containers;
++ gcsCONTAINER_RECORD freeList;
++ gcsCONTAINER_RECORD allocList;
++}
++gcsCONTAINER;
++
++gceSTATUS
++gcsCONTAINER_Construct(
++ IN gcsCONTAINER_PTR Container,
++ gctUINT RecordsPerContainer,
++ gctUINT RecordSize
++ );
++
++gceSTATUS
++gcsCONTAINER_Destroy(
++ IN gcsCONTAINER_PTR Container
++ );
++
++gceSTATUS
++gcsCONTAINER_AllocateRecord(
++ IN gcsCONTAINER_PTR Container,
++ OUT gctPOINTER * Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeRecord(
++ IN gcsCONTAINER_PTR Container,
++ IN gctPOINTER Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeAll(
++ IN gcsCONTAINER_PTR Container
++ );
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Construct a new gcoHAL object. */
++gceSTATUS
++gcoHAL_ConstructEx(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Destroy an gcoHAL object. */
++gceSTATUS
++gcoHAL_DestroyEx(
++ IN gcoHAL Hal
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoHAL_Construct(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoHAL_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Get HAL options */
++gceSTATUS
++gcoHAL_GetOption(
++ IN gcoHAL Hal,
++ IN gceOPTION Option
++ );
++
++gceSTATUS
++gcoHAL_FrameInfoOps(
++ IN gcoHAL Hal,
++ IN gceFRAMEINFO FrameInfo,
++ IN gceFRAMEINFO_OP Op,
++ IN OUT gctUINT * Val
++ );
++
++
++gceSTATUS
++gcoHAL_GetHardware(
++ IN gcoHAL Hal,
++ OUT gcoHARDWARE* Hw
++ );
++
++#if gcdENABLE_2D
++/* Get pointer to gco2D object. */
++gceSTATUS
++gcoHAL_Get2DEngine(
++ IN gcoHAL Hal,
++ OUT gco2D * Engine
++ );
++#endif
++
++#if gcdENABLE_3D
++gceSTATUS
++gcoHAL_GetSpecialHintData(
++ IN gcoHAL Hal,
++ OUT gctINT * Hint
++ );
++/*
++** Deprecated (don't use it); kept here for the external library (libgcu.so)
++*/
++gceSTATUS
++gcoHAL_Get3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++#endif /* gcdENABLE_3D */
++
++
++gceSTATUS
++gcoHAL_GetProductName(
++ IN gcoHAL Hal,
++ OUT gctSTRING *ProductName
++ );
++
++gceSTATUS
++gcoHAL_SetFscaleValue(
++ IN gctUINT FscaleValue
++ );
++
++gceSTATUS
++gcoHAL_GetFscaleValue(
++ OUT gctUINT * FscaleValue,
++ OUT gctUINT * MinFscaleValue,
++ OUT gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gcoHAL_SetBltNP2Texture(
++ gctBOOL enable
++ );
++
++gceSTATUS
++gcoHAL_NameVideoMemory(
++ IN gctUINT32 Handle,
++ OUT gctUINT32 * Name
++ );
++
++gceSTATUS
++gcoHAL_ImportVideoMemory(
++ IN gctUINT32 Name,
++ OUT gctUINT32 * Handle
++ );
++
++gceSTATUS
++gcoHAL_GetVideoMemoryFd(
++ IN gctUINT32 Handle,
++ OUT gctINT * Fd
++ );
++
++/* Verify whether the specified feature is available in hardware. */
++gceSTATUS
++gcoHAL_IsFeatureAvailable(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gcoHAL_IsSwwaNeeded(
++ IN gcoHAL Hal,
++ IN gceSWWA Swwa
++ );
++
++gceSTATUS
++gcoHAL_IsFeatureAvailable1(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gcoHAL_QueryChipIdentity(
++ IN gcoHAL Hal,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the minor features of the hardware. */
++gceSTATUS gcoHAL_QueryChipMinorFeatures(
++ IN gcoHAL Hal,
++ OUT gctUINT32* NumFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++gctINT32
++gcoOS_EndRecordAllocation(void);
++void
++gcoOS_RecordAllocation(void);
++void
++gcoOS_AddRecordAllocation(gctSIZE_T Size);
++
++/* Query the amount of video memory. */
++gceSTATUS
++gcoHAL_QueryVideoMemory(
++ IN gcoHAL Hal,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Map video memory. */
++gceSTATUS
++gcoHAL_MapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap video memory. */
++gceSTATUS
++gcoHAL_UnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Schedule an unmap of a buffer mapped through its physical address. */
++gceSTATUS
++gcoHAL_ScheduleUnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate video memory. */
++gceSTATUS
++gcoOS_AllocateVideoMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical,
++ OUT gctPOINTER * Handle
++ );
++
++/* Free video memory. */
++gceSTATUS
++gcoOS_FreeVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle
++ );
++
++/* Lock video memory. */
++gceSTATUS
++gcoOS_LockVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoHAL_MapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR GPUAddress
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoHAL_UnmapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 GPUAddress
++ );
++
++/* Schedule an unmap of a user buffer using event mechanism. */
++gceSTATUS
++gcoHAL_ScheduleUnmapUserMemory(
++ IN gcoHAL Hal,
++ IN gctPOINTER Info,
++ IN gctSIZE_T Size,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++/* Commit the current command buffer. */
++gceSTATUS
++gcoHAL_Commit(
++ IN gcoHAL Hal,
++ IN gctBOOL Stall
++ );
++
++#if gcdENABLE_3D
++/* Send fence command. */
++gceSTATUS
++gcoHAL_SendFence(
++ IN gcoHAL Hal
++ );
++#endif /* gcdENABLE_3D */
++
++/* Query the tile capabilities. */
++gceSTATUS
++gcoHAL_QueryTiled(
++ IN gcoHAL Hal,
++ OUT gctINT32 * TileWidth2D,
++ OUT gctINT32 * TileHeight2D,
++ OUT gctINT32 * TileWidth3D,
++ OUT gctINT32 * TileHeight3D
++ );
++
++gceSTATUS
++gcoHAL_Compact(
++ IN gcoHAL Hal
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoHAL_ProfileStart(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_ProfileEnd(
++ IN gcoHAL Hal,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++/* Power Management */
++gceSTATUS
++gcoHAL_SetPowerManagementState(
++ IN gcoHAL Hal,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gcoHAL_QueryPowerManagementState(
++ IN gcoHAL Hal,
++ OUT gceCHIPPOWERSTATE *State
++ );
++
++/* Set the filter type for filter blit. */
++gceSTATUS
++gcoHAL_SetFilterType(
++ IN gcoHAL Hal,
++ IN gceFILTER_TYPE FilterType
++ );
++
++gceSTATUS
++gcoHAL_GetDump(
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++#if gcdENABLE_3D
++gceSTATUS
++gcoHAL_SetPatchID(
++ IN gcoHAL Hal,
++ IN gcePATCH_ID PatchID
++ );
++
++/* Get Patch ID based on process name */
++gceSTATUS
++gcoHAL_GetPatchID(
++ IN gcoHAL Hal,
++ OUT gcePATCH_ID * PatchID
++ );
++
++gceSTATUS
++gcoHAL_SetGlobalPatchID(
++ IN gcoHAL Hal,
++ IN gcePATCH_ID PatchID
++ );
++#endif /* gcdENABLE_3D */
++/* Call the kernel HAL layer. */
++gceSTATUS
++gcoHAL_Call(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Schedule an event. */
++gceSTATUS
++gcoHAL_ScheduleEvent(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Destroy a surface. */
++gceSTATUS
++gcoHAL_DestroySurface(
++ IN gcoHAL Hal,
++ IN gcoSURF Surface
++ );
++
++/* Request a start/stop timestamp. */
++gceSTATUS
++gcoHAL_SetTimer(
++ IN gcoHAL Hal,
++ IN gctUINT32 Index,
++ IN gctBOOL Start
++ );
++
++/* Get Time delta from a Timer in microseconds. */
++gceSTATUS
++gcoHAL_GetTimerTime(
++ IN gcoHAL Hal,
++ IN gctUINT32 Timer,
++ OUT gctINT32_PTR TimeDelta
++ );
++
++/* Set timeout value. */
++gceSTATUS
++gcoHAL_SetTimeOut(
++ IN gcoHAL Hal,
++ IN gctUINT32 timeOut
++ );
++
++gceSTATUS
++gcoHAL_SetHardwareType(
++ IN gcoHAL Hal,
++ IN gceHARDWARE_TYPE HardwardType
++ );
++
++gceSTATUS
++gcoHAL_GetHardwareType(
++ IN gcoHAL Hal,
++ OUT gceHARDWARE_TYPE * HardwardType
++ );
++
++gceSTATUS
++gcoHAL_QueryChipCount(
++ IN gcoHAL Hal,
++ OUT gctINT32 * Count
++ );
++
++gceSTATUS
++gcoHAL_Query3DCoreCount(
++ IN gcoHAL Hal,
++ OUT gctUINT32 *Count
++ );
++
++gceSTATUS
++gcoHAL_QuerySeparated2D(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_Is3DAvailable(
++ IN gcoHAL Hal
++ );
++
++/* Get pointer to gcoVG object. */
++gceSTATUS
++gcoHAL_GetVGEngine(
++ IN gcoHAL Hal,
++ OUT gcoVG * Engine
++ );
++
++gceSTATUS
++gcoHAL_QueryChipLimits(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gctINT32 Mask,
++ OUT gcsHAL_LIMITS *Limits);
++
++gceSTATUS
++gcoHAL_QueryChipFeature(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gctINT32 Mask,
++ IN gceFEATURE Feature);
++
++/*----------------------------------------------------------------------------*/
++/*----- Shared Buffer --------------------------------------------------------*/
++
++/* Create shared buffer. */
++gceSTATUS
++gcoHAL_CreateShBuffer(
++ IN gctUINT32 Size,
++ OUT gctSHBUF * ShBuf
++ );
++
++/* Destroy shared buffer. */
++gceSTATUS
++gcoHAL_DestroyShBuffer(
++ IN gctSHBUF ShBuf
++ );
++
++/* Map shared buffer to current process. */
++gceSTATUS
++gcoHAL_MapShBuffer(
++ IN gctSHBUF ShBuf
++ );
++
++/* Write user data to shared buffer. */
++gceSTATUS
++gcoHAL_WriteShBuffer(
++ IN gctSHBUF ShBuf,
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 ByteCount
++ );
++
++/* Read user data from shared buffer. */
++gceSTATUS
++gcoHAL_ReadShBuffer(
++ IN gctSHBUF ShBuf,
++ IN gctPOINTER Data,
++ IN gctUINT32 BytesCount,
++ OUT gctUINT32 * BytesRead
++ );
++
++/* Config power management to be enabled or disabled. */
++gceSTATUS
++gcoHAL_ConfigPowerManagement(
++ IN gctBOOL Enable
++ );
++
++#if gcdENABLE_3D || gcdENABLE_VG
++/* Query the target capabilities. */
++gceSTATUS
++gcoHAL_QueryTargetCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MultiTargetCount,
++ OUT gctUINT * MaxSamples
++ );
++#endif
++
++/******************************************************************************\
++********************************** gcoOS Object *********************************
++\******************************************************************************/
++/* Lock PLS access */
++gceSTATUS
++gcoOS_LockPLS(
++ void
++ );
++
++/* Unlock PLS access */
++gceSTATUS
++gcoOS_UnLockPLS(
++ void
++ );
++
++/* Get PLS value for given key */
++gctPOINTER
++gcoOS_GetPLSValue(
++ IN gcePLS_VALUE key
++ );
++
++/* Set PLS value of a given key */
++void
++gcoOS_SetPLSValue(
++ IN gcePLS_VALUE key,
++ OUT gctPOINTER value
++ );
++
++/* Get access to the thread local storage. */
++gceSTATUS
++gcoOS_GetTLS(
++ OUT gcsTLS_PTR * TLS
++ );
++
++/* Copy the TLS from a source thread. */
++gceSTATUS gcoOS_CopyTLS(IN gcsTLS_PTR Source);
++
++/* Destroy the objects associated with the current thread. */
++void
++gcoOS_FreeThreadData(
++ void
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoOS_Construct(
++ IN gctPOINTER Context,
++ OUT gcoOS * Os
++ );
++
++/* Empty function for compatibility. */
++gceSTATUS
++gcoOS_Destroy(
++ IN gcoOS Os
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gcoOS_GetBaseAddress(
++ IN gcoOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gcoOS_Allocate(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Get allocated memory size. */
++gceSTATUS
++gcoOS_GetMemorySize(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gcoOS_Free(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateSharedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeSharedMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gcoOS_AllocateContiguous(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gcoOS_FreeContiguous(
++ IN gcoOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemoryEx(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoOS_UnmapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/* Device I/O Control call to the kernel HAL layer. */
++gceSTATUS
++gcoOS_DeviceControl(
++ IN gcoOS Os,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ IN gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/* Allocate non paged memory. */
++gceSTATUS
++gcoOS_AllocateNonPagedMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non paged memory. */
++gceSTATUS
++gcoOS_FreeNonPagedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++#define gcmOS_SAFE_FREE(os, mem) \
++ gcoOS_Free(os, mem); \
++ mem = gcvNULL
++
++#define gcmOS_SAFE_FREE_SHARED_MEMORY(os, mem) \
++ gcoOS_FreeSharedMemory(os, mem); \
++ mem = gcvNULL
++
++#define gcmkOS_SAFE_FREE(os, mem) \
++ gckOS_Free(os, mem); \
++ mem = gcvNULL
++
++typedef enum _gceFILE_MODE
++{
++ gcvFILE_CREATE = 0,
++ gcvFILE_APPEND,
++ gcvFILE_READ,
++ gcvFILE_CREATETEXT,
++ gcvFILE_APPENDTEXT,
++ gcvFILE_READTEXT,
++}
++gceFILE_MODE;
++
++/* Open a file. */
++gceSTATUS
++gcoOS_Open(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ IN gceFILE_MODE Mode,
++ OUT gctFILE * File
++ );
++
++/* Close a file. */
++gceSTATUS
++gcoOS_Close(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Read data from a file. */
++gceSTATUS
++gcoOS_Read(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T * ByteRead
++ );
++
++/* Write data to a file. */
++gceSTATUS
++gcoOS_Write(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data to a file. */
++gceSTATUS
++gcoOS_Flush(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Close a file descriptor. */
++gceSTATUS
++gcoOS_CloseFD(
++ IN gcoOS Os,
++ IN gctINT FD
++ );
++
++/* Dup file descriptor to another. */
++gceSTATUS
++gcoOS_DupFD(
++ IN gcoOS Os,
++ IN gctINT FD,
++ OUT gctINT * FD2
++ );
++
++/* Create an endpoint for communication. */
++gceSTATUS
++gcoOS_Socket(
++ IN gcoOS Os,
++ IN gctINT Domain,
++ IN gctINT Type,
++ IN gctINT Protocol,
++ OUT gctINT *SockFd
++ );
++
++/* Close a socket. */
++gceSTATUS
++gcoOS_CloseSocket(
++ IN gcoOS Os,
++ IN gctINT SockFd
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_Connect(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctCONST_POINTER HostName,
++ IN gctUINT Port);
++
++/* Shut down part of connection on a socket. */
++gceSTATUS
++gcoOS_Shutdown(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT How
++ );
++
++/* Send a message on a socket. */
++gceSTATUS
++gcoOS_Send(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data,
++ IN gctINT Flags
++ );
++
++/* Wait until data can be sent on the socket. */
++gceSTATUS
++gcoOS_WaitForSend(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT Seconds,
++ IN gctINT MicroSeconds);
++
++/* Get environment variable value. */
++gceSTATUS
++gcoOS_GetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ OUT gctSTRING * Value
++ );
++
++/* Set environment variable value. */
++gceSTATUS
++gcoOS_SetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ IN gctSTRING Value
++ );
++
++/* Get current working directory. */
++gceSTATUS
++gcoOS_GetCwd(
++ IN gcoOS Os,
++ IN gctINT SizeInBytes,
++ OUT gctSTRING Buffer
++ );
++
++/* Get file status info. */
++gceSTATUS
++gcoOS_Stat(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ OUT gctPOINTER Buffer
++ );
++
++typedef enum _gceFILE_WHENCE
++{
++ gcvFILE_SEEK_SET,
++ gcvFILE_SEEK_CUR,
++ gcvFILE_SEEK_END
++}
++gceFILE_WHENCE;
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_Seek(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Offset,
++ IN gceFILE_WHENCE Whence
++ );
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_SetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Position
++ );
++
++/* Get the current position of a file. */
++gceSTATUS
++gcoOS_GetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ OUT gctUINT32 * Position
++ );
++
++/* Same as strstr. */
++gceSTATUS
++gcoOS_StrStr(
++ IN gctCONST_STRING String,
++ IN gctCONST_STRING SubString,
++ OUT gctSTRING * Output
++ );
++
++/* Find the last occurrence of a character inside a string. */
++gceSTATUS
++gcoOS_StrFindReverse(
++ IN gctCONST_STRING String,
++ IN gctINT8 Character,
++ OUT gctSTRING * Output
++ );
++
++gceSTATUS
++gcoOS_StrDup(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ OUT gctSTRING * Target
++ );
++
++/* Copy a string. */
++gceSTATUS
++gcoOS_StrCopySafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Append a string. */
++gceSTATUS
++gcoOS_StrCatSafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Compare two strings. */
++gceSTATUS
++gcoOS_StrCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2
++ );
++
++/* Compare characters of two strings. */
++gceSTATUS
++gcoOS_StrNCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2,
++ IN gctSIZE_T Count
++ );
++
++/* Convert string to float. */
++gceSTATUS
++gcoOS_StrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert hex string to integer. */
++gceSTATUS gcoOS_HexStrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++/* Convert hex string to float. */
++gceSTATUS
++gcoOS_HexStrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert string to integer. */
++gceSTATUS
++gcoOS_StrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++gceSTATUS
++gcoOS_MemCmp(
++ IN gctCONST_POINTER Memory1,
++ IN gctCONST_POINTER Memory2,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_PrintStrSafe(
++ OUT gctSTRING String,
++ IN gctSIZE_T StringSize,
++ IN OUT gctUINT * Offset,
++ IN gctCONST_STRING Format,
++ ...
++ );
++
++gceSTATUS
++gcoOS_LoadLibrary(
++ IN gcoOS Os,
++ IN gctCONST_STRING Library,
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeLibrary(
++ IN gcoOS Os,
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_GetProcAddress(
++ IN gcoOS Os,
++ IN gctHANDLE Handle,
++ IN gctCONST_STRING Name,
++ OUT gctPOINTER * Function
++ );
++
++gceSTATUS
++gcoOS_Compact(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_AddSignalHandler (
++ IN gceSignalHandlerType SignalHandlerType
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoOS_ProfileStart(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_ProfileEnd(
++ IN gcoOS Os,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_SetProfileSetting(
++ IN gcoOS Os,
++ IN gctBOOL Enable,
++ IN gctCONST_STRING FileName
++ );
++#endif
++
++/* Query the video memory. */
++gceSTATUS
++gcoOS_QueryVideoMemory(
++ IN gcoOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Detect if the process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByNamePid(
++ IN gctCONST_STRING Name,
++ IN gctHANDLE Pid
++ );
++
++/* Detect if the current process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByName(
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gcoOS_DetectProcessByEncryptedName(
++ IN gctCONST_STRING Name
++ );
++
++#if defined(ANDROID)
++gceSTATUS
++gcoOS_DetectProgrameByEncryptedSymbols(
++ IN gcoOS_SymbolsList Symbols
++ );
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*----- Atoms ----------------------------------------------------------------*/
++
++/* Construct an atom. */
++gceSTATUS
++gcoOS_AtomConstruct(
++ IN gcoOS Os,
++ OUT gcsATOM_PTR * Atom
++ );
++
++/* Destroy an atom. */
++gceSTATUS
++gcoOS_AtomDestroy(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom
++ );
++
++/* Get the 32-bit value protected by an atom. */
++gceSTATUS
++gcoOS_AtomGet(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/* Set the 32-bit value protected by an atom. */
++gceSTATUS
++gcoOS_AtomSet(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ IN gctINT32 Value
++ );
++
++/* Increment an atom. */
++gceSTATUS
++gcoOS_AtomIncrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++/* Decrement an atom. */
++gceSTATUS
++gcoOS_AtomDecrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++gctHANDLE
++gcoOS_GetCurrentProcessID(
++ void
++ );
++
++gctHANDLE
++gcoOS_GetCurrentThreadID(
++ void
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Time -----------------------------------------------------------------*/
++
++/* Get the number of milliseconds since the system started. */
++gctUINT32
++gcoOS_GetTicks(
++ void
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gcoOS_GetTime(
++ gctUINT64_PTR Time
++ );
++
++/* Get CPU usage in microseconds. */
++gceSTATUS
++gcoOS_GetCPUTime(
++ gctUINT64_PTR CPUTime
++ );
++
++/* Get memory usage. */
++gceSTATUS
++gcoOS_GetMemoryUsage(
++ gctUINT32_PTR MaxRSS,
++ gctUINT32_PTR IxRSS,
++ gctUINT32_PTR IdRSS,
++ gctUINT32_PTR IsRSS
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gcoOS_Delay(
++ IN gcoOS Os,
++ IN gctUINT32 Delay
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Threads --------------------------------------------------------------*/
++
++#ifdef _WIN32
++/* Cannot include windows.h here becuase "near" and "far"
++ * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h.
++ * So, use the real value of DWORD and WINAPI, instead.
++ * DWORD is unsigned long, and WINAPI is __stdcall.
++ * If these two are change in WinDef.h, the following two typdefs
++ * need to be changed, too.
++ */
++typedef unsigned long gctTHREAD_RETURN;
++typedef unsigned long (__stdcall * gcTHREAD_ROUTINE)(void * Argument);
++#else
++typedef void * gctTHREAD_RETURN;
++typedef void * (* gcTHREAD_ROUTINE)(void *);
++#endif
++
++/* Create a new thread. */
++gceSTATUS
++gcoOS_CreateThread(
++ IN gcoOS Os,
++ IN gcTHREAD_ROUTINE Worker,
++ IN gctPOINTER Argument,
++ OUT gctPOINTER * Thread
++ );
++
++/* Close a thread. */
++gceSTATUS
++gcoOS_CloseThread(
++ IN gcoOS Os,
++ IN gctPOINTER Thread
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Mutexes --------------------------------------------------------------*/
++
++/* Create a new mutex. */
++gceSTATUS
++gcoOS_CreateMutex(
++ IN gcoOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gcoOS_DeleteMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gcoOS_AcquireMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gcoOS_ReleaseMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Signals --------------------------------------------------------------*/
++
++/* Create a signal. */
++gceSTATUS
++gcoOS_CreateSignal(
++ IN gcoOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gcoOS_DestroySignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gcoOS_Signal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gcoOS_WaitSignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a signal from another process */
++gceSTATUS
++gcoOS_MapSignal(
++ IN gctSIGNAL RemoteSignal,
++ OUT gctSIGNAL * LocalSignal
++ );
++
++/* Unmap a signal mapped from another process */
++gceSTATUS
++gcoOS_UnmapSignal(
++ IN gctSIGNAL Signal
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Android Native Fence -------------------------------------------------*/
++
++/* Create sync point. */
++gceSTATUS
++gcoOS_CreateSyncPoint(
++ IN gcoOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++/* Destroy sync point. */
++gceSTATUS
++gcoOS_DestroySyncPoint(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++/* Create native fence. */
++gceSTATUS
++gcoOS_CreateNativeFence(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++/* Wait on native fence. */
++gceSTATUS
++gcoOS_WaitNativeFence(
++ IN gcoOS Os,
++ IN gctINT FenceFD,
++ IN gctUINT32 Timeout
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Memory Access and Cache ----------------------------------------------*/
++
++/* Write a register. */
++gceSTATUS
++gcoOS_WriteRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Read a register. */
++gceSTATUS
++gcoOS_ReadRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gcoOS_CacheClean(
++ IN gcoOS Os,
++ IN gctUINT32 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheFlush(
++ IN gcoOS Os,
++ IN gctUINT32 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheInvalidate(
++ IN gcoOS Os,
++ IN gctUINT32 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_MemoryBarrier(
++ IN gcoOS Os,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gcoOS_CPUPhysicalToGPUPhysical(
++ IN gctUINT32 CPUPhysical,
++ OUT gctUINT32_PTR GPUPhysical
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ );
++
++gceSTATUS
++gcoOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gcoOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++#define _gcmPROFILE_INIT(prefix, freq, start) \
++ do { \
++ prefix ## OS_QueryProfileTickRate(&(freq)); \
++ prefix ## OS_GetProfileTick(&(start)); \
++ } while (gcvFALSE)
++
++#define _gcmPROFILE_QUERY(prefix, start, ticks) \
++ do { \
++ prefix ## OS_GetProfileTick(&(ticks)); \
++ (ticks) = ((ticks) > (start)) ? ((ticks) - (start)) \
++ : (~0ull - (start) + (ticks) + 1); \
++ } while (gcvFALSE)
++
++#if gcdENABLE_PROFILING
++# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start)
++# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks)
++# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start)
++# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks)
++# define gcmPROFILE_ONLY(x) x
++# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ONLY(x) x
++# define gcmPROFILE_DECLARE_ELSE(x) typedef x
++#else
++# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_ELSE(x) x
++# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ELSE(x) x
++#endif
++
++/*******************************************************************************
++** gcoMATH object
++*/
++
++#define gcdPI 3.14159265358979323846f
++
++/* Kernel. */
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++/* User. */
++gctUINT32
++gcoMATH_Log2in5dot5(
++ IN gctINT X
++ );
++
++
++gctFLOAT
++gcoMATH_UIntAsFloat(
++ IN gctUINT32 X
++ );
++
++gctUINT32
++gcoMATH_FloatAsUInt(
++ IN gctFLOAT X
++ );
++
++gctBOOL
++gcoMATH_CompareEqualF(
++ IN gctFLOAT X,
++ IN gctFLOAT Y
++ );
++
++gctUINT16
++gcoMATH_UInt8AsFloat16(
++ IN gctUINT8 X
++ );
++
++gctUINT32
++gcoMATH_Float16ToFloat(
++ IN gctUINT16 In
++ );
++
++gctUINT16
++gcoMATH_FloatToFloat16(
++ IN gctUINT32 In
++ );
++
++gctUINT32
++gcoMATH_Float11ToFloat(
++ IN gctUINT32 In
++ );
++
++gctUINT16
++gcoMATH_FloatToFloat11(
++ IN gctUINT32 In
++ );
++
++gctUINT32
++gcoMATH_Float10ToFloat(
++ IN gctUINT32 In
++ );
++
++gctUINT16
++gcoMATH_FloatToFloat10(
++ IN gctUINT32 In
++ );
++
++gctUINT32
++gcoMATH_Float14ToFloat(
++ IN gctUINT16 In
++ );
++
++/******************************************************************************\
++**************************** Coordinate Structures *****************************
++\******************************************************************************/
++
++typedef struct _gcsPOINT
++{
++ gctINT32 x;
++ gctINT32 y;
++}
++gcsPOINT;
++
++typedef struct _gcsSIZE
++{
++ gctINT32 width;
++ gctINT32 height;
++}
++gcsSIZE;
++
++typedef struct _gcsRECT
++{
++ gctINT32 left;
++ gctINT32 top;
++ gctINT32 right;
++ gctINT32 bottom;
++}
++gcsRECT;
++
++typedef union _gcsPIXEL
++{
++ struct
++ {
++ gctFLOAT r, g, b, a;
++ gctFLOAT d, s;
++ } pf;
++
++ struct
++ {
++ gctINT32 r, g, b, a;
++ gctINT32 d, s;
++ } pi;
++
++ struct
++ {
++ gctUINT32 r, g, b, a;
++ gctUINT32 d, s;
++ } pui;
++
++} gcsPIXEL;
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- gcoSURF Common ------------------------------*/
++
++/* Color format classes. */
++typedef enum _gceFORMAT_CLASS
++{
++ gcvFORMAT_CLASS_RGBA = 4500,
++ gcvFORMAT_CLASS_YUV,
++ gcvFORMAT_CLASS_INDEX,
++ gcvFORMAT_CLASS_LUMINANCE,
++ gcvFORMAT_CLASS_BUMP,
++ gcvFORMAT_CLASS_DEPTH,
++ gcvFORMAT_CLASS_ASTC,
++ gcvFORMAT_CLASS_OTHER
++}
++gceFORMAT_CLASS;
++
++/* Color format data type */
++typedef enum _gceFORMAT_DATATYPE
++{
++ gcvFORMAT_DATATYPE_UNSIGNED_NORMALIZED,
++ gcvFORMAT_DATATYPE_SIGNED_NORMALIZED,
++ gcvFORMAT_DATATYPE_UNSIGNED_INTEGER,
++ gcvFORMAT_DATATYPE_SIGNED_INTEGER,
++ gcvFORMAT_DATATYPE_FLOAT16,
++ gcvFORMAT_DATATYPE_FLOAT32,
++ gcvFORMAT_DATATYPE_FLOAT_E5B9G9R9,
++ gcvFORMAT_DATATYPE_FLOAT_B10G11R11F,
++ gcvFORMAT_DATATYPE_INDEX,
++ gcvFORMAT_DATATYPE_SRGB,
++ gcvFORMAT_DATATYPE_FLOAT32_UINT,
++}
++gceFORMAT_DATATYPE;
++
++/* Special enums for width field in gcsFORMAT_COMPONENT. */
++typedef enum _gceCOMPONENT_CONTROL
++{
++ gcvCOMPONENT_NOTPRESENT = 0x00,
++ gcvCOMPONENT_DONTCARE = 0x80,
++ gcvCOMPONENT_WIDTHMASK = 0x7F,
++ gcvCOMPONENT_ODD = 0x80
++}
++gceCOMPONENT_CONTROL;
++
++/* Color format component parameters. */
++typedef struct _gcsFORMAT_COMPONENT
++{
++ gctUINT8 start;
++ gctUINT8 width;
++}
++gcsFORMAT_COMPONENT;
++
++/* RGBA color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_RGBA
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT red;
++ gcsFORMAT_COMPONENT green;
++ gcsFORMAT_COMPONENT blue;
++}
++gcsFORMAT_CLASS_TYPE_RGBA;
++
++/* YUV color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_YUV
++{
++ gcsFORMAT_COMPONENT y;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT v;
++}
++gcsFORMAT_CLASS_TYPE_YUV;
++
++/* Index color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_INDEX
++{
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_INDEX;
++
++/* Luminance color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_LUMINANCE;
++
++/* Bump map color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_BUMP
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT l;
++ gcsFORMAT_COMPONENT v;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT q;
++ gcsFORMAT_COMPONENT w;
++}
++gcsFORMAT_CLASS_TYPE_BUMP;
++
++/* Depth and stencil format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH
++{
++ gcsFORMAT_COMPONENT depth;
++ gcsFORMAT_COMPONENT stencil;
++}
++gcsFORMAT_CLASS_TYPE_DEPTH;
++
++typedef union _gcuPIXEL_FORMAT_CLASS
++{
++ gcsFORMAT_CLASS_TYPE_BUMP bump;
++ gcsFORMAT_CLASS_TYPE_RGBA rgba;
++ gcsFORMAT_CLASS_TYPE_YUV yuv;
++ gcsFORMAT_CLASS_TYPE_LUMINANCE lum;
++ gcsFORMAT_CLASS_TYPE_INDEX index;
++ gcsFORMAT_CLASS_TYPE_DEPTH depth;
++}
++gcuPIXEL_FORMAT_CLASS;
++
++/* Format parameters. */
++typedef struct _gcsSURF_FORMAT_INFO
++{
++ /* Name of the format */
++ gctCONST_STRING formatName;
++
++ /* Format code and class. */
++ gceSURF_FORMAT format;
++ gceFORMAT_CLASS fmtClass;
++
++ /* Format data type */
++ gceFORMAT_DATATYPE fmtDataType;
++
++ /* The size of one pixel in bits. */
++ gctUINT8 bitsPerPixel;
++
++ /* Pixel block dimensions. */
++ gctUINT blockWidth;
++ gctUINT blockHeight;
++
++ /* Pixel block size in bits. */
++ gctUINT blockSize;
++
++ /* Some formats are larger than what the GPU can support. */
++ /* These formats are read in the number of layers specified. */
++ gctUINT8 layers;
++
++    /* The format is faked and software will interpret it differently
++    ** from the HW. Most of these formats are not blendable (PE) or
++    ** filterable (TX).
++    */
++ gctBOOL fakedFormat;
++
++    /* Some formats have two neighbouring pixels interleaved together.  */
++    /* To describe such a format, set the flag to 1 and add another     */
++    /* record like this one describing the odd pixel format.            */
++ gctBOOL interleaved;
++
++ /* sRGB format. */
++ gctBOOL sRGB;
++
++ /* Format components. */
++ gcuPIXEL_FORMAT_CLASS u;
++
++ /* Format components. */
++ gcuPIXEL_FORMAT_CLASS uOdd;
++
++ /* Render format. */
++ gceSURF_FORMAT closestRenderFormat;
++ /*gctCLOSEST_FORMAT dynamicClosestRenderFormat;*/
++ gctUINT renderFormat;
++ const gceTEXTURE_SWIZZLE * pixelSwizzle;
++
++ /* Texture format. */
++ gceSURF_FORMAT closestTXFormat;
++ gctUINT txFormat;
++ const gceTEXTURE_SWIZZLE * txSwizzle;
++ gctBOOL txIntFilter;
++}
++gcsSURF_FORMAT_INFO;
++
++/* Frame buffer information. */
++typedef struct _gcsSURF_FRAMEBUFFER
++{
++ gctPOINTER logical;
++ gctUINT width, height;
++ gctINT stride;
++ gceSURF_FORMAT format;
++}
++gcsSURF_FRAMEBUFFER;
++
++/* Generic pixel component descriptors. */
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX;
++
++typedef enum _gceORIENTATION
++{
++ gcvORIENTATION_TOP_BOTTOM,
++ gcvORIENTATION_BOTTOM_TOP,
++}
++gceORIENTATION;
++
++
++/* Construct a new gcoSURF object. */
++gceSTATUS
++gcoSURF_Construct(
++ IN gcoHAL Hal,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++/* Destroy a gcoSURF object. */
++gceSTATUS
++gcoSURF_Destroy(
++ IN gcoSURF Surface
++ );
++
++/* Map user-allocated surface. */
++gceSTATUS
++gcoSURF_MapUserSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Wrap a surface with a known logical/GPU address. */
++gceSTATUS
++gcoSURF_WrapSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++
++/* Query vid mem node info. */
++gceSTATUS
++gcoSURF_QueryVidMemNode(
++ IN gcoSURF Surface,
++ OUT gctUINT32 * Node,
++ OUT gcePOOL * Pool,
++ OUT gctSIZE_T_PTR Bytes
++ );
++
++/* Set the color type of the surface. */
++gceSTATUS
++gcoSURF_SetColorType(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_TYPE ColorType
++ );
++
++/* Get the color type of the surface. */
++gceSTATUS
++gcoSURF_GetColorType(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_TYPE *ColorType
++ );
++
++/* Set the color space of the surface. */
++gceSTATUS
++gcoSURF_SetColorSpace(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_SPACE ColorSpace
++ );
++
++/* Get the color space of the surface. */
++gceSTATUS
++gcoSURF_GetColorSpace(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_SPACE *ColorSpace
++ );
++
++
++/* Set the surface rotation angle. */
++gceSTATUS
++gcoSURF_SetRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_IsValid(
++ IN gcoSURF Surface
++ );
++
++#if gcdENABLE_3D
++/* Verify and return the state of the tile status mechanism. */
++gceSTATUS
++gcoSURF_IsTileStatusSupported(
++ IN gcoSURF Surface
++ );
++
++/* Verify if surface has tile status enabled. */
++gceSTATUS
++gcoSURF_IsTileStatusEnabled(
++ IN gcoSURF Surface
++ );
++
++/* Verify if surface is compressed. */
++gceSTATUS
++gcoSURF_IsCompressed(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface on zero slot. */
++gceSTATUS
++gcoSURF_EnableTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface on specified slot. */
++gceSTATUS
++gcoSURF_EnableTileStatusEx(
++ IN gcoSURF Surface,
++ IN gctUINT RtIndex
++ );
++
++/* Disable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_DisableTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++
++/* Flush tile status cache for the specified surface. */
++gceSTATUS
++gcoSURF_FlushTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++#endif /* gcdENABLE_3D */
++
++/* Get surface size. */
++gceSTATUS
++gcoSURF_GetSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctUINT * Depth
++ );
++
++/* Get surface aligned sizes. */
++gceSTATUS
++gcoSURF_GetAlignedSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctINT * Stride
++ );
++
++/* Get alignments. */
++gceSTATUS
++gcoSURF_GetAlignment(
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT * AddressAlignment,
++ OUT gctUINT * XAlignment,
++ OUT gctUINT * YAlignment
++ );
++
++gceSTATUS
++gcoSURF_AlignResolveRect(
++ IN gcoSURF Surf,
++ IN gcsPOINT_PTR RectOrigin,
++ IN gcsPOINT_PTR RectSize,
++ OUT gcsPOINT_PTR AlignedOrigin,
++ OUT gcsPOINT_PTR AlignedSize
++ );
++
++/* Get surface type and format. */
++gceSTATUS
++gcoSURF_GetFormat(
++ IN gcoSURF Surface,
++ OUT OPTIONAL gceSURF_TYPE * Type,
++ OUT OPTIONAL gceSURF_FORMAT * Format
++ );
++
++/* Get surface information */
++gceSTATUS
++gcoSURF_GetFormatInfo(
++ IN gcoSURF Surface,
++ OUT gcsSURF_FORMAT_INFO_PTR * formatInfo
++ );
++
++/* Get surface packed format. */
++gceSTATUS
++gcoSURF_GetPackedFormat(
++ IN gcoSURF Surface,
++ OUT gceSURF_FORMAT * Format
++ );
++
++/* Get surface tiling. */
++gceSTATUS
++gcoSURF_GetTiling(
++ IN gcoSURF Surface,
++ OUT gceTILING * Tiling
++ );
++
++/* Get flip bitmap offset bytes. */
++gceSTATUS
++gcoSURF_GetFlipBitmapOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR FlipBitmapOffset
++ );
++
++/* Get bottom buffer offset bytes. */
++gceSTATUS
++gcoSURF_GetBottomBufferOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR BottomBufferOffset
++ );
++
++/* Lock the surface. */
++gceSTATUS
++gcoSURF_Lock(
++ IN gcoSURF Surface,
++ IN OUT gctUINT32 * Address,
++ IN OUT gctPOINTER * Memory
++ );
++
++/* Unlock the surface. */
++gceSTATUS
++gcoSURF_Unlock(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory
++ );
++
++/* Query surface flags. */
++gceSTATUS
++gcoSURF_QueryFlags(
++ IN gcoSURF Surface,
++ IN gceSURF_FLAG Flag
++ );
++
++/* Return pixel format parameters; Info is required to be a pointer to an
++ * array of at least two items because some formats have up to two records
++ * of description. */
++gceSTATUS
++gcoSURF_QueryFormat(
++ IN gceSURF_FORMAT Format,
++ OUT gcsSURF_FORMAT_INFO_PTR * Info
++ );
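++
++/* Illustrative usage (editor's note, not part of the original header):
++** querying format parameters.  Info must point to an array of at least
++** two entries because interleaved formats are described by two records.
++** gcvSURF_A8R8G8B8 is assumed here to be a valid gceSURF_FORMAT value.
++**
++**     gcsSURF_FORMAT_INFO_PTR info[2];
++**
++**     if (gcoSURF_QueryFormat(gcvSURF_A8R8G8B8, info) == gcvSTATUS_OK)
++**     {
++**         ... info[0]->bitsPerPixel describes the even pixel ...
++**     }
++*/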
++
++/* Compute the color pixel mask. */
++gceSTATUS
++gcoSURF_ComputeColorMask(
++ IN gcsSURF_FORMAT_INFO_PTR Format,
++ OUT gctUINT32_PTR ColorMask
++ );
++
++/* Flush the surface. */
++gceSTATUS
++gcoSURF_Flush(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface from its tile status buffer. */
++gceSTATUS
++gcoSURF_FillFromTile(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface with a value. */
++gceSTATUS
++gcoSURF_Fill(
++ IN gcoSURF Surface,
++ IN gcsPOINT_PTR Origin,
++ IN gcsSIZE_PTR Size,
++ IN gctUINT32 Value,
++ IN gctUINT32 Mask
++ );
++
++/* Alpha blend two surfaces together. */
++gceSTATUS
++gcoSURF_Blend(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrig,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsSIZE_PTR Size,
++ IN gceSURF_BLEND_MODE Mode
++ );
++
++/* Create a new gcoSURF wrapper object. */
++gceSTATUS
++gcoSURF_ConstructWrapper(
++ IN gcoHAL Hal,
++ OUT gcoSURF * Surface
++ );
++
++/* Set surface flags.*/
++gceSTATUS
++gcoSURF_SetFlags(
++ IN gcoSURF Surface,
++ IN gceSURF_FLAG Flag,
++ IN gctBOOL Value
++ );
++
++/* Set the underlying buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Stride,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Set the underlying video buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetVideoBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Stride,
++ IN gctPOINTER *LogicalPlane1,
++ IN gctUINT32 *PhysicalPlane1
++ );
++
++/* Set the size of the surface in pixels and map the underlying buffer. */
++gceSTATUS
++gcoSURF_SetWindow(
++ IN gcoSURF Surface,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Set width/height alignment of the surface directly and calculate stride/size. This is only for the DRI backend for now; use with care. */
++gceSTATUS
++gcoSURF_SetAlignment(
++ IN gcoSURF Surface,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Increase reference count of the surface. */
++gceSTATUS
++gcoSURF_ReferenceSurface(
++ IN gcoSURF Surface
++ );
++
++/* Get surface reference count. */
++gceSTATUS
++gcoSURF_QueryReferenceCount(
++ IN gcoSURF Surface,
++ OUT gctINT32 * ReferenceCount
++ );
++
++/* Set surface orientation. */
++gceSTATUS
++gcoSURF_SetOrientation(
++ IN gcoSURF Surface,
++ IN gceORIENTATION Orientation
++ );
++
++/* Query surface orientation. */
++gceSTATUS
++gcoSURF_QueryOrientation(
++ IN gcoSURF Surface,
++ OUT gceORIENTATION * Orientation
++ );
++
++gceSTATUS
++gcoSURF_SetOffset(
++ IN gcoSURF Surface,
++ IN gctSIZE_T Offset
++ );
++
++gceSTATUS
++gcoSURF_NODE_Cache(
++ IN gcsSURF_NODE_PTR Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Lock and unlock surface node */
++gceSTATUS
++gcoSURF_LockNode(
++ IN gcsSURF_NODE_PTR Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++gceSTATUS
++gcoSURF_UnLockNode(
++ IN gcsSURF_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ );
++
++/* Perform CPU cache operation on surface node */
++gceSTATUS
++gcoSURF_NODE_CPUCacheOperation(
++ IN gcsSURF_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Length,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Perform CPU cache operation on surface */
++gceSTATUS
++gcoSURF_CPUCacheOperation(
++ IN gcoSURF Surface,
++ IN gceCACHEOPERATION Operation
++ );
++
++
++gceSTATUS
++gcoSURF_Swap(
++ IN gcoSURF Surface1,
++ IN gcoSURF Surface2
++ );
++
++gceSTATUS
++gcoSURF_ResetSurWH(
++ IN gcoSURF Surface,
++ IN gctUINT oriw,
++ IN gctUINT orih,
++ IN gctUINT alignw,
++ IN gctUINT alignh,
++ IN gceSURF_FORMAT fmt
++);
++
++/* Update surface timestamp. */
++gceSTATUS
++gcoSURF_UpdateTimeStamp(
++ IN gcoSURF Surface
++ );
++
++/* Query surface current timestamp. */
++gceSTATUS
++gcoSURF_QueryTimeStamp(
++ IN gcoSURF Surface,
++ OUT gctUINT64 * TimeStamp
++ );
++
++/*
++ * Allocate shared buffer for this surface, so that
++ * surface states can be shared across processes.
++ */
++gceSTATUS
++gcoSURF_AllocShBuffer(
++ IN gcoSURF Surface,
++ OUT gctSHBUF * ShBuf
++ );
++
++/* Bind shared buffer to this surface */
++gceSTATUS
++gcoSURF_BindShBuffer(
++ IN gcoSURF Surface,
++ IN gctSHBUF ShBuf
++ );
++
++/* Push surface shared states to shared buffer. */
++gceSTATUS
++gcoSURF_PushSharedInfo(
++ IN gcoSURF Surface
++ );
++
++/* Pop shared states from shared buffer. */
++gceSTATUS
++gcoSURF_PopSharedInfo(
++ IN gcoSURF Surface
++ );
++
++#if (gcdENABLE_3D || gcdENABLE_VG)
++/* Copy surface. */
++gceSTATUS
++gcoSURF_Copy(
++ IN gcoSURF Surface,
++ IN gcoSURF Source
++ );
++
++/* Set number of samples for a gcoSURF object. */
++gceSTATUS
++gcoSURF_SetSamples(
++ IN gcoSURF Surface,
++ IN gctUINT Samples
++ );
++
++/* Get the number of samples per pixel. */
++gceSTATUS
++gcoSURF_GetSamples(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR Samples
++ );
++#endif
++
++/******************************************************************************\
++********************************* gcoDUMP Object ********************************
++\******************************************************************************/
++
++/* Construct a new gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Construct(
++ IN gcoOS Os,
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Destroy a gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Destroy(
++ IN gcoDUMP Dump
++ );
++
++/* Enable/disable dumping. */
++gceSTATUS
++gcoDUMP_Control(
++ IN gcoDUMP Dump,
++ IN gctSTRING FileName
++ );
++
++gceSTATUS
++gcoDUMP_IsEnabled(
++ IN gcoDUMP Dump,
++ OUT gctBOOL * Enabled
++ );
++
++/* Add surface. */
++gceSTATUS
++gcoDUMP_AddSurface(
++ IN gcoDUMP Dump,
++ IN gctINT32 Width,
++ IN gctINT32 Height,
++ IN gceSURF_FORMAT PixelFormat,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount
++ );
++
++/* Mark the beginning of a frame. */
++gceSTATUS
++gcoDUMP_FrameBegin(
++ IN gcoDUMP Dump
++ );
++
++/* Mark the end of a frame. */
++gceSTATUS
++gcoDUMP_FrameEnd(
++ IN gcoDUMP Dump
++ );
++
++/* Dump data. */
++gceSTATUS
++gcoDUMP_DumpData(
++ IN gcoDUMP Dump,
++ IN gceDUMP_TAG Type,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Delete an address. */
++gceSTATUS
++gcoDUMP_Delete(
++ IN gcoDUMP Dump,
++ IN gctUINT32 Address
++ );
++
++/* Enable or disable dumping. */
++gceSTATUS
++gcoDUMP_SetDumpFlag(
++ IN gctBOOL DumpState
++ );
++
++/******************************************************************************\
++******************************* gcsRECT Structure ******************************
++\******************************************************************************/
++
++/* Initialize rectangle structure. */
++gceSTATUS
++gcsRECT_Set(
++ OUT gcsRECT_PTR Rect,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Return the width of the rectangle. */
++gceSTATUS
++gcsRECT_Width(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Width
++ );
++
++/* Return the height of the rectangle. */
++gceSTATUS
++gcsRECT_Height(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Height
++ );
++
++/* Ensure that the top-left corner is to the left of and above the bottom-right corner. */
++gceSTATUS
++gcsRECT_Normalize(
++ IN OUT gcsRECT_PTR Rect
++ );
++
++/* Compare two rectangles. */
++gceSTATUS
++gcsRECT_IsEqual(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * Equal
++ );
++
++/* Compare the sizes of two rectangles. */
++gceSTATUS
++gcsRECT_IsOfEqualSize(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * EqualSize
++ );
++
++gceSTATUS
++gcsRECT_RelativeRotation(
++ IN gceSURF_ROTATION Orientation,
++ IN OUT gceSURF_ROTATION *Relation);
++
++gceSTATUS
++gcsRECT_Rotate(
++    IN OUT gcsRECT_PTR Rect,
++    IN gceSURF_ROTATION Rotation,
++    IN gceSURF_ROTATION toRotation,
++    IN gctINT32 SurfaceWidth,
++    IN gctINT32 SurfaceHeight
++    );
++
++/******************************************************************************\
++**************************** gcsBOUNDARY Structure *****************************
++\******************************************************************************/
++
++typedef struct _gcsBOUNDARY
++{
++ gctINT x;
++ gctINT y;
++ gctINT width;
++ gctINT height;
++}
++gcsBOUNDARY;
++
++/******************************************************************************\
++********************************* gcoHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gcoHEAP * gcoHEAP;
++
++/* Construct a new gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Construct(
++ IN gcoOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gcoHEAP * Heap
++ );
++
++/* Destroy a gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Destroy(
++ IN gcoHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoHEAP_Allocate(
++ IN gcoHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcoHEAP_GetMemorySize(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoHEAP_Free(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++#if (VIVANTE_PROFILER || gcdDEBUG)
++/* Profile the heap. */
++gceSTATUS
++gcoHEAP_ProfileStart(
++ IN gcoHEAP Heap
++ );
++
++gceSTATUS
++gcoHEAP_ProfileEnd(
++ IN gcoHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++
++/******************************************************************************\
++******************************* Debugging Macros *******************************
++\******************************************************************************/
++
++void
++gcoOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gcoOS_GetDebugLevel(
++ OUT gctUINT32_PTR DebugLevel
++ );
++
++void
++gcoOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_GetDebugZone(
++ IN gctUINT32 Zone,
++ OUT gctUINT32_PTR DebugZone
++ );
++
++void
++gcoOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gcoOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++gctFILE
++gcoOS_ReplaceDebugFile(
++ IN gctFILE fp
++ );
++
++void
++gcoOS_SysTraceBegin(
++ IN gctCONST_STRING FuncName
++ );
++
++void
++gcoOS_SysTraceEnd(
++ IN void);
++
++/*******************************************************************************
++**
++** gcmFATAL
++**
++**      Print a message to the debugger and trigger a breakpoint.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_FATAL)
++# define gcmFATAL gcoOS_DebugFatal
++# define gcmkFATAL gckOS_DebugFatal
++#elif gcdHAS_ELLIPSIS
++# define gcmFATAL(...)
++# define gcmkFATAL(...)
++#else
++ gcmINLINE static void
++ __dummy_fatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmFATAL __dummy_fatal
++# define gcmkFATAL __dummy_fatal
++#endif
++
++#define gcmENUM2TEXT(e) case e: return #e
++
++/*******************************************************************************
++**
++** gcmTRACE
++**
++**      Print a message to the debugger if the correct level has been set. In
++** retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** level Level of message.
++** message Message.
++** ... Optional arguments.
++*/
++#define gcvLEVEL_NONE -1
++#define gcvLEVEL_ERROR 0
++#define gcvLEVEL_WARNING 1
++#define gcvLEVEL_INFO 2
++#define gcvLEVEL_VERBOSE 3
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE gcoOS_DebugTrace
++# define gcmkTRACE gckOS_DebugTrace
++# define gcmkTRACE_N gckOS_DebugTraceN
++#elif gcdHAS_ELLIPSIS
++# define gcmTRACE(...)
++# define gcmkTRACE(...)
++# define gcmkTRACE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_n(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE __dummy_trace
++# define gcmkTRACE __dummy_trace
++# define gcmkTRACE_N __dummy_trace_n
++#endif
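++
++/* Illustrative usage (editor's note, not part of the original header):
++** a user-side trace at error level; the message is a printf-style
++** format string followed by its arguments.
++**
++**     gcmTRACE(gcvLEVEL_ERROR, "surface %p has no valid node", Surface);
++*/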
++
++/* Zones common for kernel and user. */
++#define gcvZONE_OS (1 << 0)
++#define gcvZONE_HARDWARE (1 << 1)
++#define gcvZONE_HEAP (1 << 2)
++#define gcvZONE_SIGNAL (1 << 27)
++
++/* Kernel zones. */
++#define gcvZONE_KERNEL (1 << 3)
++#define gcvZONE_VIDMEM (1 << 4)
++#define gcvZONE_COMMAND (1 << 5)
++#define gcvZONE_DRIVER (1 << 6)
++#define gcvZONE_CMODEL (1 << 7)
++#define gcvZONE_MMU (1 << 8)
++#define gcvZONE_EVENT (1 << 9)
++#define gcvZONE_DEVICE (1 << 10)
++#define gcvZONE_DATABASE (1 << 11)
++#define gcvZONE_INTERRUPT (1 << 12)
++#define gcvZONE_POWER (1 << 13)
++
++/* User zones. */
++#define gcvZONE_HAL (1 << 3)
++#define gcvZONE_BUFFER (1 << 4)
++#define gcvZONE_CONTEXT (1 << 5)
++#define gcvZONE_SURFACE (1 << 6)
++#define gcvZONE_INDEX (1 << 7)
++#define gcvZONE_STREAM (1 << 8)
++#define gcvZONE_TEXTURE (1 << 9)
++#define gcvZONE_2D (1 << 10)
++#define gcvZONE_3D (1 << 11)
++#define gcvZONE_COMPILER (1 << 12)
++#define gcvZONE_MEMORY (1 << 13)
++#define gcvZONE_STATE (1 << 14)
++#define gcvZONE_AUX (1 << 15)
++#define gcvZONE_VERTEX (1 << 16)
++#define gcvZONE_CL (1 << 17)
++#define gcvZONE_COMPOSITION (1 << 17)
++#define gcvZONE_VG (1 << 18)
++#define gcvZONE_IMAGE (1 << 19)
++#define gcvZONE_UTILITY (1 << 20)
++#define gcvZONE_PARAMETERS (1 << 21)
++#define gcvZONE_BUFOBJ (1 << 22)
++#define gcvZONE_SHADER (1 << 23)
++#define gcvZONE_STREAM_OUT (1 << 24)
++
++/* API definitions. */
++#define gcvZONE_API_HAL (1 << 28)
++#define gcvZONE_API_EGL (2 << 28)
++#define gcvZONE_API_ES11 (3 << 28)
++#define gcvZONE_API_ES20 (4 << 28)
++#define gcvZONE_API_VG11 (5 << 28)
++#define gcvZONE_API_GL (6 << 28)
++#define gcvZONE_API_DFB (7 << 28)
++#define gcvZONE_API_GDI ((gctUINT32)8 << 28)
++#define gcvZONE_API_D3D ((gctUINT32)9 << 28)
++#define gcvZONE_API_ES30 ((gctUINT32)10 << 28)
++
++
++#define gcmZONE_GET_API(zone) ((zone) >> 28)
++/* Setting gcdZONE_MASK to, e.g., 0x0 | gcvZONE_API_EGL
++   enables printing of EGL module debug info. */
++#define gcdZONE_MASK 0x0FFFFFFF
++
++/* Handy zones. */
++#define gcvZONE_NONE 0
++#define gcvZONE_ALL 0x0FFFFFFF
++
++/* Dump API depth: set to 1 for API only, 2 for API and API behavior. */
++#define gcvDUMP_API_DEPTH 1
++
++/*******************************************************************************
++**
++** gcmTRACE_ZONE
++**
++**      Print a message to the debugger if the correct level and zone have been
++** set. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** Level Level of message.
++** Zone Zone of message.
++** Message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE_ZONE gcoOS_DebugTraceZone
++# define gcmkTRACE_ZONE gckOS_DebugTraceZone
++# define gcmkTRACE_ZONE_N gckOS_DebugTraceZoneN
++#elif gcdHAS_ELLIPSIS
++# define gcmTRACE_ZONE(...)
++# define gcmkTRACE_ZONE(...)
++# define gcmkTRACE_ZONE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace_zone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_zone_n(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE_N __dummy_trace_zone_n
++#endif
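++
++/* Illustrative usage (editor's note, not part of the original header):
++** a zone-filtered kernel trace; the message is only emitted when both
++** the level and the gcvZONE_VIDMEM zone are enabled.
++**
++**     gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++**                    "allocated %u bytes", Bytes);
++*/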
++
++/*******************************************************************************
++**
++** gcmDEBUG_ONLY
++**
++** Execute a statement or function only in DEBUG mode.
++**
++** ARGUMENTS:
++**
++** f Statement or function to execute.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++# define gcmDEBUG_ONLY(f) f
++#else
++# define gcmDEBUG_ONLY(f)
++#endif
++
++/*******************************************************************************
++**
++** gcmSTACK_PUSH
++** gcmSTACK_POP
++** gcmSTACK_DUMP
++**
++** Push or pop a function with entry arguments on the trace stack.
++**
++** ARGUMENTS:
++**
++** Function Name of function.
++** Line Line number.
++** Text Optional text.
++** ... Optional arguments for text.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_STACK)
++ void gcoOS_StackPush(IN gctINT8_PTR Identity, IN gctCONST_STRING Function, IN gctINT Line, IN gctCONST_STRING Text, ...);
++ void gcoOS_StackPop(IN gctINT8_PTR Identity, IN gctCONST_STRING Function);
++ void gcoOS_StackDump(void);
++ void gcoOS_StackRemove(IN gctHANDLE Thread);
++
++# define gcmSTACK_PUSH gcoOS_StackPush
++# define gcmSTACK_POP gcoOS_StackPop
++# define gcmSTACK_DUMP gcoOS_StackDump
++# define gcmSTACK_REMOVE gcoOS_StackRemove
++#elif gcdHAS_ELLIPSIS
++# define gcmSTACK_PUSH(...) do { } while (0)
++# define gcmSTACK_POP(...) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++# define gcmSTACK_REMOVE(...) do { } while (0)
++#else
++ gcmINLINE static void
++ __dummy_stack_push(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text, ...
++ )
++ {
++ }
++# define gcmSTACK_PUSH __dummy_stack_push
++# define gcmSTACK_POP(a,b) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++# define gcmSTACK_REMOVE(a) do { } while (0)
++#endif
++
++/******************************************************************************\
++******************************** Binary Trace **********************************
++\******************************************************************************/
++typedef struct _gcsBINARY_TRACE_MESSAGE * gcsBINARY_TRACE_MESSAGE_PTR;
++typedef struct _gcsBINARY_TRACE_MESSAGE
++{
++ gctUINT32 signature;
++ gctUINT32 pid;
++ gctUINT32 tid;
++ gctUINT32 line;
++ gctUINT32 numArguments;
++ gctUINT8 payload;
++}
++gcsBINARY_TRACE_MESSAGE;
++
++#define gcdBINARY_TRACE_MESSAGE_SIZE 240
++
++#if gcdBINARY_TRACE
++ void
++ gcoOS_BinaryTrace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text OPTIONAL,
++ ...
++ );
++
++ void
++ gckOS_BinaryTrace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text OPTIONAL,
++ ...
++ );
++
++# define gcmBINARY_TRACE gcoOS_BinaryTrace
++# define gcmkBINARY_TRACE gckOS_BinaryTrace
++#elif gcdHAS_ELLIPSIS
++# define gcmBINARY_TRACE(Function, Line, Text, ...)
++# define gcmkBINARY_TRACE(Function, Line, Text, ...)
++#else
++ gcmINLINE static void
++ __dummy_binary_trace(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++        IN gctCONST_STRING Text,
++        ...
++        )
++ {
++ }
++
++# define gcmBINARY_TRACE __dummy_binary_trace
++# define gcmkBINARY_TRACE __dummy_binary_trace
++#endif
++
++/******************************************************************************\
++******************************** Logging Macros ********************************
++\******************************************************************************/
++
++#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE
++
++#ifndef gcdEMPTY_HEADER_FOOTER
++#define gcdEMPTY_HEADER_FOOTER 0
++#endif
++
++#if gcdENABLE_PROFILING
++void
++gcoOS_ProfileDB(
++ IN gctCONST_STRING Function,
++ IN OUT gctBOOL_PTR Initialized
++ );
++
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmHEADER_ARG(...) \
++ gctINT8 __user__ = 1; \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(&__user__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmFOOTER() \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_ARG(...) \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(&__user__, __FUNCTION__); \
++ gcoOS_ProfileDB(gcvNULL, gcvNULL)
++
++#else /* gcdENABLE_PROFILING */
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmHEADER()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmHEADER()
++#elif gcdHAS_ELLIPSIS
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_header(void)
++ {
++ }
++# define gcmHEADER __dummy_header
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmHEADER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++#if gcdEMPTY_HEADER_FOOTER
++# define gcmHEADER_ARG(Text, ...)
++#else
++# define gcmHEADER_ARG(Text, ...) \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__user_ptr__, __FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#endif
++#else
++ gcmINLINE static void
++ __dummy_header_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmHEADER_ARG __dummy_header_arg
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmFOOTER()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER()
++#elif gcdHAS_ELLIPSIS
++# define gcmFOOTER() \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ status, gcoOS_DebugStatus2Name(status)); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer(void)
++ {
++ }
++# define gcmFOOTER __dummy_footer
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmFOOTER_NO()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER_NO()
++#elif gcdHAS_ELLIPSIS
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_no(void)
++ {
++ }
++# define gcmFOOTER_NO __dummy_footer_no
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmFOOTER_KILL()
++#elif gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER_KILL()
++#elif gcdHAS_ELLIPSIS
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_kill(void)
++ {
++ }
++# define gcmFOOTER_KILL __dummy_footer_kill
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmFOOTER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++#if gcdEMPTY_HEADER_FOOTER
++# define gcmFOOTER_ARG(Text, ...)
++#else
++# define gcmFOOTER_ARG(Text, ...) \
++ gcmSTACK_POP(__user_ptr__, __FUNCTION__); \
++ gcmBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__user_ptr__ -= 1
++#endif
++#else
++ gcmINLINE static void
++ __dummy_footer_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmFOOTER_ARG __dummy_footer_arg
++#endif
++
++#endif /* gcdENABLE_PROFILING */
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmkHEADER()
++#elif gcdHAS_ELLIPSIS
++#define gcmkHEADER() \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_kheader(void)
++ {
++ }
++# define gcmkHEADER __dummy_kheader
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmkHEADER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++# define gcmkHEADER_ARG(Text, ...) \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_kheader_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkHEADER_ARG __dummy_kheader_arg
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmkFOOTER()
++#elif gcdHAS_ELLIPSIS
++#define gcmkFOOTER() \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, status); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, status, gckOS_DebugStatus2Name(status)); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter(void)
++ {
++ }
++# define gcmkFOOTER __dummy_kfooter
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++#define gcmkFOOTER_NO()
++#elif gcdHAS_ELLIPSIS
++#define gcmkFOOTER_NO() \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_no(void)
++ {
++ }
++# define gcmkFOOTER_NO __dummy_kfooter_no
++#endif
++
++#ifdef gcdFSL_REL_BUILD
++# define gcmkFOOTER_ARG(Text, ...)
++#elif gcdHAS_ELLIPSIS
++# define gcmkFOOTER_ARG(Text, ...) \
++ gcmkBINARY_TRACE(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, \
++ __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkFOOTER_ARG __dummy_kfooter_arg
++#endif
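++
++/* Illustrative usage (editor's note, not part of the original header):
++** a hypothetical kernel-side function instrumented with the header and
++** footer macros.  gcmkFOOTER() expects a local 'status' variable, and
++** the translation unit must define _GC_OBJ_ZONE as the driver sources do.
++**
++**     gceSTATUS
++**     gckEXAMPLE_DoWork(
++**         IN gckOS Os,
++**         IN gctUINT32 Bytes
++**         )
++**     {
++**         gceSTATUS status = gcvSTATUS_OK;
++**
++**         gcmkHEADER_ARG("Os=%p Bytes=%u", Os, Bytes);
++**         ... body that assigns to status ...
++**         gcmkFOOTER();
++**         return status;
++**     }
++*/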
++
++#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr))
++#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index])
++#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr))
++#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr))
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#define gcmPRINT gcoOS_Print
++#define gcmkPRINT gckOS_Print
++#define gcmkPRINT_N gckOS_PrintN
++
++#if gcdPRINT_VERSION
++# define gcmPRINT_VERSION() do { \
++ _gcmPRINT_VERSION(gcm); \
++ gcmSTACK_DUMP(); \
++ } while (0)
++# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk)
++# define _gcmPRINT_VERSION(prefix) \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "Vivante HAL version %d.%d.%d build %d %s %s", \
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, \
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME )
++#else
++# define gcmPRINT_VERSION() do { gcmSTACK_DUMP(); } while (gcvFALSE)
++# define gcmkPRINT_VERSION() do { } while (gcvFALSE)
++#endif
++
++typedef enum _gceDUMP_BUFFER
++{
++ gceDUMP_BUFFER_CONTEXT,
++ gceDUMP_BUFFER_USER,
++ gceDUMP_BUFFER_KERNEL,
++ gceDUMP_BUFFER_LINK,
++ gceDUMP_BUFFER_WAITLINK,
++ gceDUMP_BUFFER_FROM_USER,
++}
++gceDUMP_BUFFER;
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ );
++
++#define gcmkDUMPBUFFER gckOS_DumpBuffer
++
++#if gcdDUMP_COMMAND
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) \
++ gcmkDUMPBUFFER(Os, Buffer, Size, Type, CopyMessage)
++#else
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage)
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ );
++
++# define gcmkDEBUGFLUSH(DmaAddress) \
++ gckOS_DebugFlush(__FUNCTION__, __LINE__, DmaAddress)
++#else
++# define gcmkDEBUGFLUSH(DmaAddress)
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_FRAMERATE
++**
++** Print average frame rate
++**
++*/
++#if gcdDUMP_FRAMERATE
++ gceSTATUS
++ gcfDumpFrameRate(
++ void
++ );
++# define gcmDUMP_FRAMERATE gcfDumpFrameRate
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_FRAMERATE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_frame_rate(
++ void
++ )
++ {
++ }
++# define gcmDUMP_FRAMERATE __dummy_dump_frame_rate
++#endif
++
++
++/*******************************************************************************
++**
++** gcmDUMP
++**
++** Print a dump message.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++#if gcdDUMP
++ gceSTATUS
++ gcfDump(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ ...
++ );
++# define gcmDUMP gcfDump
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP(...)
++#else
++ gcmINLINE static void
++ __dummy_dump(
++ IN gcoOS Os,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP __dummy_dump
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_DATA
++**
++** Add data to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++ gceSTATUS
++ gcfDumpData(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_DATA gcfDumpData
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_data(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_DATA __dummy_dump_data
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_BUFFER
++**
++** Print a buffer to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctUINT32 Physical
++** Physical address of buffer.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctUINT32 Offset
++** Offset into buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++gceSTATUS
++gcfDumpBuffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_BUFFER gcfDumpBuffer
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_BUFFER(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_buffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_BUFFER __dummy_dump_buffer
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API
++**
++** Print a dump message for a high level API prefixed by the function name.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++gceSTATUS gcfDumpApi(IN gctCONST_STRING String, ...);
++#if gcdDUMP_API
++# define gcmDUMP_API gcfDumpApi
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP_API __dummy_dump_api
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY
++**
++** Print an array of data.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Size.
++*/
++gceSTATUS gcfDumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY gcfDumpArray
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API_ARRAY(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY __dummy_dump_api_array
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY_TOKEN
++**
++** Print an array of data terminated by a token.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Termination.
++*/
++gceSTATUS gcfDumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY_TOKEN gcfDumpArrayToken
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API_ARRAY_TOKEN(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array_token(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Termination
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY_TOKEN __dummy_dump_api_array_token
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_DATA
++**
++** Print an array of bytes.
++**
++** ARGUMENTS:
++**
++** gctCONST_POINTER Pointer to array.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfDumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_DATA gcfDumpApiData
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_API_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_data(
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Size
++ )
++ {
++ }
++# define gcmDUMP_API_DATA __dummy_dump_api_data
++#endif
++
++/*******************************************************************************
++** gcmDUMP_2D_COMMAND
++**
++** Print the 2D command buffer.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to the command buffer.
++** gctUINT32 Command buffer size.
++*/
++gceSTATUS gcfDump2DCommand(IN gctUINT32_PTR Command, IN gctUINT32 Size);
++#if gcdDUMP_2D
++# define gcmDUMP_2D_COMMAND gcfDump2DCommand
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_2D_COMMAND(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_2d_command(
++ IN gctUINT32_PTR Command,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_2D_COMMAND __dummy_dump_2d_command
++#endif
++
++/*******************************************************************************
++** gcmDUMP_2D_SURFACE
++**
++** Print the 2D surface memory.
++**
++** ARGUMENTS:
++**
++** gctBOOL Src.
++** gctUINT32 Address.
++*/
++gceSTATUS gcfDump2DSurface(IN gctBOOL Src, IN gctUINT32 Address);
++#if gcdDUMP_2D
++# define gcmDUMP_2D_SURFACE gcfDump2DSurface
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_2D_SURFACE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_2d_surface(
++ IN gctBOOL Src,
++ IN gctUINT32 Address
++ )
++ {
++ }
++# define gcmDUMP_2D_SURFACE __dummy_dump_2d_surface
++#endif
++
++/*******************************************************************************
++** gcmDUMP_ADD_MEMORY_INFO
++**
++** Record the memory info.
++**
++** ARGUMENTS:
++**
++** gctUINT32 Address.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfAddMemoryInfo(IN gctUINT32 GPUAddress, IN gctPOINTER Logical, IN gctUINT32 Physical, IN gctUINT32 Size);
++#if gcdDUMP_2D
++# define gcmDUMP_ADD_MEMORY_INFO gcfAddMemoryInfo
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_ADD_MEMORY_INFO(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_add_memory_info(
++ IN gctUINT32 GPUAddress,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_ADD_MEMORY_INFO __dummy_dump_add_memory_info
++#endif
++
++/*******************************************************************************
++** gcmDUMP_DEL_MEMORY_INFO
++**
++** Record the memory info.
++**
++** ARGUMENTS:
++**
++** gctUINT32 Address.
++*/
++gceSTATUS gcfDelMemoryInfo(IN gctUINT32 Address);
++#if gcdDUMP_2D
++# define gcmDUMP_DEL_MEMORY_INFO gcfDelMemoryInfo
++#elif gcdHAS_ELLIPSIS
++# define gcmDUMP_DEL_MEMORY_INFO(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_del_memory_info(
++ IN gctUINT32 Address
++ )
++ {
++ }
++# define gcmDUMP_DEL_MEMORY_INFO __dummy_dump_del_memory_info
++#endif
++
++#if gcdDUMP_2D
++extern gctPOINTER dumpMemInfoListMutex;
++extern gctBOOL dump2DFlag;
++#endif
++
++/*******************************************************************************
++**
++** gcmTRACE_RELEASE
++**
++** Print a message to the shader debugger.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace
++
++void
++gcoOS_DebugShaderTrace(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_SetDebugShaderFiles(
++ IN gctCONST_STRING VSFileName,
++ IN gctCONST_STRING FSFileName
++ );
++
++void
++gcoOS_SetDebugShaderFileType(
++ IN gctUINT32 ShaderType
++ );
++
++void
++gcoOS_EnableDebugBuffer(
++ IN gctBOOL Enable
++ );
++
++/*******************************************************************************
++**
++** gcmBREAK
++**
++** Break into the debugger. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** None.
++*/
++
++void
++gcoOS_DebugBreak(
++ void
++ );
++
++void
++gckOS_DebugBreak(
++ void
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_BREAK)
++# define gcmBREAK gcoOS_DebugBreak
++# define gcmkBREAK gckOS_DebugBreak
++#else
++# define gcmBREAK()
++# define gcmkBREAK()
++#endif
++
++/*******************************************************************************
++**
++** gcmASSERT
++**
++** Evaluate an expression and break into the debugger if the expression
++** evaluates to false. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define _gcmASSERT(prefix, exp) \
++ do \
++ { \
++ if (!(exp)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ASSERT at %s(%d)", \
++ __FUNCTION__, __LINE__); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "(%s)", #exp); \
++ prefix##BREAK(); \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmASSERT(exp) _gcmASSERT(gcm, exp)
++# define gcmkASSERT(exp) _gcmASSERT(gcmk, exp)
++#else
++# define gcmASSERT(exp)
++# define gcmkASSERT(exp)
++#endif
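++
++/* Illustrative usage (editor's note, not part of the original header):
++** assert on an invariant; compiled out in retail builds.
++**
++**     gcmASSERT(Surface != gcvNULL);
++*/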
++
++/*******************************************************************************
++**
++** gcmVERIFY
++**
++** Verify if an expression returns true. If the expression does not
++**      evaluate to true, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY(exp) gcmASSERT(exp)
++# define gcmkVERIFY(exp) gcmkASSERT(exp)
++#else
++# define gcmVERIFY(exp) exp
++# define gcmkVERIFY(exp) exp
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_OK
++**
++**      Verify a function returns gcvSTATUS_OK. If the function does not return
++** gcvSTATUS_OK, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++
++void
++gcoOS_Verify(
++ IN gceSTATUS status
++ );
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ gcoOS_Verify(verifyStatus); \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gcmASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++# define gcmkVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmkTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmkVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gckOS_Verify(verifyStatus); \
++ gcmkASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++#else
++# define gcmVERIFY_OK(func) func
++# define gcmkVERIFY_OK(func) func
++#endif
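++
++/* Illustrative usage (editor's note, not part of the original header):
++** use the VERIFY_OK variants for calls that are expected to always
++** succeed (typically cleanup); the status is only checked in debug mode.
++**
++**     gcmVERIFY_OK(gcoSURF_Destroy(surface));
++*/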
++
++gctCONST_STRING
++gcoOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++/*******************************************************************************
++**
++** gcmERR_BREAK
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func)
++#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func)
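++
++/* Illustrative usage (editor's note, not part of the original header):
++** gcmERR_BREAK is intended for a do/while or loop body with a local
++** 'status' variable in scope.
++**
++**     gceSTATUS status = gcvSTATUS_OK;
++**
++**     do
++**     {
++**         gcmERR_BREAK(gcoSURF_Flush(surface));
++**         gcmERR_BREAK(gcoSURF_UpdateTimeStamp(surface));
++**     }
++**     while (gcvFALSE);
++*/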
++
++/*******************************************************************************
++**
++** gcmERR_RETURN
++**
++** Executes a return on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func)
++#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func)
++
++
++/*******************************************************************************
++**
++** gcmONERROR
++**
++** Jump to the error handler in case there is an error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmONERROR(func) _gcmONERROR(gcm, func)
++#define gcmkONERROR(func) _gcmkONERROR(gcmk, func)
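++
++/* Illustrative usage (editor's note, not part of the original header):
++** the usual gcmONERROR pattern inside a hypothetical user-side function;
++** it requires a local 'status' variable and an 'OnError' label.
++**
++**     gceSTATUS status;
++**
++**     gcmHEADER_ARG("Surface=%p", Surface);
++**
++**     gcmONERROR(gcoSURF_Flush(Surface));
++**     gcmONERROR(gcoSURF_UpdateTimeStamp(Surface));
++**
++**     gcmFOOTER_NO();
++**     return gcvSTATUS_OK;
++**
++** OnError:
++**     gcmFOOTER();
++**     return status;
++*/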
++
++/*******************************************************************************
++**
++** gcmkSAFECASTSIZET
++**
++**      Check whether the value of a gctSIZE_T variable is beyond the
++**      capability of 32-bit GPU hardware.
++**
++** ASSUMPTIONS:
++**
++**
++**
++** ARGUMENTS:
++**
++** x A gctUINT32 variable
++** y A gctSIZE_T variable
++*/
++#define gcmkSAFECASTSIZET(x, y) \
++ do \
++ { \
++ gctUINT32 tmp = (gctUINT32)(y); \
++ if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) \
++ { \
++ gcmkASSERT(tmp <= gcvMAXUINT32); \
++ } \
++ (x) = tmp; \
++ } \
++ while (gcvFALSE)
++
++#define gcmSAFECASTSIZET(x, y) \
++ do \
++ { \
++ gctUINT32 tmp = (gctUINT32)(y); \
++ if (gcmSIZEOF(gctSIZE_T) > gcmSIZEOF(gctUINT32)) \
++ { \
++ gcmASSERT(tmp <= gcvMAXUINT32); \
++ } \
++ (x) = tmp; \
++ } \
++ while (gcvFALSE)
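++
++/* Illustrative usage (editor's note, not part of the original header):
++** narrowing a gctSIZE_T byte count to the 32-bit value expected by the
++** hardware interface.
++**
++**     gctSIZE_T bytes;
++**     gctUINT32 bytes32;
++**
++**     gcmkSAFECASTSIZET(bytes32, bytes);
++*/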
++
++/*******************************************************************************
++**
++** gcmVERIFY_LOCK
++**
++** Verifies whether the surface is locked.
++**
++** ARGUMENTS:
++**
++**      surfaceInfo Pointer to the surface informational structure.
++*/
++#define gcmVERIFY_LOCK(surfaceInfo) \
++ if (!surfaceInfo->node.valid) \
++ { \
++ gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \
++ } \
++
++/*******************************************************************************
++**
++** gcmVERIFY_NODE_LOCK
++**
++** Verifies whether the surface node is locked.
++**
++** ARGUMENTS:
++**
++**      surfaceNode Pointer to the surface node structure.
++*/
++#define gcmVERIFY_NODE_LOCK(surfaceNode) \
++ if (!(surfaceNode)->valid) \
++ { \
++ status = gcvSTATUS_MEMORY_UNLOCKED; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmBADOBJECT_BREAK
++**
++** Executes a break statement on bad object.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#define gcmBADOBJECT_BREAK(obj, t) \
++ if ((obj == gcvNULL) \
++ || (((gcsOBJECT *)(obj))->type != t) \
++ ) \
++ { \
++ status = gcvSTATUS_INVALID_OBJECT; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
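++/* Example (editor's illustration; not part of the original Vivante sources).
++** The macro is intended for use inside a do { } while (gcvFALSE) block with
++** 'status' in scope; gcvOBJ_HARDWARE is used as a plausible gceOBJECT_TYPE
++** value, while 'Hardware' and DoSomethingWith are hypothetical:
++**
++**     gceSTATUS status = gcvSTATUS_OK;
++**
++**     do
++**     {
++**         gcmBADOBJECT_BREAK(Hardware, gcvOBJ_HARDWARE);
++**
++**         status = DoSomethingWith(Hardware);
++**     }
++**     while (gcvFALSE);
++*/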
++
++/*******************************************************************************
++**
++** gcmCHECK_STATUS
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++**      'status' and 'last' variables of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gcoOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gckOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func)
++#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func)
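++/* Example (editor's illustration; not part of the original Vivante sources).
++** Unlike gcmONERROR, gcmCHECK_STATUS records a failure in 'status' but does
++** not branch, which suits cleanup paths that must run to completion. Both
++** 'status' and 'last' must be in scope; the release calls are hypothetical:
++**
++**     gceSTATUS status = gcvSTATUS_OK;
++**     gceSTATUS last;
++**
++**     gcmCHECK_STATUS(gcoOS_ReleaseResourceA(Os));
++**     gcmCHECK_STATUS(gcoOS_ReleaseResourceB(Os));
++**
++**     return status;
++*/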
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT
++**
++**      Assert if an argument does not satisfy the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT(prefix, arg) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \
++ return gcvSTATUS_INVALID_ARGUMENT; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
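++/* Example (editor's illustration; not part of the original Vivante sources).
++** Argument checking at the top of a public entry point. Because the macro
++** emits gcmFOOTER_ARG before returning, the matching gcmHEADER_ARG is assumed
++** to have been issued first; ExampleSetWidth is a hypothetical function:
++**
++**     gceSTATUS
++**     ExampleSetWidth(
++**         IN gctUINT32 Width
++**         )
++**     {
++**         gcmHEADER_ARG("Width=%u", Width);
++**
++**         gcmVERIFY_ARGUMENT(Width > 0);
++**
++**         gcmFOOTER_ARG("status=%d", gcvSTATUS_OK);
++**         return gcvSTATUS_OK;
++**     }
++*/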
++
++/*******************************************************************************
++**
++** gcmDEBUG_VERIFY_ARGUMENT
++**
++** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode.
++** Use this to verify arguments inside non-public API functions.
++*/
++#if gcdDEBUG
++# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++#else
++# define gcmDEBUG_VERIFY_ARGUMENT(arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT_RETURN
++**
++**      Assert if an argument does not satisfy the specified expression. If
++**      the argument evaluates to false, the supplied return value will be
++**      returned from the current function. In retail mode this macro does
++**      nothing.
++**
++**      ARGUMENTS:
++**
++**      arg Argument to evaluate.
++**      value Value to return when the argument evaluates to false.
++*/
++# define _gcmVERIFY_ARGUMENT_RETURN(prefix, arg, value) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++                #prefix "VERIFY_ARGUMENT_RETURN failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("value=%d", value); \
++ return value; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcm, arg, value)
++# define gcmkVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcmk, arg, value)
++
++#define MAX_LOOP_COUNT 0x7FFFFFFF
++
++/******************************************************************************\
++****************************** User Debug Option ******************************
++\******************************************************************************/
++
++/* User option. */
++typedef enum _gceDEBUG_MSG
++{
++ gcvDEBUG_MSG_NONE,
++ gcvDEBUG_MSG_ERROR,
++ gcvDEBUG_MSG_WARNING
++}
++gceDEBUG_MSG;
++
++typedef struct _gcsUSER_DEBUG_OPTION
++{
++ gceDEBUG_MSG debugMsg;
++}
++gcsUSER_DEBUG_OPTION;
++
++gcsUSER_DEBUG_OPTION *
++gcGetUserDebugOption(
++ void
++ );
++
++#if defined(ANDROID)
++struct _gcoOS_SymbolsList
++{
++#if gcdENABLE_3D
++ gcePATCH_ID patchId;
++#endif
++ const char * symList[10];
++};
++#endif
++
++#if gcdHAS_ELLIPSIS
++#define gcmUSER_DEBUG_MSG(level, ...) \
++ do \
++ { \
++ if (level <= gcGetUserDebugOption()->debugMsg) \
++ { \
++ gcoOS_Print(__VA_ARGS__); \
++ } \
++ } while (gcvFALSE)
++
++#define gcmUSER_DEBUG_ERROR_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__)
++#define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warning: " __VA_ARGS__)
++#else
++#define gcmUSER_DEBUG_MSG
++#define gcmUSER_DEBUG_ERROR_MSG
++#define gcmUSER_DEBUG_WARNING_MSG
++#endif
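++/* Example (editor's illustration; not part of the original Vivante sources).
++** The message is printed only when the user debug option is at least as
++** verbose as the requested level; the format arguments are placeholders:
++**
++**     gcmUSER_DEBUG_ERROR_MSG("failed to map %u bytes", bytes);
++**     gcmUSER_DEBUG_WARNING_MSG("falling back to linear tiling");
++*/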
++
++/*******************************************************************************
++**
++** A set of macros to aid state loading.
++**
++** ARGUMENTS:
++**
++** CommandBuffer Pointer to a gcoCMDBUF object.
++** StateDelta Pointer to a gcsSTATE_DELTA state delta structure.
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** PartOfContext Whether or not the state is a part of the context.
++** FixedPoint Whether or not the state is of the fixed point format.
++** Count Number of consecutive states to be loaded.
++** Address State address.
++** Data Data to be set to the state.
++*/
++
++/*----------------------------------------------------------------------------*/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \
++ CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \
++ CommandBuffer->lastLoadStateAddress = Address; \
++ CommandBuffer->lastLoadStateCount = Count
++
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \
++ gcmASSERT( \
++ (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \
++ == \
++ (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \
++ ); \
++ \
++ gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \
++ \
++ CommandBuffer->lastLoadStateCount -= 1
++
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \
++ gcmASSERT(CommandBuffer->lastLoadStateCount == 0);
++
++# define gcmDEFINELOADSTATEBASE() \
++ gctUINT32_PTR LoadStateBase;
++
++# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide) \
++ if (OutSide) \
++ {\
++ LoadStateBase = (gctUINT32_PTR)*OutSide; \
++ }\
++ else\
++ {\
++ LoadStateBase = (gctUINT_PTR)CommandBuffer->buffer;\
++ }
++
++
++# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory) \
++ gcmASSERT(((Memory - LoadStateBase) & 1) == 0);
++
++# define gcmUNSETLOADSTATEBASE() \
++ LoadStateBase = LoadStateBase;
++
++#else
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count)
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address)
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer)
++
++# define gcmDEFINELOADSTATEBASE()
++# define gcmSETLOADSTATEBASE(CommandBuffer, OutSide)
++# define gcmVERIFYLOADSTATEALIGNED(CommandBuffer, Memory)
++# define gcmUNSETLOADSTATEBASE()
++
++#endif
++
++#if gcdSECURE_USER
++
++# define gcmDEFINESECUREUSER() \
++ gctUINT __secure_user_offset__; \
++ gctUINT32_PTR __secure_user_hintArray__;
++
++# define gcmBEGINSECUREUSER() \
++ __secure_user_offset__ = reserve->lastOffset; \
++ \
++ __secure_user_hintArray__ = gcmUINT64_TO_PTR(reserve->hintArrayTail)
++
++# define gcmENDSECUREUSER() \
++ reserve->hintArrayTail = gcmPTR_TO_UINT64(__secure_user_hintArray__)
++
++# define gcmSKIPSECUREUSER() \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32)
++
++# define gcmUPDATESECUREUSER() \
++ *__secure_user_hintArray__ = __secure_user_offset__; \
++ \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32); \
++ __secure_user_hintArray__ += 1
++
++#else
++
++# define gcmDEFINESECUREUSER()
++# define gcmBEGINSECUREUSER()
++# define gcmENDSECUREUSER()
++# define gcmSKIPSECUREUSER()
++# define gcmUPDATESECUREUSER()
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#if gcdDUMP
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) \
++ if (FixedPoint) \
++ { \
++ gcmDUMP(gcvNULL, "#[state.x 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ } \
++ else \
++ { \
++ gcmDUMP(gcvNULL, "#[state 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ }
++#else
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data)
++#endif
++
++#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, gcvCOMMAND_3D, &CommandBuffer \
++ )); \
++ \
++ Memory = (gctUINT32_PTR) gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmENDSTATEBUFFER(Hardware, CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ gcmASSERT( \
++ gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \
++ == \
++ (gctUINT8_PTR) Memory \
++ ); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ gcmSAFECASTSIZET(__temp_data32__, Data); \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, Mask, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++
++#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++
++#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++
++
++#define gcmSETSEMASTALLPIPE(StateDelta, CommandBuffer, Memory, Data) \
++{ \
++ gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, gcvFALSE, AQSemaphoreRegAddrs, Data); \
++ \
++ *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *Memory++ = Data; \
++ \
++ gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
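++/* Example (editor's illustration; not part of the original Vivante sources).
++** A rough sketch of the reserve/program/close sequence built from the macros
++** above. 'hardware' stands for a gcoHARDWARE object, stateAddress/stateData
++** are placeholders, and because gcmBEGINSTATEBUFFER uses gcmONERROR, 'status'
++** and an OnError label are assumed to be in scope. A single-state batch
++** occupies two 32-bit words (load-state header plus data):
++**
++**     gcmDEFINESTATEBUFFER(cmdBuf, delta, memory, reserveSize);
++**
++**     reserveSize = 2 * gcmSIZEOF(gctUINT32);
++**
++**     gcmBEGINSTATEBUFFER(hardware, cmdBuf, delta, memory, reserveSize);
++**
++**     gcmSETSINGLESTATE(delta, cmdBuf, memory, gcvFALSE,
++**                       stateAddress, stateData);
++**
++**     gcmENDSTATEBUFFER(hardware, cmdBuf, memory, reserveSize);
++*/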
++
++/*******************************************************************************
++**
++** gcmSETSTARTDECOMMAND
++**
++** Form a START_DE command.
++**
++** ARGUMENTS:
++**
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** Count Number of the rectangles.
++*/
++
++#define gcmSETSTARTDECOMMAND(Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++}
++
++/*****************************************
++** Temp command buffer macro
++*/
++#define gcmDEFINESTATEBUFFER_NEW(CommandBuffer, StateDelta, Memory) \
++ gcmDEFINESECUREUSER() \
++ gcmDEFINELOADSTATEBASE() \
++ gcsTEMPCMDBUF CommandBuffer = gcvNULL; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++
++#define gcmBEGINSTATEBUFFER_NEW(Hardware, CommandBuffer, StateDelta, Memory, OutSide) \
++{ \
++ if (OutSide) \
++ {\
++ Memory = (gctUINT32_PTR)*OutSide; \
++ }\
++ else \
++ {\
++ gcmONERROR(gcoBUFFER_StartTEMPCMDBUF( \
++ Hardware->buffer, &CommandBuffer \
++ ));\
++ \
++ Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \
++ \
++ }\
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++ gcmSETLOADSTATEBASE(CommandBuffer,OutSide);\
++}
++
++#define gcmENDSTATEBUFFER_NEW(Hardware, CommandBuffer, Memory, OutSide) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ if (OutSide) \
++ {\
++ *OutSide = Memory; \
++ }\
++ else \
++ {\
++ CommandBuffer->currentByteSize = (gctUINT32)((gctUINT8_PTR)Memory - \
++ (gctUINT8_PTR)CommandBuffer->buffer); \
++ \
++ gcmONERROR(gcoBUFFER_EndTEMPCMDBUF(Hardware->buffer));\
++ }\
++ gcmUNSETLOADSTATEBASE()\
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmVERIFYLOADSTATEALIGNED(CommandBuffer,Memory);\
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH_NEW(CommandBuffer, Memory) \
++ gcmVERIFYLOADSTATEALIGNED(CommandBuffer,Memory);
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, Address, Mask, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++
++#define gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER_NEW(CommandBuffer, Memory) \
++{ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++
++#define gcmSETSINGLECTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE_NEW(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++
++
++#define gcmSETSEMASTALLPIPE_NEW(StateDelta, CommandBuffer, Memory, Data) \
++{ \
++ gcmSETSINGLESTATE_NEW(StateDelta, CommandBuffer, Memory, gcvFALSE, AQSemaphoreRegAddrs, Data); \
++ \
++ *Memory++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *Memory++ = Data; \
++ \
++ gcmDUMP(gcvNULL, "#[stall 0x%08X 0x%08X]", \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END), \
++ gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE)); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETSTARTDECOMMAND_NEW(CommandBuffer, Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++ \
++}
++
++#define gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSINGLESTATE_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH_NEW(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK_NEW_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH_NEW(CommandBuffer, Memory); \
++}
++
++#define gcmSETSTATEDATA_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ gcmSAFECASTSIZET(__temp_data32__, Data); \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSTATEDATAWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETSINGLESTATE_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLESTATEWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATAWITHMASK_FAST(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Mask, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmDEFINESTATEBUFFER_NEW_FAST(CommandBuffer, Memory) \
++ gcmDEFINESECUREUSER() \
++ gcmDEFINELOADSTATEBASE() \
++ gcsTEMPCMDBUF CommandBuffer = gcvNULL; \
++ gctUINT32_PTR Memory;
++
++#define gcmDEFINESTATEBUFFER_FAST(CommandBuffer, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory;
++
++#define gcmBEGINSTATEBUFFER_FAST(Hardware, CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, &CommandBuffer \
++ )); \
++ \
++ Memory = (gctUINT32_PTR) gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmBEGINSTATEBUFFER_NEW_FAST(Hardware, CommandBuffer, Memory, OutSide) \
++{ \
++ if (OutSide) \
++ {\
++ Memory = (gctUINT32_PTR)*OutSide; \
++ }\
++ else \
++ {\
++ gcmONERROR(gcoBUFFER_StartTEMPCMDBUF( \
++ Hardware->buffer, &CommandBuffer \
++ ));\
++ \
++ Memory = (gctUINT32_PTR)(CommandBuffer->buffer); \
++ \
++ }\
++ \
++ gcmBEGINSECUREUSER(); \
++ gcmSETLOADSTATEBASE(CommandBuffer,OutSide);\
++}
++/*******************************************************************************
++**
++** gcmCONFIGUREUNIFORMS
++**
++** Configure uniforms according to chip and numConstants.
++*/
++#if !gcdENABLE_UNIFIED_CONSTANT
++#define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, NumConstants, \
++ UnifiedConst, VsConstBase, PsConstBase, VsConstMax, PsConstMax, ConstMax) \
++{ \
++ if (ChipModel == gcv2000 && ChipRevision == 0x5118) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ else if (NumConstants == 320) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ /* All GC1000 series chips can only support 64 uniforms for ps on non-unified const mode. */ \
++ else if (NumConstants > 256 && ChipModel == gcv1000) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ else if (NumConstants > 256) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 256; \
++ ConstMax = 512; \
++ } \
++ else if (NumConstants == 256) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 256; \
++ ConstMax = 512; \
++ } \
++ else \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 168; \
++ PsConstMax = 64; \
++ ConstMax = 232; \
++ } \
++}
++#else
++#define gcmCONFIGUREUNIFORMS(ChipModel, ChipRevision, NumConstants, \
++ UnifiedConst, VsConstBase, PsConstBase, VsConstMax, PsConstMax, ConstMax) \
++{ \
++ if (NumConstants > 256) \
++ { \
++ UnifiedConst = gcvTRUE; \
++ VsConstBase = gcregSHUniformsRegAddrs; \
++ PsConstBase = gcregSHUniformsRegAddrs; \
++ ConstMax = NumConstants; \
++ VsConstMax = 256; \
++ PsConstMax = ConstMax - VsConstMax; \
++ } \
++ else if (NumConstants == 256) \
++ { \
++ if (ChipModel == gcv2000 && ChipRevision == 0x5118) \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 64; \
++ ConstMax = 320; \
++ } \
++ else \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 256; \
++ PsConstMax = 256; \
++ ConstMax = 512; \
++ } \
++ } \
++ else \
++ { \
++ UnifiedConst = gcvFALSE; \
++ VsConstBase = AQVertexShaderConstRegAddrs; \
++ PsConstBase = AQPixelShaderConstRegAddrs; \
++ VsConstMax = 168; \
++ PsConstMax = 64; \
++ ConstMax = 232; \
++ } \
++}
++#endif
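++/* Example (editor's illustration; not part of the original Vivante sources).
++** Deriving the uniform register layout for a chip; chipModel, chipRevision
++** and numConstants would typically come from gcsHAL_QUERY_CHIP_IDENTITY, and
++** the output variable types below are assumptions based on the values the
++** macro assigns:
++**
++**     gctBOOL   unifiedConst;
++**     gctUINT32 vsConstBase, psConstBase;
++**     gctUINT32 vsConstMax, psConstMax, constMax;
++**
++**     gcmCONFIGUREUNIFORMS(chipModel, chipRevision, numConstants,
++**                          unifiedConst, vsConstBase, psConstBase,
++**                          vsConstMax, psConstMax, constMax);
++*/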
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_base_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver.h 2015-05-01 14:57:59.583427001 -0500
+@@ -0,0 +1,1136 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_driver_h_
++#define __gc_hal_driver_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_driver_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++#define IOCTL_GCHAL_KERNEL_INTERFACE 30001
++#define IOCTL_GCHAL_TERMINATE 30002
++
++#undef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++typedef enum _gceHAL_COMMAND_CODES
++{
++ /* Generic query. */
++ gcvHAL_QUERY_VIDEO_MEMORY,
++ gcvHAL_QUERY_CHIP_IDENTITY,
++
++ /* Contiguous memory. */
++ gcvHAL_ALLOCATE_NON_PAGED_MEMORY,
++ gcvHAL_FREE_NON_PAGED_MEMORY,
++ gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY,
++ gcvHAL_FREE_CONTIGUOUS_MEMORY,
++
++ /* Video memory allocation. */
++ gcvHAL_ALLOCATE_VIDEO_MEMORY, /* Enforced alignment. */
++ gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, /* No alignment. */
++ gcvHAL_RELEASE_VIDEO_MEMORY,
++
++ /* Physical-to-logical mapping. */
++ gcvHAL_MAP_MEMORY,
++ gcvHAL_UNMAP_MEMORY,
++
++ /* Logical-to-physical mapping. */
++ gcvHAL_MAP_USER_MEMORY,
++ gcvHAL_UNMAP_USER_MEMORY,
++
++ /* Surface lock/unlock. */
++ gcvHAL_LOCK_VIDEO_MEMORY,
++ gcvHAL_UNLOCK_VIDEO_MEMORY,
++
++ /* Event queue. */
++ gcvHAL_EVENT_COMMIT,
++
++ gcvHAL_USER_SIGNAL,
++ gcvHAL_SIGNAL,
++ gcvHAL_WRITE_DATA,
++
++ gcvHAL_COMMIT,
++ gcvHAL_STALL,
++
++ gcvHAL_READ_REGISTER,
++ gcvHAL_WRITE_REGISTER,
++
++ gcvHAL_GET_PROFILE_SETTING,
++ gcvHAL_SET_PROFILE_SETTING,
++
++ gcvHAL_READ_ALL_PROFILE_REGISTERS,
++ gcvHAL_PROFILE_REGISTERS_2D,
++#if VIVANTE_PROFILER_PERDRAW
++ gcvHAL_READ_PROFILER_REGISTER_SETTING,
++#endif
++
++ /* Power management. */
++ gcvHAL_SET_POWER_MANAGEMENT_STATE,
++ gcvHAL_QUERY_POWER_MANAGEMENT_STATE,
++
++ gcvHAL_GET_BASE_ADDRESS,
++
++ gcvHAL_SET_IDLE, /* reserved */
++
++ /* Queries. */
++ gcvHAL_QUERY_KERNEL_SETTINGS,
++
++ /* Reset. */
++ gcvHAL_RESET,
++
++ /* Map physical address into handle. */
++ gcvHAL_MAP_PHYSICAL,
++
++ /* Debugger stuff. */
++ gcvHAL_DEBUG,
++
++ /* Cache stuff. */
++ gcvHAL_CACHE,
++
++ /* TimeStamp */
++ gcvHAL_TIMESTAMP,
++
++ /* Database. */
++ gcvHAL_DATABASE,
++
++ /* Version. */
++ gcvHAL_VERSION,
++
++ /* Chip info */
++ gcvHAL_CHIP_INFO,
++
++ /* Process attaching/detaching. */
++ gcvHAL_ATTACH,
++ gcvHAL_DETACH,
++
++ /* Composition. */
++ gcvHAL_COMPOSE,
++
++ /* Set timeOut value */
++ gcvHAL_SET_TIMEOUT,
++
++ /* Frame database. */
++ gcvHAL_GET_FRAME_INFO,
++
++ gcvHAL_QUERY_COMMAND_BUFFER,
++
++ gcvHAL_COMMIT_DONE,
++
++ /* GPU and event dump */
++ gcvHAL_DUMP_GPU_STATE,
++ gcvHAL_DUMP_EVENT,
++
++ /* Virtual command buffer. */
++ gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER,
++ gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER,
++
++ /* FSCALE_VAL. */
++ gcvHAL_SET_FSCALE_VALUE,
++ gcvHAL_GET_FSCALE_VALUE,
++
++ gcvHAL_NAME_VIDEO_MEMORY,
++ gcvHAL_IMPORT_VIDEO_MEMORY,
++
++ /* Reset time stamp. */
++ gcvHAL_QUERY_RESET_TIME_STAMP,
++
++ /* Multi-GPU read/write. */
++ gcvHAL_READ_REGISTER_EX,
++ gcvHAL_WRITE_REGISTER_EX,
++
++ /* Sync point operations. */
++ gcvHAL_SYNC_POINT,
++
++ /* Create native fence and return its fd. */
++ gcvHAL_CREATE_NATIVE_FENCE,
++
++    /* Destroy MMU. */
++ gcvHAL_DESTROY_MMU,
++
++ /* Shared buffer. */
++ gcvHAL_SHBUF,
++
++ /* Config power management. */
++ gcvHAL_CONFIG_POWER_MANAGEMENT,
++
++ /* Connect a video node to an OS native fd. */
++ gcvHAL_GET_VIDEO_MEMORY_FD,
++}
++gceHAL_COMMAND_CODES;
++
++/******************************************************************************\
++****************************** Interface Structure *****************************
++\******************************************************************************/
++
++#define gcdMAX_PROFILE_FILE_NAME 128
++
++/* Kernel settings. */
++typedef struct _gcsKERNEL_SETTINGS
++{
++ /* Used RealTime signal between kernel and user. */
++ gctINT signal;
++}
++gcsKERNEL_SETTINGS;
++
++
++/* gcvHAL_QUERY_CHIP_IDENTITY */
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY * gcsHAL_QUERY_CHIP_IDENTITY_PTR;
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY
++{
++
++ /* Chip model. */
++ gceCHIPMODEL chipModel;
++
++ /* Revision value.*/
++ gctUINT32 chipRevision;
++
++ /* Supported feature fields. */
++ gctUINT32 chipFeatures;
++
++ /* Supported minor feature fields. */
++ gctUINT32 chipMinorFeatures;
++
++ /* Supported minor feature 1 fields. */
++ gctUINT32 chipMinorFeatures1;
++
++ /* Supported minor feature 2 fields. */
++ gctUINT32 chipMinorFeatures2;
++
++ /* Supported minor feature 3 fields. */
++ gctUINT32 chipMinorFeatures3;
++
++ /* Supported minor feature 4 fields. */
++ gctUINT32 chipMinorFeatures4;
++
++ /* Supported minor feature 5 fields. */
++ gctUINT32 chipMinorFeatures5;
++
++ /* Number of streams supported. */
++ gctUINT32 streamCount;
++
++ /* Total number of temporary registers per thread. */
++ gctUINT32 registerMax;
++
++ /* Maximum number of threads. */
++ gctUINT32 threadCount;
++
++ /* Number of shader cores. */
++ gctUINT32 shaderCoreCount;
++
++ /* Size of the vertex cache. */
++ gctUINT32 vertexCacheSize;
++
++ /* Number of entries in the vertex output buffer. */
++ gctUINT32 vertexOutputBufferSize;
++
++ /* Number of pixel pipes. */
++ gctUINT32 pixelPipes;
++
++ /* Number of instructions. */
++ gctUINT32 instructionCount;
++
++ /* Number of constants. */
++ gctUINT32 numConstants;
++
++ /* Buffer size */
++ gctUINT32 bufferSize;
++
++ /* Number of varyings */
++ gctUINT32 varyingsCount;
++
++ /* Supertile layout style in hardware */
++ gctUINT32 superTileMode;
++
++#if gcdMULTI_GPU
++ /* Number of 3D GPUs */
++ gctUINT32 gpuCoreCount;
++#endif
++
++ /* Special control bits for 2D chip. */
++ gctUINT32 chip2DControl;
++
++ /* Product ID */
++ gctUINT32 productID;
++}
++gcsHAL_QUERY_CHIP_IDENTITY;
++
++/* gcvHAL_COMPOSE. */
++typedef struct _gcsHAL_COMPOSE * gcsHAL_COMPOSE_PTR;
++typedef struct _gcsHAL_COMPOSE
++{
++ /* Composition state buffer. */
++ gctUINT64 physical;
++ gctUINT64 logical;
++ gctUINT offset;
++ gctUINT size;
++
++ /* Composition end signal. */
++ gctUINT64 process;
++ gctUINT64 signal;
++
++ /* User signals. */
++ gctUINT64 userProcess;
++ gctUINT64 userSignal1;
++ gctUINT64 userSignal2;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. */
++ gctINT32 coid;
++
++ /* Set by server. */
++ gctINT32 rcvid;
++#endif
++}
++gcsHAL_COMPOSE;
++
++
++typedef struct _gcsHAL_INTERFACE
++{
++ /* Command code. */
++ gceHAL_COMMAND_CODES command;
++
++ /* Hardware type. */
++ gceHARDWARE_TYPE hardwareType;
++
++ /* Status value. */
++ gceSTATUS status;
++
++ /* Handle to this interface channel. */
++ gctUINT64 handle;
++
++ /* Pid of the client. */
++ gctUINT32 pid;
++
++ /* Union of command structures. */
++ union _u
++ {
++ /* gcvHAL_GET_BASE_ADDRESS */
++ struct _gcsHAL_GET_BASE_ADDRESS
++ {
++ /* Physical memory address of internal memory. */
++ OUT gctUINT32 baseAddress;
++ }
++ GetBaseAddress;
++
++ /* gcvHAL_QUERY_VIDEO_MEMORY */
++ struct _gcsHAL_QUERY_VIDEO_MEMORY
++ {
++ /* Physical memory address of internal memory. Just a name. */
++ OUT gctUINT32 internalPhysical;
++
++ /* Size in bytes of internal memory. */
++ OUT gctUINT64 internalSize;
++
++ /* Physical memory address of external memory. Just a name. */
++ OUT gctUINT32 externalPhysical;
++
++ /* Size in bytes of external memory.*/
++ OUT gctUINT64 externalSize;
++
++ /* Physical memory address of contiguous memory. Just a name. */
++ OUT gctUINT32 contiguousPhysical;
++
++ /* Size in bytes of contiguous memory.*/
++ OUT gctUINT64 contiguousSize;
++ }
++ QueryVideoMemory;
++
++ /* gcvHAL_QUERY_CHIP_IDENTITY */
++ gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity;
++
++ /* gcvHAL_MAP_MEMORY */
++ struct _gcsHAL_MAP_MEMORY
++ {
++ /* Physical memory address to map. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to map. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory. */
++ OUT gctUINT64 logical;
++ }
++ MapMemory;
++
++ /* gcvHAL_UNMAP_MEMORY */
++ struct _gcsHAL_UNMAP_MEMORY
++ {
++ /* Physical memory address to unmap. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to unmap. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory to unmap. */
++ IN gctUINT64 logical;
++ }
++ UnmapMemory;
++
++ /* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT bytes;
++
++ /* Buffer alignment. */
++ IN gctUINT alignment;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Flag of allocation. */
++ IN gctUINT32 flag;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory. */
++ OUT gctUINT32 node;
++ }
++ AllocateLinearVideoMemory;
++
++ /* gcvHAL_ALLOCATE_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIDEO_MEMORY
++ {
++ /* Width of rectangle to allocate. */
++ IN OUT gctUINT width;
++
++ /* Height of rectangle to allocate. */
++ IN OUT gctUINT height;
++
++ /* Depth of rectangle to allocate. */
++ IN gctUINT depth;
++
++ /* Format rectangle to allocate in gceSURF_FORMAT. */
++ IN gceSURF_FORMAT format;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory. */
++ OUT gctUINT32 node;
++ }
++ AllocateVideoMemory;
++
++ /* gcvHAL_RELEASE_VIDEO_MEMORY */
++ struct _gcsHAL_RELEASE_VIDEO_MEMORY
++ {
++ /* Allocated video memory. */
++ IN gctUINT32 node;
++
++#ifdef __QNXNTO__
++/* TODO: This is part of the unlock - why is it here? */
++ /* Mapped logical address to unmap in user space. */
++ OUT gctUINT64 memory;
++
++ /* Number of bytes to allocated. */
++ OUT gctUINT64 bytes;
++#endif
++ }
++ ReleaseVideoMemory;
++
++ /* gcvHAL_LOCK_VIDEO_MEMORY */
++ struct _gcsHAL_LOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory. */
++ IN gctUINT32 node;
++
++ /* Cache configuration. */
++        /* Only gcvPOOL_CONTIGUOUS and gcvPOOL_VIRTUAL
++        ** can be configured. */
++ IN gctBOOL cacheable;
++
++ /* Hardware specific address. */
++ OUT gctUINT32 address;
++
++ /* Mapped logical address. */
++ OUT gctUINT64 memory;
++
++        /* Customer private handle. */
++ OUT gctUINT32 gid;
++
++ /* Bus address of a contiguous video node. */
++ OUT gctUINT64 physicalAddress;
++ }
++ LockVideoMemory;
++
++ /* gcvHAL_UNLOCK_VIDEO_MEMORY */
++ struct _gcsHAL_UNLOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++
++ /* Type of surface. */
++ IN gceSURF_TYPE type;
++
++        /* Flag to unlock the surface asynchronously. */
++ IN OUT gctBOOL asynchroneous;
++ }
++ UnlockVideoMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateNonPagedMemory;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeNonPagedMemory;
++
++    /* gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER */
++ struct _gcsHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateVirtualCommandBuffer;
++
++    /* gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER */
++ struct _gcsHAL_FREE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeVirtualCommandBuffer;
++
++ /* gcvHAL_EVENT_COMMIT. */
++ struct _gcsHAL_EVENT_COMMIT
++ {
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++
++#if gcdMULTI_GPU
++ IN gceCORE_3D_MASK chipEnable;
++
++ IN gceMULTI_GPU_MODE gpuMode;
++#endif
++ }
++ Event;
++
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_COMMIT
++ {
++ /* Context buffer object gckCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command buffer gcoCMDBUF. */
++ IN gctUINT64 commandBuffer;
++
++ /* State delta buffer in gcsSTATE_DELTA. */
++ gctUINT64 delta;
++
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++
++#if gcdMULTI_GPU
++ IN gceCORE_3D_MASK chipEnable;
++
++ IN gceMULTI_GPU_MODE gpuMode;
++#endif
++ }
++ Commit;
++
++ /* gcvHAL_MAP_USER_MEMORY */
++ struct _gcsHAL_MAP_USER_MEMORY
++ {
++ /* Base address of user memory to map. */
++ IN gctUINT64 memory;
++
++ /* Physical address of user memory to map. */
++ IN gctUINT32 physical;
++
++ /* Size of user memory in bytes to map. */
++ IN gctUINT64 size;
++
++ /* Info record required by gcvHAL_UNMAP_USER_MEMORY. Just a name. */
++ OUT gctUINT32 info;
++
++ /* Physical address of mapped memory. */
++ OUT gctUINT32 address;
++ }
++ MapUserMemory;
++
++ /* gcvHAL_UNMAP_USER_MEMORY */
++ struct _gcsHAL_UNMAP_USER_MEMORY
++ {
++ /* Base address of user memory to unmap. */
++ IN gctUINT64 memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctUINT64 size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. Just a name. */
++ IN gctUINT32 info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++ }
++ UnmapUserMemory;
++#if !USE_NEW_LINUX_SIGNAL
++ /* gcsHAL_USER_SIGNAL */
++ struct _gcsHAL_USER_SIGNAL
++ {
++ /* Command. */
++ gceUSER_SIGNAL_COMMAND_CODES command;
++
++ /* Signal ID. */
++ IN OUT gctINT id;
++
++ /* Reset mode. */
++ IN gctBOOL manualReset;
++
++        /* Wait timeout. */
++ IN gctUINT32 wait;
++
++ /* State. */
++ IN gctBOOL state;
++ }
++ UserSignal;
++#endif
++
++ /* gcvHAL_SIGNAL. */
++ struct _gcsHAL_SIGNAL
++ {
++ /* Signal handle to signal gctSIGNAL. */
++ IN gctUINT64 signal;
++
++ /* Reserved gctSIGNAL. */
++ IN gctUINT64 auxSignal;
++
++ /* Process owning the signal gctHANDLE. */
++ IN gctUINT64 process;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. Set by client in gcoOS_CreateSignal. */
++ IN gctINT32 coid;
++
++ /* Set by server. */
++ IN gctINT32 rcvid;
++#endif
++ /* Event generated from where of pipeline */
++ IN gceKERNEL_WHERE fromWhere;
++ }
++ Signal;
++
++ /* gcvHAL_WRITE_DATA. */
++ struct _gcsHAL_WRITE_DATA
++ {
++ /* Address to write data to. */
++ IN gctUINT32 address;
++
++ /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteData;
++
++ /* gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_ALLOCATE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Hardware address of allocation. */
++ OUT gctUINT32 address;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateContiguousMemory;
++
++ /* gcvHAL_FREE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_FREE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeContiguousMemory;
++
++ /* gcvHAL_READ_REGISTER */
++ struct _gcsHAL_READ_REGISTER
++ {
++        /* Address of register to read. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ OUT gctUINT32 data;
++ }
++ ReadRegisterData;
++
++ /* gcvHAL_WRITE_REGISTER */
++ struct _gcsHAL_WRITE_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++        /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteRegisterData;
++
++#if gcdMULTI_GPU
++ /* gcvHAL_READ_REGISTER_EX */
++ struct _gcsHAL_READ_REGISTER_EX
++ {
++        /* Address of register to read. */
++ IN gctUINT32 address;
++
++ IN gctUINT32 coreSelect;
++
++ /* Data read. */
++ OUT gctUINT32 data[gcdMULTI_GPU];
++ }
++ ReadRegisterDataEx;
++
++ /* gcvHAL_WRITE_REGISTER_EX */
++ struct _gcsHAL_WRITE_REGISTER_EX
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++ IN gctUINT32 coreSelect;
++
++        /* Data to write. */
++ IN gctUINT32 data[gcdMULTI_GPU];
++ }
++ WriteRegisterDataEx;
++#endif
++
++#if VIVANTE_PROFILER
++ /* gcvHAL_GET_PROFILE_SETTING */
++ struct _gcsHAL_GET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ OUT gctBOOL enable;
++ }
++ GetProfileSetting;
++
++ /* gcvHAL_SET_PROFILE_SETTING */
++ struct _gcsHAL_SET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ IN gctBOOL enable;
++ }
++ SetProfileSetting;
++
++#if VIVANTE_PROFILER_PERDRAW
++ /* gcvHAL_READ_PROFILER_REGISTER_SETTING */
++ struct _gcsHAL_READ_PROFILER_REGISTER_SETTING
++ {
++        /* Should clear register. */
++ IN gctBOOL bclear;
++ }
++ SetProfilerRegisterClear;
++#endif
++
++ /* gcvHAL_READ_ALL_PROFILE_REGISTERS */
++ struct _gcsHAL_READ_ALL_PROFILE_REGISTERS
++ {
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++#endif
++
++ /* Data read. */
++ OUT gcsPROFILER_COUNTERS counters;
++ }
++ RegisterProfileData;
++
++ /* gcvHAL_PROFILE_REGISTERS_2D */
++ struct _gcsHAL_PROFILE_REGISTERS_2D
++ {
++ /* Data read in gcs2D_PROFILE. */
++ OUT gctUINT64 hwProfile2D;
++ }
++ RegisterProfileData2D;
++#endif
++
++ /* Power management. */
++ /* gcvHAL_SET_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_SET_POWER_MANAGEMENT
++ {
++        /* Power state to set. */
++ IN gceCHIPPOWERSTATE state;
++ }
++ SetPowerManagement;
++
++ /* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_QUERY_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ OUT gceCHIPPOWERSTATE state;
++
++ /* Idle query. */
++ OUT gctBOOL isIdle;
++ }
++ QueryPowerManagement;
++
++ /* gcvHAL_QUERY_KERNEL_SETTINGS */
++ struct _gcsHAL_QUERY_KERNEL_SETTINGS
++ {
++ /* Settings.*/
++ OUT gcsKERNEL_SETTINGS settings;
++ }
++ QueryKernelSettings;
++
++ /* gcvHAL_MAP_PHYSICAL */
++ struct _gcsHAL_MAP_PHYSICAL
++ {
++ /* gcvTRUE to map, gcvFALSE to unmap. */
++ IN gctBOOL map;
++
++ /* Physical address. */
++ IN OUT gctUINT64 physical;
++ }
++ MapPhysical;
++
++ /* gcvHAL_DEBUG */
++ struct _gcsHAL_DEBUG
++ {
++ /* If gcvTRUE, set the debug information. */
++ IN gctBOOL set;
++ IN gctUINT32 level;
++ IN gctUINT32 zones;
++ IN gctBOOL enable;
++
++ IN gceDEBUG_MESSAGE_TYPE type;
++ IN gctUINT32 messageSize;
++
++ /* Message to print if not empty. */
++ IN gctCHAR message[80];
++ }
++ Debug;
++
++ /* gcvHAL_CACHE */
++ struct _gcsHAL_CACHE
++ {
++ IN gceCACHEOPERATION operation;
++ IN gctUINT64 process;
++ IN gctUINT64 logical;
++ IN gctUINT64 bytes;
++ IN gctUINT32 node;
++ }
++ Cache;
++
++ /* gcvHAL_TIMESTAMP */
++ struct _gcsHAL_TIMESTAMP
++ {
++ /* Timer select. */
++ IN gctUINT32 timer;
++
++ /* Timer request type (0-stop, 1-start, 2-send delta). */
++ IN gctUINT32 request;
++
++ /* Result of delta time in microseconds. */
++ OUT gctINT32 timeDelta;
++ }
++ TimeStamp;
++
++ /* gcvHAL_DATABASE */
++ struct _gcsHAL_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMem;
++ OUT gcuDATABASE_INFO nonPaged;
++ OUT gcuDATABASE_INFO contiguous;
++ OUT gcuDATABASE_INFO gpuIdle;
++
++ /* Detail information about video memory. */
++ OUT gcuDATABASE_INFO vidMemPool[3];
++ }
++ Database;
++
++ /* gcvHAL_VERSION */
++ struct _gcsHAL_VERSION
++ {
++ /* Major version: N.n.n. */
++ OUT gctINT32 major;
++
++ /* Minor version: n.N.n. */
++ OUT gctINT32 minor;
++
++ /* Patch version: n.n.N. */
++ OUT gctINT32 patch;
++
++ /* Build version. */
++ OUT gctUINT32 build;
++ }
++ Version;
++
++ /* gcvHAL_CHIP_INFO */
++ struct _gcsHAL_CHIP_INFO
++ {
++ /* Chip count. */
++ OUT gctINT32 count;
++
++ /* Chip types. */
++ OUT gceHARDWARE_TYPE types[gcdCHIP_COUNT];
++ }
++ ChipInfo;
++
++ /* gcvHAL_ATTACH */
++ struct _gcsHAL_ATTACH
++ {
++ /* Handle of context buffer object. */
++ OUT gctUINT32 context;
++
++ /* Number of states in the buffer. */
++ OUT gctUINT64 stateCount;
++
++ /* Map context buffer to user or not. */
++ IN gctBOOL map;
++
++ /* Physical of context buffer. */
++ OUT gctUINT32 physicals[2];
++
++ /* Physical of context buffer. */
++ OUT gctUINT64 logicals[2];
++
++ /* Bytes of context buffer. */
++ OUT gctUINT32 bytes;
++ }
++ Attach;
++
++ /* gcvHAL_DETACH */
++ struct _gcsHAL_DETACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++ }
++ Detach;
++
++ /* gcvHAL_COMPOSE. */
++ gcsHAL_COMPOSE Compose;
++
++ /* gcvHAL_GET_FRAME_INFO. */
++ struct _gcsHAL_GET_FRAME_INFO
++ {
++ /* gcsHAL_FRAME_INFO* */
++ OUT gctUINT64 frameInfo;
++ }
++ GetFrameInfo;
++
++    /* gcvHAL_SET_TIMEOUT. */
++ struct _gcsHAL_SET_TIMEOUT
++ {
++ gctUINT32 timeOut;
++ }
++ SetTimeOut;
++
++#if gcdENABLE_VG
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_VGCOMMIT
++ {
++ /* Context buffer. gcsVGCONTEXT_PTR */
++ IN gctUINT64 context;
++
++ /* Command queue. gcsVGCMDQUEUE_PTR */
++ IN gctUINT64 queue;
++
++ /* Number of entries in the queue. */
++ IN gctUINT entryCount;
++
++ /* Task table. gcsTASK_MASTER_TABLE_PTR */
++ IN gctUINT64 taskTable;
++ }
++ VGCommit;
++
++ /* gcvHAL_QUERY_COMMAND_BUFFER */
++ struct _gcsHAL_QUERY_COMMAND_BUFFER
++ {
++ /* Command buffer attributes. */
++ OUT gcsCOMMAND_BUFFER_INFO information;
++ }
++ QueryCommandBuffer;
++
++#endif
++
++ struct _gcsHAL_SET_FSCALE_VALUE
++ {
++ IN gctUINT value;
++ }
++ SetFscaleValue;
++
++ struct _gcsHAL_GET_FSCALE_VALUE
++ {
++ OUT gctUINT value;
++ OUT gctUINT minValue;
++ OUT gctUINT maxValue;
++ }
++ GetFscaleValue;
++
++ struct _gcsHAL_NAME_VIDEO_MEMORY
++ {
++ IN gctUINT32 handle;
++ OUT gctUINT32 name;
++ }
++ NameVideoMemory;
++
++ struct _gcsHAL_IMPORT_VIDEO_MEMORY
++ {
++ IN gctUINT32 name;
++ OUT gctUINT32 handle;
++ }
++ ImportVideoMemory;
++
++ struct _gcsHAL_QUERY_RESET_TIME_STAMP
++ {
++ OUT gctUINT64 timeStamp;
++ }
++ QueryResetTimeStamp;
++
++ struct _gcsHAL_SYNC_POINT
++ {
++ /* Command. */
++ gceSYNC_POINT_COMMAND_CODES command;
++
++ /* Sync point. */
++ IN OUT gctUINT64 syncPoint;
++
++ /* From where. */
++ IN gceKERNEL_WHERE fromWhere;
++
++ /* Signaled state. */
++ OUT gctBOOL state;
++ }
++ SyncPoint;
++
++ struct _gcsHAL_CREATE_NATIVE_FENCE
++ {
++ /* Signal id to dup. */
++ IN gctUINT64 syncPoint;
++
++ /* Native fence file descriptor. */
++ OUT gctINT fenceFD;
++
++ }
++ CreateNativeFence;
++
++ struct _gcsHAL_DESTROY_MMU
++ {
++ /* Mmu object. */
++ IN gctUINT64 mmu;
++ }
++ DestroyMmu;
++
++ struct _gcsHAL_SHBUF
++ {
++ gceSHBUF_COMMAND_CODES command;
++
++ /* Shared buffer. */
++ IN OUT gctUINT64 id;
++
++ /* User data to be shared. */
++ IN gctUINT64 data;
++
++ /* Data size. */
++ IN OUT gctUINT32 bytes;
++ }
++ ShBuf;
++
++ struct _gcsHAL_CONFIG_POWER_MANAGEMENT
++ {
++ IN gctBOOL enable;
++ }
++ ConfigPowerManagement;
++
++ struct _gcsHAL_GET_VIDEO_MEMORY_FD
++ {
++ IN gctUINT32 handle;
++ OUT gctINT fd;
++ }
++ GetVideoMemoryFd;
++ }
++ u;
++}
++gcsHAL_INTERFACE;
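++/* Example (editor's illustration; not part of the original Vivante sources).
++** Filling in the interface record for a chip identity query. gcvHARDWARE_3D
++** is assumed to be a gceHARDWARE_TYPE value, and the ioctl marshalling that
++** actually delivers the record to galcore is defined elsewhere in the driver
++** and is not shown:
++**
++**     gcsHAL_INTERFACE iface;
++**
++**     iface.command      = gcvHAL_QUERY_CHIP_IDENTITY;
++**     iface.hardwareType = gcvHARDWARE_3D;
++**
++**     (submit 'iface' through the galcore device, then read back
++**      iface.u.QueryChipIdentity.chipModel and
++**      iface.u.QueryChipIdentity.chipRevision)
++*/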
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_driver_vg.h 2015-05-01 14:57:59.583427001 -0500
+@@ -0,0 +1,270 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_vg_h_
++#define __gc_hal_driver_vg_h_
++
++
++
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++/******************************************************************************\
++********************* Command buffer information structure. ********************
++\******************************************************************************/
++
++typedef struct _gcsCOMMAND_BUFFER_INFO * gcsCOMMAND_BUFFER_INFO_PTR;
++typedef struct _gcsCOMMAND_BUFFER_INFO
++{
++ /* FE command buffer interrupt ID. */
++ gctINT32 feBufferInt;
++
++ /* TS overflow interrupt ID. */
++ gctINT32 tsOverflowInt;
++
++ /* Alignment and mask for the buffer address. */
++ gctUINT addressMask;
++ gctUINT32 addressAlignment;
++
++ /* Alignment for each command. */
++ gctUINT32 commandAlignment;
++
++ /* Number of bytes required by the STATE command. */
++ gctUINT32 stateCommandSize;
++
++ /* Number of bytes required by the RESTART command. */
++ gctUINT32 restartCommandSize;
++
++ /* Number of bytes required by the FETCH command. */
++ gctUINT32 fetchCommandSize;
++
++ /* Number of bytes required by the CALL command. */
++ gctUINT32 callCommandSize;
++
++ /* Number of bytes required by the RETURN command. */
++ gctUINT32 returnCommandSize;
++
++ /* Number of bytes required by the EVENT command. */
++ gctUINT32 eventCommandSize;
++
++ /* Number of bytes required by the END command. */
++ gctUINT32 endCommandSize;
++
++ /* Number of bytes reserved at the tail of a static command buffer. */
++ gctUINT32 staticTailSize;
++
++ /* Number of bytes reserved at the tail of a dynamic command buffer. */
++ gctUINT32 dynamicTailSize;
++}
++gcsCOMMAND_BUFFER_INFO;
++
++/******************************************************************************\
++******************************** Task Structures *******************************
++\******************************************************************************/
++
++typedef enum _gceTASK
++{
++ gcvTASK_LINK,
++ gcvTASK_CLUSTER,
++ gcvTASK_INCREMENT,
++ gcvTASK_DECREMENT,
++ gcvTASK_SIGNAL,
++ gcvTASK_LOCKDOWN,
++ gcvTASK_UNLOCK_VIDEO_MEMORY,
++ gcvTASK_FREE_VIDEO_MEMORY,
++ gcvTASK_FREE_CONTIGUOUS_MEMORY,
++ gcvTASK_UNMAP_USER_MEMORY
++}
++gceTASK;
++
++typedef struct _gcsTASK_HEADER * gcsTASK_HEADER_PTR;
++typedef struct _gcsTASK_HEADER
++{
++ /* Task ID. */
++ IN gceTASK id;
++}
++gcsTASK_HEADER;
++
++typedef struct _gcsTASK_LINK * gcsTASK_LINK_PTR;
++typedef struct _gcsTASK_LINK
++{
++ /* Task ID (gcvTASK_LINK). */
++ IN gceTASK id;
++
++ /* Pointer to the next task container. */
++ IN gctPOINTER cotainer;
++
++ /* Pointer to the next task from the next task container. */
++ IN gcsTASK_HEADER_PTR task;
++}
++gcsTASK_LINK;
++
++typedef struct _gcsTASK_CLUSTER * gcsTASK_CLUSTER_PTR;
++typedef struct _gcsTASK_CLUSTER
++{
++ /* Task ID (gcvTASK_CLUSTER). */
++ IN gceTASK id;
++
++ /* Number of tasks in the cluster. */
++ IN gctUINT taskCount;
++}
++gcsTASK_CLUSTER;
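++
++/* Illustrative sketch: how a consumer of these records might walk a task
++** list. Records are read through the common gcsTASK_HEADER; a gcvTASK_LINK
++** record points at the first task of the next container, and a
++** gcvTASK_CLUSTER record announces that taskCount tasks follow. The
++** dispatch and advance helpers named below are hypothetical.
++**
++**     gcsTASK_HEADER_PTR task = firstTask;
++**
++**     while (task != gcvNULL)
++**     {
++**         if (task->id == gcvTASK_LINK)
++**         {
++**             task = ((gcsTASK_LINK_PTR) task)->task;
++**             continue;
++**         }
++**
++**         DispatchTask(task);        hypothetical handler, switches on id
++**         task = AdvanceTask(task);  hypothetical, steps past this record
++**     }
++*/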
++
++typedef struct _gcsTASK_INCREMENT * gcsTASK_INCREMENT_PTR;
++typedef struct _gcsTASK_INCREMENT
++{
++ /* Task ID (gcvTASK_INCREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to increment. */
++ IN gctUINT32 address;
++}
++gcsTASK_INCREMENT;
++
++typedef struct _gcsTASK_DECREMENT * gcsTASK_DECREMENT_PTR;
++typedef struct _gcsTASK_DECREMENT
++{
++ /* Task ID (gcvTASK_DECREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to decrement. */
++ IN gctUINT32 address;
++}
++gcsTASK_DECREMENT;
++
++typedef struct _gcsTASK_SIGNAL * gcsTASK_SIGNAL_PTR;
++typedef struct _gcsTASK_SIGNAL
++{
++ /* Task ID (gcvTASK_SIGNAL). */
++ IN gceTASK id;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ IN gctINT32 coid;
++ IN gctINT32 rcvid;
++#endif
++}
++gcsTASK_SIGNAL;
++
++typedef struct _gcsTASK_LOCKDOWN * gcsTASK_LOCKDOWN_PTR;
++typedef struct _gcsTASK_LOCKDOWN
++{
++ /* Task ID (gcvTASK_LOCKDOWN). */
++ IN gceTASK id;
++
++ /* Address of the user space counter. */
++ IN gctUINT32 userCounter;
++
++ /* Address of the kernel space counter. */
++ IN gctUINT32 kernelCounter;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++}
++gcsTASK_LOCKDOWN;
++
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY * gcsTASK_UNLOCK_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_UNLOCK_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY * gcsTASK_FREE_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT32 node;
++}
++gcsTASK_FREE_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY * gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */
++ IN gceTASK id;
++
++ /* Number of bytes allocated. */
++ IN gctSIZE_T bytes;
++
++ /* Physical address of allocation. */
++ IN gctPHYS_ADDR physical;
++
++ /* Logical address of allocation. */
++ IN gctPOINTER logical;
++}
++gcsTASK_FREE_CONTIGUOUS_MEMORY;
++
++typedef struct _gcsTASK_UNMAP_USER_MEMORY * gcsTASK_UNMAP_USER_MEMORY_PTR;
++typedef struct _gcsTASK_UNMAP_USER_MEMORY
++{
++ /* Task ID (gcvTASK_UNMAP_USER_MEMORY). */
++ IN gceTASK id;
++
++ /* Base address of user memory to unmap. */
++ IN gctPOINTER memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctSIZE_T size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. */
++ IN gctPOINTER info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++}
++gcsTASK_UNMAP_USER_MEMORY;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_vg_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_dump.h 2015-05-01 14:57:59.583427001 -0500
+@@ -0,0 +1,89 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_dump_h_
++#define __gc_hal_dump_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++** FILE LAYOUT:
++**
++** gcsDUMP_FILE structure
++**
++** gcsDUMP_DATA frame
++** gcsDUMP_DATA or gcDUMP_DATA_SIZE records rendering the frame
++** gctUINT8 data[length]
++*/
++
++#define gcvDUMP_FILE_SIGNATURE gcmCC('g','c','D','B')
++
++typedef struct _gcsDUMP_FILE
++{
++ gctUINT32 signature; /* File signature */
++ gctSIZE_T length; /* Length of file */
++ gctUINT32 frames; /* Number of frames in file */
++}
++gcsDUMP_FILE;
++
++typedef enum _gceDUMP_TAG
++{
++ gcvTAG_SURFACE = gcmCC('s','u','r','f'),
++ gcvTAG_FRAME = gcmCC('f','r','m',' '),
++ gcvTAG_COMMAND = gcmCC('c','m','d',' '),
++ gcvTAG_INDEX = gcmCC('i','n','d','x'),
++ gcvTAG_STREAM = gcmCC('s','t','r','m'),
++ gcvTAG_TEXTURE = gcmCC('t','e','x','t'),
++ gcvTAG_RENDER_TARGET = gcmCC('r','n','d','r'),
++ gcvTAG_DEPTH = gcmCC('z','b','u','f'),
++ gcvTAG_RESOLVE = gcmCC('r','s','l','v'),
++ gcvTAG_DELETE = gcmCC('d','e','l',' '),
++ gcvTAG_BUFOBJ = gcmCC('b','u','f','o'),
++}
++gceDUMP_TAG;
++
++typedef struct _gcsDUMP_SURFACE
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctUINT32 address; /* Address of the surface. */
++ gctINT16 width; /* Width of surface. */
++ gctINT16 height; /* Height of surface. */
++ gceSURF_FORMAT format; /* Surface pixel format. */
++ gctSIZE_T length; /* Number of bytes inside the surface. */
++}
++gcsDUMP_SURFACE;
++
++typedef struct _gcsDUMP_DATA
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctSIZE_T length; /* Number of bytes of data. */
++ gctUINT32 address; /* Address for the data. */
++}
++gcsDUMP_DATA;
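++
++/* Illustrative sketch of reading a dump file with the layout described
++** above: a gcsDUMP_FILE header followed by frame and data records, each
++** record followed by "length" bytes of payload. The read/skip helpers are
++** hypothetical and the on-disk packing/endianness is not specified here.
++**
++**     gcsDUMP_FILE file;
++**     ReadBytes(&file, sizeof(file));
++**
++**     if (file.signature != gcvDUMP_FILE_SIGNATURE)
++**         return;                        not a gcDB dump
++**
++**     while (!EndOfFile())
++**     {
++**         gcsDUMP_DATA record;
++**         ReadBytes(&record, sizeof(record));
++**         SkipBytes(record.length);      payload for this record
++**     }
++*/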
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_dump_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform.h 2015-05-01 14:57:59.587427001 -0500
+@@ -0,0 +1,672 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_h_
++#define __gc_hal_eglplatform_h_
++
++/* Include VDK types. */
++#include "gc_hal_types.h"
++#include "gc_hal_base.h"
++#include "gc_hal_eglplatform_type.h"
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
++/* Win32 and Windows CE platforms. */
++#include <windows.h>
++typedef HDC HALNativeDisplayType;
++typedef HWND HALNativeWindowType;
++typedef HBITMAP HALNativePixmapType;
++
++typedef struct __BITFIELDINFO{
++ BITMAPINFO bmi;
++ RGBQUAD bmiColors[2];
++} BITFIELDINFO;
++
++#elif defined(LINUX) && defined(EGL_API_DFB) && !defined(__APPLE__)
++#include <directfb.h>
++typedef struct _DFBDisplay * HALNativeDisplayType;
++typedef struct _DFBWindow * HALNativeWindowType;
++typedef struct _DFBPixmap * HALNativePixmapType;
++
++#elif defined(LINUX) && defined(EGL_API_FB) && !defined(__APPLE__)
++
++#if defined(EGL_API_WL)
++
++#if defined(__GNUC__)
++# define inline __inline__ /* GNU keyword. */
++#endif
++
++/* Wayland platform. */
++#include <wayland-egl.h>
++
++#define WL_EGL_NUM_BACKBUFFERS 3
++
++typedef struct _gcsWL_VIV_BUFFER
++{
++ struct wl_resource *wl_buffer;
++ gcoSURF surface;
++ gctINT32 width, height;
++} gcsWL_VIV_BUFFER;
++
++typedef struct _gcsWL_EGL_DISPLAY
++{
++ struct wl_display* wl_display;
++ struct wl_viv* wl_viv;
++ struct wl_registry *registry;
++ struct wl_event_queue *wl_queue;
++ gctINT swapInterval;
++} gcsWL_EGL_DISPLAY;
++
++typedef struct _gcsWL_EGL_BUFFER_INFO
++{
++ gctINT32 width;
++ gctINT32 height;
++ gctINT32 stride;
++ gceSURF_FORMAT format;
++ gcuVIDMEM_NODE_PTR node;
++ gcePOOL pool;
++ gctUINT bytes;
++ gcoSURF surface;
++ gcoSURF attached_surface;
++ gctINT32 invalidate;
++ gctBOOL locked;
++} gcsWL_EGL_BUFFER_INFO;
++
++typedef struct _gcsWL_EGL_BUFFER
++{
++ struct wl_buffer* wl_buffer;
++ gcsWL_EGL_BUFFER_INFO info;
++} gcsWL_EGL_BUFFER;
++
++typedef struct _gcsWL_EGL_WINDOW_INFO
++{
++ gctINT32 dx;
++ gctINT32 dy;
++ gctUINT width;
++ gctUINT height;
++ gctINT32 attached_width;
++ gctINT32 attached_height;
++ gceSURF_FORMAT format;
++ gctUINT bpp;
++} gcsWL_EGL_WINDOW_INFO;
++
++struct wl_egl_window
++{
++ gcsWL_EGL_DISPLAY* display;
++ gcsWL_EGL_BUFFER backbuffers[WL_EGL_NUM_BACKBUFFERS];
++ gcsWL_EGL_WINDOW_INFO info;
++ gctUINT current;
++ struct wl_surface* surface;
++ struct wl_callback* frame_callback;
++};
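++
++/* Illustrative note: with WL_EGL_NUM_BACKBUFFERS buffers and the "current"
++** index above, one plausible way a swap advances to the next back buffer
++** (a sketch only, not necessarily what the EGL implementation does):
++**
++**     gcsWL_EGL_BUFFER * next = &window->backbuffers[window->current];
++**     window->current = (window->current + 1) % WL_EGL_NUM_BACKBUFFERS;
++*/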
++
++typedef void* HALNativeDisplayType;
++typedef void* HALNativeWindowType;
++typedef void* HALNativePixmapType;
++#else
++/* Linux platform for FBDEV. */
++typedef struct _FBDisplay * HALNativeDisplayType;
++typedef struct _FBWindow * HALNativeWindowType;
++typedef struct _FBPixmap * HALNativePixmapType;
++#endif
++#elif defined(__ANDROID__) || defined(ANDROID)
++
++struct egl_native_pixmap_t;
++
++#if ANDROID_SDK_VERSION >= 9
++ #include <android/native_window.h>
++
++ typedef struct ANativeWindow* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t* HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#else
++ struct android_native_window_t;
++ typedef struct android_native_window_t* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t * HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#endif
++
++#elif defined(LINUX) || defined(__APPLE__)
++/* X11 platform. */
++#include <X11/Xlib.h>
++#include <X11/Xutil.h>
++
++typedef Display * HALNativeDisplayType;
++typedef Window HALNativeWindowType;
++
++#ifdef CUSTOM_PIXMAP
++typedef void * HALNativePixmapType;
++#else
++typedef Pixmap HALNativePixmapType;
++#endif /* CUSTOM_PIXMAP */
++
++/* Rename some badly named X defines. */
++#ifdef Status
++# define XStatus int
++# undef Status
++#endif
++#ifdef Always
++# define XAlways 2
++# undef Always
++#endif
++#ifdef CurrentTime
++# undef CurrentTime
++# define XCurrentTime 0
++#endif
++
++#elif defined(__QNXNTO__)
++#include <screen/screen.h>
++
++/* VOID */
++typedef int HALNativeDisplayType;
++typedef screen_window_t HALNativeWindowType;
++typedef screen_pixmap_t HALNativePixmapType;
++
++#else
++
++#error "Platform not recognized"
++
++/* VOID */
++typedef void * HALNativeDisplayType;
++typedef void * HALNativeWindowType;
++typedef void * HALNativePixmapType;
++
++#endif
++
++/* define DUMMY according to the system */
++#if defined(EGL_API_WL)
++# define WL_DUMMY (31415926)
++# define EGL_DUMMY WL_DUMMY
++#elif defined(__ANDROID__) || defined(ANDROID)
++# define ANDROID_DUMMY (31415926)
++# define EGL_DUMMY ANDROID_DUMMY
++#else
++# define EGL_DUMMY (31415926)
++#endif
++
++/*******************************************************************************
++** Display. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_GetDisplay(
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayByIndex(
++ IN gctINT DisplayIndex,
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfo(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctSIZE_T * Physical,
++ OUT gctINT * Stride,
++ OUT gctINT * BitsPerPixel
++ );
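++
++/* Usage sketch (illustrative only): opening the default display and querying
++** its basic geometry with the calls declared above. gcvNULL and gcvSTATUS_OK
++** come from the HAL headers included at the top of this file.
++**
++**     HALNativeDisplayType display;
++**     gctINT width, height, stride, bitsPerPixel;
++**     gctSIZE_T physical;
++**
++**     if (gcoOS_GetDisplay(&display, gcvNULL) == gcvSTATUS_OK)
++**     {
++**         gcoOS_GetDisplayInfo(display, &width, &height,
++**                              &physical, &stride, &bitsPerPixel);
++**
++**         ... use the display ...
++**
++**         gcoOS_DestroyDisplay(display);
++**     }
++*/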
++
++
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetNextDisplayInfoExByIndex(
++ IN gctINT Index,
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbuffer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtualEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER Context,
++ IN gcoSURF Surface,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval
++);
++
++gceSTATUS
++gcoOS_SetSwapIntervalEx(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval,
++ IN gctPOINTER localDisplay);
++
++gceSTATUS
++gcoOS_GetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT_PTR Min,
++ IN gctINT_PTR Max
++);
++
++gceSTATUS
++gcoOS_DisplayBufferRegions(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT NumRects,
++ IN gctINT_PTR Rects
++ );
++
++gceSTATUS
++gcoOS_DestroyDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_InitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_DeinitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx2(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbufferEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_IsValidDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_GetNativeVisualId(
++ IN HALNativeDisplayType Display,
++ OUT gctINT* nativeVisualId
++ );
++
++gctBOOL
++gcoOS_SynchronousFlip(
++ IN HALNativeDisplayType Display
++ );
++
++/*******************************************************************************
++** Windows. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreateWindow(
++ IN HALNativeDisplayType Display,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height,
++ OUT HALNativeWindowType * Window
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset
++ );
++
++gceSTATUS
++gcoOS_DestroyWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_DrawImage(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_GetImage(
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ OUT gctINT * BitsPerPixel,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_DrawImageEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits,
++ IN gceSURF_FORMAT Format
++ );
++
++/*******************************************************************************
++** Pixmaps. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreatePixmap(
++ IN HALNativeDisplayType Display,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ OUT HALNativePixmapType * Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DrawPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_CopyPixmapBits(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctUINT DstWidth,
++ IN gctUINT DstHeight,
++ IN gctINT DstStride,
++ IN gceSURF_FORMAT DstFormat,
++ OUT gctPOINTER DstBits
++ );
++
++/*******************************************************************************
++** OS relative. ****************************************************************
++*/
++gceSTATUS
++gcoOS_LoadEGLLibrary(
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeEGLLibrary(
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_ShowWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_HideWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_SetWindowTitle(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_CapturePointer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_GetEvent(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT halEvent * Event
++ );
++
++gceSTATUS
++gcoOS_CreateClientBuffer(
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT Format,
++ IN gctINT Type,
++ OUT gctPOINTER * ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_GetClientBufferInfo(
++ IN gctPOINTER ClientBuffer,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyClientBuffer(
++ IN gctPOINTER ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_DestroyContext(
++ IN gctPOINTER Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_CreateContext(
++ IN gctPOINTER LocalDisplay,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_MakeCurrent(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType DrawDrawable,
++ IN HALNativeWindowType ReadDrawable,
++ IN gctPOINTER Context,
++ IN gcoSURF ResolveTarget
++ );
++
++gceSTATUS
++gcoOS_CreateDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++
++gceSTATUS
++gcoOS_DestroyDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++gceSTATUS
++gcoOS_SwapBuffers(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height
++ );
++
++#ifdef EGL_API_DRI
++gceSTATUS
++gcoOS_ResizeWindow(
++ IN gctPOINTER localDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gctUINT Width,
++ IN gctUINT Height)
++ ;
++
++#ifdef USE_FREESCALE_EGL_ACCEL
++gceSTATUS
++gcoOS_SwapBuffersGeneric_Async(
++ IN gctPOINTER localDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height,
++ IN void * resolveRect
++ );
++
++gceSTATUS
++gcoOS_DrawSurface(
++ IN gctPOINTER localDisplay,
++ IN HALNativeWindowType Drawable
++ );
++#endif
++
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_eglplatform_type.h 2015-05-01 14:57:59.587427001 -0500
+@@ -0,0 +1,286 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_type_h_
++#define __gc_hal_eglplatform_type_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Events. *********************************************************************
++*/
++
++typedef enum _halEventType
++{
++ /* Keyboard event. */
++ HAL_KEYBOARD,
++
++ /* Mouse move event. */
++ HAL_POINTER,
++
++ /* Mouse button event. */
++ HAL_BUTTON,
++
++ /* Application close event. */
++ HAL_CLOSE,
++
++ /* Application window has been updated. */
++ HAL_WINDOW_UPDATE
++}
++halEventType;
++
++/* Scancodes for keyboard. */
++typedef enum _halKeys
++{
++ HAL_UNKNOWN = -1,
++
++ HAL_BACKSPACE = 0x08,
++ HAL_TAB,
++ HAL_ENTER = 0x0D,
++ HAL_ESCAPE = 0x1B,
++
++ HAL_SPACE = 0x20,
++ HAL_SINGLEQUOTE = 0x27,
++ HAL_PAD_ASTERISK = 0x2A,
++ HAL_COMMA = 0x2C,
++ HAL_HYPHEN,
++ HAL_PERIOD,
++ HAL_SLASH,
++ HAL_0,
++ HAL_1,
++ HAL_2,
++ HAL_3,
++ HAL_4,
++ HAL_5,
++ HAL_6,
++ HAL_7,
++ HAL_8,
++ HAL_9,
++ HAL_SEMICOLON = 0x3B,
++ HAL_EQUAL = 0x3D,
++ HAL_A = 0x41,
++ HAL_B,
++ HAL_C,
++ HAL_D,
++ HAL_E,
++ HAL_F,
++ HAL_G,
++ HAL_H,
++ HAL_I,
++ HAL_J,
++ HAL_K,
++ HAL_L,
++ HAL_M,
++ HAL_N,
++ HAL_O,
++ HAL_P,
++ HAL_Q,
++ HAL_R,
++ HAL_S,
++ HAL_T,
++ HAL_U,
++ HAL_V,
++ HAL_W,
++ HAL_X,
++ HAL_Y,
++ HAL_Z,
++ HAL_LBRACKET,
++ HAL_BACKSLASH,
++ HAL_RBRACKET,
++ HAL_BACKQUOTE = 0x60,
++
++ HAL_F1 = 0x80,
++ HAL_F2,
++ HAL_F3,
++ HAL_F4,
++ HAL_F5,
++ HAL_F6,
++ HAL_F7,
++ HAL_F8,
++ HAL_F9,
++ HAL_F10,
++ HAL_F11,
++ HAL_F12,
++
++ HAL_LCTRL,
++ HAL_RCTRL,
++ HAL_LSHIFT,
++ HAL_RSHIFT,
++ HAL_LALT,
++ HAL_RALT,
++ HAL_CAPSLOCK,
++ HAL_NUMLOCK,
++ HAL_SCROLLLOCK,
++ HAL_PAD_0,
++ HAL_PAD_1,
++ HAL_PAD_2,
++ HAL_PAD_3,
++ HAL_PAD_4,
++ HAL_PAD_5,
++ HAL_PAD_6,
++ HAL_PAD_7,
++ HAL_PAD_8,
++ HAL_PAD_9,
++ HAL_PAD_HYPHEN,
++ HAL_PAD_PLUS,
++ HAL_PAD_SLASH,
++ HAL_PAD_PERIOD,
++ HAL_PAD_ENTER,
++ HAL_SYSRQ,
++ HAL_PRNTSCRN,
++ HAL_BREAK,
++ HAL_UP,
++ HAL_LEFT,
++ HAL_RIGHT,
++ HAL_DOWN,
++ HAL_HOME,
++ HAL_END,
++ HAL_PGUP,
++ HAL_PGDN,
++ HAL_INSERT,
++ HAL_DELETE,
++ HAL_LWINDOW,
++ HAL_RWINDOW,
++ HAL_MENU,
++ HAL_POWER,
++ HAL_SLEEP,
++ HAL_WAKE
++}
++halKeys;
++
++/* Structure that defines the keyboard mapping. */
++typedef struct _halKeyMap
++{
++ /* Normal key. */
++ halKeys normal;
++
++ /* Extended key. */
++ halKeys extended;
++}
++halKeyMap;
++
++/* Event structure. */
++typedef struct _halEvent
++{
++ /* Event type. */
++ halEventType type;
++
++ /* Event data union. */
++ union _halEventData
++ {
++ /* Event data for keyboard. */
++ struct _halKeyboard
++ {
++ /* Scancode. */
++ halKeys scancode;
++
++ /* ASCII character of the key pressed. */
++ char key;
++
++ /* Flag whether the key was pressed (1) or released (0). */
++ char pressed;
++ }
++ keyboard;
++
++ /* Event data for pointer. */
++ struct _halPointer
++ {
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ pointer;
++
++ /* Event data for mouse buttons. */
++ struct _halButton
++ {
++ /* Left button state. */
++ int left;
++
++ /* Middle button state. */
++ int middle;
++
++ /* Right button state. */
++ int right;
++
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ button;
++ }
++ data;
++}
++halEvent;
++
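++/* Usage sketch (illustrative only): a minimal event loop over the structures
++** above, using gcoOS_GetEvent() from gc_hal_eglplatform.h. Whether that call
++** blocks or returns immediately when no event is pending is not specified
++** here; "running" is a hypothetical flag.
++**
++**     halEvent event;
++**
++**     while (running
++**         && gcoOS_GetEvent(display, window, &event) == gcvSTATUS_OK)
++**     {
++**         switch (event.type)
++**         {
++**         case HAL_KEYBOARD:
++**             if (event.data.keyboard.pressed
++**              && event.data.keyboard.scancode == HAL_ESCAPE)
++**                 running = 0;
++**             break;
++**
++**         case HAL_CLOSE:
++**             running = 0;
++**             break;
++**
++**         default:
++**             break;
++**         }
++**     }
++*/
++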
++/* halDISPLAY_INFO structure defining the information returned by
++ gcoOS_GetDisplayInfoEx. */
++typedef struct _halDISPLAY_INFO
++{
++ /* The size of the display in pixels. */
++ int width;
++ int height;
++
++ /* The stride of the display. -1 is returned if the stride is not known
++ ** for the specified display. */
++ int stride;
++
++ /* The color depth of the display in bits per pixel. */
++ int bitsPerPixel;
++
++ /* The logical pointer to the display memory buffer. NULL is returned
++ ** if the pointer is not known for the specified display. */
++ void * logical;
++
++ /* The physical address of the display memory buffer. ~0 is returned
++ ** if the address is not known for the specified display. */
++ unsigned long physical;
++
++ int wrapFB; /* true if compositor, false otherwise. */
++
++#ifndef __QNXNTO__
++ /* 355_FB_MULTI_BUFFER */
++ int multiBuffer;
++ int backBufferY;
++#endif
++
++ /* The color info of the display. */
++ unsigned int alphaLength;
++ unsigned int alphaOffset;
++ unsigned int redLength;
++ unsigned int redOffset;
++ unsigned int greenLength;
++ unsigned int greenOffset;
++ unsigned int blueLength;
++ unsigned int blueOffset;
++
++ /* Display flip support. */
++ int flip;
++}
++halDISPLAY_INFO;
++
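++/* Illustrative note: the channel length/offset pairs above describe where
++** each component lives inside a pixel. For example, assuming each length is
++** below 32, per-channel masks can be rebuilt from an info record filled in
++** by gcoOS_GetDisplayInfoEx():
++**
++**     unsigned int redMask   = ((1u << info.redLength)   - 1) << info.redOffset;
++**     unsigned int greenMask = ((1u << info.greenLength) - 1) << info.greenOffset;
++**     unsigned int blueMask  = ((1u << info.blueLength)  - 1) << info.blueOffset;
++**     unsigned int alphaMask = ((1u << info.alphaLength) - 1) << info.alphaOffset;
++*/
++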
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_type_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine.h 2015-05-01 14:57:59.587427001 -0500
+@@ -0,0 +1,2587 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_h_
++#define __gc_hal_engine_h_
++
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++
++#if gcdENABLE_3D
++#if gcdENABLE_VG
++#include "gc_hal_engine_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoSTREAM * gcoSTREAM;
++typedef struct _gcoVERTEX * gcoVERTEX;
++typedef struct _gcoTEXTURE * gcoTEXTURE;
++typedef struct _gcoINDEX * gcoINDEX;
++typedef struct _gcsVERTEX_ATTRIBUTES * gcsVERTEX_ATTRIBUTES_PTR;
++typedef struct _gcoVERTEXARRAY * gcoVERTEXARRAY;
++typedef struct _gcoBUFOBJ * gcoBUFOBJ;
++
++#define gcdATTRIBUTE_COUNT 16
++
++typedef enum _gcePROGRAM_STAGE
++{
++ gcvPROGRAM_STAGE_VERTEX = 0x0,
++ gcvPROGRAM_STAGE_TES = 0x1,
++ gcvPROGRAM_STAGE_TCS = 0x2,
++ gcvPROGRAM_STAGE_GEOMETRY = 0x3,
++ gcvPROGRAM_STAGE_FRAGMENT = 0x4,
++ gcvPROGRAM_STAGE_COMPUTE = 0x5,
++ gcvPROGRAM_STAGE_OPENCL = 0x6,
++ gcvPROGRAM_STAGE_LAST
++}
++gcePROGRAM_STAGE;
++
++typedef enum _gcePROGRAM_STAGE_BIT
++{
++ gcvPROGRAM_STAGE_VERTEX_BIT = 1 << gcvPROGRAM_STAGE_VERTEX,
++ gcvPROGRAM_STAGE_TES_BIT = 1 << gcvPROGRAM_STAGE_TES,
++ gcvPROGRAM_STAGE_TCS_BIT = 1 << gcvPROGRAM_STAGE_TCS,
++ gcvPROGRAM_STAGE_GEOMETRY_BIT = 1 << gcvPROGRAM_STAGE_GEOMETRY,
++ gcvPROGRAM_STAGE_FRAGMENT_BIT = 1 << gcvPROGRAM_STAGE_FRAGMENT,
++ gcvPROGRAM_STAGE_COMPUTE_BIT = 1 << gcvPROGRAM_STAGE_COMPUTE,
++ gcvPROGRAM_STAGE_OPENCL_BIT = 1 << gcvPROGRAM_STAGE_OPENCL,
++}
++gcePROGRAM_STAGE_BIT;
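++
++/* Illustrative note: each gcvPROGRAM_STAGE_*_BIT value is simply
++** (1 << gcvPROGRAM_STAGE_*), so a set of stages can be kept in a plain
++** bit mask and tested with either form:
++**
++**     gctUINT32 stages = gcvPROGRAM_STAGE_VERTEX_BIT
++**                      | gcvPROGRAM_STAGE_FRAGMENT_BIT;
++**
++**     if (stages & (1 << gcvPROGRAM_STAGE_FRAGMENT))
++**     {
++**         ... the fragment stage is present ...
++**     }
++*/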
++
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gcoHAL_QueryShaderCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++gceSTATUS
++gcoHAL_QueryShaderCapsEx(
++ IN gcoHAL Hal,
++ OUT gctUINT * ShaderCoreCount,
++ OUT gctUINT * ThreadCount,
++ OUT gctUINT * VertexInstructionCount,
++ OUT gctUINT * FragmentInstructionCount
++ );
++
++gceSTATUS
++gcoHAL_QuerySamplerBase(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * VertexCount,
++ OUT gctINT_PTR VertexBase,
++ OUT gctUINT32 * FragmentCount,
++ OUT gctINT_PTR FragmentBase
++ );
++
++gceSTATUS
++gcoHAL_QueryUniformBase(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * VertexBase,
++ OUT gctUINT32 * FragmentBase
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureMaxAniso(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxAnisoValue
++ );
++
++gceSTATUS
++gcoHAL_QueryStreamCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * MaxAttributes,
++ OUT gctUINT32 * MaxStreamSize,
++ OUT gctUINT32 * NumberOfStreams,
++ OUT gctUINT32 * Alignment
++ );
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*--------------------------------- gcoSURF 3D --------------------------------*/
++typedef enum _gceBLIT_FLAG
++{
++ gcvBLIT_FLAG_SKIP_DEPTH_WRITE = 0x1,
++ gcvBLIT_FLAG_SKIP_STENCIL_WRITE = 0x2,
++} gceBLIT_FLAG;
++
++typedef struct _gcsSURF_BLIT_ARGS
++{
++ gcoSURF srcSurface;
++ gctINT srcX, srcY, srcZ;
++ gctINT srcWidth, srcHeight, srcDepth;
++ gcoSURF dstSurface;
++ gctINT dstX, dstY, dstZ;
++ gctINT dstWidth, dstHeight, dstDepth;
++ gctBOOL xReverse;
++ gctBOOL yReverse;
++ gctBOOL scissorTest;
++ gcsRECT scissor;
++ gctUINT flags;
++}
++gcsSURF_BLIT_ARGS;
++
++
++
++
++/* Clear flags. */
++typedef enum _gceCLEAR
++{
++ gcvCLEAR_COLOR = 0x1,
++ gcvCLEAR_DEPTH = 0x2,
++ gcvCLEAR_STENCIL = 0x4,
++ gcvCLEAR_HZ = 0x8,
++ gcvCLEAR_HAS_VAA = 0x10,
++ gcvCLEAR_WITH_GPU_ONLY = 0x100,
++ gcvCLEAR_WITH_CPU_ONLY = 0x200,
++}
++gceCLEAR;
++
++typedef struct _gcsSURF_CLEAR_ARGS
++{
++ /*
++ ** Color to fill the color portion of the framebuffer when clear
++ ** is called.
++ */
++ struct {
++ gcuVALUE r;
++ gcuVALUE g;
++ gcuVALUE b;
++ gcuVALUE a;
++ /*
++ ** The color can be given in more than one value type, so the type must be specified.
++ */
++ gceVALUE_TYPE valueType;
++ } color;
++
++ gcuVALUE depth;
++
++ gctUINT stencil;
++
++
++
++ /*
++ ** stencil bit-wise mask
++ */
++ gctUINT8 stencilMask;
++ /*
++ ** Depth Write Mask
++ */
++ gctBOOL depthMask;
++ /*
++ ** 4-bit channel Mask: ABGR:MSB->LSB
++ */
++ gctUINT8 colorMask;
++ /*
++ ** If clearRect is NULL, the whole surface is cleared.
++ */
++ gcsRECT_PTR clearRect;
++ /*
++ ** clear flags
++ */
++ gceCLEAR flags;
++
++ /*
++ ** Offset in surface to cube/array/3D
++ */
++ gctUINT32 offset;
++
++} gcsSURF_CLEAR_ARGS;
++
++
++typedef gcsSURF_CLEAR_ARGS* gcsSURF_CLEAR_ARGS_PTR;
++
++typedef struct _gscSURF_BLITDRAW_BLIT
++{
++ gcoSURF srcSurface;
++ gcoSURF dstSurface;
++ gcsRECT srcRect;
++ gcsRECT dstRect;
++ gceTEXTURE_FILTER filterMode;
++ gctBOOL xReverse;
++ gctBOOL yReverse;
++ gctBOOL scissorEnabled;
++ gcsRECT scissor;
++}gscSURF_BLITDRAW_BLIT;
++
++
++typedef enum _gceBLITDRAW_TYPE
++{
++ gcvBLITDRAW_CLEAR = 0,
++ gcvBLITDRAW_BLIT = 1,
++
++ /* last number, not a real type */
++ gcvBLITDRAW_NUM_TYPE
++ }
++gceBLITDRAW_TYPE;
++
++
++typedef struct _gscSURF_BLITDRAW_ARGS
++{
++ /* Always the first member. */
++ gceHAL_ARG_VERSION version;
++
++ union _gcsSURF_BLITDRAW_ARGS_UNION
++ {
++ struct _gscSURF_BLITDRAW_ARG_v1
++ {
++ /* Whether it's clear or blit operation, can be extended. */
++ gceBLITDRAW_TYPE type;
++
++ union _gscSURF_BLITDRAW_UNION
++ {
++ gscSURF_BLITDRAW_BLIT blit;
++
++ struct _gscSURF_BLITDRAW_CLEAR
++ {
++ gcsSURF_CLEAR_ARGS clearArgs;
++ gcoSURF rtSurface;
++ gcoSURF dsSurface;
++ } clear;
++ } u;
++ } v1;
++ } uArgs;
++}
++gcsSURF_BLITDRAW_ARGS;
++
++
++typedef struct _gcsSURF_RESOLVE_ARGS
++{
++ gceHAL_ARG_VERSION version;
++ union _gcsSURF_RESOLVE_ARGS_UNION
++ {
++ struct _gcsSURF_RESOLVE_ARG_v1
++ {
++ gctBOOL yInverted;
++ }v1;
++ } uArgs;
++}
++gcsSURF_RESOLVE_ARGS;
++
++
++/* CPU Blit with format (including linear <-> tile) conversion*/
++gceSTATUS
++gcoSURF_BlitCPU(
++ gcsSURF_BLIT_ARGS* args
++ );
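++
++/* Usage sketch (illustrative only): a same-size CPU blit between two existing
++** surfaces. "src" and "dst" are hypothetical gcoSURF handles; fields left at
++** zero by the initializer (offsets, reverse flags, scissor) keep their
++** defaults.
++**
++**     gcsSURF_BLIT_ARGS args = {0};
++**
++**     args.srcSurface = src;
++**     args.dstSurface = dst;
++**     args.srcWidth   = args.dstWidth   = 256;
++**     args.srcHeight  = args.dstHeight  = 256;
++**     args.srcDepth   = args.dstDepth   = 1;
++**
++**     gceSTATUS status = gcoSURF_BlitCPU(&args);
++*/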
++
++
++gceSTATUS
++gcoSURF_BlitDraw(
++ IN gcsSURF_BLITDRAW_ARGS *args
++ );
++#endif /* gcdENABLE_3D */
++
++
++
++#if gcdENABLE_3D
++/* Clear surface function. */
++gceSTATUS
++gcoSURF_Clear(
++ IN gcoSURF Surface,
++ IN gcsSURF_CLEAR_ARGS_PTR clearArg
++ );
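++
++/* Usage sketch (illustrative only): a full-surface color clear. The color
++** channel values and color.valueType are omitted here because gcuVALUE and
++** gceVALUE_TYPE are defined elsewhere; "surface" is a hypothetical gcoSURF.
++**
++**     gcsSURF_CLEAR_ARGS clear = {0};
++**
++**     clear.flags     = gcvCLEAR_COLOR;
++**     clear.colorMask = 0xF;            all four channels, per the ABGR mask
++**     clear.clearRect = gcvNULL;        NULL requests a full clear
++**
++**     ... fill in clear.color.r/g/b/a and clear.color.valueType ...
++**
++**     gceSTATUS status = gcoSURF_Clear(surface, &clear);
++*/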
++
++/* Preserve pixels from source. */
++gceSTATUS
++gcoSURF_Preserve(
++ IN gcoSURF Source,
++ IN gcoSURF Dest,
++ IN gcsRECT_PTR MaskRect
++ );
++
++
++/* TO BE REMOVED */
++ gceSTATUS
++ depr_gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight
++ );
++
++ gceSTATUS
++ depr_gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resample surface. */
++gceSTATUS
++gcoSURF_Resample(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++/* Resolve surface. */
++gceSTATUS
++gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++gceSTATUS
++gcoSURF_ResolveEx(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsSURF_RESOLVE_ARGS *args
++ );
++
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRectEx(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize,
++ IN gcsSURF_RESOLVE_ARGS *args
++ );
++
++
++gceSTATUS
++gcoSURF_GetResolveAlignment(
++ IN gcoSURF Surface,
++ OUT gctUINT *originX,
++ OUT gctUINT *originY,
++ OUT gctUINT *sizeX,
++ OUT gctUINT *sizeY
++ );
++
++gceSTATUS
++gcoSURF_IsHWResolveable(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Set surface resolvability. */
++gceSTATUS
++gcoSURF_SetResolvability(
++ IN gcoSURF Surface,
++ IN gctBOOL Resolvable
++ );
++
++gceSTATUS
++gcoSURF_IsRenderable(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_IsFormatRenderableAsRT(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_GetFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoBUFOBJ_GetFence(
++ IN gcoBUFOBJ bufObj
++ );
++
++gceSTATUS
++gcoBUFOBJ_WaitFence(
++ IN gcoBUFOBJ bufObj
++ );
++
++gceSTATUS
++gcoBUFOBJ_IsFenceEnabled(
++ IN gcoBUFOBJ bufObj
++ );
++
++gceSTATUS
++gcoSURF_WaitFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSTREAM_GetFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoSTREAM_WaitFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoINDEX_GetFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoINDEX_WaitFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoSURF_3DBlitClearRect(
++ IN gcoSURF Surface,
++ IN gcsSURF_CLEAR_ARGS_PTR ClearArgs
++ );
++
++
++gceSTATUS
++gcoSURF_3DBlitBltRect(
++ IN gcoSURF SrcSurf,
++ IN gcoSURF DestSurf,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++gceSTATUS
++gcoSURF_3DBlitCopy(
++ IN gctUINT32 SrcAddress,
++ IN gctUINT32 DestAddress,
++ IN gctUINT32 Bytes
++ );
++
++
++/******************************************************************************\
++******************************** gcoINDEX Object *******************************
++\******************************************************************************/
++
++/* Construct a new gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoINDEX * Index
++ );
++
++/* Destroy a gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Destroy(
++ IN gcoINDEX Index
++ );
++
++/* Lock index in memory. */
++gceSTATUS
++gcoINDEX_Lock(
++ IN gcoINDEX Index,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock index that was previously locked with gcoINDEX_Lock. */
++gceSTATUS
++gcoINDEX_Unlock(
++ IN gcoINDEX Index
++ );
++
++/* Upload index data into the memory. */
++gceSTATUS
++gcoINDEX_Load(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE IndexType,
++ IN gctUINT32 IndexCount,
++ IN gctPOINTER IndexBuffer
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_Bind(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_BindOffset(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset
++ );
++
++/* Free existing index buffer. */
++gceSTATUS
++gcoINDEX_Free(
++ IN gcoINDEX Index
++ );
++
++/* Upload data into an index buffer. */
++gceSTATUS
++gcoINDEX_Upload(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload data into an index buffer starting at an offset. */
++gceSTATUS
++gcoINDEX_UploadOffset(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Offset,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Merge Index2 into Index1 starting at offset 0; Index2 must be a subset of Index1. */
++gceSTATUS
++gcoINDEX_Merge(
++ IN gcoINDEX Index1,
++ IN gcoINDEX Index2
++ );
++
++/* Check whether the index buffer is large enough for this draw. */
++gctBOOL
++gcoINDEX_CheckRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctINT Count,
++ IN gctUINT32 Indices
++ );
++
++/* Query the index capabilities. */
++gceSTATUS
++gcoINDEX_QueryCaps(
++ OUT gctBOOL * Index8,
++ OUT gctBOOL * Index16,
++ OUT gctBOOL * Index32,
++ OUT gctUINT * MaxIndex
++ );
++
++/* Determine the index range in the current index buffer. */
++gceSTATUS
++gcoINDEX_GetIndexRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Dynamic buffer management. */
++gceSTATUS
++gcoINDEX_SetDynamic(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
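++
++/* Usage sketch (illustrative only): the basic gcoINDEX lifecycle with the
++** calls declared above. "hal" is an existing gcoHAL handle, "indexType" a
++** gceINDEX_TYPE value and "indices"/"count" the caller's index data; all of
++** these are hypothetical, and error handling is omitted.
++**
++**     gcoINDEX index;
++**
++**     gcoINDEX_Construct(hal, &index);
++**     gcoINDEX_Load(index, indexType, count, indices);
++**     gcoINDEX_Bind(index, indexType);
++**
++**     ... issue indexed draws ...
++**
++**     gcoINDEX_Destroy(index);
++*/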
++
++/******************************************************************************\
++********************************** gco3D Object *********************************
++\******************************************************************************/
++
++/* Blending targets. */
++typedef enum _gceBLEND_UNIT
++{
++ gcvBLEND_SOURCE,
++ gcvBLEND_TARGET,
++}
++gceBLEND_UNIT;
++
++/* Construct a new gco3D object. */
++gceSTATUS
++gco3D_Construct(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++/* Destroy an gco3D object. */
++gceSTATUS
++gco3D_Destroy(
++ IN gco3D Engine
++ );
++
++/* Set 3D API type. */
++gceSTATUS
++gco3D_SetAPI(
++ IN gco3D Engine,
++ IN gceAPI ApiType
++ );
++
++/* Get 3D API type. */
++gceSTATUS
++gco3D_GetAPI(
++ IN gco3D Engine,
++ OUT gceAPI * ApiType
++ );
++
++/* Set render target. */
++gceSTATUS
++gco3D_SetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset render target. */
++gceSTATUS
++gco3D_UnsetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gco3D_SetTargetEx(
++ IN gco3D Engine,
++ IN gctUINT32 TargetIndex,
++ IN gcoSURF Surface,
++ IN gctUINT32 LayerIndex
++ );
++
++gceSTATUS
++gco3D_UnsetTargetEx(
++ IN gco3D Engine,
++ IN gctUINT32 TargetIndex,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gco3D_SetTargetOffsetEx(
++ IN gco3D Engine,
++ IN gctUINT32 TargetIndex,
++ IN gctSIZE_T Offset
++ );
++
++
++gceSTATUS
++gco3D_SetPSOutputMapping(
++ IN gco3D Engine,
++ IN gctINT32 * psOutputMapping
++ );
++
++
++/* Set depth buffer. */
++gceSTATUS
++gco3D_SetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gco3D_SetDepthBufferOffset(
++ IN gco3D Engine,
++ IN gctSIZE_T Offset
++ );
++
++/* Unset depth buffer. */
++gceSTATUS
++gco3D_UnsetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set viewport. */
++gceSTATUS
++gco3D_SetViewport(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set scissors. */
++gceSTATUS
++gco3D_SetScissors(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set clear color. */
++gceSTATUS
++gco3D_SetClearColor(
++ IN gco3D Engine,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++/* Set fixed point clear color. */
++gceSTATUS
++gco3D_SetClearColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point clear color. */
++gceSTATUS
++gco3D_SetClearColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set fixed point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Depth
++ );
++
++/* Set floating point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthF(
++ IN gco3D Engine,
++ IN gctFLOAT Depth
++ );
++
++/* Set clear stencil. */
++gceSTATUS
++gco3D_SetClearStencil(
++ IN gco3D Engine,
++ IN gctUINT32 Stencil
++ );
++
++/* Set shading mode. */
++gceSTATUS
++gco3D_SetShading(
++ IN gco3D Engine,
++ IN gceSHADING Shading
++ );
++
++/* Enable or disable blending. */
++gceSTATUS
++gco3D_EnableBlending(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set blending function. */
++gceSTATUS
++gco3D_SetBlendFunction(
++ IN gco3D Engine,
++ IN gceBLEND_UNIT Unit,
++ IN gceBLEND_FUNCTION FunctionRGB,
++ IN gceBLEND_FUNCTION FunctionAlpha
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_SetBlendMode(
++ IN gco3D Engine,
++ IN gceBLEND_MODE ModeRGB,
++ IN gceBLEND_MODE ModeAlpha
++ );
++
++/* Set blending color. */
++gceSTATUS
++gco3D_SetBlendColor(
++ IN gco3D Engine,
++ IN gctUINT Red,
++ IN gctUINT Green,
++ IN gctUINT Blue,
++ IN gctUINT Alpha
++ );
++
++/* Set fixed point blending color. */
++gceSTATUS
++gco3D_SetBlendColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point blending color. */
++gceSTATUS
++gco3D_SetBlendColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set culling mode. */
++gceSTATUS
++gco3D_SetCulling(
++ IN gco3D Engine,
++ IN gceCULL Mode
++ );
++
++/* Enable point size */
++gceSTATUS
++gco3D_SetPointSizeEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set point sprite */
++gceSTATUS
++gco3D_SetPointSprite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set fill mode. */
++gceSTATUS
++gco3D_SetFill(
++ IN gco3D Engine,
++ IN gceFILL Mode
++ );
++
++/* Set depth compare mode. */
++gceSTATUS
++gco3D_SetDepthCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Enable depth writing. */
++gceSTATUS
++gco3D_EnableDepthWrite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth mode. */
++gceSTATUS
++gco3D_SetDepthMode(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeX(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFIXED_POINT Near,
++ IN gctFIXED_POINT Far
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeF(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Set last pixel enable */
++gceSTATUS
++gco3D_SetLastPixelEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth Bias and Scale */
++gceSTATUS
++gco3D_SetDepthScaleBiasX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT DepthScale,
++ IN gctFIXED_POINT DepthBias
++ );
++
++gceSTATUS
++gco3D_SetDepthScaleBiasF(
++ IN gco3D Engine,
++ IN gctFLOAT DepthScale,
++ IN gctFLOAT DepthBias
++ );
++
++/* Set depth near and far clipping plane. */
++gceSTATUS
++gco3D_SetDepthPlaneF(
++ IN gco3D Engine,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Enable or disable dithering. */
++gceSTATUS
++gco3D_EnableDither(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set color write enable bits. */
++gceSTATUS
++gco3D_SetColorWrite(
++ IN gco3D Engine,
++ IN gctUINT8 Enable
++ );
++
++/* Enable or disable early depth. */
++gceSTATUS
++gco3D_SetEarlyDepth(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Deprecated: Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModes(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModesEx(
++ IN gco3D Engine,
++ IN gctBOOL Disable,
++ IN gctBOOL DisableModify,
++ IN gctBOOL DisablePassZ
++ );
++
++/* Switch dynamic early mode */
++gceSTATUS
++gco3D_SwitchDynamicEarlyDepthMode(
++ IN gco3D Engine
++ );
++
++/* Set dynamic early mode */
++gceSTATUS
++gco3D_DisableDynamicEarlyDepthMode(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable depth-only mode. */
++gceSTATUS
++gco3D_SetDepthOnly(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++typedef struct _gcsSTENCIL_INFO * gcsSTENCIL_INFO_PTR;
++typedef struct _gcsSTENCIL_INFO
++{
++ gceSTENCIL_MODE mode;
++
++ gctUINT8 maskFront;
++ gctUINT8 maskBack;
++ gctUINT8 writeMaskFront;
++ gctUINT8 writeMaskBack;
++
++ gctUINT8 referenceFront;
++
++ gceCOMPARE compareFront;
++ gceSTENCIL_OPERATION passFront;
++ gceSTENCIL_OPERATION failFront;
++ gceSTENCIL_OPERATION depthFailFront;
++
++ gctUINT8 referenceBack;
++ gceCOMPARE compareBack;
++ gceSTENCIL_OPERATION passBack;
++ gceSTENCIL_OPERATION failBack;
++ gceSTENCIL_OPERATION depthFailBack;
++}
++gcsSTENCIL_INFO;
++
++/* Set stencil mode. */
++gceSTATUS
++gco3D_SetStencilMode(
++ IN gco3D Engine,
++ IN gceSTENCIL_MODE Mode
++ );
++
++/* Set stencil mask. */
++gceSTATUS
++gco3D_SetStencilMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back mask. */
++gceSTATUS
++gco3D_SetStencilMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil reference. */
++gceSTATUS
++gco3D_SetStencilReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctBOOL Front
++ );
++
++/* Set stencil compare. */
++gceSTATUS
++gco3D_SetStencilCompare(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceCOMPARE Compare
++ );
++
++/* Set stencil operation on pass. */
++gceSTATUS
++gco3D_SetStencilPass(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on fail. */
++gceSTATUS
++gco3D_SetStencilFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on depth fail. */
++gceSTATUS
++gco3D_SetStencilDepthFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set all stencil states in one blow. */
++gceSTATUS
++gco3D_SetStencilAll(
++ IN gco3D Engine,
++ IN gcsSTENCIL_INFO_PTR Info
++ );
++
++typedef struct _gcsALPHA_INFO * gcsALPHA_INFO_PTR;
++typedef struct _gcsALPHA_INFO
++{
++ /* Alpha test states. */
++ gctBOOL test;
++ gceCOMPARE compare;
++ gctUINT8 reference;
++ gctFLOAT floatReference;
++
++ /* Alpha blending states. */
++ gctBOOL blend;
++
++ gceBLEND_FUNCTION srcFuncColor;
++ gceBLEND_FUNCTION srcFuncAlpha;
++ gceBLEND_FUNCTION trgFuncColor;
++ gceBLEND_FUNCTION trgFuncAlpha;
++
++ gceBLEND_MODE modeColor;
++ gceBLEND_MODE modeAlpha;
++
++ gctUINT32 color;
++}
++gcsALPHA_INFO;
++
++/* Enable or disable alpha test. */
++gceSTATUS
++gco3D_SetAlphaTest(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set alpha test compare. */
++gceSTATUS
++gco3D_SetAlphaCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Set alpha test reference in unsigned integer. */
++gceSTATUS
++gco3D_SetAlphaReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctFLOAT FloatReference
++ );
++
++/* Set alpha test reference in fixed point. */
++gceSTATUS
++gco3D_SetAlphaReferenceX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Reference
++ );
++
++/* Set alpha test reference in floating point. */
++gceSTATUS
++gco3D_SetAlphaReferenceF(
++ IN gco3D Engine,
++ IN gctFLOAT Reference
++ );
++
++/* Enable/Disable anti-alias line. */
++gceSTATUS
++gco3D_SetAntiAliasLine(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set texture slot for anti-alias line. */
++gceSTATUS
++gco3D_SetAALineTexSlot(
++ IN gco3D Engine,
++ IN gctUINT TexSlot
++ );
++
++/* Set anti-alias line width scale. */
++gceSTATUS
++gco3D_SetAALineWidth(
++ IN gco3D Engine,
++ IN gctFLOAT Width
++ );
++
++/* Draw a number of primitives. */
++gceSTATUS
++gco3D_DrawPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctSIZE_T StartVertex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++gceSTATUS
++gco3D_DrawInstancedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctBOOL DrawIndex,
++ IN gctSIZE_T StartVertex,
++ IN gctSIZE_T StartIndex,
++ IN gctSIZE_T PrimitiveCount,
++ IN gctSIZE_T VertexCount,
++ IN gctSIZE_T InstanceCount
++ );
++
++gceSTATUS
++gco3D_DrawPrimitivesCount(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT* StartVertex,
++ IN gctSIZE_T* VertexCount,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++
++/* Draw a number of primitives using offsets. */
++gceSTATUS
++gco3D_DrawPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives. */
++gceSTATUS
++gco3D_DrawIndexedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctSIZE_T BaseVertex,
++ IN gctSIZE_T StartIndex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives using offsets. */
++gceSTATUS
++gco3D_DrawIndexedPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 BaseOffset,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw an element from a pattern. */
++gceSTATUS
++gco3D_DrawPattern(
++ IN gco3D Engine,
++ IN gcsFAST_FLUSH_PTR FastFlushInfo
++ );
++
++/* Enable or disable anti-aliasing. */
++gceSTATUS
++gco3D_SetAntiAlias(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Write data into the command buffer. */
++gceSTATUS
++gco3D_WriteBuffer(
++ IN gco3D Engine,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Aligned
++ );
++
++/* Send semaphore and stall until the semaphore is signalled. */
++gceSTATUS
++gco3D_Semaphore(
++ IN gco3D Engine,
++ IN gceWHERE From,
++ IN gceWHERE To,
++ IN gceHOW How);
++
++/* Explicitly flush shader L1 cache */
++gceSTATUS
++gco3D_FlushSHL1Cache(
++ IN gco3D Engine
++ );
++
++/* Set the subpixel centers. */
++gceSTATUS
++gco3D_SetCentroids(
++ IN gco3D Engine,
++ IN gctUINT32 Index,
++ IN gctPOINTER Centroids
++ );
++
++gceSTATUS
++gco3D_SetLogicOp(
++ IN gco3D Engine,
++ IN gctUINT8 Rop
++ );
++
++gceSTATUS
++gco3D_SetOQ(
++ IN gco3D Engine,
++ INOUT gctPOINTER * Result,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetOQ(
++ IN gco3D Engine,
++ IN gctPOINTER Result,
++ OUT gctINT64 * Logical
++ );
++
++gceSTATUS
++gco3D_DeleteOQ(
++ IN gco3D Engine,
++ INOUT gctPOINTER Result
++ );
++
++gceSTATUS
++gco3D_SetColorOutCount(
++ IN gco3D Engine,
++ IN gctUINT32 ColorOutCount
++ );
++
++gceSTATUS
++gco3D_Set3DEngine(
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gco3D_UnSet3DEngine(
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gco3D_Get3DEngine(
++ OUT gco3D * Engine
++ );
++
++
++/* OCL thread walker information. */
++typedef struct _gcsTHREAD_WALKER_INFO * gcsTHREAD_WALKER_INFO_PTR;
++typedef struct _gcsTHREAD_WALKER_INFO
++{
++ gctUINT32 dimensions;
++ gctUINT32 traverseOrder;
++ gctUINT32 enableSwathX;
++ gctUINT32 enableSwathY;
++ gctUINT32 enableSwathZ;
++ gctUINT32 swathSizeX;
++ gctUINT32 swathSizeY;
++ gctUINT32 swathSizeZ;
++ gctUINT32 valueOrder;
++
++ gctUINT32 globalSizeX;
++ gctUINT32 globalOffsetX;
++ gctUINT32 globalSizeY;
++ gctUINT32 globalOffsetY;
++ gctUINT32 globalSizeZ;
++ gctUINT32 globalOffsetZ;
++
++ gctUINT32 workGroupSizeX;
++ gctUINT32 workGroupCountX;
++ gctUINT32 workGroupSizeY;
++ gctUINT32 workGroupCountY;
++ gctUINT32 workGroupSizeZ;
++ gctUINT32 workGroupCountZ;
++
++ gctUINT32 threadAllocation;
++}
++gcsTHREAD_WALKER_INFO;
++
++/* Start OCL thread walker. */
++gceSTATUS
++gco3D_InvokeThreadWalker(
++ IN gco3D Engine,
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
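For orientation only, here is a minimal caller-side sketch of the thread-walker interface declared above. It assumes an already constructed gco3D handle (called engine here) and the usual gcvSTATUS_OK success code from the common HAL headers; the work sizes are illustrative placeholders, not values taken from this patch.

    gceSTATUS status;
    gcsTHREAD_WALKER_INFO info = {0};        /* start with all walker fields cleared */

    info.dimensions      = 1;                /* one-dimensional dispatch */
    info.globalSizeX     = 1024;             /* total work items along X */
    info.globalOffsetX   = 0;
    info.workGroupSizeX  = 64;               /* work items per work group */
    info.workGroupCountX = info.globalSizeX / info.workGroupSizeX;

    /* Kick off the OpenCL-style thread walker on the 3D engine. */
    status = gco3D_InvokeThreadWalker(engine, &info);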
++
++gceSTATUS
++gco3D_GetClosestRenderFormat(
++ IN gco3D Engine,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++/* Set w clip and w plane limit value. */
++gceSTATUS
++gco3D_SetWClipEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetWClipEnable(
++ IN gco3D Engine,
++ OUT gctBOOL * Enable
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitF(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimit(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_PrimitiveRestart(
++ IN gco3D Engine,
++ IN gctBOOL PrimitiveRestart);
++
++#if gcdSTREAM_OUT_BUFFER
++
++gceSTATUS
++gco3D_QueryStreamOut(
++ IN gco3D Engine,
++ IN gctUINT32 OriginalIndexAddress,
++ IN gctUINT32 OriginalIndexOffset,
++ IN gctUINT32 OriginalIndexCount,
++ OUT gctBOOL_PTR Found
++ );
++
++gceSTATUS
++gco3D_StartStreamOut(
++ IN gco3D Engine,
++ IN gctINT StreamOutStatus,
++ IN gctUINT32 IndexAddress,
++ IN gctUINT32 IndexOffset,
++ IN gctUINT32 IndexCount
++ );
++
++gceSTATUS
++gco3D_StopStreamOut(
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gco3D_ReplayStreamOut(
++ IN gco3D Engine,
++ IN gctUINT32 IndexAddress,
++ IN gctUINT32 IndexOffset,
++ IN gctUINT32 IndexCount
++ );
++
++gceSTATUS
++gco3D_EndStreamOut(
++ IN gco3D Engine
++ );
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- gco3D Fragment Processor ------------------------*/
++
++/* Set the fragment processor configuration. */
++gceSTATUS
++gco3D_SetFragmentConfiguration(
++ IN gco3D Engine,
++ IN gctBOOL ColorFromStream,
++ IN gctBOOL EnableFog,
++ IN gctBOOL EnableSmoothPoint,
++ IN gctUINT32 ClipPlanes
++ );
++
++/* Enable/disable texture stage operation. */
++gceSTATUS
++gco3D_EnableTextureStage(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL Enable
++ );
++
++/* Program the channel enable masks for the color texture function. */
++gceSTATUS
++gco3D_SetTextureColorMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the channel enable masks for the alpha texture function. */
++gceSTATUS
++gco3D_SetTextureAlphaMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the constant fragment color. */
++gceSTATUS
++gco3D_SetFragmentColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFragmentColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant fog color. */
++gceSTATUS
++gco3D_SetFogColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFogColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant texture color. */
++gceSTATUS
++gco3D_SetTetxureColorX(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetTetxureColorF(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Configure color texture function. */
++gceSTATUS
++gco3D_SetColorTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Configure alpha texture function. */
++gceSTATUS
++gco3D_SetAlphaTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/******************************************************************************\
++******************************* gcoTEXTURE Object *******************************
++\******************************************************************************/
++
++/* Cube faces. */
++typedef enum _gceTEXTURE_FACE
++{
++ gcvFACE_NONE,
++ gcvFACE_POSITIVE_X,
++ gcvFACE_NEGATIVE_X,
++ gcvFACE_POSITIVE_Y,
++ gcvFACE_NEGATIVE_Y,
++ gcvFACE_POSITIVE_Z,
++ gcvFACE_NEGATIVE_Z,
++}
++gceTEXTURE_FACE;
++
++typedef struct _gcsTEXTURE
++{
++ /* Addressing modes. */
++ gceTEXTURE_ADDRESSING s;
++ gceTEXTURE_ADDRESSING t;
++ gceTEXTURE_ADDRESSING r;
++
++ gceTEXTURE_SWIZZLE swizzle[gcvTEXTURE_COMPONENT_NUM];
++
++ /* Border color. */
++ gctUINT8 border[gcvTEXTURE_COMPONENT_NUM];
++
++ /* Filters. */
++ gceTEXTURE_FILTER minFilter;
++ gceTEXTURE_FILTER magFilter;
++ gceTEXTURE_FILTER mipFilter;
++ gctUINT anisoFilter;
++
++ /* Level of detail. */
++ gctFLOAT lodBias;
++ gctFLOAT lodMin;
++ gctFLOAT lodMax;
++
++ /* base/max level */
++ gctINT32 baseLevel;
++ gctINT32 maxLevel;
++
++ /* depth texture comparison */
++ gceTEXTURE_COMPARE_MODE compareMode;
++ gceCOMPARE compareFunc;
++
++}
++gcsTEXTURE, * gcsTEXTURE_PTR;
++
++/* Construct a new gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Construct(
++ IN gcoHAL Hal,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Construct a new gcoTEXTURE object with type information. */
++gceSTATUS
++gcoTEXTURE_ConstructEx(
++ IN gcoHAL Hal,
++ IN gceTEXTURE_TYPE Type,
++ OUT gcoTEXTURE * Texture
++ );
++
++
++/* Construct a new sized gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ConstructSized(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gctUINT MipMapCount,
++ IN gcePOOL Pool,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Destroy a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Destroy(
++ IN gcoTEXTURE Texture
++ );
++
++/* Upload data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Upload(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_COLOR_SPACE SrcColorSpace
++ );
++
++/* Upload data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadSub(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T X,
++ IN gctSIZE_T Y,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_COLOR_SPACE SrcColorSpace,
++ IN gctUINT32 PhysicalAddress
++ );
++
++
++/* Upload YUV data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadYUV(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctPOINTER Memory[3],
++ IN gctINT Stride[3],
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload compressed data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressed(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload compressed sub-data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressedSub(
++ IN gcoTEXTURE Texture,
++ IN gctINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctSIZE_T XOffset,
++ IN gctSIZE_T YOffset,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Size
++ );
++
++/* Get gcoSURF object for a mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gcoSURF * Surface
++ );
++
++/* Get gcoSURF object for a mipmap level and face offset. */
++gceSTATUS
++gcoTEXTURE_GetMipMapFace(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ OUT gcoSURF * Surface,
++ OUT gctSIZE_T_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_GetMipMapSlice(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gctUINT Slice,
++ OUT gcoSURF * Surface,
++ OUT gctSIZE_T_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT InternalFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctSIZE_T Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapWithFlag(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT InternalFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctSIZE_T Width,
++ IN gctSIZE_T Height,
++ IN gctSIZE_T Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ IN gctBOOL Protected,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromClient(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromSurface(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_SetEndianHint(
++ IN gcoTEXTURE Texture,
++ IN gceENDIAN_HINT EndianHint
++ );
++
++gceSTATUS
++gcoTEXTURE_Disable(
++ IN gcoHAL Hal,
++ IN gctINT Sampler
++ );
++
++gceSTATUS
++gcoTEXTURE_Flush(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_FlushVS(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_QueryCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormat(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormatEx(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ IN gceTEXTURE_TYPE TextureType,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_GetFormatInfo(
++ IN gcoTEXTURE Texture,
++ IN gctINT preferLevel,
++ OUT gcsSURF_FORMAT_INFO_PTR * TxFormatInfo
++ );
++
++gceSTATUS
++gcoTEXTURE_GetTextureFormatName(
++ IN gcsSURF_FORMAT_INFO_PTR TxFormatInfo,
++ OUT gctCONST_STRING * TxName
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap2(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctBOOL Sync
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderable(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsComplete(
++ IN gcoTEXTURE Texture,
++ IN gcsTEXTURE_PTR Info,
++ IN gctINT BaseLevel,
++ IN gctINT MaxLevel
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTexture(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTextureEx(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info,
++ IN gctINT textureLayer
++ );
++
++gceSTATUS
++gcoTEXTURE_InitParams(
++ IN gcoHAL Hal,
++ IN gcsTEXTURE_PTR TexParams
++ );
++
++gceSTATUS
++gcoTEXTURE_SetDepthTextureFlag(
++ IN gcoTEXTURE Texture,
++ IN gctBOOL unsized
++ );
++
++
++/******************************************************************************\
++******************************* gcoSTREAM Object ******************************
++\******************************************************************************/
++
++typedef enum _gceVERTEX_FORMAT
++{
++ gcvVERTEX_BYTE,
++ gcvVERTEX_UNSIGNED_BYTE,
++ gcvVERTEX_SHORT,
++ gcvVERTEX_UNSIGNED_SHORT,
++ gcvVERTEX_INT,
++ gcvVERTEX_UNSIGNED_INT,
++ gcvVERTEX_FIXED,
++ gcvVERTEX_HALF,
++ gcvVERTEX_FLOAT,
++ gcvVERTEX_UNSIGNED_INT_10_10_10_2,
++ gcvVERTEX_INT_10_10_10_2,
++ gcvVERTEX_UNSIGNED_INT_2_10_10_10_REV,
++ gcvVERTEX_INT_2_10_10_10_REV,
++ /* integer format */
++ gcvVERTEX_INT8,
++ gcvVERTEX_INT16,
++ gcvVERTEX_INT32,
++}
++gceVERTEX_FORMAT;
++
++/* Software conversion scheme used to create a temporary attribute. */
++typedef enum _gceATTRIB_SCHEME
++{
++ gcvATTRIB_SCHEME_KEEP = 0,
++ gcvATTRIB_SCHEME_2_10_10_10_REV_TO_FLOAT,
++ gcvATTRIB_SCHEME_BYTE_TO_INT,
++ gcvATTRIB_SCHEME_SHORT_TO_INT,
++ gcvATTRIB_SCHEME_UBYTE_TO_UINT,
++ gcvATTRIB_SCHEME_USHORT_TO_UINT,
++} gceATTRIB_SCHEME;
++
++gceSTATUS
++gcoSTREAM_Construct(
++ IN gcoHAL Hal,
++ OUT gcoSTREAM * Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Destroy(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Upload(
++ IN gcoSTREAM Stream,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Dynamic
++ );
++
++gceSTATUS
++gcoSTREAM_SetStride(
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoSTREAM_Size(
++ IN gcoSTREAM Stream,
++ OUT gctSIZE_T *Size
++ );
++
++gceSTATUS
++gcoSTREAM_Node(
++ IN gcoSTREAM Stream,
++ OUT gcsSURF_NODE_PTR * Node
++ );
++
++gceSTATUS
++gcoSTREAM_Lock(
++ IN gcoSTREAM Stream,
++ OUT gctPOINTER * Logical,
++ OUT gctUINT32 * Physical
++ );
++
++gceSTATUS
++gcoSTREAM_Unlock(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Reserve(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoSTREAM_Flush(
++ IN gcoSTREAM Stream
++ );
++
++/* Dynamic buffer API. */
++gceSTATUS
++gcoSTREAM_SetDynamic(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++typedef struct _gcsSTREAM_INFO
++{
++ gctUINT index;
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT components;
++ gctSIZE_T size;
++ gctCONST_POINTER data;
++ gctUINT stride;
++}
++gcsSTREAM_INFO, * gcsSTREAM_INFO_PTR;
++
++gceSTATUS
++gcoSTREAM_UploadDynamic(
++ IN gcoSTREAM Stream,
++ IN gctUINT VertexCount,
++ IN gctUINT InfoCount,
++ IN gcsSTREAM_INFO_PTR Info,
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation(
++ IN gcoSTREAM Stream,
++ IN gceCACHEOPERATION Operation
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation_Range(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Length,
++ IN gceCACHEOPERATION Operation
++ );
++
++/******************************************************************************\
++******************************** gcoVERTEX Object ******************************
++\******************************************************************************/
++
++typedef struct _gcsVERTEX_ATTRIBUTES
++{
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT32 components;
++ gctSIZE_T size;
++ gctUINT32 stream;
++ gctUINT32 offset;
++ gctUINT32 stride;
++}
++gcsVERTEX_ATTRIBUTES;
++
++gceSTATUS
++gcoVERTEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEX * Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Destroy(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Reset(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_EnableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index,
++ IN gceVERTEX_FORMAT Format,
++ IN gctBOOL Normalized,
++ IN gctUINT32 Components,
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoVERTEX_DisableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index
++ );
++
++gceSTATUS
++gcoVERTEX_Bind(
++ IN gcoVERTEX Vertex
++ );
++
++/*******************************************************************************
++***** gcoVERTEXARRAY Object ***************************************************/
++
++typedef struct _gcsATTRIBUTE
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctSIZE_T stride;
++
++ /* Divisor of the attribute */
++ gctUINT divisor;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoBUFOBJ stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++#if gcdUSE_WCLIP_PATCH
++ /* Does it hold positions? */
++ gctBOOL isPosition;
++#endif
++
++ /* Index to vertex array */
++ gctINT arrayIdx;
++
++ gceATTRIB_SCHEME convertScheme;
++
++ /* Pointer to the temporary buffer to be freed */
++ gcoBUFOBJ tempStream;
++
++ /* Pointer to the temporary memory to be freed */
++ gctCONST_POINTER tempMemory;
++}
++gcsATTRIBUTE,
++* gcsATTRIBUTE_PTR;
++
++
++typedef struct _gcsVERTEXARRAY
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctUINT stride;
++
++ /* Divisor of the attribute */
++ gctUINT divisor;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoSTREAM stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++ gctBOOL isPosition;
++}
++gcsVERTEXARRAY,
++* gcsVERTEXARRAY_PTR;
++
++gceSTATUS
++gcoVERTEXARRAY_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEXARRAY * Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Destroy(
++ IN gcoVERTEXARRAY Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind_Ex(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gctBOOL DrawArraysInstanced,
++ IN gctSIZE_T InstanceCount,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind_Ex2(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsATTRIBUTE_PTR VertexArray,
++ IN gctSIZE_T First,
++ IN gctSIZE_T Count,
++ IN gctBOOL DrawArraysInstanced,
++ IN gctSIZE_T InstanceCount,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoBUFOBJ IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctSIZE_T * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty,
++#else
++ IN OUT gctUINT * PrimitiveCount,
++#endif
++ IN gctINT VertexInstanceIdLinkage
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++/*******************************************************************************
++***** Composition *************************************************************/
++
++typedef enum _gceCOMPOSITION
++{
++ gcvCOMPOSE_CLEAR = 1,
++ gcvCOMPOSE_BLUR,
++ gcvCOMPOSE_DIM,
++ gcvCOMPOSE_LAYER
++}
++gceCOMPOSITION;
++
++typedef struct _gcsCOMPOSITION * gcsCOMPOSITION_PTR;
++typedef struct _gcsCOMPOSITION
++{
++ /* Structure size. */
++ gctUINT structSize;
++
++ /* Composition operation. */
++ gceCOMPOSITION operation;
++
++ /* Layer to be composed. */
++ gcoSURF layer;
++
++ /* Source and target coordinates. */
++ gcsRECT srcRect;
++ gcsRECT trgRect;
++
++ /* Target rectangle */
++ gcsPOINT v0;
++ gcsPOINT v1;
++ gcsPOINT v2;
++
++ /* Blending parameters. */
++ gctBOOL enableBlending;
++ gctBOOL premultiplied;
++ gctUINT8 alphaValue;
++
++ /* Clear color. */
++ gctFLOAT r;
++ gctFLOAT g;
++ gctFLOAT b;
++ gctFLOAT a;
++}
++gcsCOMPOSITION;
++
++gceSTATUS
++gco3D_ProbeComposition(
++ IN gcoHARDWARE Hardware,
++ IN gctBOOL ResetIfEmpty
++ );
++
++gceSTATUS
++gco3D_CompositionBegin(
++ IN gcoHARDWARE Hardware
++ );
++
++gceSTATUS
++gco3D_ComposeLayer(
++ IN gcoHARDWARE Hardware,
++ IN gcsCOMPOSITION_PTR Layer
++ );
++
++gceSTATUS
++gco3D_CompositionSignals(
++ IN gcoHARDWARE Hardware,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal1,
++ IN gctSIGNAL Signal2
++ );
++
++gceSTATUS
++gco3D_CompositionEnd(
++ IN gcoHARDWARE Hardware,
++ IN gcoSURF Target,
++ IN gctBOOL Synchronous
++ );
++
++/* Frame Database */
++gceSTATUS
++gcoHAL_AddFrameDB(
++ void
++ );
++
++gceSTATUS
++gcoHAL_DumpFrameDB(
++ gctCONST_STRING Filename OPTIONAL
++ );
++
++/******************************************************************************
++**********************gcoBUFOBJ object*****************************************
++*******************************************************************************/
++typedef enum _gceBUFOBJ_TYPE
++{
++ gcvBUFOBJ_TYPE_ARRAY_BUFFER = 1,
++ gcvBUFOBJ_TYPE_ELEMENT_ARRAY_BUFFER = 2,
++ gcvBUFOBJ_TYPE_GENERIC_BUFFER = 100
++
++} gceBUFOBJ_TYPE;
++
++typedef enum _gceBUFOBJ_USAGE
++{
++ gcvBUFOBJ_USAGE_STREAM_DRAW = 1,
++ gcvBUFOBJ_USAGE_STREAM_READ,
++ gcvBUFOBJ_USAGE_STREAM_COPY,
++ gcvBUFOBJ_USAGE_STATIC_DRAW,
++ gcvBUFOBJ_USAGE_STATIC_READ,
++ gcvBUFOBJ_USAGE_STATIC_COPY,
++ gcvBUFOBJ_USAGE_DYNAMIC_DRAW,
++ gcvBUFOBJ_USAGE_DYNAMIC_READ,
++ gcvBUFOBJ_USAGE_DYNAMIC_COPY,
++
++} gceBUFOBJ_USAGE;
++
++/* Construct a new gcoBUFOBJ object. */
++gceSTATUS
++gcoBUFOBJ_Construct(
++ IN gcoHAL Hal,
++ IN gceBUFOBJ_TYPE Type,
++ OUT gcoBUFOBJ * BufObj
++ );
++
++/* Destroy a gcoBUFOBJ object. */
++gceSTATUS
++gcoBUFOBJ_Destroy(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Lock pbo in memory. */
++gceSTATUS
++gcoBUFOBJ_Lock(
++ IN gcoBUFOBJ BufObj,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Lock pbo in memory. */
++gceSTATUS
++gcoBUFOBJ_FastLock(
++ IN gcoBUFOBJ BufObj,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock pbo that was previously locked with gcoBUFOBJ_Lock. */
++gceSTATUS
++gcoBUFOBJ_Unlock(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Free existing pbo buffer. */
++gceSTATUS
++gcoBUFOBJ_Free(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Upload data into a pbo buffer. */
++gceSTATUS
++gcoBUFOBJ_Upload(
++ IN gcoBUFOBJ BufObj,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Bytes,
++ IN gceBUFOBJ_USAGE Usage
++ );
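As a rough usage sketch of the buffer-object API declared in this header, the fragment below constructs an array (vertex) buffer, uploads a small static array, and destroys the object again. The hal handle, the gcvSTATUS_OK success code, and the reduced error handling are assumptions made purely for illustration.

    static const float vertices[] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
    gcoBUFOBJ vbo = NULL;
    gceSTATUS status;

    /* Create an array (vertex) buffer object. */
    status = gcoBUFOBJ_Construct(hal, gcvBUFOBJ_TYPE_ARRAY_BUFFER, &vbo);

    /* Upload the vertex data at offset 0 with static-draw usage. */
    if (status == gcvSTATUS_OK)
        status = gcoBUFOBJ_Upload(vbo, vertices, 0, sizeof(vertices),
                                  gcvBUFOBJ_USAGE_STATIC_DRAW);

    /* Release the buffer object once it is no longer needed. */
    if (vbo != NULL)
        gcoBUFOBJ_Destroy(vbo);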
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoBUFOBJ_IndexBind (
++ IN gcoBUFOBJ Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Count
++ );
++
++/* Find min and max index for the index buffer */
++gceSTATUS
++gcoBUFOBJ_IndexGetRange(
++ IN gcoBUFOBJ Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Sets a buffer object as dirty */
++gceSTATUS
++gcoBUFOBJ_SetDirty(
++ IN gcoBUFOBJ BufObj
++ );
++
++/* Creates a new buffer if needed */
++gceSTATUS
++gcoBUFOBJ_AlignIndexBufferWhenNeeded(
++ IN gcoBUFOBJ BufObj,
++ IN gctSIZE_T Offset,
++ OUT gcoBUFOBJ * AlignedBufObj
++ );
++
++/* Cache operations on whole range */
++gceSTATUS
++gcoBUFOBJ_CPUCacheOperation(
++ IN gcoBUFOBJ BufObj,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Cache operations on a specified range */
++gceSTATUS
++gcoBUFOBJ_CPUCacheOperation_Range(
++ IN gcoBUFOBJ BufObj,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Length,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Return size of the bufobj */
++gceSTATUS
++gcoBUFOBJ_GetSize(
++ IN gcoBUFOBJ BufObj,
++ OUT gctSIZE_T_PTR Size
++ );
++
++/* Return memory node of the bufobj */
++gceSTATUS
++gcoBUFOBJ_GetNode(
++ IN gcoBUFOBJ BufObj,
++ OUT gcsSURF_NODE_PTR * Node
++ );
++
++/* Handle GPU cache operations */
++gceSTATUS
++gcoBUFOBJ_GPUCacheOperation(
++ gcoBUFOBJ BufObj
++ );
++
++/* Dump buffer. */
++void
++gcoBUFOBJ_Dump(
++ IN gcoBUFOBJ BufObj
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* gcdENABLE_3D */
++#endif /* __gc_hal_engine_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_engine_vg.h 2015-05-01 14:57:59.587427001 -0500
+@@ -0,0 +1,1215 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_vg_h_
++#define __gc_hal_engine_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "gc_hal_types.h"
++
++/******************************************************************************\
++******************************** VG Enumerations *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Tiling modes for painting and imaging.
++**
++** This enumeration defines the tiling modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 tile modes.
++*/
++typedef enum _gceTILE_MODE
++{
++ gcvTILE_FILL,
++ gcvTILE_PAD,
++ gcvTILE_REPEAT,
++ gcvTILE_REFLECT
++}
++gceTILE_MODE;
++
++/******************************************************************************/
++/** @ingroup gcoVG
++**
++** @brief The different paint modes.
++**
++** This enumeration lists the available paint modes.
++*/
++typedef enum _gcePAINT_TYPE
++{
++ /** Solid color. */
++ gcvPAINT_MODE_SOLID,
++
++ /** Linear gradient. */
++ gcvPAINT_MODE_LINEAR,
++
++ /** Radial gradient. */
++ gcvPAINT_MODE_RADIAL,
++
++ /** Pattern. */
++ gcvPAINT_MODE_PATTERN,
++
++ /** Mode count. */
++ gcvPAINT_MODE_COUNT
++}
++gcePAINT_TYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Types of path data supported by HAL.
++**
++** This enumeration defines the types of path data supported by the HAL.
++** This is in fact a one-to-one mapping of the OpenVG 1.1 path types.
++*/
++typedef enum _gcePATHTYPE
++{
++ gcePATHTYPE_UNKNOWN = -1,
++ gcePATHTYPE_INT8,
++ gcePATHTYPE_INT16,
++ gcePATHTYPE_INT32,
++ gcePATHTYPE_FLOAT
++}
++gcePATHTYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Supported path segment commands.
++**
++** This enumeration defines the path segment commands supported by the HAL.
++*/
++typedef enum _gceVGCMD
++{
++ gcvVGCMD_END, /* 0: GCCMD_TS_OPCODE_END */
++ gcvVGCMD_CLOSE, /* 1: GCCMD_TS_OPCODE_CLOSE */
++ gcvVGCMD_MOVE, /* 2: GCCMD_TS_OPCODE_MOVE */
++ gcvVGCMD_MOVE_REL, /* 3: GCCMD_TS_OPCODE_MOVE_REL */
++ gcvVGCMD_LINE, /* 4: GCCMD_TS_OPCODE_LINE */
++ gcvVGCMD_LINE_REL, /* 5: GCCMD_TS_OPCODE_LINE_REL */
++ gcvVGCMD_QUAD, /* 6: GCCMD_TS_OPCODE_QUADRATIC */
++ gcvVGCMD_QUAD_REL, /* 7: GCCMD_TS_OPCODE_QUADRATIC_REL */
++ gcvVGCMD_CUBIC, /* 8: GCCMD_TS_OPCODE_CUBIC */
++ gcvVGCMD_CUBIC_REL, /* 9: GCCMD_TS_OPCODE_CUBIC_REL */
++ gcvVGCMD_BREAK, /* 10: GCCMD_TS_OPCODE_BREAK */
++ gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/
++ gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/
++
++ /* The width, in bits, of the command recognized by the hardware. */
++ gcvVGCMD_WIDTH = 5,
++
++ /* Hardware command mask. */
++ gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1,
++
++ /* Command modifiers. */
++ gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */
++ gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */
++ gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */
++ gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */
++
++ /* Emulated LINE commands. */
++ gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */
++ gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */
++ gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */
++ gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */
++
++ /* Emulated SMOOTH commands. */
++ gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */
++ gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */
++ gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */
++ gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */
++
++ /* Emulated ARC commands. */
++ gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */
++ gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */
++ gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */
++ gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */
++}
++gceVGCMD;
++typedef enum _gceVGCMD * gceVGCMD_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Blending modes supported by the HAL.
++**
++** This enumeration defines the blending modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 blending modes.
++*/
++typedef enum _gceVG_BLEND
++{
++ gcvVG_BLEND_SRC,
++ gcvVG_BLEND_SRC_OVER,
++ gcvVG_BLEND_DST_OVER,
++ gcvVG_BLEND_SRC_IN,
++ gcvVG_BLEND_DST_IN,
++ gcvVG_BLEND_MULTIPLY,
++ gcvVG_BLEND_SCREEN,
++ gcvVG_BLEND_DARKEN,
++ gcvVG_BLEND_LIGHTEN,
++ gcvVG_BLEND_ADDITIVE,
++ gcvVG_BLEND_SUBTRACT,
++ gcvVG_BLEND_FILTER
++}
++gceVG_BLEND;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Image modes supported by the HAL.
++**
++** This enumeration defines the image modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition
++** of NO IMAGE.
++*/
++typedef enum _gceVG_IMAGE
++{
++ gcvVG_IMAGE_NONE,
++ gcvVG_IMAGE_NORMAL,
++ gcvVG_IMAGE_MULTIPLY,
++ gcvVG_IMAGE_STENCIL,
++ gcvVG_IMAGE_FILTER
++}
++gceVG_IMAGE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Filter modes for patterns and imaging.
++**
++** This enumeration defines the filter modes supported by the HAL.
++*/
++typedef enum _gceIMAGE_FILTER
++{
++ gcvFILTER_POINT,
++ gcvFILTER_LINEAR,
++ gcvFILTER_BI_LINEAR
++}
++gceIMAGE_FILTER;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Primitive modes supported by the HAL.
++**
++** This enumeration defines the primitive modes supported by the HAL.
++*/
++typedef enum _gceVG_PRIMITIVE
++{
++ gcvVG_SCANLINE,
++ gcvVG_RECTANGLE,
++ gcvVG_TESSELLATED,
++ gcvVG_TESSELLATED_TILED
++}
++gceVG_PRIMITIVE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rendering quality modes supported by the HAL.
++**
++** This enumeration defines the rendering quality modes supported by the HAL.
++*/
++typedef enum _gceRENDER_QUALITY
++{
++ gcvVG_NONANTIALIASED,
++ gcvVG_2X2_MSAA,
++ gcvVG_2X4_MSAA,
++ gcvVG_4X4_MSAA
++}
++gceRENDER_QUALITY;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Fill rules supported by the HAL.
++**
++** This enumeration defines the fill rules supported by the HAL.
++*/
++typedef enum _gceFILL_RULE
++{
++ gcvVG_EVEN_ODD,
++ gcvVG_NON_ZERO
++}
++gceFILL_RULE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Cap styles supported by the HAL.
++**
++** This enumeration defines the cap styles supported by the HAL.
++*/
++typedef enum _gceCAP_STYLE
++{
++ gcvCAP_BUTT,
++ gcvCAP_ROUND,
++ gcvCAP_SQUARE
++}
++gceCAP_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Join styles supported by the HAL.
++**
++** This enumeration defines the join styles supported by the HAL.
++*/
++typedef enum _gceJOIN_STYLE
++{
++ gcvJOIN_MITER,
++ gcvJOIN_ROUND,
++ gcvJOIN_BEVEL
++}
++gceJOIN_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Channel mask values.
++**
++** This enumeration defines the values for channel mask used in image
++** filtering.
++*/
++
++/* Base values for channel mask definitions. */
++#define gcvCHANNEL_X (0)
++#define gcvCHANNEL_R (1 << 0)
++#define gcvCHANNEL_G (1 << 1)
++#define gcvCHANNEL_B (1 << 2)
++#define gcvCHANNEL_A (1 << 3)
++
++typedef enum _gceCHANNEL
++{
++ gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++}
++gceCHANNEL;
++
++/******************************************************************************\
++******************************** VG Structures *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsCOLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFLOAT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFLOAT red;
++
++ /** Green color channel value for the color stop. */
++ gctFLOAT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFLOAT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFLOAT alpha;
++}
++gcsCOLOR_RAMP, * gcsCOLOR_RAMP_PTR;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints in fixed form.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsFIXED_COLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFIXED_POINT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFIXED_POINT red;
++
++ /** Green color channel value for the color stop. */
++ gctFIXED_POINT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFIXED_POINT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFIXED_POINT alpha;
++}
++gcsFIXED_COLOR_RAMP, * gcsFIXED_COLOR_RAMP_PTR;
++
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rectangle structure used by the gcoVG object.
++**
++** This structure defines the layout of a rectangle. Make sure width and
++** height are larger than 0.
++*/
++typedef struct _gcsVG_RECT * gcsVG_RECT_PTR;
++typedef struct _gcsVG_RECT
++{
++ /** Left location of the rectangle. */
++ gctINT x;
++
++ /** Top location of the rectangle. */
++ gctINT y;
++
++ /** Width of the rectangle. */
++ gctINT width;
++
++ /** Height of the rectangle. */
++ gctINT height;
++}
++gcsVG_RECT;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Path command buffer attribute structure.
++**
++** The gcsPATH_BUFFER_INFO structure contains the specifics about
++** the layout of the path data command buffer.
++*/
++typedef struct _gcsPATH_BUFFER_INFO * gcsPATH_BUFFER_INFO_PTR;
++typedef struct _gcsPATH_BUFFER_INFO
++{
++ gctUINT reservedForHead;
++ gctUINT reservedForTail;
++}
++gcsPATH_BUFFER_INFO;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the path data container structure.
++**
++** The gcsPATH structure defines the layout of the path data container.
++*/
++typedef struct _gcsPATH_DATA * gcsPATH_DATA_PTR;
++typedef struct _gcsPATH_DATA
++{
++ /* Data container in command buffer format. */
++ gcsCMDBUFFER data;
++
++ /* Path data type. */
++ gcePATHTYPE dataType;
++}
++gcsPATH_DATA;
++
++
++/******************************************************************************\
++********************************* gcoHAL Object ********************************
++\******************************************************************************/
++
++/* Query path data storage attributes. */
++gceSTATUS
++gcoHAL_QueryPathStorage(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ OUT gcsPATH_BUFFER_INFO_PTR Information
++ );
++
++/* Associate a completion signal with the command buffer. */
++gceSTATUS
++gcoHAL_AssociateCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Release the current command buffer completion signal. */
++gceSTATUS
++gcoHAL_DeassociateCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Verify whether the command buffer is still in use. */
++gceSTATUS
++gcoHAL_CheckCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Wait until the command buffer is no longer in use. */
++gceSTATUS
++gcoHAL_WaitCompletion(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Flush the pixel cache. */
++gceSTATUS
++gcoHAL_Flush(
++ IN gcoHAL Hal
++#if GC355_PROFILER
++ ,
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++#endif
++ );
++
++/* Split a hardware address into pool and offset. */
++gceSTATUS
++gcoHAL_SplitAddress(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Combine pool and offset into a hardware address. */
++gceSTATUS
++gcoHAL_CombineAddress(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcePOOL Pool,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Schedule allocated linear video memory to be freed. */
++gceSTATUS
++gcoHAL_ScheduleVideoMemory(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT32 Node
++ );
++
++/* Free linear video memory allocated with gcoHAL_AllocateLinearVideoMemory. */
++gceSTATUS
++gcoHAL_FreeVideoMemory(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT32 Node
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gcoHAL_QueryCommandBuffer(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++/* Allocate and lock linear video memory. */
++gceSTATUS
++gcoHAL_AllocateLinearVideoMemory(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ IN gcePOOL Pool,
++ OUT gctUINT32 * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Align the specified size according to the hardware requirements. */
++gceSTATUS
++gcoHAL_GetAlignedSurfaceSize(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++gceSTATUS
++gcoHAL_ReserveTask(
++ IN gcoHAL Hal,
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceBLOCK Block,
++ IN gctUINT TaskCount,
++ IN gctUINT32 Bytes,
++ OUT gctPOINTER * Memory
++ );
++/******************************************************************************\
++********************************** gcoVG Object ********************************
++\******************************************************************************/
++
++/** @defgroup gcoVG gcoVG
++**
++** The gcoVG object abstracts the VG hardware pipe.
++*/
++#if GC355_PROFILER
++void
++gcoVG_ProfilerEnableDisable(
++ IN gcoVG Vg,
++ IN gctUINT enableGetAPITimes,
++ IN gctFILE apiTimeFile
++ );
++
++void
++gcoVG_ProfilerTreeDepth(
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth
++ );
++
++void
++gcoVG_ProfilerSetStates(
++ IN gcoVG Vg,
++ IN gctUINT treeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++ );
++#endif
++
++gctBOOL
++gcoVG_IsMaskSupported(
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsTargetSupported(
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsImageSupported(
++#if GC355_PROFILER
++ IN gcoVG Vg,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceSURF_FORMAT Format
++ );
++
++gctUINT8 gcoVG_PackColorComponent(
++#if GC355_PROFILER
++ gcoVG Vg,
++ gctUINT TreeDepth,
++ gctUINT saveLayerTreeDepth,
++ gctUINT varTreeDepth,
++#endif
++ gctFLOAT Value
++ );
++
++gceSTATUS
++gcoVG_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVG * Vg
++ );
++
++gceSTATUS
++gcoVG_Destroy(
++ IN gcoVG Vg
++#if GC355_PROFILER
++ ,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++#endif
++ );
++
++gceSTATUS
++gcoVG_SetTarget(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Target
++ );
++
++gceSTATUS
++gcoVG_UnsetTarget(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_SetUserToSurface(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT UserToSurface[9]
++ );
++
++gceSTATUS
++gcoVG_SetSurfaceToImage(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT SurfaceToImage[9]
++ );
++
++gceSTATUS
++gcoVG_EnableMask(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetMask(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Mask
++ );
++
++gceSTATUS
++gcoVG_UnsetMask(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_FlushMask(
++ IN gcoVG Vg
++#if GC355_PROFILER
++ ,
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth
++#endif
++ );
++
++gceSTATUS
++gcoVG_EnableScissor(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetScissor(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctSIZE_T RectangleCount,
++ IN gcsVG_RECT_PTR Rectangles
++ );
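A small sketch of how the scissor interface above might be driven, assuming GC355_PROFILER is not defined (so the profiler-only tree-depth parameters drop out), an existing gcoVG handle vg, and the gcvTRUE / gcvSTATUS_OK definitions from the common HAL headers:

    /* One scissor rectangle covering a 640x480 region at the origin. */
    gcsVG_RECT scissor = { 0, 0, 640, 480 };
    gceSTATUS status;

    status = gcoVG_EnableScissor(vg, gcvTRUE);

    if (status == gcvSTATUS_OK)
        status = gcoVG_SetScissor(vg, 1, &scissor);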
++
++gceSTATUS
++gcoVG_EnableColorTransform(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetColorTransform(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT ColorTransform[8]
++ );
++
++gceSTATUS
++gcoVG_SetTileFillColor(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++gceSTATUS
++gcoVG_SetSolidPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++gceSTATUS
++gcoVG_SetLinearPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT Constant,
++ IN gctFLOAT StepX,
++ IN gctFLOAT StepY
++ );
++
++gceSTATUS
++gcoVG_SetRadialPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT LinConstant,
++ IN gctFLOAT LinStepX,
++ IN gctFLOAT LinStepY,
++ IN gctFLOAT RadConstant,
++ IN gctFLOAT RadStepX,
++ IN gctFLOAT RadStepY,
++ IN gctFLOAT RadStepXX,
++ IN gctFLOAT RadStepYY,
++ IN gctFLOAT RadStepXY
++ );
++
++gceSTATUS
++gcoVG_SetPatternPaint(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctFLOAT UConstant,
++ IN gctFLOAT UStepX,
++ IN gctFLOAT UStepY,
++ IN gctFLOAT VConstant,
++ IN gctFLOAT VStepX,
++ IN gctFLOAT VStepY,
++ IN gctBOOL Linear
++ );
++
++gceSTATUS
++gcoVG_SetColorRamp(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF ColorRamp,
++ IN gceTILE_MODE ColorRampSpreadMode
++ );
++
++gceSTATUS
++gcoVG_SetPattern(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctINT32 width,
++ IN gctINT32 height,
++ IN gcoSURF Pattern,
++ IN gceTILE_MODE TileMode,
++ IN gceIMAGE_FILTER Filter
++ );
++
++gceSTATUS
++gcoVG_SetImageMode(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceVG_IMAGE Mode
++ );
++
++gceSTATUS
++gcoVG_SetBlendMode(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_SetRenderingQuality(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceRENDER_QUALITY Quality
++ );
++
++gceSTATUS
++gcoVG_SetFillRule(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gceFILL_RULE FillRule
++ );
++
++gceSTATUS
++gcoVG_FinalizePath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++gceSTATUS
++gcoVG_Clear(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_DrawPath(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcsPATH_DATA_PTR PathData,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++#if gcdMOVG
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctFLOAT *Bounds,
++#endif
++ IN gctBOOL SoftwareTesselation
++ );
++
++gceSTATUS
++gcoVG_DrawImage(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctBOOL Mask,
++ IN gctBOOL isDrawImage
++ );
++
++gceSTATUS
++gcoVG_TesselateImage(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Image,
++ IN gcsVG_RECT_PTR Rectangle,
++ IN gceIMAGE_FILTER Filter,
++ IN gctBOOL Mask,
++#if gcdMOVG
++ IN gctBOOL SoftwareTesselation,
++ IN gceVG_BLEND BlendMode
++#else
++ IN gctBOOL SoftwareTesselation
++#endif
++ );
++
++gceSTATUS
++gcoVG_Blit(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gcsVG_RECT_PTR SrcRect,
++ IN gcsVG_RECT_PTR TrgRect,
++ IN gceIMAGE_FILTER Filter,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_ColorMatrix(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN const gctFLOAT * Matrix,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_SeparableConvolve(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT KernelWidth,
++ IN gctINT KernelHeight,
++ IN gctINT ShiftX,
++ IN gctINT ShiftY,
++ IN const gctINT16 * KernelX,
++ IN const gctINT16 * KernelY,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_GaussianBlur(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctFLOAT StdDeviationX,
++ IN gctFLOAT StdDeviationY,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_EnableDither(
++ IN gcoVG Vg,
++#if GC355_PROFILER
++ IN gctUINT TreeDepth,
++ IN gctUINT saveLayerTreeDepth,
++ IN gctUINT varTreeDepth,
++#endif
++ IN gctBOOL Enable
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_engine_vg_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_enum.h 2015-05-01 14:57:59.587427001 -0500
+@@ -0,0 +1,1608 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_enum_h_
++#define __gc_hal_enum_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Chip models. */
++typedef enum _gceCHIPMODEL
++{
++ gcv200 = 0x0200,
++ gcv300 = 0x0300,
++ gcv320 = 0x0320,
++ gcv328 = 0x0328,
++ gcv350 = 0x0350,
++ gcv355 = 0x0355,
++ gcv400 = 0x0400,
++ gcv410 = 0x0410,
++ gcv420 = 0x0420,
++ gcv428 = 0x0428,
++ gcv450 = 0x0450,
++ gcv500 = 0x0500,
++ gcv520 = 0x0520,
++ gcv530 = 0x0530,
++ gcv600 = 0x0600,
++ gcv700 = 0x0700,
++ gcv800 = 0x0800,
++ gcv860 = 0x0860,
++ gcv880 = 0x0880,
++ gcv1000 = 0x1000,
++ gcv1500 = 0x1500,
++ gcv2000 = 0x2000,
++ gcv2100 = 0x2100,
++ gcv2200 = 0x2200,
++ gcv2500 = 0x2500,
++ gcv3000 = 0x3000,
++ gcv4000 = 0x4000,
++ gcv5000 = 0x5000,
++ gcv5200 = 0x5200,
++ gcv6400 = 0x6400,
++}
++gceCHIPMODEL;
++
++/* Chip features. */
++typedef enum _gceFEATURE
++{
++ gcvFEATURE_PIPE_2D = 0,
++ gcvFEATURE_PIPE_3D,
++ gcvFEATURE_PIPE_VG,
++ gcvFEATURE_DC,
++ gcvFEATURE_HIGH_DYNAMIC_RANGE,
++ gcvFEATURE_MODULE_CG,
++ gcvFEATURE_MIN_AREA,
++ gcvFEATURE_BUFFER_INTERLEAVING,
++ gcvFEATURE_BYTE_WRITE_2D,
++ gcvFEATURE_ENDIANNESS_CONFIG,
++ gcvFEATURE_DUAL_RETURN_BUS,
++ gcvFEATURE_DEBUG_MODE,
++ gcvFEATURE_YUY2_RENDER_TARGET,
++ gcvFEATURE_FRAGMENT_PROCESSOR,
++ gcvFEATURE_2DPE20,
++ gcvFEATURE_FAST_CLEAR,
++ gcvFEATURE_YUV420_TILER,
++ gcvFEATURE_YUY2_AVERAGING,
++ gcvFEATURE_FLIP_Y,
++ gcvFEATURE_EARLY_Z,
++ gcvFEATURE_COMPRESSION,
++ gcvFEATURE_MSAA,
++ gcvFEATURE_SPECIAL_ANTI_ALIASING,
++ gcvFEATURE_SPECIAL_MSAA_LOD,
++ gcvFEATURE_422_TEXTURE_COMPRESSION,
++ gcvFEATURE_DXT_TEXTURE_COMPRESSION,
++ gcvFEATURE_ETC1_TEXTURE_COMPRESSION,
++ gcvFEATURE_CORRECT_TEXTURE_CONVERTER,
++ gcvFEATURE_TEXTURE_8K,
++ gcvFEATURE_SCALER,
++ gcvFEATURE_YUV420_SCALER,
++ gcvFEATURE_SHADER_HAS_W,
++ gcvFEATURE_SHADER_HAS_SIGN,
++ gcvFEATURE_SHADER_HAS_FLOOR,
++ gcvFEATURE_SHADER_HAS_CEIL,
++ gcvFEATURE_SHADER_HAS_SQRT,
++ gcvFEATURE_SHADER_HAS_TRIG,
++ gcvFEATURE_VAA,
++ gcvFEATURE_HZ,
++ gcvFEATURE_CORRECT_STENCIL,
++ gcvFEATURE_VG20,
++ gcvFEATURE_VG_FILTER,
++ gcvFEATURE_VG21,
++ gcvFEATURE_VG_DOUBLE_BUFFER,
++ gcvFEATURE_MC20,
++ gcvFEATURE_SUPER_TILED,
++ gcvFEATURE_FAST_CLEAR_FLUSH,
++ gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND,
++ gcvFEATURE_2D_DITHER,
++ gcvFEATURE_2D_A8_TARGET,
++ gcvFEATURE_2D_A8_NO_ALPHA,
++ gcvFEATURE_2D_FILTERBLIT_FULLROTATION,
++ gcvFEATURE_2D_BITBLIT_FULLROTATION,
++ gcvFEATURE_WIDE_LINE,
++ gcvFEATURE_FC_FLUSH_STALL,
++ gcvFEATURE_FULL_DIRECTFB,
++ gcvFEATURE_HALF_FLOAT_PIPE,
++ gcvFEATURE_LINE_LOOP,
++ gcvFEATURE_2D_YUV_BLIT,
++ gcvFEATURE_2D_TILING,
++ gcvFEATURE_NON_POWER_OF_TWO,
++ gcvFEATURE_3D_TEXTURE,
++ gcvFEATURE_TEXTURE_ARRAY,
++ gcvFEATURE_TILE_FILLER,
++ gcvFEATURE_LOGIC_OP,
++ gcvFEATURE_COMPOSITION,
++ gcvFEATURE_MIXED_STREAMS,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT,
++ gcvFEATURE_END_EVENT,
++ gcvFEATURE_VERTEX_10_10_10_2,
++ gcvFEATURE_TEXTURE_10_10_10_2,
++ gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING,
++ gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT,
++ gcvFEATURE_2D_ROTATION_STALL_FIX,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX,
++ gcvFEATURE_BUG_FIXES10,
++ gcvFEATURE_2D_MINOR_TILING,
++ /* Supertiled compressed textures are supported. */
++ gcvFEATURE_TEX_COMPRRESSION_SUPERTILED,
++ gcvFEATURE_FAST_MSAA,
++ gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP,
++ gcvFEATURE_TEXTURE_TILE_STATUS_READ,
++ gcvFEATURE_DEPTH_BIAS_FIX,
++ gcvFEATURE_RECT_PRIMITIVE,
++ gcvFEATURE_BUG_FIXES11,
++ gcvFEATURE_SUPERTILED_TEXTURE,
++ gcvFEATURE_2D_NO_COLORBRUSH_INDEX8,
++ gcvFEATURE_RS_YUV_TARGET,
++ gcvFEATURE_2D_FC_SOURCE,
++ gcvFEATURE_2D_CC_NOAA_SOURCE,
++ gcvFEATURE_PE_DITHER_FIX,
++ gcvFEATURE_2D_YUV_SEPARATE_STRIDE,
++ gcvFEATURE_FRUSTUM_CLIP_FIX,
++ gcvFEATURE_TEXTURE_SWIZZLE,
++ gcvFEATURE_PRIMITIVE_RESTART,
++ gcvFEATURE_TEXTURE_LINEAR,
++ gcvFEATURE_TEXTURE_YUV_ASSEMBLER,
++ gcvFEATURE_LINEAR_RENDER_TARGET,
++ gcvFEATURE_SHADER_HAS_ATOMIC,
++ gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE,
++ gcvFEATURE_SHADER_ENHANCEMENTS2,
++ gcvFEATURE_BUG_FIXES7,
++ gcvFEATURE_SHADER_HAS_RTNE,
++ gcvFEATURE_SHADER_HAS_EXTRA_INSTRUCTIONS2,
++ gcvFEATURE_SHADER_ENHANCEMENTS3,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING,
++ gcvFEATURE_SINGLE_BUFFER,
++ gcvFEATURE_OCCLUSION_QUERY,
++ gcvFEATURE_2D_GAMMA,
++ gcvFEATURE_2D_COLOR_SPACE_CONVERSION,
++ gcvFEATURE_2D_SUPER_TILE_VERSION,
++ gcvFEATURE_HALTI0,
++ gcvFEATURE_HALTI1,
++ gcvFEATURE_HALTI2,
++ gcvFEATURE_2D_MIRROR_EXTENSION,
++ gcvFEATURE_TEXTURE_ASTC,
++ gcvFEATURE_2D_SUPER_TILE_V1,
++ gcvFEATURE_2D_SUPER_TILE_V2,
++ gcvFEATURE_2D_SUPER_TILE_V3,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2,
++ gcvFEATURE_NEW_RA,
++ gcvFEATURE_BUG_FIXED_IMPLICIT_PRIMITIVE_RESTART,
++ gcvFEATURE_PE_MULTI_RT_BLEND_ENABLE_CONTROL,
++ gcvFEATURE_SMALL_MSAA, /* An upgraded version of Fast MSAA */
++ gcvFEATURE_VERTEX_INST_ID_AS_ATTRIBUTE,
++ gcvFEATURE_DUAL_16,
++ gcvFEATURE_BRANCH_ON_IMMEDIATE_REG,
++ gcvFEATURE_2D_COMPRESSION,
++ gcvFEATURE_TPC_COMPRESSION,
++ gcvFEATURE_2D_OPF_YUV_OUTPUT,
++ gcvFEATURE_2D_FILTERBLIT_A8_ALPHA,
++ gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT,
++ gcvFEATURE_V2_COMPRESSION_Z16_FIX,
++
++ gcvFEATURE_VERTEX_INST_ID_AS_INTEGER,
++ gcvFEATURE_2D_YUV_MODE,
++ gcvFEATURE_ACE,
++ gcvFEATURE_COLOR_COMPRESSION,
++
++ gcvFEATURE_32BPP_COMPONENT_TEXTURE_CHANNEL_SWIZZLE,
++ gcvFEATURE_64BPP_HW_CLEAR_SUPPORT,
++ gcvFEATURE_TX_LERP_PRECISION_FIX,
++ gcvFEATURE_COMPRESSION_V2,
++ gcvFEATURE_MMU,
++ gcvFEATURE_COMPRESSION_V3,
++ gcvFEATURE_TX_DECOMPRESSOR,
++ gcvFEATURE_MRT_TILE_STATUS_BUFFER,
++ gcvFEATURE_COMPRESSION_V1,
++ gcvFEATURE_V1_COMPRESSION_Z16_DECOMPRESS_FIX,
++ gcvFEATURE_RTT,
++ gcvFEATURE_GENERICS,
++ gcvFEATURE_2D_ONE_PASS_FILTER,
++ gcvFEATURE_2D_ONE_PASS_FILTER_TAP,
++ gcvFEATURE_2D_POST_FLIP,
++ gcvFEATURE_2D_PIXEL_ALIGNMENT,
++ gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT,
++ gcvFEATURE_CORRECT_AUTO_DISABLE_COUNT_WIDTH,
++
++ gcvFEATURE_HALTI3,
++ gcvFEATURE_EEZ,
++ gcvFEATURE_INTEGER_PIPE_FIX,
++ gcvFEATURE_PSOUTPUT_MAPPING,
++ gcvFEATURE_8K_RT_FIX,
++ gcvFEATURE_TX_TILE_STATUS_MAPPING,
++ gcvFEATURE_SRGB_RT_SUPPORT,
++ gcvFEATURE_UNIFORM_APERTURE,
++ gcvFEATURE_TEXTURE_16K,
++ gcvFEATURE_PA_FARZCLIPPING_FIX,
++ gcvFEATURE_PE_DITHER_COLORMASK_FIX,
++ gcvFEATURE_ZSCALE_FIX,
++
++ gcvFEATURE_MULTI_PIXELPIPES,
++ gcvFEATURE_PIPE_CL,
++
++ gcvFEATURE_BUG_FIXES18,
++
++ gcvFEATURE_UNIFIED_SAMPLERS,
++ gcvFEATURE_CL_PS_WALKER,
++ gcvFEATURE_NEW_HZ,
++
++ gcvFEATURE_TX_FRAC_PRECISION_6BIT,
++ gcvFEATURE_SH_INSTRUCTION_PREFETCH,
++ gcvFEATURE_PROBE,
++
++ gcvFEATURE_BUG_FIXES8,
++ gcvFEATURE_2D_ALL_QUAD,
++
++ gcvFEATURE_SINGLE_PIPE_HALTI1,
++
++ gcvFEATURE_BLOCK_SIZE_16x16,
++
++ gcvFEATURE_NO_USER_CSC,
++ gcvFEATURE_ANDROID_ONLY,
++ gcvFEATURE_HAS_PRODUCTID,
++
++ gcvFEATURE_V2_MSAA_COMP_FIX,
++
++ gcvFEATURE_S8_ONLY_RENDERING,
++
++ gcvFEATURE_SEPARATE_SRC_DST,
++
++ gcvFEATURE_FE_START_VERTEX_SUPPORT,
++ gcvFEATURE_RS_DEPTHSTENCIL_NATIVE_SUPPORT,
++
++ /* Insert features above this comment only. */
++ gcvFEATURE_COUNT /* Not a feature. */
++}
++gceFEATURE;
++
++/* Chip SWWA. */
++typedef enum _gceSWWA
++{
++ gcvSWWA_601 = 0,
++ gcvSWWA_706,
++ gcvSWWA_1163,
++ gcvSWWA_1165,
++ /* Insert SWWA above this comment only. */
++ gcvSWWA_COUNT /* Not a SWWA. */
++}
++gceSWWA;
++
++
++/* Option Set*/
++typedef enum _gceOPITON
++{
++ /* HW settings we prefer. */
++ gcvOPTION_PREFER_MULTIPIPE_RS = 0,
++ gcvOPTION_PREFER_ZCONVERT_BYPASS =1,
++
++
++ gcvOPTION_HW_NULL = 50,
++ gcvOPTION_PRINT_OPTION = 51,
++
++ gcvOPTION_FBO_PREFER_MEM = 80,
++
++ /* Insert option above this comment only. */
++ gcvOPTION_COUNT /* Not an OPTION. */
++}
++gceOPTION;
++
++typedef enum _gceFRAMEINFO
++{
++ gcvFRAMEINFO_FRAME_NUM = 0,
++ gcvFRAMEINFO_DRAW_NUM = 1,
++ gcvFRAMEINFO_DRAW_DUAL16_NUM = 2,
++ gcvFRAMEINFO_DRAW_FL32_NUM = 3,
++
++
++ gcvFRAMEINFO_COUNT,
++}
++gceFRAMEINFO;
++
++typedef enum _gceFRAMEINFO_OP
++{
++ gcvFRAMEINFO_OP_INC = 0,
++ gcvFRAMEINFO_OP_DEC = 1,
++ gcvFRAMEINFO_OP_ZERO = 2,
++ gcvFRAMEINFO_OP_GET = 3,
++
++
++ gcvFRAMEINFO_OP_COUNT,
++}
++gceFRAMEINFO_OP;
++
++
++/* Chip Power Status. */
++typedef enum _gceCHIPPOWERSTATE
++{
++ gcvPOWER_ON = 0,
++ gcvPOWER_OFF,
++ gcvPOWER_IDLE,
++ gcvPOWER_SUSPEND,
++ gcvPOWER_SUSPEND_ATPOWERON,
++ gcvPOWER_OFF_ATPOWERON,
++ gcvPOWER_IDLE_BROADCAST,
++ gcvPOWER_SUSPEND_BROADCAST,
++ gcvPOWER_OFF_BROADCAST,
++ gcvPOWER_OFF_RECOVERY,
++ gcvPOWER_OFF_TIMEOUT,
++ gcvPOWER_ON_AUTO
++}
++gceCHIPPOWERSTATE;
++
++/* CPU cache operations */
++typedef enum _gceCACHEOPERATION
++{
++ gcvCACHE_CLEAN = 0x01,
++ gcvCACHE_INVALIDATE = 0x02,
++ gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE,
++ gcvCACHE_MEMORY_BARRIER = 0x04
++}
++gceCACHEOPERATION;
++
++/* Surface types. */
++typedef enum _gceSURF_TYPE
++{
++ gcvSURF_TYPE_UNKNOWN = 0,
++ gcvSURF_INDEX,
++ gcvSURF_VERTEX,
++ gcvSURF_TEXTURE,
++ gcvSURF_RENDER_TARGET,
++ gcvSURF_DEPTH,
++ gcvSURF_BITMAP,
++ gcvSURF_TILE_STATUS,
++ gcvSURF_IMAGE,
++ gcvSURF_MASK,
++ gcvSURF_SCISSOR,
++ gcvSURF_HIERARCHICAL_DEPTH,
++ gcvSURF_NUM_TYPES, /* Make sure this is the last one! */
++
++ /* Combinations. */
++ gcvSURF_NO_TILE_STATUS = 0x100,
++ gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node.
++ In Android, vidmem node is allocated by another process. */
++ gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */
++
++ gcvSURF_FLIP = 0x800, /* The resolve target that will be flip-resolved from the RT. */
++
++ gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */
++
++ gcvSURF_LINEAR = 0x2000,
++
++ gcvSURF_CREATE_AS_TEXTURE = 0x4000, /* create it as a texture */
++
++ gcvSURF_PROTECTED_CONTENT = 0x8000, /* create it as content protected */
++
++ /* Create it with no compression; only valid when it has tile status. */
++ gcvSURF_NO_COMPRESSION = 0x40000,
++
++ gcvSURF_CONTIGUOUS = 0x20000, /* Create it as contiguous. */
++
++ gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_LINEAR = gcvSURF_RENDER_TARGET
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ /* Supported surface types with no vidmem node. */
++ gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP
++ | gcvSURF_NO_VIDMEM,
++
++ gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE
++ | gcvSURF_NO_VIDMEM,
++
++ /* Cacheable surface types with no vidmem node. */
++ gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_FLIP_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_FLIP,
++}
++gceSURF_TYPE;
++
++typedef enum _gceSURF_USAGE
++{
++ gcvSURF_USAGE_UNKNOWN,
++ gcvSURF_USAGE_RESOLVE_AFTER_CPU,
++ gcvSURF_USAGE_RESOLVE_AFTER_3D
++}
++gceSURF_USAGE;
++
++typedef enum _gceSURF_COLOR_SPACE
++{
++ gcvSURF_COLOR_SPACE_UNKNOWN,
++ gcvSURF_COLOR_SPACE_LINEAR,
++ gcvSURF_COLOR_SPACE_NONLINEAR,
++}
++gceSURF_COLOR_SPACE;
++
++typedef enum _gceSURF_COLOR_TYPE
++{
++ gcvSURF_COLOR_UNKNOWN = 0,
++ gcvSURF_COLOR_LINEAR = 0x01,
++ gcvSURF_COLOR_ALPHA_PRE = 0x02,
++}
++gceSURF_COLOR_TYPE;
++
++/* Rotation. */
++typedef enum _gceSURF_ROTATION
++{
++ gcvSURF_0_DEGREE = 0,
++ gcvSURF_90_DEGREE,
++ gcvSURF_180_DEGREE,
++ gcvSURF_270_DEGREE,
++ gcvSURF_FLIP_X,
++ gcvSURF_FLIP_Y,
++
++ gcvSURF_POST_FLIP_X = 0x40000000,
++ gcvSURF_POST_FLIP_Y = 0x80000000,
++}
++gceSURF_ROTATION;
++
++/* Surface flag */
++typedef enum _gceSURF_FLAG
++{
++ /* None flag */
++ gcvSURF_FLAG_NONE = 0x0,
++ /* content is preserved after swap */
++ gcvSURF_FLAG_CONTENT_PRESERVED = 0x1,
++ /* content is updated after swap*/
++ gcvSURF_FLAG_CONTENT_UPDATED = 0x2,
++ /* content is y inverted */
++ gcvSURF_FLAG_CONTENT_YINVERTED = 0x4,
++ /* content is protected */
++ gcvSURF_FLAG_CONTENT_PROTECTED = 0x8,
++ /* surface is contiguous. */
++ gcvSURF_FLAG_CONTIGUOUS = (1 << 4),
++}
++gceSURF_FLAG;
++
++typedef enum _gceMIPMAP_IMAGE_FORMAT
++{
++ gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2
++}
++gceMIPMAP_IMAGE_FORMAT;
++
++/* Surface formats. */
++typedef enum _gceSURF_FORMAT
++{
++ /* Unknown format. */
++ gcvSURF_UNKNOWN = 0,
++
++ /* Palettized formats. */
++ gcvSURF_INDEX1 = 100,
++ gcvSURF_INDEX4,
++ gcvSURF_INDEX8,
++
++ /* RGB formats. */
++ gcvSURF_A2R2G2B2 = 200,
++ gcvSURF_R3G3B2,
++ gcvSURF_A8R3G3B2,
++ gcvSURF_X4R4G4B4,
++ gcvSURF_A4R4G4B4,
++ gcvSURF_R4G4B4A4,
++ gcvSURF_X1R5G5B5,
++ gcvSURF_A1R5G5B5,
++ gcvSURF_R5G5B5A1,
++ gcvSURF_R5G6B5,
++ gcvSURF_R8G8B8,
++ gcvSURF_X8R8G8B8,
++ gcvSURF_A8R8G8B8,
++ gcvSURF_R8G8B8A8,
++ gcvSURF_G8R8G8B8,
++ gcvSURF_R8G8B8G8,
++ gcvSURF_X2R10G10B10,
++ gcvSURF_A2R10G10B10,
++ gcvSURF_X12R12G12B12,
++ gcvSURF_A12R12G12B12,
++ gcvSURF_X16R16G16B16,
++ gcvSURF_A16R16G16B16,
++ gcvSURF_A32R32G32B32,
++ gcvSURF_R8G8B8X8,
++ gcvSURF_R5G5B5X1,
++ gcvSURF_R4G4B4X4,
++ gcvSURF_X16R16G16B16_2_A8R8G8B8,
++ gcvSURF_A16R16G16B16_2_A8R8G8B8,
++ gcvSURF_A32R32G32B32_2_G32R32F,
++ gcvSURF_A32R32G32B32_4_A8R8G8B8,
++
++ /* BGR formats. */
++ gcvSURF_A4B4G4R4 = 300,
++ gcvSURF_A1B5G5R5,
++ gcvSURF_B5G6R5,
++ gcvSURF_B8G8R8,
++ gcvSURF_B16G16R16,
++ gcvSURF_X8B8G8R8,
++ gcvSURF_A8B8G8R8,
++ gcvSURF_A2B10G10R10,
++ gcvSURF_X16B16G16R16,
++ gcvSURF_A16B16G16R16,
++ gcvSURF_B32G32R32,
++ gcvSURF_X32B32G32R32,
++ gcvSURF_A32B32G32R32,
++ gcvSURF_B4G4R4A4,
++ gcvSURF_B5G5R5A1,
++ gcvSURF_B8G8R8X8,
++ gcvSURF_B8G8R8A8,
++ gcvSURF_X4B4G4R4,
++ gcvSURF_X1B5G5R5,
++ gcvSURF_B4G4R4X4,
++ gcvSURF_B5G5R5X1,
++ gcvSURF_X2B10G10R10,
++ gcvSURF_B8G8R8_SNORM,
++ gcvSURF_X8B8G8R8_SNORM,
++ gcvSURF_A8B8G8R8_SNORM,
++ gcvSURF_A8B12G12R12_2_A8R8G8B8,
++
++ /* Compressed formats. */
++ gcvSURF_DXT1 = 400,
++ gcvSURF_DXT2,
++ gcvSURF_DXT3,
++ gcvSURF_DXT4,
++ gcvSURF_DXT5,
++ gcvSURF_CXV8U8,
++ gcvSURF_ETC1,
++ gcvSURF_R11_EAC,
++ gcvSURF_SIGNED_R11_EAC,
++ gcvSURF_RG11_EAC,
++ gcvSURF_SIGNED_RG11_EAC,
++ gcvSURF_RGB8_ETC2,
++ gcvSURF_SRGB8_ETC2,
++ gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_RGBA8_ETC2_EAC,
++ gcvSURF_SRGB8_ALPHA8_ETC2_EAC,
++
++ /* YUV formats. */
++ gcvSURF_YUY2 = 500,
++ gcvSURF_UYVY,
++ gcvSURF_YV12,
++ gcvSURF_I420,
++ gcvSURF_NV12,
++ gcvSURF_NV21,
++ gcvSURF_NV16,
++ gcvSURF_NV61,
++ gcvSURF_YVYU,
++ gcvSURF_VYUY,
++
++ /* Depth formats. */
++ gcvSURF_D16 = 600,
++ gcvSURF_D24S8,
++ gcvSURF_D32,
++ gcvSURF_D24X8,
++ gcvSURF_D32F,
++ gcvSURF_S8D32F,
++ gcvSURF_S8D32F_1_G32R32F,
++ gcvSURF_S8D32F_2_A8R8G8B8,
++ gcvSURF_D24S8_1_A8R8G8B8,
++ gcvSURF_S8,
++
++ /* Alpha formats. */
++ gcvSURF_A4 = 700,
++ gcvSURF_A8,
++ gcvSURF_A12,
++ gcvSURF_A16,
++ gcvSURF_A32,
++ gcvSURF_A1,
++
++ /* Luminance formats. */
++ gcvSURF_L4 = 800,
++ gcvSURF_L8,
++ gcvSURF_L12,
++ gcvSURF_L16,
++ gcvSURF_L32,
++ gcvSURF_L1,
++
++ /* Alpha/Luminance formats. */
++ gcvSURF_A4L4 = 900,
++ gcvSURF_A2L6,
++ gcvSURF_A8L8,
++ gcvSURF_A4L12,
++ gcvSURF_A12L12,
++ gcvSURF_A16L16,
++
++ /* Bump formats. */
++ gcvSURF_L6V5U5 = 1000,
++ gcvSURF_V8U8,
++ gcvSURF_X8L8V8U8,
++ gcvSURF_Q8W8V8U8,
++ gcvSURF_A2W10V10U10,
++ gcvSURF_V16U16,
++ gcvSURF_Q16W16V16U16,
++
++ /* R/RG/RA formats. */
++ gcvSURF_R8 = 1100,
++ gcvSURF_X8R8,
++ gcvSURF_G8R8,
++ gcvSURF_X8G8R8,
++ gcvSURF_A8R8,
++ gcvSURF_R16,
++ gcvSURF_X16R16,
++ gcvSURF_G16R16,
++ gcvSURF_X16G16R16,
++ gcvSURF_A16R16,
++ gcvSURF_R32,
++ gcvSURF_X32R32,
++ gcvSURF_G32R32,
++ gcvSURF_X32G32R32,
++ gcvSURF_A32R32,
++ gcvSURF_RG16,
++ gcvSURF_R8_SNORM,
++ gcvSURF_G8R8_SNORM,
++
++ gcvSURF_R8_1_X8R8G8B8,
++ gcvSURF_G8R8_1_X8R8G8B8,
++
++ /* Floating point formats. */
++ gcvSURF_R16F = 1200,
++ gcvSURF_X16R16F,
++ gcvSURF_G16R16F,
++ gcvSURF_X16G16R16F,
++ gcvSURF_B16G16R16F,
++ gcvSURF_X16B16G16R16F,
++ gcvSURF_A16B16G16R16F,
++ gcvSURF_R32F,
++ gcvSURF_X32R32F,
++ gcvSURF_G32R32F,
++ gcvSURF_X32G32R32F,
++ gcvSURF_B32G32R32F,
++ gcvSURF_X32B32G32R32F,
++ gcvSURF_A32B32G32R32F,
++ gcvSURF_A16F,
++ gcvSURF_L16F,
++ gcvSURF_A16L16F,
++ gcvSURF_A16R16F,
++ gcvSURF_A32F,
++ gcvSURF_L32F,
++ gcvSURF_A32L32F,
++ gcvSURF_A32R32F,
++ gcvSURF_E5B9G9R9,
++ gcvSURF_B10G11R11F,
++
++ gcvSURF_X16B16G16R16F_2_A8R8G8B8,
++ gcvSURF_A16B16G16R16F_2_A8R8G8B8,
++ gcvSURF_G32R32F_2_A8R8G8B8,
++ gcvSURF_X32B32G32R32F_2_G32R32F,
++ gcvSURF_A32B32G32R32F_2_G32R32F,
++ gcvSURF_X32B32G32R32F_4_A8R8G8B8,
++ gcvSURF_A32B32G32R32F_4_A8R8G8B8,
++
++ gcvSURF_R16F_1_A4R4G4B4,
++ gcvSURF_G16R16F_1_A8R8G8B8,
++ gcvSURF_B16G16R16F_2_A8R8G8B8,
++
++ gcvSURF_R32F_1_A8R8G8B8,
++ gcvSURF_B32G32R32F_3_A8R8G8B8,
++
++ gcvSURF_B10G11R11F_1_A8R8G8B8,
++
++
++ /* sRGB format. */
++ gcvSURF_SBGR8 = 1400,
++ gcvSURF_A8_SBGR8,
++ gcvSURF_X8_SBGR8,
++
++ /* Integer formats. */
++ gcvSURF_R8I = 1500,
++ gcvSURF_R8UI,
++ gcvSURF_R16I,
++ gcvSURF_R16UI,
++ gcvSURF_R32I,
++ gcvSURF_R32UI,
++ gcvSURF_X8R8I,
++ gcvSURF_G8R8I,
++ gcvSURF_X8R8UI,
++ gcvSURF_G8R8UI,
++ gcvSURF_X16R16I,
++ gcvSURF_G16R16I,
++ gcvSURF_X16R16UI,
++ gcvSURF_G16R16UI,
++ gcvSURF_X32R32I,
++ gcvSURF_G32R32I,
++ gcvSURF_X32R32UI,
++ gcvSURF_G32R32UI,
++ gcvSURF_X8G8R8I,
++ gcvSURF_B8G8R8I,
++ gcvSURF_X8G8R8UI,
++ gcvSURF_B8G8R8UI,
++ gcvSURF_X16G16R16I,
++ gcvSURF_B16G16R16I,
++ gcvSURF_X16G16R16UI,
++ gcvSURF_B16G16R16UI,
++ gcvSURF_X32G32R32I,
++ gcvSURF_B32G32R32I,
++ gcvSURF_X32G32R32UI,
++ gcvSURF_B32G32R32UI,
++ gcvSURF_X8B8G8R8I,
++ gcvSURF_A8B8G8R8I,
++ gcvSURF_X8B8G8R8UI,
++ gcvSURF_A8B8G8R8UI,
++ gcvSURF_X16B16G16R16I,
++ gcvSURF_A16B16G16R16I,
++ gcvSURF_X16B16G16R16UI,
++ gcvSURF_A16B16G16R16UI,
++ gcvSURF_X32B32G32R32I,
++ gcvSURF_A32B32G32R32I,
++ gcvSURF_X32B32G32R32UI,
++ gcvSURF_A32B32G32R32UI,
++ gcvSURF_A2B10G10R10UI,
++ gcvSURF_G32R32I_2_A8R8G8B8,
++ gcvSURF_G32R32UI_2_A8R8G8B8,
++ gcvSURF_X16B16G16R16I_2_A8R8G8B8,
++ gcvSURF_A16B16G16R16I_2_A8R8G8B8,
++ gcvSURF_X16B16G16R16UI_2_A8R8G8B8,
++ gcvSURF_A16B16G16R16UI_2_A8R8G8B8,
++ gcvSURF_X32B32G32R32I_2_G32R32I,
++ gcvSURF_A32B32G32R32I_2_G32R32I,
++ gcvSURF_X32B32G32R32I_3_A8R8G8B8,
++ gcvSURF_A32B32G32R32I_4_A8R8G8B8,
++ gcvSURF_X32B32G32R32UI_2_G32R32UI,
++ gcvSURF_A32B32G32R32UI_2_G32R32UI,
++ gcvSURF_X32B32G32R32UI_3_A8R8G8B8,
++ gcvSURF_A32B32G32R32UI_4_A8R8G8B8,
++ gcvSURF_A2B10G10R10UI_1_A8R8G8B8,
++ gcvSURF_A8B8G8R8I_1_A8R8G8B8,
++ gcvSURF_A8B8G8R8UI_1_A8R8G8B8,
++ gcvSURF_R8I_1_A4R4G4B4,
++ gcvSURF_R8UI_1_A4R4G4B4,
++ gcvSURF_R16I_1_A4R4G4B4,
++ gcvSURF_R16UI_1_A4R4G4B4,
++ gcvSURF_R32I_1_A8R8G8B8,
++ gcvSURF_R32UI_1_A8R8G8B8,
++ gcvSURF_X8R8I_1_A4R4G4B4,
++ gcvSURF_X8R8UI_1_A4R4G4B4,
++ gcvSURF_G8R8I_1_A4R4G4B4,
++ gcvSURF_G8R8UI_1_A4R4G4B4,
++ gcvSURF_X16R16I_1_A4R4G4B4,
++ gcvSURF_X16R16UI_1_A4R4G4B4,
++ gcvSURF_G16R16I_1_A8R8G8B8,
++ gcvSURF_G16R16UI_1_A8R8G8B8,
++ gcvSURF_X32R32I_1_A8R8G8B8,
++ gcvSURF_X32R32UI_1_A8R8G8B8,
++ gcvSURF_X8G8R8I_1_A4R4G4B4,
++ gcvSURF_X8G8R8UI_1_A4R4G4B4,
++ gcvSURF_B8G8R8I_1_A8R8G8B8,
++ gcvSURF_B8G8R8UI_1_A8R8G8B8,
++ gcvSURF_B16G16R16I_2_A8R8G8B8,
++ gcvSURF_B16G16R16UI_2_A8R8G8B8,
++ gcvSURF_B32G32R32I_3_A8R8G8B8,
++ gcvSURF_B32G32R32UI_3_A8R8G8B8,
++
++ /* ASTC formats. */
++ gcvSURF_ASTC4x4 = 1600,
++ gcvSURF_ASTC5x4,
++ gcvSURF_ASTC5x5,
++ gcvSURF_ASTC6x5,
++ gcvSURF_ASTC6x6,
++ gcvSURF_ASTC8x5,
++ gcvSURF_ASTC8x6,
++ gcvSURF_ASTC8x8,
++ gcvSURF_ASTC10x5,
++ gcvSURF_ASTC10x6,
++ gcvSURF_ASTC10x8,
++ gcvSURF_ASTC10x10,
++ gcvSURF_ASTC12x10,
++ gcvSURF_ASTC12x12,
++ gcvSURF_ASTC4x4_SRGB,
++ gcvSURF_ASTC5x4_SRGB,
++ gcvSURF_ASTC5x5_SRGB,
++ gcvSURF_ASTC6x5_SRGB,
++ gcvSURF_ASTC6x6_SRGB,
++ gcvSURF_ASTC8x5_SRGB,
++ gcvSURF_ASTC8x6_SRGB,
++ gcvSURF_ASTC8x8_SRGB,
++ gcvSURF_ASTC10x5_SRGB,
++ gcvSURF_ASTC10x6_SRGB,
++ gcvSURF_ASTC10x8_SRGB,
++ gcvSURF_ASTC10x10_SRGB,
++ gcvSURF_ASTC12x10_SRGB,
++ gcvSURF_ASTC12x12_SRGB,
++
++ gcvSURF_FORMAT_COUNT
++}
++gceSURF_FORMAT;
++
++/* Format modifiers. */
++typedef enum _gceSURF_FORMAT_MODE
++{
++ gcvSURF_FORMAT_OCL = 0x80000000
++}
++gceSURF_FORMAT_MODE;
++
++/* Pixel swizzle modes. */
++typedef enum _gceSURF_SWIZZLE
++{
++ gcvSURF_NOSWIZZLE = 0,
++ gcvSURF_ARGB,
++ gcvSURF_ABGR,
++ gcvSURF_RGBA,
++ gcvSURF_BGRA
++}
++gceSURF_SWIZZLE;
++
++/* Transparency modes. */
++typedef enum _gceSURF_TRANSPARENCY
++{
++ /* Valid only for PE 1.0 */
++ gcvSURF_OPAQUE = 0,
++ gcvSURF_SOURCE_MATCH,
++ gcvSURF_SOURCE_MASK,
++ gcvSURF_PATTERN_MASK,
++}
++gceSURF_TRANSPARENCY;
++
++/* Surface Alignment. */
++typedef enum _gceSURF_ALIGNMENT
++{
++ gcvSURF_FOUR = 0,
++ gcvSURF_SIXTEEN,
++ gcvSURF_SUPER_TILED,
++ gcvSURF_SPLIT_TILED,
++ gcvSURF_SPLIT_SUPER_TILED
++}
++gceSURF_ALIGNMENT;
++
++/* Surface Addressing. */
++typedef enum _gceSURF_ADDRESSING
++{
++ gcvSURF_NO_STRIDE_TILED = 0,
++ gcvSURF_NO_STRIDE_LINEAR,
++ gcvSURF_STRIDE_TILED,
++ gcvSURF_STRIDE_LINEAR
++}
++gceSURF_ADDRESSING;
++
++/* Transparency modes. */
++typedef enum _gce2D_TRANSPARENCY
++{
++ /* Valid only for PE 2.0 */
++ gcv2D_OPAQUE = 0,
++ gcv2D_KEYED,
++ gcv2D_MASKED
++}
++gce2D_TRANSPARENCY;
++
++/* Mono packing modes. */
++typedef enum _gceSURF_MONOPACK
++{
++ gcvSURF_PACKED8 = 0,
++ gcvSURF_PACKED16,
++ gcvSURF_PACKED32,
++ gcvSURF_UNPACKED,
++}
++gceSURF_MONOPACK;
++
++/* Blending modes. */
++typedef enum _gceSURF_BLEND_MODE
++{
++ /* Porter-Duff blending modes. */
++ /* Fsrc Fdst */
++ gcvBLEND_CLEAR = 0, /* 0 0 */
++ gcvBLEND_SRC, /* 1 0 */
++ gcvBLEND_DST, /* 0 1 */
++ gcvBLEND_SRC_OVER_DST, /* 1 1 - Asrc */
++ gcvBLEND_DST_OVER_SRC, /* 1 - Adst 1 */
++ gcvBLEND_SRC_IN_DST, /* Adst 0 */
++ gcvBLEND_DST_IN_SRC, /* 0 Asrc */
++ gcvBLEND_SRC_OUT_DST, /* 1 - Adst 0 */
++ gcvBLEND_DST_OUT_SRC, /* 0 1 - Asrc */
++ gcvBLEND_SRC_ATOP_DST, /* Adst 1 - Asrc */
++ gcvBLEND_DST_ATOP_SRC, /* 1 - Adst Asrc */
++ gcvBLEND_SRC_XOR_DST, /* 1 - Adst 1 - Asrc */
++
++ /* Special blending modes. */
++ gcvBLEND_SET, /* DST = 1 */
++ gcvBLEND_SUB /* DST = DST * (1 - SRC) */
++}
++gceSURF_BLEND_MODE;
++
++/* Per-pixel alpha modes. */
++typedef enum _gceSURF_PIXEL_ALPHA_MODE
++{
++ gcvSURF_PIXEL_ALPHA_STRAIGHT = 0,
++ gcvSURF_PIXEL_ALPHA_INVERSED
++}
++gceSURF_PIXEL_ALPHA_MODE;
++
++/* Global alpha modes. */
++typedef enum _gceSURF_GLOBAL_ALPHA_MODE
++{
++ gcvSURF_GLOBAL_ALPHA_OFF = 0,
++ gcvSURF_GLOBAL_ALPHA_ON,
++ gcvSURF_GLOBAL_ALPHA_SCALE
++}
++gceSURF_GLOBAL_ALPHA_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gceSURF_PIXEL_COLOR_MODE
++{
++ gcvSURF_COLOR_STRAIGHT = 0,
++ gcvSURF_COLOR_MULTIPLY
++}
++gceSURF_PIXEL_COLOR_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_COLOR_MULTIPLY_ENABLE
++}
++gce2D_PIXEL_COLOR_MULTIPLY_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR
++}
++gce2D_GLOBAL_COLOR_MULTIPLY_MODE;
++
++/* Alpha blending factor modes. */
++typedef enum _gceSURF_BLEND_FACTOR_MODE
++{
++ gcvSURF_BLEND_ZERO = 0,
++ gcvSURF_BLEND_ONE,
++ gcvSURF_BLEND_STRAIGHT,
++ gcvSURF_BLEND_INVERSED,
++ gcvSURF_BLEND_COLOR,
++ gcvSURF_BLEND_COLOR_INVERSED,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED,
++ gcvSURF_BLEND_STRAIGHT_NO_CROSS,
++ gcvSURF_BLEND_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_COLOR_NO_CROSS,
++ gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS
++}
++gceSURF_BLEND_FACTOR_MODE;
++
++/* Alpha blending porter duff rules. */
++typedef enum _gce2D_PORTER_DUFF_RULE
++{
++ gcvPD_CLEAR = 0,
++ gcvPD_SRC,
++ gcvPD_SRC_OVER,
++ gcvPD_DST_OVER,
++ gcvPD_SRC_IN,
++ gcvPD_DST_IN,
++ gcvPD_SRC_OUT,
++ gcvPD_DST_OUT,
++ gcvPD_SRC_ATOP,
++ gcvPD_DST_ATOP,
++ gcvPD_ADD,
++ gcvPD_XOR,
++ gcvPD_DST
++}
++gce2D_PORTER_DUFF_RULE;
++
++/* Alpha blending factor modes. */
++typedef enum _gce2D_YUV_COLOR_MODE
++{
++ gcv2D_YUV_601= 0,
++ gcv2D_YUV_709,
++ gcv2D_YUV_USER_DEFINED,
++ gcv2D_YUV_USER_DEFINED_CLAMP,
++
++ /* Default setting is for src. gcv2D_YUV_DST
++ can be ORed to set dst.
++ */
++ gcv2D_YUV_DST = 0x80000000,
++}
++gce2D_YUV_COLOR_MODE;
++
++typedef enum _gce2D_COMMAND
++{
++ gcv2D_CLEAR = 0,
++ gcv2D_LINE,
++ gcv2D_BLT,
++ gcv2D_STRETCH,
++ gcv2D_HOR_FILTER,
++ gcv2D_VER_FILTER,
++ gcv2D_MULTI_SOURCE_BLT,
++ gcv2D_FILTER_BLT,
++}
++gce2D_COMMAND;
++
++typedef enum _gce2D_TILE_STATUS_CONFIG
++{
++ gcv2D_TSC_DISABLE = 0,
++ gcv2D_TSC_ENABLE = 0x00000001,
++ gcv2D_TSC_COMPRESSED = 0x00000002,
++ gcv2D_TSC_DOWN_SAMPLER = 0x00000004,
++ gcv2D_TSC_2D_COMPRESSED = 0x00000008,
++ gcv2D_TSC_TPC_COMPRESSED = 0x00000010,
++}
++gce2D_TILE_STATUS_CONFIG;
++
++typedef enum _gce2D_QUERY
++{
++ gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0,
++ gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN,
++ gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN,
++ gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN,
++}
++gce2D_QUERY;
++
++typedef enum _gce2D_SUPER_TILE_VERSION
++{
++ gcv2D_SUPER_TILE_VERSION_V1 = 1,
++ gcv2D_SUPER_TILE_VERSION_V2 = 2,
++ gcv2D_SUPER_TILE_VERSION_V3 = 3,
++}
++gce2D_SUPER_TILE_VERSION;
++
++typedef enum _gce2D_STATE
++{
++ gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1,
++ gcv2D_STATE_SUPER_TILE_VERSION,
++ gcv2D_STATE_EN_GAMMA,
++ gcv2D_STATE_DE_GAMMA,
++ gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT,
++ gcv2D_STATE_PROFILE_ENABLE,
++ gcv2D_STATE_XRGB_ENABLE,
++
++ gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001,
++ gcv2D_STATE_ARRAY_DE_GAMMA,
++ gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB,
++ gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV,
++}
++gce2D_STATE;
++
++typedef enum _gce2D_STATE_PROFILE
++{
++ gcv2D_STATE_PROFILE_NONE = 0x0,
++ gcv2D_STATE_PROFILE_COMMAND = 0x1,
++ gcv2D_STATE_PROFILE_SURFACE = 0x2,
++ gcv2D_STATE_PROFILE_ALL = 0xFFFF,
++}
++gce2D_STATE_PROFILE;
++
++/* Texture object types */
++typedef enum _gceTEXTURE_TYPE
++{
++ gcvTEXTURE_UNKNOWN = 0,
++ gcvTEXTURE_1D,
++ gcvTEXTURE_2D,
++ gcvTEXTURE_3D,
++ gcvTEXTURE_CUBEMAP,
++ gcvTEXTURE_1D_ARRAY,
++ gcvTEXTURE_2D_ARRAY,
++ gcvTEXTURE_EXTERNAL
++}
++gceTEXTURE_TYPE;
++
++#if gcdENABLE_3D
++/* Texture functions. */
++typedef enum _gceTEXTURE_FUNCTION
++{
++ gcvTEXTURE_DUMMY = 0,
++ gcvTEXTURE_REPLACE = 0,
++ gcvTEXTURE_MODULATE,
++ gcvTEXTURE_ADD,
++ gcvTEXTURE_ADD_SIGNED,
++ gcvTEXTURE_INTERPOLATE,
++ gcvTEXTURE_SUBTRACT,
++ gcvTEXTURE_DOT3
++}
++gceTEXTURE_FUNCTION;
++
++/* Texture sources. */
++typedef enum _gceTEXTURE_SOURCE
++{
++ gcvCOLOR_FROM_TEXTURE = 0,
++ gcvCOLOR_FROM_CONSTANT_COLOR,
++ gcvCOLOR_FROM_PRIMARY_COLOR,
++ gcvCOLOR_FROM_PREVIOUS_COLOR
++}
++gceTEXTURE_SOURCE;
++
++/* Texture source channels. */
++typedef enum _gceTEXTURE_CHANNEL
++{
++ gcvFROM_COLOR = 0,
++ gcvFROM_ONE_MINUS_COLOR,
++ gcvFROM_ALPHA,
++ gcvFROM_ONE_MINUS_ALPHA
++}
++gceTEXTURE_CHANNEL;
++#endif /* gcdENABLE_3D */
++
++/* Filter types. */
++typedef enum _gceFILTER_TYPE
++{
++ gcvFILTER_SYNC = 0,
++ gcvFILTER_BLUR,
++ gcvFILTER_USER
++}
++gceFILTER_TYPE;
++
++/* Filter pass types. */
++typedef enum _gceFILTER_PASS_TYPE
++{
++ gcvFILTER_HOR_PASS = 0,
++ gcvFILTER_VER_PASS
++}
++gceFILTER_PASS_TYPE;
++
++/* Endian hints. */
++typedef enum _gceENDIAN_HINT
++{
++ gcvENDIAN_NO_SWAP = 0,
++ gcvENDIAN_SWAP_WORD,
++ gcvENDIAN_SWAP_DWORD
++}
++gceENDIAN_HINT;
++
++/* Tiling modes. */
++typedef enum _gceTILING
++{
++ gcvINVALIDTILED = 0x0, /* Invalid tiling */
++ /* Tiling basic modes enum'ed in power of 2. */
++ gcvLINEAR = 0x1, /* No tiling. */
++ gcvTILED = 0x2, /* 4x4 tiling. */
++ gcvSUPERTILED = 0x4, /* 64x64 tiling. */
++ gcvMINORTILED = 0x8, /* 2x2 tiling. */
++
++ /* Tiling special layouts. */
++ gcvTILING_SPLIT_BUFFER = 0x100,
++
++ /* Tiling combination layouts. */
++ gcvMULTI_TILED = gcvTILED
++ | gcvTILING_SPLIT_BUFFER,
++
++ gcvMULTI_SUPERTILED = gcvSUPERTILED
++ | gcvTILING_SPLIT_BUFFER,
++}
++gceTILING;
++
++/* 2D pattern type. */
++typedef enum _gce2D_PATTERN
++{
++ gcv2D_PATTERN_SOLID = 0,
++ gcv2D_PATTERN_MONO,
++ gcv2D_PATTERN_COLOR,
++ gcv2D_PATTERN_INVALID
++}
++gce2D_PATTERN;
++
++/* 2D source type. */
++typedef enum _gce2D_SOURCE
++{
++ gcv2D_SOURCE_MASKED = 0,
++ gcv2D_SOURCE_MONO,
++ gcv2D_SOURCE_COLOR,
++ gcv2D_SOURCE_INVALID
++}
++gce2D_SOURCE;
++
++/* Pipes. */
++typedef enum _gcePIPE_SELECT
++{
++ gcvPIPE_INVALID = ~0,
++ gcvPIPE_3D = 0,
++ gcvPIPE_2D
++}
++gcePIPE_SELECT;
++
++/* Hardware type. */
++typedef enum _gceHARDWARE_TYPE
++{
++ gcvHARDWARE_INVALID = 0x00,
++ gcvHARDWARE_3D = 0x01,
++ gcvHARDWARE_2D = 0x02,
++ gcvHARDWARE_VG = 0x04,
++#if gcdMULTI_GPU_AFFINITY
++ gcvHARDWARE_OCL = 0x05,
++#endif
++ gcvHARDWARE_3D2D = gcvHARDWARE_3D | gcvHARDWARE_2D
++}
++gceHARDWARE_TYPE;
++
++#define gcdCHIP_COUNT 3
++
++typedef enum _gceMMU_MODE
++{
++ gcvMMU_MODE_1K,
++ gcvMMU_MODE_4K,
++} gceMMU_MODE;
++
++/* User signal command codes. */
++typedef enum _gceUSER_SIGNAL_COMMAND_CODES
++{
++ gcvUSER_SIGNAL_CREATE,
++ gcvUSER_SIGNAL_DESTROY,
++ gcvUSER_SIGNAL_SIGNAL,
++ gcvUSER_SIGNAL_WAIT,
++ gcvUSER_SIGNAL_MAP,
++ gcvUSER_SIGNAL_UNMAP,
++}
++gceUSER_SIGNAL_COMMAND_CODES;
++
++/* Sync point command codes. */
++typedef enum _gceSYNC_POINT_COMMAND_CODES
++{
++ gcvSYNC_POINT_CREATE,
++ gcvSYNC_POINT_DESTROY,
++ gcvSYNC_POINT_SIGNAL,
++}
++gceSYNC_POINT_COMMAND_CODES;
++
++/* Shared buffer command codes. */
++typedef enum _gceSHBUF_COMMAND_CODES
++{
++ gcvSHBUF_CREATE,
++ gcvSHBUF_DESTROY,
++ gcvSHBUF_MAP,
++ gcvSHBUF_WRITE,
++ gcvSHBUF_READ,
++}
++gceSHBUF_COMMAND_CODES;
++
++/* Event locations. */
++typedef enum _gceKERNEL_WHERE
++{
++ gcvKERNEL_COMMAND,
++ gcvKERNEL_VERTEX,
++ gcvKERNEL_TRIANGLE,
++ gcvKERNEL_TEXTURE,
++ gcvKERNEL_PIXEL,
++}
++gceKERNEL_WHERE;
++
++#if gcdENABLE_VG
++/* Hardware blocks. */
++typedef enum _gceBLOCK
++{
++ gcvBLOCK_COMMAND,
++ gcvBLOCK_TESSELLATOR,
++ gcvBLOCK_TESSELLATOR2,
++ gcvBLOCK_TESSELLATOR3,
++ gcvBLOCK_RASTER,
++ gcvBLOCK_VG,
++ gcvBLOCK_VG2,
++ gcvBLOCK_VG3,
++ gcvBLOCK_PIXEL,
++
++ /* Number of defined blocks. */
++ gcvBLOCK_COUNT
++}
++gceBLOCK;
++#endif
++
++/* gcdDUMP message type. */
++typedef enum _gceDEBUG_MESSAGE_TYPE
++{
++ gcvMESSAGE_TEXT,
++ gcvMESSAGE_DUMP
++}
++gceDEBUG_MESSAGE_TYPE;
++
++/* Shading format. */
++typedef enum _gceSHADING
++{
++ gcvSHADING_SMOOTH,
++ gcvSHADING_FLAT_D3D,
++ gcvSHADING_FLAT_OPENGL,
++}
++gceSHADING;
++
++/* Culling modes. */
++typedef enum _gceCULL
++{
++ gcvCULL_NONE,
++ gcvCULL_CCW,
++ gcvCULL_CW,
++}
++gceCULL;
++
++/* Fill modes. */
++typedef enum _gceFILL
++{
++ gcvFILL_POINT,
++ gcvFILL_WIRE_FRAME,
++ gcvFILL_SOLID,
++}
++gceFILL;
++
++/* Compare modes. */
++typedef enum _gceCOMPARE
++{
++ gcvCOMPARE_INVALID = 0,
++ gcvCOMPARE_NEVER,
++ gcvCOMPARE_NOT_EQUAL,
++ gcvCOMPARE_LESS,
++ gcvCOMPARE_LESS_OR_EQUAL,
++ gcvCOMPARE_EQUAL,
++ gcvCOMPARE_GREATER,
++ gcvCOMPARE_GREATER_OR_EQUAL,
++ gcvCOMPARE_ALWAYS,
++}
++gceCOMPARE;
++
++/* Stencil modes. */
++typedef enum _gceSTENCIL_MODE
++{
++ gcvSTENCIL_NONE,
++ gcvSTENCIL_SINGLE_SIDED,
++ gcvSTENCIL_DOUBLE_SIDED,
++}
++gceSTENCIL_MODE;
++
++/* Stencil operations. */
++typedef enum _gceSTENCIL_OPERATION
++{
++ gcvSTENCIL_KEEP,
++ gcvSTENCIL_REPLACE,
++ gcvSTENCIL_ZERO,
++ gcvSTENCIL_INVERT,
++ gcvSTENCIL_INCREMENT,
++ gcvSTENCIL_DECREMENT,
++ gcvSTENCIL_INCREMENT_SATURATE,
++ gcvSTENCIL_DECREMENT_SATURATE,
++ gcvSTENCIL_OPERATION_INVALID = -1
++}
++gceSTENCIL_OPERATION;
++
++/* Stencil selection. */
++typedef enum _gceSTENCIL_WHERE
++{
++ gcvSTENCIL_FRONT,
++ gcvSTENCIL_BACK,
++}
++gceSTENCIL_WHERE;
++
++/* Texture addressing selection. */
++typedef enum _gceTEXTURE_WHICH
++{
++ gcvTEXTURE_S,
++ gcvTEXTURE_T,
++ gcvTEXTURE_R,
++}
++gceTEXTURE_WHICH;
++
++/* Texture addressing modes. */
++typedef enum _gceTEXTURE_ADDRESSING
++{
++ gcvTEXTURE_INVALID = 0,
++ gcvTEXTURE_CLAMP,
++ gcvTEXTURE_WRAP,
++ gcvTEXTURE_MIRROR,
++ gcvTEXTURE_BORDER,
++ gcvTEXTURE_MIRROR_ONCE,
++}
++gceTEXTURE_ADDRESSING;
++
++/* Texture filters. */
++typedef enum _gceTEXTURE_FILTER
++{
++ gcvTEXTURE_NONE,
++ gcvTEXTURE_POINT,
++ gcvTEXTURE_LINEAR,
++ gcvTEXTURE_ANISOTROPIC,
++}
++gceTEXTURE_FILTER;
++
++typedef enum _gceTEXTURE_COMPONENT
++{
++ gcvTEXTURE_COMPONENT_R,
++ gcvTEXTURE_COMPONENT_G,
++ gcvTEXTURE_COMPONENT_B,
++ gcvTEXTURE_COMPONENT_A,
++
++ gcvTEXTURE_COMPONENT_NUM,
++} gceTEXTURE_COMPONENT;
++
++/* Texture swizzle modes. */
++typedef enum _gceTEXTURE_SWIZZLE
++{
++ gcvTEXTURE_SWIZZLE_R = 0,
++ gcvTEXTURE_SWIZZLE_G,
++ gcvTEXTURE_SWIZZLE_B,
++ gcvTEXTURE_SWIZZLE_A,
++ gcvTEXTURE_SWIZZLE_0,
++ gcvTEXTURE_SWIZZLE_1,
++
++ gcvTEXTURE_SWIZZLE_INVALID,
++} gceTEXTURE_SWIZZLE;
++
++typedef enum _gceTEXTURE_COMPARE_MODE
++{
++ gcvTEXTURE_COMPARE_MODE_INVALID = 0,
++ gcvTEXTURE_COMPARE_MODE_NONE,
++ gcvTEXTURE_COMPARE_MODE_REF,
++} gceTEXTURE_COMPARE_MODE;
++
++/* Pixel output swizzle modes. */
++typedef enum _gcePIXEL_SWIZZLE
++{
++ gcvPIXEL_SWIZZLE_R = gcvTEXTURE_SWIZZLE_R,
++ gcvPIXEL_SWIZZLE_G = gcvTEXTURE_SWIZZLE_G,
++ gcvPIXEL_SWIZZLE_B = gcvTEXTURE_SWIZZLE_B,
++ gcvPIXEL_SWIZZLE_A = gcvTEXTURE_SWIZZLE_A,
++
++ gcvPIXEL_SWIZZLE_INVALID,
++} gcePIXEL_SWIZZLE;
++
++/* Primitive types. */
++typedef enum _gcePRIMITIVE
++{
++ gcvPRIMITIVE_POINT_LIST,
++ gcvPRIMITIVE_LINE_LIST,
++ gcvPRIMITIVE_LINE_STRIP,
++ gcvPRIMITIVE_LINE_LOOP,
++ gcvPRIMITIVE_TRIANGLE_LIST,
++ gcvPRIMITIVE_TRIANGLE_STRIP,
++ gcvPRIMITIVE_TRIANGLE_FAN,
++ gcvPRIMITIVE_RECTANGLE,
++}
++gcePRIMITIVE;
++
++/* Index types. */
++typedef enum _gceINDEX_TYPE
++{
++ gcvINDEX_8,
++ gcvINDEX_16,
++ gcvINDEX_32,
++}
++gceINDEX_TYPE;
++
++/* Multi GPU rendering modes. */
++typedef enum _gceMULTI_GPU_RENDERING_MODE
++{
++ gcvMULTI_GPU_RENDERING_MODE_OFF,
++ gcvMULTI_GPU_RENDERING_MODE_SPLIT_WIDTH,
++ gcvMULTI_GPU_RENDERING_MODE_SPLIT_HEIGHT,
++ gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_64x64,
++ gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x64,
++ gcvMULTI_GPU_RENDERING_MODE_INTERLEAVED_128x128
++}
++gceMULTI_GPU_RENDERING_MODE;
++
++typedef enum _gceCORE_3D_MASK
++{
++ gcvCORE_3D_0_MASK = (1 << 0),
++ gcvCORE_3D_1_MASK = (1 << 1),
++
++ gcvCORE_3D_ALL_MASK = (0xFFFF)
++}
++gceCORE_3D_MASK;
++
++typedef enum _gceCORE_3D_ID
++{
++ gcvCORE_3D_0_ID = 0,
++ gcvCORE_3D_1_ID = 1,
++
++ gcvCORE_3D_ID_INVALID = ~0UL
++}
++gceCORE_3D_ID;
++
++typedef enum _gceMULTI_GPU_MODE
++{
++ gcvMULTI_GPU_MODE_COMBINED = 0,
++ gcvMULTI_GPU_MODE_INDEPENDENT = 1
++}
++gceMULTI_GPU_MODE;
++
++typedef enum _gceMACHINECODE
++{
++ gcvMACHINECODE_ANTUTU0 = 0x0,
++
++ gcvMACHINECODE_GLB27_RELEASE_0,
++
++ gcvMACHINECODE_GLB25_RELEASE_0,
++ gcvMACHINECODE_GLB25_RELEASE_1,
++ gcvMACHINECODE_GLB25_RELEASE_2,
++
++ /* keep it as the last enum */
++ gcvMACHINECODE_COUNT
++}
++gceMACHINECODE;
++
++typedef enum _gceUNIFORMCVT
++{
++ gcvUNIFORMCVT_NONE = 0,
++ gcvUNIFORMCVT_TO_BOOL,
++ gcvUNIFORMCVT_TO_FLOAT,
++} gceUNIFORMCVT;
++
++typedef enum _gceHAL_ARG_VERSION
++{
++ gcvHAL_ARG_VERSION_V1 = 0x0,
++}
++gceHAL_ARG_VERSION;
++
++
++/*
++* A requirement bit set to 1 means the requirement is mandatory; 0 means the
++* requirement can be ignored.
++*/
++#define gcvALLOC_FLAG_CONTIGUOUS_BIT 0
++#define gcvALLOC_FLAG_CACHEABLE_BIT 1
++#define gcvALLOC_FLAG_SECURITY_BIT 2
++#define gcvALLOC_FLAG_NON_CONTIGUOUS_BIT 3
++#define gcvALLOC_FLAG_MEMLIMIT_BIT 4
++
++/* No special needs. */
++#define gcvALLOC_FLAG_NONE (0)
++/* Physical contiguous. */
++#define gcvALLOC_FLAG_CONTIGUOUS (1 << gcvALLOC_FLAG_CONTIGUOUS_BIT)
++/* Can be remapped as cacheable. */
++#define gcvALLOC_FLAG_CACHEABLE (1 << gcvALLOC_FLAG_CACHEABLE_BIT)
++/* Secure buffer. */
++#define gcvALLOC_FLAG_SECURITY (1 << gcvALLOC_FLAG_SECURITY_BIT)
++/* Physical non contiguous. */
++#define gcvALLOC_FLAG_NON_CONTIGUOUS (1 << gcvALLOC_FLAG_NON_CONTIGUOUS_BIT)
++#define gcvALLOC_FLAG_MEMLIMIT (1 << gcvALLOC_FLAG_MEMLIMIT_BIT)
++
++/* GL_VIV internal usage */
++#ifndef GL_MAP_BUFFER_OBJ_VIV
++#define GL_MAP_BUFFER_OBJ_VIV 0x10000
++#endif
++
++/* Command buffer usage. */
++#define gcvCOMMAND_2D (1 << 0)
++#define gcvCOMMAND_3D (1 << 1)
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckCONTEXT * gckCONTEXT;
++typedef struct _gcoCMDBUF * gcoCMDBUF;
++
++typedef struct _gcsSTATE_DELTA * gcsSTATE_DELTA_PTR;
++typedef struct _gcsQUEUE * gcsQUEUE_PTR;
++typedef struct _gcoQUEUE * gcoQUEUE;
++typedef struct _gcsHAL_INTERFACE * gcsHAL_INTERFACE_PTR;
++typedef struct _gcs2D_PROFILE * gcs2D_PROFILE_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVGHARDWARE * gcoVGHARDWARE;
++typedef struct _gcoVGBUFFER * gcoVGBUFFER;
++typedef struct _gckVGHARDWARE * gckVGHARDWARE;
++typedef struct _gcsVGCONTEXT * gcsVGCONTEXT_PTR;
++typedef struct _gcsVGCONTEXT_MAP * gcsVGCONTEXT_MAP_PTR;
++typedef struct _gcsVGCMDQUEUE * gcsVGCMDQUEUE_PTR;
++typedef struct _gcsTASK_MASTER_TABLE * gcsTASK_MASTER_TABLE_PTR;
++typedef struct _gckVGKERNEL * gckVGKERNEL;
++typedef void * gctTHREAD;
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_enum_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,2859 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_h_
++#define __gc_hal_h_
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++#include "gc_hal_profiler.h"
++#include "gc_hal_driver.h"
++#if gcdENABLE_3D
++#include "gc_hal_statistics.h"
++#endif
++
++#if gcdSECURITY
++#include "gc_hal_security_interface.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* Alignment Macros *******************************
++\******************************************************************************/
++
++/* Alignment with a non-power of two value. */
++#define gcmALIGN_NP2(n, align) \
++( \
++ ((n) + (align) - 1) - (((n) + (align) - 1) % (align)) \
++)
++
++/* Alignment with a power of two value. */
++#define gcmALIGN(n, align) \
++( \
++ ((n) + ((align) - 1)) & ~((align) - 1) \
++)
++
++#define gcmALIGN_BASE(n, align) \
++( \
++ ((n) & ~((align) - 1)) \
++)
++
++/******************************************************************************\
++***************************** Element Count Macro *****************************
++\******************************************************************************/
++
++#define gcmSIZEOF(a) \
++( \
++ (gctSIZE_T) (sizeof(a)) \
++)
++
++#define gcmCOUNTOF(a) \
++( \
++ sizeof(a) / sizeof(a[0]) \
++)
++
++/******************************************************************************\
++********************************* Cast Macro **********************************
++\******************************************************************************/
++#define gcmNAME_TO_PTR(na) \
++ gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmPTR_TO_NAME(ptr) \
++ gckKERNEL_AllocateNameFromPointer(kernel, ptr)
++
++#define gcmRELEASE_NAME(na) \
++ gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (gctUINTPTR_T) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (gctUINTPTR_T) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (gctUINTPTR_T) (u)\
++)
++
++#define gcmUINT64_TO_TYPE(u, t) \
++( \
++ (t) (gctUINTPTR_T) (u)\
++)
++
++/******************************************************************************\
++******************************** Useful Macro *********************************
++\******************************************************************************/
++
++#define gcvINVALID_ADDRESS ~0U
++
++#define gcmGET_PRE_ROTATION(rotate) \
++ ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)))
++
++#define gcmGET_POST_ROTATION(rotate) \
++ ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))
++
++/******************************************************************************\
++******************************** gcsOBJECT Object *******************************
++\******************************************************************************/
++
++/* Type of objects. */
++typedef enum _gceOBJECT_TYPE
++{
++ gcvOBJ_UNKNOWN = 0,
++ gcvOBJ_2D = gcmCC('2','D',' ',' '),
++ gcvOBJ_3D = gcmCC('3','D',' ',' '),
++ gcvOBJ_ATTRIBUTE = gcmCC('A','T','T','R'),
++ gcvOBJ_BRUSHCACHE = gcmCC('B','R','U','$'),
++ gcvOBJ_BRUSHNODE = gcmCC('B','R','U','n'),
++ gcvOBJ_BRUSH = gcmCC('B','R','U','o'),
++ gcvOBJ_BUFFER = gcmCC('B','U','F','R'),
++ gcvOBJ_COMMAND = gcmCC('C','M','D',' '),
++ gcvOBJ_COMMANDBUFFER = gcmCC('C','M','D','B'),
++ gcvOBJ_CONTEXT = gcmCC('C','T','X','T'),
++ gcvOBJ_DEVICE = gcmCC('D','E','V',' '),
++ gcvOBJ_DUMP = gcmCC('D','U','M','P'),
++ gcvOBJ_EVENT = gcmCC('E','V','N','T'),
++ gcvOBJ_FUNCTION = gcmCC('F','U','N','C'),
++ gcvOBJ_HAL = gcmCC('H','A','L',' '),
++ gcvOBJ_HARDWARE = gcmCC('H','A','R','D'),
++ gcvOBJ_HEAP = gcmCC('H','E','A','P'),
++ gcvOBJ_INDEX = gcmCC('I','N','D','X'),
++ gcvOBJ_INTERRUPT = gcmCC('I','N','T','R'),
++ gcvOBJ_KERNEL = gcmCC('K','E','R','N'),
++ gcvOBJ_KERNEL_FUNCTION = gcmCC('K','F','C','N'),
++ gcvOBJ_MEMORYBUFFER = gcmCC('M','E','M','B'),
++ gcvOBJ_MMU = gcmCC('M','M','U',' '),
++ gcvOBJ_OS = gcmCC('O','S',' ',' '),
++ gcvOBJ_OUTPUT = gcmCC('O','U','T','P'),
++ gcvOBJ_PAINT = gcmCC('P','N','T',' '),
++ gcvOBJ_PATH = gcmCC('P','A','T','H'),
++ gcvOBJ_QUEUE = gcmCC('Q','U','E',' '),
++ gcvOBJ_SAMPLER = gcmCC('S','A','M','P'),
++ gcvOBJ_SHADER = gcmCC('S','H','D','R'),
++ gcvOBJ_STREAM = gcmCC('S','T','R','M'),
++ gcvOBJ_SURF = gcmCC('S','U','R','F'),
++ gcvOBJ_TEXTURE = gcmCC('T','X','T','R'),
++ gcvOBJ_UNIFORM = gcmCC('U','N','I','F'),
++ gcvOBJ_VARIABLE = gcmCC('V','A','R','I'),
++ gcvOBJ_VERTEX = gcmCC('V','R','T','X'),
++ gcvOBJ_VIDMEM = gcmCC('V','M','E','M'),
++ gcvOBJ_VG = gcmCC('V','G',' ',' '),
++ gcvOBJ_BUFOBJ = gcmCC('B','U','F','O'),
++ gcvOBJ_UNIFORM_BLOCK = gcmCC('U','B','L','K'),
++ gcvOBJ_CL = gcmCC('C','L',' ',' '),
++}
++gceOBJECT_TYPE;
++
++/* gcsOBJECT object definition. */
++typedef struct _gcsOBJECT
++{
++ /* Type of an object. */
++ gceOBJECT_TYPE type;
++}
++gcsOBJECT;
++
++typedef struct _gckHARDWARE * gckHARDWARE;
++
++/* CORE flags. */
++typedef enum _gceCORE
++{
++ gcvCORE_MAJOR = 0x0,
++ gcvCORE_2D = 0x1,
++ gcvCORE_VG = 0x2,
++#if gcdMULTI_GPU_AFFINITY
++ gcvCORE_OCL = 0x3,
++#endif
++}
++gceCORE;
++
++#if gcdMULTI_GPU_AFFINITY
++#define gcdMAX_GPU_COUNT 4
++#else
++#define gcdMAX_GPU_COUNT 3
++#endif
++
++#define gcdMAX_SURF_LAYER 4
++
++#define gcdMAX_DRAW_BUFFERS 4
++
++/*******************************************************************************
++**
++** gcmVERIFY_OBJECT
++**
++** Assert if an object is invalid or is not of the specified type. If the
++** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT
++** will be returned from the current function. In retail mode this macro
++** does nothing.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define _gcmVERIFY_OBJECT(prefix, obj, t) \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ }
++
++# define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t)
++# define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t)
++#else
++# define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************/
++/*VERIFY_OBJECT if special return expected*/
++/******************************************************************************/
++#ifndef EGL_API_ANDROID
++# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \
++ do \
++ { \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal)
++#else
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************\
++********************************** gckOS Object *********************************
++\******************************************************************************/
++
++/* Construct a new gckOS object. */
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ );
++
++/* Destroy an gckOS object. */
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckOS_QueryVideoMemory(
++ IN gckOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Wrapper for allocating memory. */
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Wrapper for freeing memory. */
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gctUINT32 * Gid,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Lock pages. */
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address,
++ IN gctPOINTER PageTable
++ );
++
++gceSTATUS
++gckOS_UnmapPages(
++ IN gckOS Os,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address
++ );
++
++/* Unlock pages. */
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Free paged memory. */
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate non-paged memory. */
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non-paged memory. */
++gceSTATUS
++gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get the number of bytes per page. */
++gceSTATUS
++gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding user logical address. */
++gceSTATUS
++gckOS_UserLogicalToPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ );
++
++/* Map physical memory. */
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap previously mapped physical memory. */
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get real physical address from descriptor. */
++gceSTATUS
++gckOS_PhysicalToPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Physical,
++ OUT gctUINT32 * PhysicalAddress
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_ReadRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gckOS_WriteRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++#endif
++
++/* Write data to a 32-bit memory location. */
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ );
++
++/* Map physical memory into the process space. */
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap physical memory from the specified process space. */
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ );
++
++/* Unmap physical memory from the process space. */
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Unmap user logical memory out of physical memory.
++ * This function is only supported in Linux currently.
++ */
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Create a new mutex. */
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Atomically exchange a pair of 32-bit values. */
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ );
++
++/* Atomically exchange a pair of pointers. */
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ );
++
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ );
++
++/* Get time in milliseconds. */
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ );
++
++/* Compare time value. */
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ );
++
++/* Memory barrier. */
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ );
++
++/* Map user pointer. */
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Unmap user pointer. */
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ );
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly, or whether it
++** has to be copied.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++**          gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Perform a memory copy. */
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ );
++
++/* Zero memory. */
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Device I/O control to the kernel HAL layer. */
++gceSTATUS
++gckOS_DeviceControl(
++ IN gckOS Os,
++ IN gctBOOL FromUser,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ OUT gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++gceSTATUS
++gckOS_GetCurrentProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ );
++
++#if gcdSECURITY
++gceSTATUS
++gckOS_OpenSecurityChannel(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 *Channel
++ );
++
++gceSTATUS
++gckOS_CloseSecurityChannel(
++ IN gctUINT32 Channel
++ );
++
++gceSTATUS
++gckOS_CallSecurityService(
++ IN gctUINT32 Channel,
++ IN gcsTA_INTERFACE * Interface
++ );
++
++gceSTATUS
++gckOS_InitSecurityChannel(
++ OUT gctUINT32 Channel
++ );
++
++gceSTATUS
++gckOS_AllocatePageArray(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageArrayLogical,
++ OUT gctPHYS_ADDR * PageArrayPhysical
++ );
++#endif
++
++/******************************************************************************\
++********************************** Signal Object *********************************
++\******************************************************************************/
++
++/* Create a signal. */
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
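++
++/*
++** Illustrative sketch (not part of the original vendor header): the typical
++** lifecycle of a kernel-side signal using the declarations above. The names
++** (os, signal) and the 1000 wait value are assumptions; the Wait argument is
++** assumed to be a timeout in milliseconds.
++**
++**     gctSIGNAL signal = gcvNULL;
++**
++**     gckOS_CreateSignal(os, gcvFALSE, &signal);      (auto-reset signal)
++**     ...
++**     gckOS_Signal(os, signal, gcvTRUE);              (producer sets it)
++**     gckOS_WaitSignal(os, signal, 1000);             (consumer waits for it)
++**     ...
++**     gckOS_DestroySignal(os, signal);
++*/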
++
++/* Map a user signal to the kernel space. */
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ );
++
++/* Unmap a user signal. */
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Map user memory. */
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/******************************************************************************\
++************************** Android Native Fence Sync ***************************
++\******************************************************************************/
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ );
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ );
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ );
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++#if !USE_NEW_LINUX_SIGNAL
++/* Create signal to be used in the user space. */
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ );
++
++/* Destroy signal used in the user space. */
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ );
++
++/* Wait for signal used in the user space. */
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ );
++
++/* Signal a signal used in the user space. */
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ );
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++/* Set a signal owned by a process. */
++#if defined(__QNXNTO__)
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctINT Recvid,
++ IN gctINT Coid
++ );
++#else
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ );
++#endif
++
++/******************************************************************************\
++** Cache Support
++*/
++
++gceSTATUS
++gckOS_CacheClean(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctUINT32 Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheFlush(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctUINT32 Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheInvalidate(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctUINT32 Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CPUPhysicalToGPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 CPUPhysical,
++ IN gctUINT32_PTR GPUPhysical
++ );
++
++gceSTATUS
++gckOS_GPUPhysicalToCPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 GPUPhysical,
++ IN gctUINT32_PTR CPUPhysical
++ );
++
++gceSTATUS
++gckOS_QueryOption(
++ IN gckOS Os,
++ IN gctCONST_STRING Option,
++ OUT gctUINT32 * Value
++ );
++
++/******************************************************************************\
++** Debug Support
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gckOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++/*******************************************************************************
++** Broadcast interface.
++*/
++
++typedef enum _gceBROADCAST
++{
++ /* GPU might be idle. */
++ gcvBROADCAST_GPU_IDLE,
++
++ /* A commit is going to happen. */
++ gcvBROADCAST_GPU_COMMIT,
++
++ /* GPU seems to be stuck. */
++ gcvBROADCAST_GPU_STUCK,
++
++ /* First process gets attached. */
++ gcvBROADCAST_FIRST_PROCESS,
++
++ /* Last process gets detached. */
++ gcvBROADCAST_LAST_PROCESS,
++
++ /* AXI bus error. */
++ gcvBROADCAST_AXI_BUS_ERROR,
++
++ /* Out of memory. */
++ gcvBROADCAST_OUT_OF_MEMORY,
++}
++gceBROADCAST;
++
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ );
++
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ );
++
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ );
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ );
++
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ );
++
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ );
++
++/*******************************************************************************
++** Semaphores.
++*/
++
++/* Create a new semaphore. */
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++#endif
++
++/* Delete a semaphore. */
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Acquire a semaphore. */
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Try to acquire a semaphore. */
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Release a semaphore. */
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
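++
++/*
++** Illustrative sketch (not part of the original vendor header): guarding a
++** short critical section with the semaphore API above. The names (os, sema)
++** are assumptions for the example and error handling is elided.
++**
++**     gctPOINTER sema = gcvNULL;
++**
++**     gckOS_CreateSemaphore(os, &sema);
++**
++**     gckOS_AcquireSemaphore(os, sema);
++**     ... touch the shared state ...
++**     gckOS_ReleaseSemaphore(os, sema);
++**
++**     gckOS_DestroySemaphore(os, sema);
++*/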
++
++/*******************************************************************************
++** Timer API.
++*/
++
++typedef void (*gctTIMERFUNCTION)(gctPOINTER);
++
++/* Create a timer. */
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ );
++
++/* Destroy a timer. */
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/* Start a timer. */
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ );
++
++/* Stop a timer. */
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
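++
++/*
++** Illustrative sketch (not part of the original vendor header): arming a timer
++** with the API above. The callback name, the context pointer and the 500 delay
++** value are assumptions for the example; the Delay argument is assumed to be
++** in milliseconds.
++**
++**     static void _OnTimeout(gctPOINTER Data)
++**     {
++**         ... Data is the pointer passed to gckOS_CreateTimer ...
++**     }
++**
++**     gctPOINTER timer = gcvNULL;
++**
++**     gckOS_CreateTimer(os, _OnTimeout, context, &timer);
++**     gckOS_StartTimer(os, timer, 500);
++**     ...
++**     gckOS_StopTimer(os, timer);
++**     gckOS_DestroyTimer(os, timer);
++*/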
++
++/******************************************************************************\
++********************************* gckHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gckHEAP * gckHEAP;
++
++/* Construct a new gckHEAP object. */
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ );
++
++/* Destroy a gckHEAP object. */
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++/* Profile the heap. */
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ );
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++
++
++/******************************************************************************\
++******************************** gckVIDMEM Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVIDMEM * gckVIDMEM;
++typedef struct _gckKERNEL * gckKERNEL;
++typedef struct _gckDB * gckDB;
++typedef struct _gckDVFS * gckDVFS;
++
++/* Construct a new gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T Banking,
++ OUT gckVIDMEM * Memory
++ );
++
++/* Destroy a gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ );
++
++/* Allocate linear memory. */
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctBOOL Specified,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckVIDMEM_Free(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/* Lock memory. */
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address,
++ OUT gctUINT32 * Gid,
++ OUT gctUINT64 * PhysicalAddress
++ );
++
++/* Unlock memory. */
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM_NODE Node,
++ IN gceSURF_TYPE Type,
++    IN OUT gctBOOL * Asynchronous
++ );
++
++/* Construct a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Destroy a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/******************************************************************************\
++******************************** gckKERNEL Object ******************************
++\******************************************************************************/
++
++struct _gcsHAL_INTERFACE;
++
++/* Notifications. */
++typedef enum _gceNOTIFY
++{
++ gcvNOTIFY_INTERRUPT,
++ gcvNOTIFY_COMMAND_QUEUE,
++}
++gceNOTIFY;
++
++/* Flush flags. */
++typedef enum _gceKERNEL_FLUSH
++{
++ gcvFLUSH_COLOR = 0x01,
++ gcvFLUSH_DEPTH = 0x02,
++ gcvFLUSH_TEXTURE = 0x04,
++ gcvFLUSH_2D = 0x08,
++#if gcdMULTI_GPU
++ gcvFLUSH_L2 = 0x10,
++#endif
++ gcvFLUSH_TILE_STATUS = 0x20,
++ gcvFLUSH_ALL = gcvFLUSH_COLOR
++ | gcvFLUSH_DEPTH
++ | gcvFLUSH_TEXTURE
++ | gcvFLUSH_2D
++#if gcdMULTI_GPU
++ | gcvFLUSH_L2
++#endif
++ | gcvFLUSH_TILE_STATUS
++}
++gceKERNEL_FLUSH;
++
++/* Construct a new gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ );
++
++/* Destroy a gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query Database requirements. */
++gceSTATUS
++ gckKERNEL_QueryDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Lookup the gckVIDMEM object for a pool. */
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Flag,
++ OUT gctUINT32 * Node
++ );
++
++gceSTATUS
++gckKERNEL_ReleaseVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 Handle
++ );
++
++gceSTATUS
++gckKERNEL_LockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ );
++
++gceSTATUS
++gckKERNEL_UnlockVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN OUT gcsHAL_INTERFACE * Interface
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++#ifdef __QNXNTO__
++/* Unmap video memory. */
++gceSTATUS
++gckKERNEL_UnmapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++/* Map memory. */
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Notification of events. */
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++    IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ );
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ );
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ );
++
++/* Set the timeout value for HW operations. */
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ );
++
++/* Get access to the user data. */
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Release resources associated with the user data connection. */
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Frequency
++ );
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ );
++
++/******************************************************************************\
++******************************* gckHARDWARE Object *****************************
++\******************************************************************************/
++
++/* Construct a new gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ );
++
++/* Destroy a gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ );
++
++/* Get hardware type. */
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Alignment,
++ OUT gctUINT32 * ReservedHead,
++ OUT gctUINT32 * ReservedTail
++ );
++
++/* Add a WAIT/LINK pair in the command queue. */
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctUINT32 * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctUINT32 * WaitBytes
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Bytes
++ );
++
++/* Add an END command in the command queue. */
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckHARDWARE_ChipEnable(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gceCORE_3D_MASK ChipEnable,
++ IN OUT gctSIZE_T * Bytes
++ );
++#endif
++
++/* Add a NOP command in the command queue. */
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a PIPESELECT command in the command queue. */
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Add a LINK command in the command queue. */
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT32 FetchSize,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Add an EVENT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ );
++
++/* Query the shader uniforms support. */
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++    OUT gctBOOL * UnifiedUniforms
++ );
++
++/* Split a hardware-specific address into a pool and an offset. */
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Update command queue tail pointer. */
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ );
++
++/* Interrupt manager. */
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctBOOL InterruptValid
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ );
++
++/* Set the page table base address. */
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ );
++
++#if gcdPROCESS_ADDRESS_SPACE
++/* Configure the MMU. */
++gceSTATUS
++gckHARDWARE_ConfigMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER MtlbLogical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctSIZE_T * WaitLinkOffset,
++ OUT gctSIZE_T * WaitLinkBytes
++ );
++#endif
++
++/* Get idle register. */
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ );
++
++gceSTATUS
++gckHARDWARE_ReadInterrupt(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagementLock(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Lock
++ );
++
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ );
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_SetMinFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT MinFscaleValue
++ );
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++);
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++);
++#endif
++
++/* Profile 2D Engine. */
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OUT gcs2D_PROFILE_PTR Profile
++ );
++
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ );
++
++typedef gceSTATUS (*gctISRMANAGERFUNC)(gctPOINTER Context);
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ );
++
++/* Start a composition. */
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ );
++
++/* Check for Hardware features. */
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ );
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Frequency
++ );
++
++gceSTATUS
++gckHARDWARE_PrepareFunctions(
++ gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_SetMMUStates(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#if !gcdENABLE_VG
++/******************************************************************************\
++***************************** gckINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckINTERRUPT * gckINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckINTERRUPT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_Destroy(
++ IN gckINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_SetHandler(
++ IN gckINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckINTERRUPT_Notify(
++ IN gckINTERRUPT Interrupt,
++ IN gctBOOL Valid
++ );
++#endif
++/******************************************************************************\
++******************************** gckEVENT Object *******************************
++\******************************************************************************/
++
++typedef struct _gckEVENT * gckEVENT;
++
++/* Construct a new gckEVENT object. */
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ );
++
++/* Destroy a gckEVENT object. */
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ );
++
++/* Reserve the next available hardware event. */
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gceKERNEL_WHERE Source
++ );
++#endif
++
++/* Add a new event to the list of events. */
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ );
++
++/* Schedule a FreeNonPagedMemory event. */
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeContiguousMemory event. */
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVideoMemory event. */
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a signal event. */
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule an Unlock event. */
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctPOINTER Node,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVirtualCommandBuffer event. */
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ );
++#endif
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ );
++#endif
++
++/* Schedule a composition event. */
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctUINT32 IDs
++ );
++
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ );
++/******************************************************************************\
++******************************* gckCOMMAND Object ******************************
++\******************************************************************************/
++
++typedef struct _gckCOMMAND * gckCOMMAND;
++
++/* Construct a new gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ );
++
++/* Destroy a gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ );
++
++/* Acquire command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Release command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Start the command queue. */
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ );
++
++/* Stop the command queue. */
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ );
++
++#if gcdMULTI_GPU
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ );
++#endif
++
++/* Reserve space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctUINT32 RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctUINT32 * BufferSize
++ );
++
++/* Execute reserved space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++    IN gctUINT32 RequestedBytes
++ );
++
++/* Stall the command queue. */
++#if gcdMULTI_GPU
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower,
++ IN gceCORE_3D_MASK ChipEnable
++ );
++#else
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++#endif
++
++/* Attach user process. */
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ );
++
++/* Detach user process. */
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ );
++
++/* Dump command buffer being executed by GPU. */
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ );
++
++/* Check whether an address lies inside a kernel command buffer. */
++gceSTATUS
++gckCOMMAND_AddressInKernelCommandBuffer(
++ IN gckCOMMAND Command,
++ IN gctUINT32 Address,
++ OUT gctBOOL *In
++ );
++
++/******************************************************************************\
++********************************* gckMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckMMU * gckMMU;
++
++/* Construct a new gckMMU object. */
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ );
++
++/* Destroy a gckMMU object. */
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set an MMU page table entry. */
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ );
++
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ );
++#endif
++
++#if VIVANTE_PROFILER_NEW
++gceSTATUS
++gckHARDWARE_InitProfiler(
++ IN gckHARDWARE Hardware
++ );
++#endif
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ );
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckOS_DetectProcessByName(
++ IN gctCONST_POINTER Name
++ );
++
++void
++gckOS_DumpParam(
++ void
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#if gcdENABLE_VG
++#include "gc_hal_vg.h"
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_kernel_buffer.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,225 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_buffer_h_
++#define __gc_hal_kernel_buffer_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++************************ Command Buffer and Event Objects **********************
++\******************************************************************************/
++
++/* The number of context buffers per user. */
++#define gcdCONTEXT_BUFFER_COUNT 2
++
++/* State delta record. */
++typedef struct _gcsSTATE_DELTA_RECORD * gcsSTATE_DELTA_RECORD_PTR;
++typedef struct _gcsSTATE_DELTA_RECORD
++{
++ /* State address. */
++ gctUINT address;
++
++ /* State mask. */
++ gctUINT32 mask;
++
++ /* State data. */
++ gctUINT32 data;
++}
++gcsSTATE_DELTA_RECORD;
++
++/* State delta. */
++typedef struct _gcsSTATE_DELTA
++{
++ /* For debugging: the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Main state delta ID. Every time state delta structure gets reinitialized,
++ main ID is incremented. If main state ID overflows, all map entry IDs get
++ reinitialized to make sure there is no potential erroneous match after
++ the overflow.*/
++ gctUINT id;
++
++ /* The number of contexts pending modification by the delta. */
++ gctINT refCount;
++
++ /* Vertex element count for the delta buffer. */
++ gctUINT elementCount;
++
++ /* Number of states currently stored in the record array. */
++ gctUINT recordCount;
++
++ /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */
++ gctUINT64 recordArray;
++
++ /* Map entry ID is used for map entry validation. If map entry ID does not
++ match the main state delta ID, the entry and the corresponding state are
++ considered not in use. */
++ gctUINT64 mapEntryID;
++ gctUINT mapEntryIDSize;
++
++ /* If the map entry ID matches the main state delta ID, index points to
++ the state record in the record array. */
++ gctUINT64 mapEntryIndex;
++
++ /* Previous and next state deltas in gcsSTATE_DELTA. */
++ gctUINT64 prev;
++ gctUINT64 next;
++}
++gcsSTATE_DELTA;
++
++/* Command buffer patch record. */
++struct _gcsPATCH
++{
++ /* Pointer within the buffer. */
++ gctUINT32_PTR pointer;
++
++ /* 32-bit data to write at the specified offset. */
++ gctUINT32 data;
++};
++
++/* List of patches for the command buffer. */
++struct _gcsPATCH_LIST
++{
++ /* Array of patch records. */
++ struct _gcsPATCH patch[1024];
++
++ /* Number of patches in the array. */
++ gctUINT count;
++
++ /* Next item in the list. */
++ struct _gcsPATCH_LIST *next;
++};
++
++/* Command buffer object. */
++struct _gcoCMDBUF
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Commit count. */
++ gctUINT count;
++
++ /* Command buffer entry and exit pipes. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Feature usage flags. */
++ gctBOOL using2D;
++ gctBOOL using3D;
++ gctBOOL usingFilterBlit;
++ gctBOOL usingPalette;
++
++ /* Physical address of command buffer. Just a name. */
++ gctUINT32 physical;
++
++ /* Logical address of command buffer. */
++ gctUINT64 logical;
++
++ /* Number of bytes in command buffer. */
++ gctUINT32 bytes;
++
++ /* Start offset into the command buffer. */
++ gctUINT32 startOffset;
++
++ /* Current offset into the command buffer. */
++ gctUINT32 offset;
++
++ /* Number of free bytes in command buffer. */
++ gctUINT32 free;
++
++ /* Location of the last reserved area. */
++ gctUINT64 lastReserve;
++ gctUINT32 lastOffset;
++
++#if gcdSECURE_USER
++ /* Hint array for the current command buffer. */
++ gctUINT hintArraySize;
++ gctUINT64 hintArray;
++ gctUINT64 hintArrayTail;
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Last load state command location and hardware address. */
++ gctUINT64 lastLoadStatePtr;
++ gctUINT32 lastLoadStateAddress;
++ gctUINT32 lastLoadStateCount;
++#endif
++
++ /* Completion signal. */
++ gctSIGNAL signal;
++
++ /* List of patches. */
++ struct _gcsPATCH_LIST *patchHead;
++ struct _gcsPATCH_LIST *patchTail;
++
++ /* Link to the siblings. */
++ gcoCMDBUF prev;
++ gcoCMDBUF next;
++};
++
++typedef struct _gcsQUEUE
++{
++ /* Pointer to next gcsQUEUE structure in gcsQUEUE. */
++ gctUINT64 next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE iface;
++}
++gcsQUEUE;
++
++/* Event queue. */
++struct _gcoQUEUE
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to current event queue. */
++ gcsQUEUE_PTR head;
++ gcsQUEUE_PTR tail;
++
++ /* chunks of the records. */
++ gctPOINTER chunks;
++
++ /* List of free records. */
++ gcsQUEUE_PTR freeList;
++
++ #define gcdIN_QUEUE_RECORD_LIMIT 16
++ /* Number of records currently in queue */
++ gctUINT32 recordCount;
++};
++
++struct _gcsTEMPCMDBUF
++{
++ gctUINT32 currentByteSize;
++ gctPOINTER buffer;
++ gctBOOL inUse;
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_buffer_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_mem.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,530 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/*
++** Include file for the local memory management.
++*/
++
++#ifndef __gc_hal_mem_h_
++#define __gc_hal_mem_h_
++#if (gcdENABLE_3D || gcdENABLE_VG)
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Usage:
++
++ The macros to declare MemPool type and functions are
++ gcmMEM_DeclareFSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareVSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix)
++
++ The data structures for MemPool are
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++
++ The MemPool constructor and destructor functions are
++ gcfMEM_InitFSMemPool(gcsMEM_FS_MEM_POOL *, gcoOS, gctUINT, gctUINT);
++ gcfMEM_FreeFSMemPool(gcsMEM_FS_MEM_POOL *);
++ gcfMEM_InitVSMemPool(gcsMEM_VS_MEM_POOL *, gcoOS, gctUINT, gctBOOL);
++ gcfMEM_FreeVSMemPool(gcsMEM_VS_MEM_POOL *);
++ gcfMEM_InitAFSMemPool(gcsMEM_AFS_MEM_POOL *, gcoOS, gctUINT);
++ gcfMEM_FreeAFSMemPool(gcsMEM_AFS_MEM_POOL *);
++
++ FS: for Fixed-Size data structures
++ VS: for Variable-size data structures
++ AFS: for Array of Fixed-Size data structures
++
++
++ // Example 1: For a fixed-size data structure, struct gcsNode.
++ // It is used locally in a file, so the functions are static without prefix.
++    // At top level, declare allocate and free functions.
++ // The first argument is the data type.
++    // The second argument is the short name used in the functions.
++ gcmMEM_DeclareFSMemPool(struct gcsNode, Node, );
++
++ // The previous macro creates two inline functions,
++ // _AllocateNode and _FreeNode.
++
++ // In function or struct
++ gcsMEM_FS_MEM_POOL nodeMemPool;
++
++ // In function,
++ struct gcsNode * node;
++ gceSTATUS status;
++
++ // Before using the memory pool, initialize it.
++ // The second argument is the gcoOS object.
++ // The third argument is the number of data structures to allocate for each chunk.
++ status = gcfMEM_InitFSMemPool(&nodeMemPool, os, 100, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node.
++ status = _AllocateNode(nodeMemPool, &node);
++ ...
++ // Free a node.
++ _FreeNode(nodeMemPool, node);
++
++ // After using the memory pool, free it.
++ gcfMEM_FreeFSMemPool(&nodeMemPool);
++
++
++ // Example 2: For array of fixed-size data structures, struct gcsNode.
++ // It is used in several files, so the functions are extern with prefix.
++    // At top level, declare allocate and free functions.
++ // The first argument is the data type, and the second one is the short name
++    // used in the functions.
++ gcmMEM_DeclareAFSMemPool(struct gcsNode, NodeArray, gcfOpt);
++
++ // The previous macro creates two inline functions,
++ // gcfOpt_AllocateNodeArray and gcfOpt_FreeNodeArray.
++
++ // In function or struct
++ gcsMEM_AFS_MEM_POOL nodeArrayMemPool;
++
++ // In function,
++ struct gcsNode * nodeArray;
++ gceSTATUS status;
++
++ // Before using the array memory pool, initialize it.
++ // The second argument is the gcoOS object, the third is the number of data
++ // structures to allocate for each chunk.
++ status = gcfMEM_InitAFSMemPool(&nodeArrayMemPool, os, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node array of size 100.
++ status = gcfOpt_AllocateNodeArray(nodeArrayMemPool, &nodeArray, 100);
++ ...
++ // Free a node array.
++ gcfOpt_FreeNodeArray(&nodeArrayMemPool, nodeArray);
++
++ // After using the array memory pool, free it.
++ gcfMEM_FreeAFSMemPool(&nodeArrayMemPool);
++
++*******************************************************************************/
++
++/*******************************************************************************
++** To switch back to use gcoOS_Allocate and gcoOS_Free, add
++** #define USE_LOCAL_MEMORY_POOL 0
++** before including this file.
++*******************************************************************************/
++#ifndef USE_LOCAL_MEMORY_POOL
++/*
++ USE_LOCAL_MEMORY_POOL
++
++ This define enables the local memory management to improve performance.
++*/
++#define USE_LOCAL_MEMORY_POOL 1
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Structures
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++#else
++ typedef gcoOS gcsMEM_FS_MEM_POOL;
++ typedef gcoOS gcsMEM_VS_MEM_POOL;
++ typedef gcoOS gcsMEM_AFS_MEM_POOL;
++#endif
++
++/*******************************************************************************
++** Memory Pool Macros
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ return(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_FSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName##List( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * FirstPointer, \
++ Type * LastPointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x FirstPointer=0x%x LastPointer=0x%x", MemPool, FirstPointer, LastPointer); \
++ status = gcfMEM_FSMemPoolFreeAList(MemPool, (gctPOINTER) FirstPointer, (gctPOINTER) LastPointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status;\
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++ Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer)); \
++    gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++    gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_VSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_AFSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#else
++
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Functions
++*******************************************************************************/
++gceSTATUS
++gcfMEM_InitFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolGetANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeAList(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER FirstNode,
++ IN gctPOINTER LastNode
++ );
++
++gceSTATUS
++gcfMEM_InitVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT BlockSize,
++ IN gctBOOL RecycleFreeNode
++ );
++
++gceSTATUS
++gcfMEM_FreeVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolGetANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolFreeANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_InitAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolGetANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctUINT Count,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolFreeANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* (gcdENABLE_3D || gcdENABLE_VG) */
++#endif /* __gc_hal_mem_h_ */
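As a rough illustration of how the declaration macros above fit together, the sketch below instantiates the fixed-size pool variant for a hypothetical node type. The type, prefix and pool size (myNODE, MyNode, myDriver, 64 nodes) are invented for the example only, and the expanded wrapper signatures are assumed to follow the gcmMEM_DeclareFSMemPool definition shown above.

    /* Hypothetical example (not part of the patch): a fixed-size pool of myNODE records. */
    typedef struct _myNODE
    {
        gctUINT             id;
        struct _myNODE *    next;
    }
    myNODE;

    /* Emits myDriver_AllocateMyNode, myDriver_CAllocateMyNode (zero-filled)
    ** and myDriver_FreeMyNode, each wrapping the generic pool functions. */
    gcmMEM_DeclareFSMemPool(myNODE, MyNode, myDriver)

    static gceSTATUS
    _UseNodePool(
        IN gcoOS Os
        )
    {
        gceSTATUS status;
        gcsMEM_FS_MEM_POOL pool;
        myNODE * node;

        /* Pool of 64 nodes, each gcmSIZEOF(myNODE) bytes. */
        gcmERR_RETURN(gcfMEM_InitFSMemPool(&pool, Os, 64, gcmSIZEOF(myNODE)));

        /* Grab a zero-filled node, use it, and return it to the pool. */
        gcmERR_RETURN(myDriver_CAllocateMyNode(pool, &node));
        node->id = 1;
        gcmERR_RETURN(myDriver_FreeMyNode(pool, node));

        return gcfMEM_FreeFSMemPool(&pool);
    }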
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_options.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,1271 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_options_h_
++#define __gc_hal_options_h_
++
++/*
++ gcdSECURITY
++
++*/
++#ifndef gcdSECURITY
++# define gcdSECURITY 0
++#endif
++
++/*
++ gcdPRINT_VERSION
++
++ Print HAL version.
++*/
++#ifndef gcdPRINT_VERSION
++# define gcdPRINT_VERSION 0
++#endif
++
++/*
++ USE_NEW_LINUX_SIGNAL
++
++ This define enables the Linux kernel signaling between kernel and user.
++*/
++#ifndef USE_NEW_LINUX_SIGNAL
++# define USE_NEW_LINUX_SIGNAL 0
++#endif
++
++/*
++ VIVANTE_PROFILER
++
++ This define enables the profiler.
++*/
++#ifndef VIVANTE_PROFILER
++# define VIVANTE_PROFILER 1
++#endif
++
++/*
++ VIVANTE_PROFILER_CONTEXT
++
++ This define enables the profiler according each context.
++*/
++#ifndef VIVANTE_PROFILER_CONTEXT
++# define VIVANTE_PROFILER_CONTEXT 1
++#endif
++
++#ifndef VIVANTE_PROFILER_PERDRAW
++# define VIVANTE_PROFILER_PERDRAW 0
++#endif
++
++#ifndef VIVANTE_PROFILER_NEW
++# define VIVANTE_PROFILER_NEW 0
++#endif
++
++#ifndef VIVANTE_PROFILER_PM
++# define VIVANTE_PROFILER_PM 1
++#endif
++/*
++ gcdUSE_VG
++
++ Enable VG HAL layer (only for GC350).
++*/
++#ifndef gcdUSE_VG
++# define gcdUSE_VG 0
++#endif
++
++/*
++ USE_SW_FB
++
++ Set to 1 if the frame buffer memory cannot be accessed by the GPU.
++*/
++#ifndef USE_SW_FB
++# define USE_SW_FB 0
++#endif
++
++/*
++ PROFILE_HAL_COUNTERS
++
++ This define enables HAL counter profiling support. HW and SHADER
++ counter profiling depends on this.
++*/
++#ifndef PROFILE_HAL_COUNTERS
++# define PROFILE_HAL_COUNTERS 1
++#endif
++
++/*
++ PROFILE_HW_COUNTERS
++
++ This define enables HW counter profiling support.
++*/
++#ifndef PROFILE_HW_COUNTERS
++# define PROFILE_HW_COUNTERS 1
++#endif
++
++/*
++ PROFILE_SHADER_COUNTERS
++
++ This define enables SHADER counter profiling support.
++*/
++#ifndef PROFILE_SHADER_COUNTERS
++# define PROFILE_SHADER_COUNTERS 1
++#endif
++
++/*
++ COMMAND_PROCESSOR_VERSION
++
++ The version of the command buffer and task manager.
++*/
++#define COMMAND_PROCESSOR_VERSION 1
++
++/*
++ gcdDUMP_KEY
++
++ Set this to a string that appears in 'cat /proc/<pid>/cmdline'. E.g. 'camera'.
++ HAL will create dumps for the processes matching this key.
++*/
++#ifndef gcdDUMP_KEY
++# define gcdDUMP_KEY "process"
++#endif
++
++/*
++ gcdDUMP_PATH
++
++ The dump file location. Some processes cannot write to the sdcard.
++ Try apps' data dir, e.g. /data/data/com.android.launcher
++*/
++#ifndef gcdDUMP_PATH
++#if defined(ANDROID)
++# define gcdDUMP_PATH "/mnt/sdcard/"
++#else
++# define gcdDUMP_PATH "./"
++#endif
++#endif
++
++/*
++ gcdDUMP
++
++ When set to 1, a dump of all states and memory uploads, as well as other
++ hardware related execution will be printed to the debug console. This
++ data can be used for playing back applications.
++*/
++#ifndef gcdDUMP
++# define gcdDUMP 0
++#endif
++
++/*
++ gcdDUMP_API
++
++    When set to 1, a high-level dump of the EGL and GL/VG APIs is
++    captured.
++*/
++#ifndef gcdDUMP_API
++# define gcdDUMP_API 0
++#endif
++
++
++
++/*
++ gcdDEBUG_OPTION
++    When set to 1, the debug options are enabled. The corresponding sub-option
++    macros below must also be set to enable each specific case.
++*/
++#ifndef gcdDEBUG_OPTION
++# define gcdDEBUG_OPTION 0
++
++#if gcdDEBUG_OPTION
++/*
++ gcdDEBUG_OPTION_KEY
++    The process name of the application to debug.
++*/
++#ifndef gcdDEBUG_OPTION_KEY
++# define gcdDEBUG_OPTION_KEY "process"
++# endif
++/*
++ gcdDEBUG_OPTION_NO_GL_DRAWS
++    When set to 1, all glDrawArrays and glDrawElements calls will be skipped.
++*/
++#ifndef gcdDEBUG_OPTION_NO_GL_DRAWS
++# define gcdDEBUG_OPTION_NO_GL_DRAWS 0
++# endif
++/*
++ gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES
++    When set to 1, all DrawPrimitives will be skipped.
++*/
++#ifndef gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES
++# define gcdDEBUG_OPTION_NO_DRAW_PRIMITIVES 0
++# endif
++/*
++ gcdDEBUG_OPTION_SKIP_SWAP
++    When set to 1, only one out of every gcdDEBUG_OPTION_SKIP_FRAMES (e.g. 1 in 10) eglSwapBuffers calls will be resolved;
++    the others are skipped.
++*/
++#ifndef gcdDEBUG_OPTION_SKIP_SWAP
++# define gcdDEBUG_OPTION_SKIP_SWAP 0
++# define gcdDEBUG_OPTION_SKIP_FRAMES 10
++# endif
++/*
++ gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET
++    When set to 1, the render target format will be forced to RGB565.
++*/
++#ifndef gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET
++# define gcdDEBUG_OPTION_FORCE_16BIT_RENDER_TARGET 0
++# endif
++/*
++ gcdDEBUG_OPTION_NONE_TEXTURE
++ When set to 1, the type of texture will be set to AQ_TEXTURE_SAMPLE_MODE_TYPE_NONE.
++*/
++#ifndef gcdDEBUG_OPTION_NONE_TEXTURE
++# define gcdDEBUG_OPTION_NONE_TEXTURE 0
++# endif
++/*
++ gcdDEBUG_OPTION_NONE_DEPTH
++ When set to 1, the depth format of surface will be set to gcvSURF_UNKNOWN.
++*/
++#ifndef gcdDEBUG_OPTION_NONE_DEPTH
++# define gcdDEBUG_OPTION_NONE_DEPTH 0
++# endif
++
++# endif
++#endif
++
++/*
++ gcdDUMP_SWAP_PER_DRAW
++
++    When set to 1, dump a swap command for every single draw to make simulation comparison possible.
++ Only valid for ES3 driver for now.
++*/
++#ifndef gcdDUMP_SWAP_PER_DRAW
++# define gcdDUMP_SWAP_PER_DRAW 0
++#endif
++
++/*
++ gcdDUMP_FRAMERATE
++    When set to a value other than zero, the average frame rate will be dumped.
++    The value set is the starting frame from which the average is calculated.
++    This is needed because the first few frames are sometimes too slow to be included
++    in the average. Frame count starts from 1.
++*/
++#ifndef gcdDUMP_FRAMERATE
++# define gcdDUMP_FRAMERATE 0
++#endif
++
++/*
++ gcdENABLE_FSCALE_VAL_ADJUST
++    When non-zero, the FSCALE_VAL used at gcvPOWER_ON can be adjusted externally.
++ */
++#ifndef gcdENABLE_FSCALE_VAL_ADJUST
++# define gcdENABLE_FSCALE_VAL_ADJUST 1
++#endif
++
++/*
++ gcdDUMP_IN_KERNEL
++
++ When set to 1, all dumps will happen in the kernel. This is handy if
++ you want the kernel to dump its command buffers as well and the data
++ needs to be in sync.
++*/
++#ifndef gcdDUMP_IN_KERNEL
++# define gcdDUMP_IN_KERNEL 0
++#endif
++
++/*
++ gcdDUMP_COMMAND
++
++ When set to non-zero, the command queue will dump all incoming command
++ and context buffers as well as all other modifications to the command
++ queue.
++*/
++#ifndef gcdDUMP_COMMAND
++# define gcdDUMP_COMMAND 0
++#endif
++
++/*
++ gcdDUMP_2D
++
++ When set to non-zero, it will dump the 2D command and surface.
++*/
++#ifndef gcdDUMP_2D
++# define gcdDUMP_2D 0
++#endif
++
++/*
++ gcdDUMP_FRAME_TGA
++
++    When set to a value other than 0, a dump of the frame specified by the value
++    will be written into frame.tga. Frame count starts from 1.
++ */
++#ifndef gcdDUMP_FRAME_TGA
++# define gcdDUMP_FRAME_TGA 0
++#endif
++/*
++ gcdNULL_DRIVER
++
++ Set to 1 for infinite speed hardware.
++ Set to 2 for bypassing the HAL.
++ Set to 3 for bypassing the drivers.
++*/
++#ifndef gcdNULL_DRIVER
++# define gcdNULL_DRIVER 0
++#endif
++
++/*
++ gcdENABLE_TIMEOUT_DETECTION
++
++ Enable timeout detection.
++*/
++#ifndef gcdENABLE_TIMEOUT_DETECTION
++# define gcdENABLE_TIMEOUT_DETECTION 0
++#endif
++
++/*
++ gcdCMD_BUFFER_SIZE
++
++ Number of bytes in a command buffer.
++*/
++#ifndef gcdCMD_BUFFER_SIZE
++# define gcdCMD_BUFFER_SIZE (128 << 10)
++#endif
++
++/*
++ gcdCMD_BUFFERS
++
++ Number of command buffers to use per client.
++*/
++#ifndef gcdCMD_BUFFERS
++# define gcdCMD_BUFFERS 2
++#endif
++
++/*
++ gcdMAX_CMD_BUFFERS
++
++ Maximum number of command buffers to use per client.
++*/
++#ifndef gcdMAX_CMD_BUFFERS
++# define gcdMAX_CMD_BUFFERS 8
++#endif
++
++/*
++ gcdCOMMAND_QUEUES
++
++ Number of command queues in the kernel.
++*/
++#ifndef gcdCOMMAND_QUEUES
++# define gcdCOMMAND_QUEUES 2
++#endif
++
++/*
++ gcdPOWER_CONTROL_DELAY
++
++    The delay in milliseconds required to wait until the GPU has woken up
++    from a suspend or power-down state. This is system dependent because
++    the bus clock also needs to stabilize.
++*/
++#ifndef gcdPOWER_CONTROL_DELAY
++# define gcdPOWER_CONTROL_DELAY 0
++#endif
++
++/*
++ gcdMIRROR_PAGETABLE
++
++    Enable it when GPUs with an old MMU and a new MMU exist on the same SoC. It makes
++    each GPU use the same virtual address to access the same physical memory.
++*/
++#ifndef gcdMIRROR_PAGETABLE
++# define gcdMIRROR_PAGETABLE 0
++#endif
++
++/*
++ gcdMMU_SIZE
++
++ Size of the MMU page table in bytes. Each 4 bytes can hold 4kB worth of
++ virtual data.
++*/
++#ifndef gcdMMU_SIZE
++#if gcdMIRROR_PAGETABLE
++# define gcdMMU_SIZE 0x200000
++#else
++# define gcdMMU_SIZE (2048 << 10)
++#endif
++#endif
++
++/*
++ gcdSECURE_USER
++
++ Use logical addresses instead of physical addresses in user land. In
++ this case a hint table is created for both command buffers and context
++ buffers, and that hint table will be used to patch up those buffers in
++ the kernel when they are ready to submit.
++*/
++#ifndef gcdSECURE_USER
++# define gcdSECURE_USER 0
++#endif
++
++/*
++ gcdSECURE_CACHE_SLOTS
++
++ Number of slots in the logical to DMA address cache table. Each time a
++ logical address needs to be translated into a DMA address for the GPU,
++ this cache will be walked. The replacement scheme is LRU.
++*/
++#ifndef gcdSECURE_CACHE_SLOTS
++# define gcdSECURE_CACHE_SLOTS 1024
++#endif
++
++/*
++ gcdSECURE_CACHE_METHOD
++
++ Replacement scheme used for Secure Cache. The following options are
++ available:
++
++ gcdSECURE_CACHE_LRU
++ A standard LRU cache.
++
++ gcdSECURE_CACHE_LINEAR
++ A linear walker with the idea that an application will always
++ render the scene in a similar way, so the next entry in the
++ cache should be a hit most of the time.
++
++ gcdSECURE_CACHE_HASH
++ A 256-entry hash table.
++
++ gcdSECURE_CACHE_TABLE
++ A simple cache but with potential of a lot of cache replacement.
++*/
++#ifndef gcdSECURE_CACHE_METHOD
++# define gcdSECURE_CACHE_METHOD gcdSECURE_CACHE_HASH
++#endif
++
++/*
++ gcdREGISTER_ACCESS_FROM_USER
++
++ Set to 1 to allow IOCTL calls to get through from user land. This
++ should only be in debug or development drops.
++*/
++#ifndef gcdREGISTER_ACCESS_FROM_USER
++# define gcdREGISTER_ACCESS_FROM_USER 1
++#endif
++
++/*
++ gcdHEAP_SIZE
++
++ Set the allocation size for the internal heaps. Each time a heap is
++    full, a new heap will be allocated with this minimum amount of bytes.
++    The bigger this size, the fewer heaps need to be allocated and the better
++    the performance. However, heaps won't be freed until they are
++ completely free, so there might be some more memory waste if the size is
++ too big.
++*/
++#ifndef gcdHEAP_SIZE
++# define gcdHEAP_SIZE (64 << 10)
++#endif
++
++/*
++ gcdPOWER_SUSPEND_WHEN_IDLE
++
++ Set to 1 to make GPU enter gcvPOWER_SUSPEND when idle detected,
++ otherwise GPU will enter gcvPOWER_IDLE.
++*/
++#ifndef gcdPOWER_SUSPEND_WHEN_IDLE
++# define gcdPOWER_SUSPEND_WHEN_IDLE 1
++#endif
++
++#ifndef gcdFPGA_BUILD
++# define gcdFPGA_BUILD 0
++#endif
++
++/*
++ gcdGPU_TIMEOUT
++
++    This define specifies the number of milliseconds the system will wait
++ before it broadcasts the GPU is stuck. In other words, it will define
++ the timeout of any operation that needs to wait for the GPU.
++
++ If the value is 0, no timeout will be checked for.
++*/
++#ifndef gcdGPU_TIMEOUT
++#if gcdFPGA_BUILD
++# define gcdGPU_TIMEOUT 0
++# define gcdGPU_2D_TIMEOUT 0
++# else
++# define gcdGPU_TIMEOUT 20000
++# define gcdGPU_2D_TIMEOUT 4000
++# endif
++#endif
++
++/*
++ gcdGPU_ADVANCETIMER
++
++    The interval of the advance timer, in milliseconds.
++*/
++#ifndef gcdGPU_ADVANCETIMER
++# define gcdGPU_ADVANCETIMER 250
++#endif
++
++/*
++ gcdSTATIC_LINK
++
++    This define disables static linking.
++*/
++#ifndef gcdSTATIC_LINK
++# define gcdSTATIC_LINK 0
++#endif
++
++/*
++ gcdUSE_NEW_HEAP
++
++ Setting this define to 1 enables new heap.
++*/
++#ifndef gcdUSE_NEW_HEAP
++# define gcdUSE_NEW_HEAP 0
++#endif
++
++/*
++ gcdCMD_NO_2D_CONTEXT
++
++ This define enables no-context 2D command buffer.
++*/
++#ifndef gcdCMD_NO_2D_CONTEXT
++# define gcdCMD_NO_2D_CONTEXT 1
++#endif
++
++/*
++ gcdENABLE_BUFFER_ALIGNMENT
++
++    When enabled, video memory is allocated with at least 16KB alignment
++ between multiple sub-buffers.
++*/
++#ifndef gcdENABLE_BUFFER_ALIGNMENT
++# define gcdENABLE_BUFFER_ALIGNMENT 1
++#endif
++
++/*
++ gcdENABLE_BANK_ALIGNMENT
++
++ When enabled, video memory is allocated bank aligned. The vendor can modify
++ _GetSurfaceBankAlignment() and _GetBankOffsetBytes() to define how
++ different types of allocations are bank and channel aligned.
++ When disabled (default), no bank alignment is done.
++*/
++#ifndef gcdENABLE_BANK_ALIGNMENT
++# define gcdENABLE_BANK_ALIGNMENT 0
++#endif
++
++/*
++ gcdBANK_BIT_START
++
++ Specifies the start bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_START
++# define gcdBANK_BIT_START 12
++#endif
++
++/*
++ gcdBANK_BIT_END
++
++ Specifies the end bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_END
++# define gcdBANK_BIT_END 14
++#endif
++
++/*
++ gcdBANK_CHANNEL_BIT
++
++ When set, video memory when allocated bank aligned is allocated such that
++ render and depth buffer addresses alternate on the channel bit specified.
++ This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled.
++ When disabled (default), no alteration is done.
++*/
++#ifndef gcdBANK_CHANNEL_BIT
++# define gcdBANK_CHANNEL_BIT 7
++#endif
++
++/*
++ gcdDYNAMIC_SPEED
++
++ When non-zero, it informs the kernel driver to use the speed throttling
++    broadcasting functions to inform the system the GPU should be sped up or
++    slowed down. It will send a slowdown broadcast each "interval"
++ specified by this define in milliseconds
++ (gckOS_BroadcastCalibrateSpeed).
++*/
++#ifndef gcdDYNAMIC_SPEED
++# define gcdDYNAMIC_SPEED 2000
++#endif
++
++/*
++ gcdDYNAMIC_EVENT_THRESHOLD
++
++ When non-zero, it specifies the maximum number of available events at
++ which the kernel driver will issue a broadcast to speed up the GPU
++ (gckOS_BroadcastHurry).
++*/
++#ifndef gcdDYNAMIC_EVENT_THRESHOLD
++# define gcdDYNAMIC_EVENT_THRESHOLD 5
++#endif
++
++/*
++ gcdENABLE_PROFILING
++
++ Enable profiling macros.
++*/
++#ifndef gcdENABLE_PROFILING
++# define gcdENABLE_PROFILING 0
++#endif
++
++/*
++ gcdENABLE_128B_MERGE
++
++ Enable 128B merge for the BUS control.
++*/
++#ifndef gcdENABLE_128B_MERGE
++# define gcdENABLE_128B_MERGE 0
++#endif
++
++/*
++ gcdFRAME_DB
++
++    When non-zero, it specifies the number of frames inside the frame
++ database. The frame DB will collect per-frame timestamps and hardware
++ counters.
++*/
++#ifndef gcdFRAME_DB
++# define gcdFRAME_DB 0
++# define gcdFRAME_DB_RESET 0
++# define gcdFRAME_DB_NAME "/var/log/frameDB.log"
++#endif
++
++/*
++ gcdDISABLE_CORES_2D3D
++    Disable the 2D/3D cores for 2D OpenVG.
++*/
++#ifndef gcdDISABLE_CORES_2D3D
++# define gcdDISABLE_CORES_2D3D 0
++#endif
++
++/*
++ gcdPAGED_MEMORY_CACHEABLE
++
++ When non-zero, paged memory will be cacheable.
++
++    Normally, the driver determines whether a video memory
++    allocation is cacheable or not. When cacheable is not necessary,
++    it will be write-combined.
++
++    This option is only for those SoCs which can't enable
++    write-combine without enabling cacheable.
++*/
++#ifndef gcdPAGED_MEMORY_CACHEABLE
++# define gcdPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_CACHEABLE
++
++    When non-zero, non-paged memory will be cacheable.
++*/
++#ifndef gcdNONPAGED_MEMORY_CACHEABLE
++# define gcdNONPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_BUFFERABLE
++
++    When non-zero, non-paged memory will be bufferable.
++    gcdNONPAGED_MEMORY_BUFFERABLE and gcdNONPAGED_MEMORY_CACHEABLE
++    can't both be set to 1 at the same time.
++*/
++#ifndef gcdNONPAGED_MEMORY_BUFFERABLE
++# define gcdNONPAGED_MEMORY_BUFFERABLE 1
++#endif
++
++/*
++ gcdENABLE_INFINITE_SPEED_HW
++    Enable the infinite-speed HW; this is for 2D OpenVG.
++*/
++#ifndef gcdENABLE_INFINITE_SPEED_HW
++# define gcdENABLE_INFINITE_SPEED_HW 0
++#endif
++
++/*
++ gcdMULTI_GPU
++
++ Enable/disable multi-GPU support.
++ 0 : Disable multi-GPU support
++ 1 : Enable one of the 3D cores
++ [2..X] : Number of 3D GPU Cores
++*/
++#ifndef gcdMULTI_GPU
++# define gcdMULTI_GPU 0
++#endif
++
++/*
++ gcdMULTI_GPU_AFFINITY
++
++ Enable/disable the binding of a context to one GPU
++*/
++#ifndef gcdMULTI_GPU_AFFINITY
++# define gcdMULTI_GPU_AFFINITY 0
++#endif
++
++/*
++ gcdPOWEROFF_TIMEOUT
++
++ When non-zero, GPU will power off automatically from
++ idle state, and gcdPOWEROFF_TIMEOUT is also the default
++ timeout in milliseconds.
++ */
++#ifndef gcdPOWEROFF_TIMEOUT
++# define gcdPOWEROFF_TIMEOUT 300
++#endif
++
++/*
++ QNX_SINGLE_THREADED_DEBUGGING
++*/
++#ifndef QNX_SINGLE_THREADED_DEBUGGING
++# define QNX_SINGLE_THREADED_DEBUGGING 0
++#endif
++
++/*
++ gcdRENDER_THREADS
++
++ Number of render threads. Make it zero, and there will be no render
++ threads.
++*/
++#ifndef gcdRENDER_THREADS
++# define gcdRENDER_THREADS 0
++#endif
++
++/*
++ gcdSMP
++
++ This define enables SMP support.
++
++    Currently, it only works on Linux/Android;
++    Kbuild will configure it according to whether
++ CONFIG_SMP is set.
++
++*/
++#ifndef gcdSMP
++#ifdef __APPLE__
++# define gcdSMP 1
++#else
++# define gcdSMP 0
++#endif
++#endif
++
++/*
++ gcdSHARED_RESOLVE_BUFFER_ENABLED
++
++ Use shared resolve buffer for all app buffers.
++*/
++#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED
++# define gcdSHARED_RESOLVE_BUFFER_ENABLED 0
++#endif
++
++/*
++ gcdUSE_TRIANGLE_STRIP_PATCH
++ */
++#ifndef gcdUSE_TRIANGLE_STRIP_PATCH
++# define gcdUSE_TRIANGLE_STRIP_PATCH 1
++#endif
++
++/*
++ gcdENABLE_OUTER_CACHE_PATCH
++
++ Enable the outer cache patch.
++*/
++#ifndef gcdENABLE_OUTER_CACHE_PATCH
++# define gcdENABLE_OUTER_CACHE_PATCH 0
++#endif
++
++/*
++ gcdPROCESS_ADDRESS_SPACE
++
++ When non-zero, every process which attaches to galcore has its own GPU
++    address space, the size of which is gcdPROCESS_ADDRESS_SPACE_SIZE.
++*/
++#ifndef gcdPROCESS_ADDRESS_SPACE
++# define gcdPROCESS_ADDRESS_SPACE 0
++# define gcdPROCESS_ADDRESS_SPACE_SIZE 0x80000000
++#endif
++
++/*
++ gcdSHARED_PAGETABLE
++
++    When non-zero, multiple GPUs in one chip with the same MMU use
++    one shared pagetable, so that when accessing the same surface,
++    they can use the same GPU virtual address.
++*/
++#ifndef gcdSHARED_PAGETABLE
++# define gcdSHARED_PAGETABLE !gcdPROCESS_ADDRESS_SPACE
++#endif
++
++#ifndef gcdUSE_PVR
++# define gcdUSE_PVR 1
++#endif
++
++/*
++ gcdSMALL_BLOCK_SIZE
++
++ When non-zero, a part of VIDMEM will be reserved for requests
++ whose requesting size is less than gcdSMALL_BLOCK_SIZE.
++
++    For Linux, it's the size of a page. If this request falls back
++    to gcvPOOL_CONTIGUOUS or gcvPOOL_VIRTUAL, memory will be wasted
++    because they allocate at least a page.
++*/
++#ifndef gcdSMALL_BLOCK_SIZE
++# define gcdSMALL_BLOCK_SIZE 4096
++# define gcdRATIO_FOR_SMALL_MEMORY 32
++#endif
++
++/*
++ gcdCONTIGUOUS_SIZE_LIMIT
++ When non-zero, size of video node from gcvPOOL_CONTIGUOUS is
++ limited by gcdCONTIGUOUS_SIZE_LIMIT.
++*/
++#ifndef gcdCONTIGUOUS_SIZE_LIMIT
++# define gcdCONTIGUOUS_SIZE_LIMIT 0
++#endif
++
++/*
++ gcdLINK_QUEUE_SIZE
++
++    When non-zero, the driver maintains a queue to record information about
++    the latest linked context buffer and command buffer. Data in this queue
++    is used for debugging.
++*/
++#ifndef gcdLINK_QUEUE_SIZE
++# define gcdLINK_QUEUE_SIZE 5
++#endif
++
++/* gcdALPHA_KILL_IN_SHADER
++
++ Enable alpha kill inside the shader. This will be set automatically by the
++ HAL if certain states match a criteria.
++*/
++#ifndef gcdALPHA_KILL_IN_SHADER
++# define gcdALPHA_KILL_IN_SHADER 1
++#endif
++
++
++
++/*
++ gcdDVFS
++
++    When non-zero, software will make use of the dynamic voltage and
++    frequency scaling feature.
++ */
++#ifndef gcdDVFS
++# define gcdDVFS 0
++# define gcdDVFS_ANAYLSE_WINDOW 4
++# define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4)
++#endif
++
++#ifndef gcdSYNC
++# define gcdSYNC 1
++#endif
++
++#ifndef gcdSHADER_SRC_BY_MACHINECODE
++# define gcdSHADER_SRC_BY_MACHINECODE 1
++#endif
++
++#ifndef gcdGLB27_SHADER_REPLACE_OPTIMIZATION
++# define gcdGLB27_SHADER_REPLACE_OPTIMIZATION 1
++#endif
++
++/*
++ gcdSTREAM_OUT_BUFFER
++
++    Enable support for the secondary stream out buffer.
++*/
++#ifndef gcdSTREAM_OUT_BUFFER
++# define gcdSTREAM_OUT_BUFFER 0
++# define gcdSTREAM_OUT_NAIVE_SYNC 0
++#endif
++
++/*
++ gcdUSE_HARDWARE_CONFIGURATION_TABLES
++
++ Enable the use of hardware configuration tables,
++    instead of querying the hardware to determine the features.
++*/
++#ifndef gcdUSE_HARDWARE_CONFIGURATION_TABLES
++# define gcdUSE_HARDWARE_CONFIGURATION_TABLES 0
++#endif
++
++/*
++ gcdSUPPORT_SWAP_RECTANGLE
++
++ Support swap with a specific rectangle.
++
++    Set the rectangle with the eglSetSwapRectangleVIV API.
++ Android only.
++*/
++#ifndef gcdSUPPORT_SWAP_RECTANGLE
++# define gcdSUPPORT_SWAP_RECTANGLE 1
++#endif
++
++/*
++ gcdGPU_LINEAR_BUFFER_ENABLED
++
++ Use linear buffer for GPU apps so HWC can do 2D composition.
++ Android only.
++*/
++#ifndef gcdGPU_LINEAR_BUFFER_ENABLED
++# define gcdGPU_LINEAR_BUFFER_ENABLED 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW
++
++ Enable Render-Into-Window (ie, No-Resolve) feature on android.
++    NOTE that even if enabled, it still depends on hardware features and
++    Android application behavior. When the hardware features or application
++    behavior cannot support render-into-window mode, it will fall back
++    to normal mode.
++ When Render-Into-Window is finally used, window back buffer of android
++ applications will be allocated matching render target tiling format.
++ Otherwise buffer tiling is decided by the above option
++ 'gcdGPU_LINEAR_BUFFER_ENABLED'.
++ Android only for now.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW
++# define gcdENABLE_RENDER_INTO_WINDOW 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW_WITH_FC
++
++    Enable direct rendering (i.e., No-Resolve) with tile status.
++    This is experimental and in the development stage.
++ This will dynamically check if color compression is available.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW_WITH_FC
++# define gcdENABLE_RENDER_INTO_WINDOW_WITH_FC 1
++#endif
++
++/*
++ gcdENABLE_BLIT_BUFFER_PRESERVE
++
++    Render-Into-Window (i.e., No-Resolve) does not include preserved swap
++    behavior. This feature can enable buffer preservation in No-Resolve mode.
++    When enabled, the previous buffer (or part of it) will be resolve-blitted
++    to the current buffer.
++*/
++#ifndef gcdENABLE_BLIT_BUFFER_PRESERVE
++# define gcdENABLE_BLIT_BUFFER_PRESERVE 1
++#endif
++
++/*
++ gcdANDROID_NATIVE_FENCE_SYNC
++
++    Enable Android native fence sync. It was introduced in Jelly Bean 4.2.
++ Depends on linux kernel option: CONFIG_SYNC.
++
++ 0: Disabled
++ 1: Build framework for native fence sync feature, and EGL extension
++ 2: Enable async swap buffers for client
++ * Native fence sync for client 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for layer in compositor side.
++ 3. Enable async hwcomposer composition.
++ * 'releaseFenceFd' for layer in compositor side, which is native
++ fence sync when client 'dequeueBuffer'
++ * Native fence sync for compositor 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for framebuffer target for DC
++ */
++#ifndef gcdANDROID_NATIVE_FENCE_SYNC
++# define gcdANDROID_NATIVE_FENCE_SYNC 0
++#endif
++
++/*
++ gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC
++
++ Enable implicit android native buffer sync.
++
++ For non-HW_RENDER buffer, CPU (or other hardware) and GPU can access
++ the buffer at the same time. This is to add implicit synchronization
++ between CPU (or the hardware) and GPU.
++
++ Eventually, please do not use implicit native buffer sync, but use
++ "fence sync" or "android native fence sync" instead in libgui, which
++ can be enabled in frameworks/native/libs/gui/Android.mk. This kind
++ of synchronization should be done by app but not driver itself.
++
++ Please disable this option when either "fence sync" or
++ "android native fence sync" is enabled.
++ */
++#ifndef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC
++# define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 1
++#endif
++
++/*
++ * Implicit native buffer sync is not needed when ANDROID_native_fence_sync
++ * is available.
++ */
++#if gcdANDROID_NATIVE_FENCE_SYNC
++# undef gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC
++# define gcdANDROID_IMPLICIT_NATIVE_BUFFER_SYNC 0
++#endif
++
++/*
++ gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++
++    Enable source surface address adjustment during composition on Android.
++ Android only.
++*/
++#ifndef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 1
++#endif
++
++/*
++ gcdUSE_WCLIP_PATCH
++
++ Enable wclipping patch.
++*/
++#ifndef gcdUSE_WCLIP_PATCH
++# define gcdUSE_WCLIP_PATCH 1
++#endif
++
++#ifndef gcdUSE_NPOT_PATCH
++# define gcdUSE_NPOT_PATCH 1
++#endif
++
++/*
++ gcd3DBLIT
++
++ TODO: Should be replaced by feature bit if available.
++*/
++#ifndef gcd3DBLIT
++# define gcd3DBLIT 0
++#endif
++
++/*
++ gcdINTERNAL_COMMENT
++
++    Wrap internal comments; content wrapped by it and the macro itself
++    will be removed in the release driver.
++*/
++#ifndef gcdINTERNAL_COMMENT
++# define gcdINTERNAL_COMMENT 1
++#endif
++
++/*
++ gcdRTT_DISABLE_FC
++
++ Disable RTT FC support. For test only.
++*/
++#ifndef gcdRTT_DISABLE_FC
++# define gcdRTT_DISABLE_FC 0
++#endif
++
++/*
++ gcdFORCE_MIPMAP
++
++ Force generate mipmap for texture.
++*/
++#ifndef gcdFORCE_MIPMAP
++# define gcdFORCE_MIPMAP 0
++#endif
++
++/*
++ gcdFORCE_BILINEAR
++
++ Force bilinear for mipfilter.
++*/
++#ifndef gcdFORCE_BILINEAR
++# define gcdFORCE_BILINEAR 1
++#endif
++
++/*
++ gcdBINARY_TRACE
++
++ When non-zero, binary trace will be generated.
++
++    When gcdBINARY_TRACE_FILE_SIZE is non-zero, the binary trace buffer will
++    be written to a file whose size is limited to
++ gcdBINARY_TRACE_FILE_SIZE.
++*/
++#ifndef gcdBINARY_TRACE
++# define gcdBINARY_TRACE 0
++# define gcdBINARY_TRACE_FILE_SIZE 0
++#endif
++
++#ifndef gcdMOVG
++# define gcdMOVG 0
++#if gcdMOVG
++# define GC355_PROFILER 1
++# endif
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#else
++#if gcdMOVG
++# define GC355_PROFILER 1
++# define gcdENABLE_TS_DOUBLE_BUFFER 0
++#else
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#endif
++#endif
++
++/* gcdINTERRUPT_STATISTIC
++ *
++ * Monitor the events sent to the GPU and the interrupts issued by the GPU.
++ */
++
++#ifndef gcdINTERRUPT_STATISTIC
++#if defined(LINUX)
++# define gcdINTERRUPT_STATISTIC 1
++#else
++# define gcdINTERRUPT_STATISTIC 0
++#endif
++#endif
++
++/*
++ gcdYINVERTED_RENDERING
++    When it's not zero, the display buffer will be rendered
++    in top-bottom direction. All other offscreen rendering
++    will be bottom-top, which follows the OpenGL ES spec.
++*/
++#ifndef gcdYINVERTED_RENDERING
++# define gcdYINVERTED_RENDERING 1
++#endif
++
++#if gcdYINVERTED_RENDERING
++/* disable unaligned linear composition adjust in Y-inverted rendering mode. */
++# undef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 0
++#endif
++
++/*
++ gcdFENCE_WAIT_LOOP_COUNT
++    Loop count used when waiting on a fence.
++*/
++#ifndef gcdFENCE_WAIT_LOOP_COUNT
++# define gcdFENCE_WAIT_LOOP_COUNT 100
++#endif
++
++/*
++ gcdHAL_3D_DRAWBLIT
++ When it's not zero, we will enable HAL 3D drawblit
++ to replace client 3dblit.
++*/
++#ifndef gcdHAL_3D_DRAWBLIT
++# define gcdHAL_3D_DRAWBLIT 1
++#endif
++
++/*
++ gcdPARTIAL_FAST_CLEAR
++ When it's not zero, partial fast clear is enabled.
++    Depends on gcdHAL_3D_DRAWBLIT; if gcdHAL_3D_DRAWBLIT is not enabled,
++    it is only available when the scissor box is completely aligned.
++    Experimental, under test.
++*/
++#ifndef gcdPARTIAL_FAST_CLEAR
++# define gcdPARTIAL_FAST_CLEAR 1
++#endif
++
++/*
++ gcdREMOVE_SURF_ORIENTATION
++    When it's not zero, the surface orientation function will be removed.
++    It will become a parameter of the resolve function.
++*/
++#ifndef gcdREMOVE_SURF_ORIENTATION
++# define gcdREMOVE_SURF_ORIENTATION 0
++#endif
++
++/*
++ gcdPATTERN_FAST_PATH
++    For pattern matching.
++*/
++#ifndef gcdPATTERN_FAST_PATH
++# define gcdPATTERN_FAST_PATH 1
++#endif
++
++/*
++ gcdUSE_INPUT_DEVICE
++    Disable input device usage under FB mode to support FB+VDK multi-process.
++*/
++#ifndef gcdUSE_INPUT_DEVICE
++# define gcdUSE_INPUT_DEVICE 1
++#endif
++
++
++/*
++ gcdFRAMEINFO_STATISTIC
++    When enabled, collect frame information.
++*/
++#ifndef gcdFRAMEINFO_STATISTIC
++
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG) || gcdDUMP
++# define gcdFRAMEINFO_STATISTIC 1
++#else
++# define gcdFRAMEINFO_STATISTIC 0
++#endif
++
++#endif
++
++/*
++ gcdPACKED_OUTPUT_ADDRESS
++ When it's not zero, ps output is already packed after linked
++    When it's not zero, the PS output is already packed after linking.
++#ifndef gcdPACKED_OUTPUT_ADDRESS
++# define gcdPACKED_OUTPUT_ADDRESS 1
++#endif
++
++/*
++ gcdENABLE_THIRD_PARTY_OPERATION
++    Enable third-party operations like TPC or not.
++*/
++#ifndef gcdENABLE_THIRD_PARTY_OPERATION
++# define gcdENABLE_THIRD_PARTY_OPERATION 1
++#endif
++
++
++/*
++ Core configurations. By default enable all cores.
++*/
++#ifndef gcdENABLE_3D
++# define gcdENABLE_3D 1
++#endif
++
++#ifndef gcdENABLE_2D
++# define gcdENABLE_2D 1
++#endif
++
++#ifndef gcdENABLE_VG
++# define gcdENABLE_VG 0
++#endif
++
++#ifndef gcdGC355_MEM_PRINT
++# define gcdGC355_MEM_PRINT 0
++#else
++#if (!((gcdENABLE_3D == 0) && (gcdENABLE_2D == 0) && (gcdENABLE_VG == 1)))
++# undef gcdGC355_MEM_PRINT
++# define gcdGC355_MEM_PRINT 0
++# endif
++#endif
++
++#ifndef gcdENABLE_UNIFIED_CONSTANT
++# define gcdENABLE_UNIFIED_CONSTANT 1
++#endif
++
++/*
++ gcdRECORD_COMMAND
++*/
++#ifndef gcdRECORD_COMMAND
++# define gcdRECORD_COMMAND 0
++#endif
++
++#endif /* __gc_hal_options_h_ */
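Every option above is wrapped in an #ifndef guard, so a port or build can override individual values before this header is included, either from the compiler command line (e.g. -DgcdDUMP=1) or from a wrapper header. A minimal sketch, with illustrative values only:

    /* Illustrative overrides only; the values are examples, not recommendations. */
    #define gcdPOWEROFF_TIMEOUT     1000    /* power off after 1 second of idle     */
    #define gcdGPU_TIMEOUT          10000   /* report a stuck GPU after 10 seconds  */
    #define gcdDUMP                 1       /* enable state and memory dumping      */

    #include "gc_hal_options.h"             /* the #ifndef guards keep the overrides */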
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_profiler.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,585 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_profiler_h_
++#define __gc_hal_profiler_h_
++
++#if VIVANTE_PROFILER_NEW
++#include "gc_hal_engine.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define GLVERTEX_OBJECT 10
++#define GLVERTEX_OBJECT_BYTES 11
++
++#define GLINDEX_OBJECT 20
++#define GLINDEX_OBJECT_BYTES 21
++
++#define GLTEXTURE_OBJECT 30
++#define GLTEXTURE_OBJECT_BYTES 31
++
++#define GLBUFOBJ_OBJECT 40
++#define GLBUFOBJ_OBJECT_BYTES 41
++
++#if VIVANTE_PROFILER
++#define gcmPROFILE_GC(Enum, Value) gcoPROFILER_Count(gcvNULL, Enum, Value)
++#else
++#define gcmPROFILE_GC(Enum, Value) do { } while (gcvFALSE)
++#endif
++
++#ifndef gcdNEW_PROFILER_FILE
++#define gcdNEW_PROFILER_FILE 1
++#endif
++
++#define ES11_CALLS 151
++#define ES11_DRAWCALLS (ES11_CALLS + 1)
++#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1)
++#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1)
++#define ES11_LINECOUNT (ES11_POINTCOUNT + 1)
++#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1)
++
++#define ES30_CALLS 159
++#define ES30_DRAWCALLS (ES30_CALLS + 1)
++#define ES30_STATECHANGECALLS (ES30_DRAWCALLS + 1)
++#define ES30_POINTCOUNT (ES30_STATECHANGECALLS + 1)
++#define ES30_LINECOUNT (ES30_POINTCOUNT + 1)
++#define ES30_TRIANGLECOUNT (ES30_LINECOUNT + 1)
++
++#define VG11_CALLS 88
++#define VG11_DRAWCALLS (VG11_CALLS + 1)
++#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1)
++#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1)
++#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1)
++/* End of Driver API ID Definitions. */
++
++/* HAL & MISC IDs. */
++#define HAL_VERTBUFNEWBYTEALLOC 1
++#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1)
++#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1)
++#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1)
++#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1)
++#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1)
++#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1)
++#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1)
++#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1)
++#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1)
++#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1)
++#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1)
++
++#define GPU_CYCLES 1
++#define GPU_READ64BYTE (GPU_CYCLES + 1)
++#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1)
++#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1)
++#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1)
++
++#define VS_INSTCOUNT 1
++#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1)
++#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1)
++#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1)
++#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1)
++
++#define PS_INSTCOUNT 1
++#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1)
++#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1)
++#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1)
++#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1)
++
++#define PA_INVERTCOUNT 1
++#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1)
++#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1)
++#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1)
++#define PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1)
++#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1)
++
++#define SE_TRIANGLECOUNT 1
++#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1)
++
++#define RA_VALIDPIXCOUNT 1
++#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1)
++#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1)
++#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1)
++#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1)
++#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1)
++#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1)
++
++#define TX_TOTBILINEARREQ 1
++#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1)
++#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1)
++#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1)
++#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1)
++#define TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1)
++#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1)
++#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1)
++#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1)
++
++#define PE_KILLEDBYCOLOR 1
++#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1)
++#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1)
++#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1)
++
++#define MC_READREQ8BPIPE 1
++#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1)
++#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1)
++
++#define AXI_READREQSTALLED 1
++#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1)
++#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1)
++
++#define PVS_INSTRCOUNT 1
++#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1)
++#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1)
++#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1)
++#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1)
++#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1)
++#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1)
++
++#define PPS_INSTRCOUNT 1
++#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1)
++#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1)
++#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1)
++#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1)
++#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1)
++#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1)
++/* End of MISC Counter IDs. */
++
++#ifdef gcdNEW_PROFILER_FILE
++
++/* Category Constants. */
++#define VPHEADER 0x010000
++#define VPG_INFO 0x020000
++#define VPG_TIME 0x030000
++#define VPG_MEM 0x040000
++#define VPG_ES11 0x050000
++#define VPG_ES30 0x060000
++#define VPG_VG11 0x070000
++#define VPG_HAL 0x080000
++#define VPG_HW 0x090000
++#define VPG_GPU 0x0a0000
++#define VPG_VS 0x0b0000
++#define VPG_PS 0x0c0000
++#define VPG_PA 0x0d0000
++#define VPG_SETUP 0x0e0000
++#define VPG_RA 0x0f0000
++#define VPG_TX 0x100000
++#define VPG_PE 0x110000
++#define VPG_MC 0x120000
++#define VPG_AXI 0x130000
++#define VPG_PROG 0x140000
++#define VPG_PVS 0x150000
++#define VPG_PPS 0x160000
++#define VPG_ES11_TIME 0x170000
++#define VPG_ES30_TIME 0x180000
++#define VPG_FRAME 0x190000
++#define VPG_ES11_DRAW 0x200000
++#define VPG_ES30_DRAW 0x210000
++#define VPG_VG11_TIME 0x220000
++#define VPG_END 0xff0000
++
++/* Info. */
++#define VPC_INFOCOMPANY (VPG_INFO + 1)
++#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1)
++#define VPC_INFORENDERER (VPC_INFOVERSION + 1)
++#define VPC_INFOREVISION (VPC_INFORENDERER + 1)
++#define VPC_INFODRIVER (VPC_INFOREVISION + 1)
++#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1)
++#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1)
++
++/* Counter Constants. */
++#define VPC_ELAPSETIME (VPG_TIME + 1)
++#define VPC_CPUTIME (VPC_ELAPSETIME + 1)
++
++#define VPC_MEMMAXRES (VPG_MEM + 1)
++#define VPC_MEMSHARED (VPC_MEMMAXRES + 1)
++#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1)
++#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1)
++
++/* OpenGL ES11 Statics Counter IDs. */
++#define VPC_ES11CALLS (VPG_ES11 + ES11_CALLS)
++#define VPC_ES11DRAWCALLS (VPG_ES11 + ES11_DRAWCALLS)
++#define VPC_ES11STATECHANGECALLS (VPG_ES11 + ES11_STATECHANGECALLS)
++#define VPC_ES11POINTCOUNT (VPG_ES11 + ES11_POINTCOUNT)
++#define VPC_ES11LINECOUNT (VPG_ES11 + ES11_LINECOUNT)
++#define VPC_ES11TRIANGLECOUNT (VPG_ES11 + ES11_TRIANGLECOUNT)
++
++/* OpenGL ES30 Statistics Counter IDs. */
++#define VPC_ES30CALLS (VPG_ES30 + ES30_CALLS)
++#define VPC_ES30DRAWCALLS (VPG_ES30 + ES30_DRAWCALLS)
++#define VPC_ES30STATECHANGECALLS (VPG_ES30 + ES30_STATECHANGECALLS)
++#define VPC_ES30POINTCOUNT (VPG_ES30 + ES30_POINTCOUNT)
++#define VPC_ES30LINECOUNT (VPG_ES30 + ES30_LINECOUNT)
++#define VPC_ES30TRIANGLECOUNT (VPG_ES30 + ES30_TRIANGLECOUNT)
++
++/* OpenVG Statistics Counter IDs. */
++#define VPC_VG11CALLS (VPG_VG11 + VG11_CALLS)
++#define VPC_VG11DRAWCALLS (VPG_VG11 + VG11_DRAWCALLS)
++#define VPC_VG11STATECHANGECALLS (VPG_VG11 + VG11_STATECHANGECALLS)
++#define VPC_VG11FILLCOUNT (VPG_VG11 + VG11_FILLCOUNT)
++#define VPC_VG11STROKECOUNT (VPG_VG11 + VG11_STROKECOUNT)
++
++/* HAL Counters. */
++#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC)
++#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC)
++#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC)
++#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC)
++#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC)
++#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC)
++#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC)
++#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC)
++#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC)
++#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC)
++#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC)
++#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC)
++
++/* HW: GPU Counters. */
++#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES)
++#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE)
++#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE)
++#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES)
++#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES)
++
++/* HW: Shader Counters. */
++#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT)
++#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT)
++#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT)
++#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT)
++/* HW: PS Count. */
++#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT)
++#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT)
++#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT)
++#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT)
++
++
++/* HW: PA Counters. */
++#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT)
++#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT)
++#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT)
++#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT)
++#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT)
++#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT)
++
++/* HW: Setup Counters. */
++#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT)
++#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT)
++
++/* HW: RA Counters. */
++#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT)
++#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT)
++#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ)
++#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT)
++#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT)
++#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT)
++#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT)
++
++/* HW: TEX Counters. */
++#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ)
++#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ)
++#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ)
++#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ)
++#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT)
++#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT)
++#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT)
++#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT)
++#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT)
++
++/* HW: PE Counters. */
++#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR)
++#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH)
++#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR)
++#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH)
++
++/* HW: MC Counters. */
++#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE)
++#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP)
++#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE)
++
++/* HW: AXI Counters. */
++#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED)
++#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED)
++#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED)
++
++/* PROGRAM: Shader program counters. */
++#define VPC_PVSINSTRCOUNT (VPG_PVS + PVS_INSTRCOUNT)
++#define VPC_PVSALUINSTRCOUNT (VPG_PVS + PVS_ALUINSTRCOUNT)
++#define VPC_PVSTEXINSTRCOUNT (VPG_PVS + PVS_TEXINSTRCOUNT)
++#define VPC_PVSATTRIBCOUNT (VPG_PVS + PVS_ATTRIBCOUNT)
++#define VPC_PVSUNIFORMCOUNT (VPG_PVS + PVS_UNIFORMCOUNT)
++#define VPC_PVSFUNCTIONCOUNT (VPG_PVS + PVS_FUNCTIONCOUNT)
++#define VPC_PVSSOURCE (VPG_PVS + PVS_SOURCE)
++
++#define VPC_PPSINSTRCOUNT (VPG_PPS + PPS_INSTRCOUNT)
++#define VPC_PPSALUINSTRCOUNT (VPG_PPS + PPS_ALUINSTRCOUNT)
++#define VPC_PPSTEXINSTRCOUNT (VPG_PPS + PPS_TEXINSTRCOUNT)
++#define VPC_PPSATTRIBCOUNT (VPG_PPS + PPS_ATTRIBCOUNT)
++#define VPC_PPSUNIFORMCOUNT (VPG_PPS + PPS_UNIFORMCOUNT)
++#define VPC_PPSFUNCTIONCOUNT (VPG_PPS + PPS_FUNCTIONCOUNT)
++#define VPC_PPSSOURCE (VPG_PPS + PPS_SOURCE)
++
++#define VPC_PROGRAMHANDLE (VPG_PROG + 1)
++
++#define VPC_ES30_DRAW_NO (VPG_ES30_DRAW + 1)
++#define VPC_ES11_DRAW_NO (VPG_ES11_DRAW + 1)
++#endif
++
++
++/* HW profile information. */
++typedef struct _gcsPROFILER_COUNTERS
++{
++ /* HW static counters. */
++ gctUINT32 gpuClock;
++ gctUINT32 axiClock;
++ gctUINT32 shaderClock;
++
++    /* HW variable counters. */
++ gctUINT32 gpuClockStart;
++ gctUINT32 gpuClockEnd;
++
++    /* HW variable counters. */
++ gctUINT32 gpuCyclesCounter;
++ gctUINT32 gpuTotalCyclesCounter;
++ gctUINT32 gpuIdleCyclesCounter;
++ gctUINT32 gpuTotalRead64BytesPerFrame;
++ gctUINT32 gpuTotalWrite64BytesPerFrame;
++
++ /* PE */
++ gctUINT32 pe_pixel_count_killed_by_color_pipe;
++ gctUINT32 pe_pixel_count_killed_by_depth_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_color_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_depth_pipe;
++
++ /* SH */
++ gctUINT32 ps_inst_counter;
++ gctUINT32 rendered_pixel_counter;
++ gctUINT32 vs_inst_counter;
++ gctUINT32 rendered_vertice_counter;
++ gctUINT32 vtx_branch_inst_counter;
++ gctUINT32 vtx_texld_inst_counter;
++ gctUINT32 pxl_branch_inst_counter;
++ gctUINT32 pxl_texld_inst_counter;
++
++ /* PA */
++ gctUINT32 pa_input_vtx_counter;
++ gctUINT32 pa_input_prim_counter;
++ gctUINT32 pa_output_prim_counter;
++ gctUINT32 pa_depth_clipped_counter;
++ gctUINT32 pa_trivial_rejected_counter;
++ gctUINT32 pa_culled_counter;
++
++ /* SE */
++ gctUINT32 se_culled_triangle_count;
++ gctUINT32 se_culled_lines_count;
++
++ /* RA */
++ gctUINT32 ra_valid_pixel_count;
++ gctUINT32 ra_total_quad_count;
++ gctUINT32 ra_valid_quad_count_after_early_z;
++ gctUINT32 ra_total_primitive_count;
++ gctUINT32 ra_pipe_cache_miss_counter;
++ gctUINT32 ra_prefetch_cache_miss_counter;
++ gctUINT32 ra_eez_culled_counter;
++
++ /* TX */
++ gctUINT32 tx_total_bilinear_requests;
++ gctUINT32 tx_total_trilinear_requests;
++ gctUINT32 tx_total_discarded_texture_requests;
++ gctUINT32 tx_total_texture_requests;
++ gctUINT32 tx_mem_read_count;
++ gctUINT32 tx_mem_read_in_8B_count;
++ gctUINT32 tx_cache_miss_count;
++ gctUINT32 tx_cache_hit_texel_count;
++ gctUINT32 tx_cache_miss_texel_count;
++
++ /* MC */
++ gctUINT32 mc_total_read_req_8B_from_pipeline;
++ gctUINT32 mc_total_read_req_8B_from_IP;
++ gctUINT32 mc_total_write_req_8B_from_pipeline;
++
++ /* HI */
++ gctUINT32 hi_axi_cycles_read_request_stalled;
++ gctUINT32 hi_axi_cycles_write_request_stalled;
++ gctUINT32 hi_axi_cycles_write_data_stalled;
++}
++gcsPROFILER_COUNTERS;
++
++#if VIVANTE_PROFILER_NEW
++#define NumOfDrawBuf 64
++#endif
++
++/* HAL profile information. */
++typedef struct _gcsPROFILER
++{
++ gctUINT32 enable;
++ gctBOOL enableHal;
++ gctBOOL enableHW;
++ gctBOOL enableSH;
++ gctBOOL isSyncMode;
++ gctBOOL disableOutputCounter;
++
++ gctBOOL useSocket;
++ gctINT sockFd;
++
++ gctFILE file;
++
++ /* Aggregate Information */
++
++ /* Clock Info */
++ gctUINT64 frameStart;
++ gctUINT64 frameEnd;
++
++ /* Current frame information */
++ gctUINT32 frameNumber;
++ gctUINT64 frameStartTimeusec;
++ gctUINT64 frameEndTimeusec;
++ gctUINT64 frameStartCPUTimeusec;
++ gctUINT64 frameEndCPUTimeusec;
++
++#if PROFILE_HAL_COUNTERS
++ gctUINT32 vertexBufferTotalBytesAlloc;
++ gctUINT32 vertexBufferNewBytesAlloc;
++ int vertexBufferTotalObjectsAlloc;
++ int vertexBufferNewObjectsAlloc;
++
++ gctUINT32 indexBufferTotalBytesAlloc;
++ gctUINT32 indexBufferNewBytesAlloc;
++ int indexBufferTotalObjectsAlloc;
++ int indexBufferNewObjectsAlloc;
++
++ gctUINT32 textureBufferTotalBytesAlloc;
++ gctUINT32 textureBufferNewBytesAlloc;
++ int textureBufferTotalObjectsAlloc;
++ int textureBufferNewObjectsAlloc;
++
++ gctUINT32 numCommits;
++ gctUINT32 drawPointCount;
++ gctUINT32 drawLineCount;
++ gctUINT32 drawTriangleCount;
++ gctUINT32 drawVertexCount;
++ gctUINT32 redundantStateChangeCalls;
++#endif
++
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++
++#if VIVANTE_PROFILER_NEW
++ gcoBUFOBJ newCounterBuf[NumOfDrawBuf];
++ gctUINT32 curBufId;
++#endif
++
++}
++gcsPROFILER;
++
++/* Memory profile information. */
++struct _gcsMemProfile
++{
++ /* Memory Usage */
++ gctUINT32 videoMemUsed;
++ gctUINT32 systemMemUsed;
++ gctUINT32 commitBufferSize;
++ gctUINT32 contextBufferCopyBytes;
++};
++
++/* Shader profile information. */
++struct _gcsSHADER_PROFILER
++{
++ gctUINT32 shaderLength;
++ gctUINT32 shaderALUCycles;
++ gctUINT32 shaderTexLoadCycles;
++ gctUINT32 shaderTempRegCount;
++ gctUINT32 shaderSamplerRegCount;
++ gctUINT32 shaderInputRegCount;
++ gctUINT32 shaderOutputRegCount;
++};
++
++/* Initialize the gcsProfiler. */
++gceSTATUS
++gcoPROFILER_Initialize(
++ IN gcoHAL Hal,
++ IN gctBOOL Enable
++ );
++
++/* Destroy the gcProfiler. */
++gceSTATUS
++gcoPROFILER_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Write data to profiler. */
++gceSTATUS
++gcoPROFILER_Write(
++ IN gcoHAL Hal,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data out. */
++gceSTATUS
++gcoPROFILER_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of frame. */
++gceSTATUS
++gcoPROFILER_EndFrame(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of draw. */
++gceSTATUS
++gcoPROFILER_EndDraw(
++ IN gcoHAL Hal,
++ IN gctBOOL FirstDraw
++ );
++
++/* Increase profile counter Enum by Value. */
++gceSTATUS
++gcoPROFILER_Count(
++ IN gcoHAL Hal,
++ IN gctUINT32 Enum,
++ IN gctINT Value
++ );
++
++/* Profile input vertex shader. */
++gceSTATUS
++gcoPROFILER_ShaderVS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Vs
++ );
++
++/* Profile input fragment shader. */
++gceSTATUS
++gcoPROFILER_ShaderFS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Fs
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_profiler_h_ */
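A rough usage sketch of the profiler entry points declared above; the call order and the way the gcoHAL handle is obtained are assumptions for illustration, not a documented contract.

    /* Hypothetical sketch: enable the profiler, count one driver-side event,
    ** and close out a frame. The counter ID comes from the VPC_* definitions above. */
    static void
    _ProfileOneFrame(
        gcoHAL Hal
        )
    {
        if (gcoPROFILER_Initialize(Hal, gcvTRUE) != gcvSTATUS_OK)
        {
            return;
        }

        /* Record one ES11 draw call (gcmPROFILE_GC expands to gcoPROFILER_Count
        ** when VIVANTE_PROFILER is enabled, and to a no-op otherwise). */
        gcmPROFILE_GC(VPC_ES11DRAWCALLS, 1);

        /* ... issue rendering work here ... */

        gcoPROFILER_EndFrame(Hal);
        gcoPROFILER_Flush(Hal);
        gcoPROFILER_Destroy(Hal);
    }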
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_raster.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,1038 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_raster_h_
++#define __gc_hal_raster_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoBRUSH * gcoBRUSH;
++typedef struct _gcoBRUSH_CACHE * gcoBRUSH_CACHE;
++
++/******************************************************************************\
++******************************** gcoBRUSH Object *******************************
++\******************************************************************************/
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructSingleColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructMonochrome(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Destroy a gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_Destroy(
++ IN gcoBRUSH Brush
++ );
++
++/******************************************************************************\
++******************************** gcoSURF Object *******************************
++\******************************************************************************/
++
++/* Set clipping rectangle. */
++gceSTATUS
++gcoSURF_SetClipping(
++ IN gcoSURF Surface
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gcoSURF_Clear2D(
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 LoColor,
++ IN gctUINT32 HiColor
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gcoSURF_Line(
++ IN gcoSURF Surface,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Generic rectangular blit. */
++gceSTATUS
++gcoSURF_Blit(
++ IN OPTIONAL gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN OPTIONAL gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN OPTIONAL gceSURF_TRANSPARENCY Transparency,
++ IN OPTIONAL gctUINT32 TransparencyColor,
++ IN OPTIONAL gctPOINTER Mask,
++ IN OPTIONAL gceSURF_MONOPACK MaskPack
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gcoSURF_MonoBlit(
++ IN gcoSURF DestSurface,
++ IN gctPOINTER Source,
++ IN gceSURF_MONOPACK SourcePack,
++ IN gcsPOINT_PTR SourceSize,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Filter blit. */
++gceSTATUS
++gcoSURF_FilterBlit(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gcoSURF_EnableAlphaBlend(
++ IN gcoSURF Surface,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gcoSURF_DisableAlphaBlend(
++ IN gcoSURF Surface
++ );
++
++/* Copy a rectangular area with format conversion. */
++gceSTATUS
++gcoSURF_CopyPixels(
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++/* Read surface pixel. */
++gceSTATUS
++gcoSURF_ReadPixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ OUT gctPOINTER PixelValue
++ );
++
++/* Write surface pixel. */
++gceSTATUS
++gcoSURF_WritePixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ IN gctPOINTER PixelValue
++ );
++
++gceSTATUS
++gcoSURF_SetDither(
++ IN gcoSURF Surface,
++ IN gctBOOL Dither
++ );
++
++gceSTATUS
++gcoSURF_Set2DSource(
++ gcoSURF Surface,
++ gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_Set2DTarget(
++ gcoSURF Surface,
++ gceSURF_ROTATION Rotation
++ );
++
++/******************************************************************************\
++********************************** gco2D Object *********************************
++\******************************************************************************/
++
++/* Construct a new gco2D object. */
++gceSTATUS
++gco2D_Construct(
++ IN gcoHAL Hal,
++ OUT gco2D * Hardware
++ );
++
++/* Destroy a gco2D object. */
++gceSTATUS
++gco2D_Destroy(
++ IN gco2D Hardware
++ );
++
++/* Sets the maximum number of brushes in the brush cache. */
++gceSTATUS
++gco2D_SetBrushLimit(
++ IN gco2D Hardware,
++ IN gctUINT MaxCount
++ );
++
++/* Flush the brush. */
++gceSTATUS
++gco2D_FlushBrush(
++ IN gco2D Engine,
++ IN gcoBRUSH Brush,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Program the specified solid color brush. */
++gceSTATUS
++gco2D_LoadSolidBrush(
++ IN gco2D Engine,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask
++ );
++
++/* Configure monochrome source. */
++gceSTATUS
++gco2D_SetMonochromeSource(
++ IN gco2D Engine,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_MONOPACK DataPack,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source extension for full rotation. */
++gceSTATUS
++gco2D_SetColorSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSourceAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative
++ );
++
++gceSTATUS
++gco2D_SetColorSourceN(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctUINT32 SurfaceNumber
++ );
++
++/* Configure masked color source. */
++gceSTATUS
++gco2D_SetMaskedSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack
++ );
++
++/* Configure masked color source extension for full rotation. */
++gceSTATUS
++gco2D_SetMaskedSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Setup the source rectangle. */
++gceSTATUS
++gco2D_SetSource(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect
++ );
++
++/* Set clipping rectangle. */
++gceSTATUS
++gco2D_SetClipping(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++/* Configure destination. */
++gceSTATUS
++gco2D_SetTarget(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth
++ );
++
++/* Configure destination extension for full rotation. */
++gceSTATUS
++gco2D_SetTargetEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Calculate and program the stretch factors. */
++gceSTATUS
++gco2D_CalcStretchFactor(
++ IN gco2D Engine,
++ IN gctINT32 SrcSize,
++ IN gctINT32 DestSize,
++ OUT gctUINT32_PTR Factor
++ );
++
++gceSTATUS
++gco2D_SetStretchFactors(
++ IN gco2D Engine,
++ IN gctUINT32 HorFactor,
++ IN gctUINT32 VerFactor
++ );
++
++/* Calculate and program the stretch factors based on the rectangles. */
++gceSTATUS
++gco2D_SetStretchRectFactors(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect
++ );
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructSingleColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gco2D_Clear(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gco2D_Line(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines based on the 32-bit color. */
++gceSTATUS
++gco2D_ColorLine(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Generic blit. */
++gceSTATUS
++gco2D_Blit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_Blend(
++ IN gco2D Engine,
++ IN gctUINT32 SrcCount,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Batch blit. */
++gceSTATUS
++gco2D_BatchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Stretch blit. */
++gceSTATUS
++gco2D_StretchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gco2D_MonoBlit(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gcsPOINT_PTR StreamSize,
++ IN gcsRECT_PTR StreamRect,
++ IN gceSURF_MONOPACK SrcStreamPack,
++ IN gceSURF_MONOPACK DestStreamPack,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 FgRop,
++ IN gctUINT32 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_MonoBlitEx(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gctINT32 StreamStride,
++ IN gctINT32 StreamWidth,
++ IN gctINT32 StreamHeight,
++ IN gctINT32 StreamX,
++ IN gctINT32 StreamY,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DstRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Set kernel size. */
++gceSTATUS
++gco2D_SetKernelSize(
++ IN gco2D Engine,
++ IN gctUINT8 HorKernelSize,
++ IN gctUINT8 VerKernelSize
++ );
++
++/* Set filter type. */
++gceSTATUS
++gco2D_SetFilterType(
++ IN gco2D Engine,
++ IN gceFILTER_TYPE FilterType
++ );
++
++/* Set the filter kernel by user. */
++gceSTATUS
++gco2D_SetUserFilterKernel(
++ IN gco2D Engine,
++ IN gceFILTER_PASS_TYPE PassType,
++ IN gctUINT16_PTR KernelArray
++ );
++
++/* Select the pass(es) to be done for user defined filter. */
++gceSTATUS
++gco2D_EnableUserFilterPasses(
++ IN gco2D Engine,
++ IN gctBOOL HorPass,
++ IN gctBOOL VerPass
++ );
++
++/* Frees the temporary buffer allocated by the filter blit operation. */
++gceSTATUS
++gco2D_FreeFilterBuffer(
++ IN gco2D Engine
++ );
++
++/* Filter blit. */
++gceSTATUS
++gco2D_FilterBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Filter blit extension for full rotation. */
++gceSTATUS
++gco2D_FilterBlitEx(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++gceSTATUS
++gco2D_FilterBlitEx2(
++ IN gco2D Engine,
++ IN gctUINT32_PTR SrcAddresses,
++ IN gctUINT32 SrcAddressNum,
++ IN gctUINT32_PTR SrcStrides,
++ IN gctUINT32 SrcStrideNum,
++ IN gceTILING SrcTiling,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32_PTR DestAddresses,
++ IN gctUINT32 DestAddressNum,
++ IN gctUINT32_PTR DestStrides,
++ IN gctUINT32 DestStrideNum,
++ IN gceTILING DestTiling,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gco2D_EnableAlphaBlend(
++ IN gco2D Engine,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Enable alpha blending engine in the hardware. */
++gceSTATUS
++gco2D_EnableAlphaBlendAdvanced(
++ IN gco2D Engine,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode
++ );
++
++/* Enable alpha blending engine with Porter Duff rule. */
++gceSTATUS
++gco2D_SetPorterDuffBlending(
++ IN gco2D Engine,
++ IN gce2D_PORTER_DUFF_RULE Rule
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gco2D_DisableAlphaBlend(
++ IN gco2D Engine
++ );
++
++/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */
++gctUINT32
++gco2D_GetMaximumDataCount(
++ void
++ );
++
++/* Retrieve the maximum number of rectangles that can be passed in a single DE command. */
++gctUINT32
++gco2D_GetMaximumRectCount(
++ void
++ );
++
++/* Returns the pixel alignment of the surface. */
++gceSTATUS
++gco2D_GetPixelAlignment(
++ gceSURF_FORMAT Format,
++ gcsPOINT_PTR Alignment
++ );
++
++/* Retrieve monochrome stream pack size. */
++gceSTATUS
++gco2D_GetPackSize(
++ IN gceSURF_MONOPACK StreamPack,
++ OUT gctUINT32 * PackWidth,
++ OUT gctUINT32 * PackHeight
++ );
++
++/* Flush the 2D pipeline. */
++gceSTATUS
++gco2D_Flush(
++ IN gco2D Engine
++ );
++
++/* Load 256-entry color table for INDEX8 source surfaces. */
++gceSTATUS
++gco2D_LoadPalette(
++ IN gco2D Engine,
++ IN gctUINT FirstIndex,
++ IN gctUINT IndexCount,
++ IN gctPOINTER ColorTable,
++ IN gctBOOL ColorConvert
++ );
++
++/* Enable/disable 2D BitBlt mirroring. */
++gceSTATUS
++gco2D_SetBitBlitMirror(
++ IN gco2D Engine,
++ IN gctBOOL HorizontalMirror,
++ IN gctBOOL VerticalMirror
++ );
++
++/*
++ * Set the transparency for source, destination and pattern.
++ * It also enables or disables the DFB color key mode.
++ */
++gceSTATUS
++gco2D_SetTransparencyAdvancedEx(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency,
++ IN gctBOOL EnableDFBColorKeyMode
++ );
++
++/* Set the transparency for source, destination and pattern. */
++gceSTATUS
++gco2D_SetTransparencyAdvanced(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency
++ );
++
++/* Set the source color key. */
++gceSTATUS
++gco2D_SetSourceColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the source color key range. */
++gceSTATUS
++gco2D_SetSourceColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the target color key. */
++gceSTATUS
++gco2D_SetTargetColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the target color key range. */
++gceSTATUS
++gco2D_SetTargetColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the YUV color space mode. */
++gceSTATUS
++gco2D_SetYUVColorMode(
++ IN gco2D Engine,
++ IN gce2D_YUV_COLOR_MODE Mode
++ );
++
++/* Setup the source global color value in ARGB8 format. */
++gceSTATUS gco2D_SetSourceGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the target global color value in ARGB8 format. */
++gceSTATUS gco2D_SetTargetGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the source and target pixel multiply modes. */
++gceSTATUS
++gco2D_SetPixelMultiplyModeAdvanced(
++ IN gco2D Engine,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha,
++ IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha
++ );
++
++/* Set the number of GPU clock cycles after which the idle engine will keep auto-flushing. */
++gceSTATUS
++gco2D_SetAutoFlushCycles(
++ IN gco2D Engine,
++ IN gctUINT32 Cycles
++ );
++
++#if VIVANTE_PROFILER
++/* Read the profile registers available in the 2D engine and set them in the profile.
++ The function also resets the pixelsRendered counter every time.
++*/
++gceSTATUS
++gco2D_ProfileEngine(
++ IN gco2D Engine,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ );
++#endif
++
++/* Enable or disable 2D dithering. */
++gceSTATUS
++gco2D_EnableDither(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetGenericSource(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetGenericTarget(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetCurrentSourceIndex(
++ IN gco2D Engine,
++ IN gctUINT32 SrcIndex
++ );
++
++gceSTATUS
++gco2D_MultiSourceBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SourceMask,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 RectCount
++ );
++
++gceSTATUS
++gco2D_SetROP(
++ IN gco2D Engine,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++gceSTATUS
++gco2D_SetGdiStretchMode(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetSourceTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TSControl,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_SetTargetTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TileStatusConfig,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_QueryU32(
++ IN gco2D Engine,
++ IN gce2D_QUERY Item,
++ OUT gctUINT32_PTR Value
++ );
++
++gceSTATUS
++gco2D_SetStateU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32 Value
++ );
++
++gceSTATUS
++gco2D_SetStateArrayI32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetStateArrayU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetTargetRect(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++gceSTATUS
++gco2D_Set2DEngine(
++ IN gco2D Engine
++ );
++
++gceSTATUS
++gco2D_UnSet2DEngine(
++ IN gco2D Engine
++ );
++
++gceSTATUS
++gco2D_Get2DEngine(
++ OUT gco2D * Engine
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_raster_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_rename.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,243 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_rename_h_
++#define __gc_hal_rename_h_
++
++
++#if defined(_HAL2D_APPENDIX)
++
++#define _HAL2D_RENAME_2(api, appendix) api ## appendix
++#define _HAL2D_RENAME_1(api, appendix) _HAL2D_RENAME_2(api, appendix)
++#define gcmHAL2D(api) _HAL2D_RENAME_1(api, _HAL2D_APPENDIX)
++
++
++#define gckOS_Construct gcmHAL2D(gckOS_Construct)
++#define gckOS_Destroy gcmHAL2D(gckOS_Destroy)
++#define gckOS_QueryVideoMemory gcmHAL2D(gckOS_QueryVideoMemory)
++#define gckOS_Allocate gcmHAL2D(gckOS_Allocate)
++#define gckOS_Free gcmHAL2D(gckOS_Free)
++#define gckOS_AllocateMemory gcmHAL2D(gckOS_AllocateMemory)
++#define gckOS_FreeMemory gcmHAL2D(gckOS_FreeMemory)
++#define gckOS_AllocatePagedMemory gcmHAL2D(gckOS_AllocatePagedMemory)
++#define gckOS_AllocatePagedMemoryEx gcmHAL2D(gckOS_AllocatePagedMemoryEx)
++#define gckOS_LockPages gcmHAL2D(gckOS_LockPages)
++#define gckOS_MapPages gcmHAL2D(gckOS_MapPages)
++#define gckOS_UnlockPages gcmHAL2D(gckOS_UnlockPages)
++#define gckOS_FreePagedMemory gcmHAL2D(gckOS_FreePagedMemory)
++#define gckOS_AllocateNonPagedMemory gcmHAL2D(gckOS_AllocateNonPagedMemory)
++#define gckOS_FreeNonPagedMemory gcmHAL2D(gckOS_FreeNonPagedMemory)
++#define gckOS_AllocateContiguous gcmHAL2D(gckOS_AllocateContiguous)
++#define gckOS_FreeContiguous gcmHAL2D(gckOS_FreeContiguous)
++#define gckOS_GetPageSize gcmHAL2D(gckOS_GetPageSize)
++#define gckOS_GetPhysicalAddress gcmHAL2D(gckOS_GetPhysicalAddress)
++#define gckOS_UserLogicalToPhysical gcmHAL2D(gckOS_UserLogicalToPhysical)
++#define gckOS_GetPhysicalAddressProcess gcmHAL2D(gckOS_GetPhysicalAddressProcess)
++#define gckOS_MapPhysical gcmHAL2D(gckOS_MapPhysical)
++#define gckOS_UnmapPhysical gcmHAL2D(gckOS_UnmapPhysical)
++#define gckOS_ReadRegister gcmHAL2D(gckOS_ReadRegister)
++#define gckOS_WriteRegister gcmHAL2D(gckOS_WriteRegister)
++#define gckOS_WriteMemory gcmHAL2D(gckOS_WriteMemory)
++#define gckOS_MapMemory gcmHAL2D(gckOS_MapMemory)
++#define gckOS_UnmapMemory gcmHAL2D(gckOS_UnmapMemory)
++#define gckOS_UnmapMemoryEx gcmHAL2D(gckOS_UnmapMemoryEx)
++#define gckOS_CreateMutex gcmHAL2D(gckOS_CreateMutex)
++#define gckOS_DeleteMutex gcmHAL2D(gckOS_DeleteMutex)
++#define gckOS_AcquireMutex gcmHAL2D(gckOS_AcquireMutex)
++#define gckOS_ReleaseMutex gcmHAL2D(gckOS_ReleaseMutex)
++#define gckOS_AtomicExchange gcmHAL2D(gckOS_AtomicExchange)
++#define gckOS_AtomicExchangePtr gcmHAL2D(gckOS_AtomicExchangePtr)
++#define gckOS_AtomConstruct gcmHAL2D(gckOS_AtomConstruct)
++#define gckOS_AtomDestroy gcmHAL2D(gckOS_AtomDestroy)
++#define gckOS_AtomGet gcmHAL2D(gckOS_AtomGet)
++#define gckOS_AtomIncrement gcmHAL2D(gckOS_AtomIncrement)
++#define gckOS_AtomDecrement gcmHAL2D(gckOS_AtomDecrement)
++#define gckOS_Delay gcmHAL2D(gckOS_Delay)
++#define gckOS_GetTime gcmHAL2D(gckOS_GetTime)
++#define gckOS_MemoryBarrier gcmHAL2D(gckOS_MemoryBarrier)
++#define gckOS_MapUserPointer gcmHAL2D(gckOS_MapUserPointer)
++#define gckOS_UnmapUserPointer gcmHAL2D(gckOS_UnmapUserPointer)
++#define gckOS_QueryNeedCopy gcmHAL2D(gckOS_QueryNeedCopy)
++#define gckOS_CopyFromUserData gcmHAL2D(gckOS_CopyFromUserData)
++#define gckOS_CopyToUserData gcmHAL2D(gckOS_CopyToUserData)
++#define gckOS_SuspendInterrupt gcmHAL2D(gckOS_SuspendInterrupt)
++#define gckOS_ResumeInterrupt gcmHAL2D(gckOS_ResumeInterrupt)
++#define gckOS_GetBaseAddress gcmHAL2D(gckOS_GetBaseAddress)
++#define gckOS_MemCopy gcmHAL2D(gckOS_MemCopy)
++#define gckOS_ZeroMemory gcmHAL2D(gckOS_ZeroMemory)
++#define gckOS_DeviceControl gcmHAL2D(gckOS_DeviceControl)
++#define gckOS_GetProcessID gcmHAL2D(gckOS_GetProcessID)
++#define gckOS_GetThreadID gcmHAL2D(gckOS_GetThreadID)
++#define gckOS_CreateSignal gcmHAL2D(gckOS_CreateSignal)
++#define gckOS_DestroySignal gcmHAL2D(gckOS_DestroySignal)
++#define gckOS_Signal gcmHAL2D(gckOS_Signal)
++#define gckOS_WaitSignal gcmHAL2D(gckOS_WaitSignal)
++#define gckOS_MapSignal gcmHAL2D(gckOS_MapSignal)
++#define gckOS_MapUserMemory gcmHAL2D(gckOS_MapUserMemory)
++#define gckOS_UnmapUserMemory gcmHAL2D(gckOS_UnmapUserMemory)
++#define gckOS_CreateUserSignal gcmHAL2D(gckOS_CreateUserSignal)
++#define gckOS_DestroyUserSignal gcmHAL2D(gckOS_DestroyUserSignal)
++#define gckOS_WaitUserSignal gcmHAL2D(gckOS_WaitUserSignal)
++#define gckOS_SignalUserSignal gcmHAL2D(gckOS_SignalUserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_CacheClean gcmHAL2D(gckOS_CacheClean)
++#define gckOS_CacheFlush gcmHAL2D(gckOS_CacheFlush)
++#define gckOS_SetDebugLevel gcmHAL2D(gckOS_SetDebugLevel)
++#define gckOS_SetDebugZone gcmHAL2D(gckOS_SetDebugZone)
++#define gckOS_SetDebugLevelZone gcmHAL2D(gckOS_SetDebugLevelZone)
++#define gckOS_SetDebugZones gcmHAL2D(gckOS_SetDebugZones)
++#define gckOS_SetDebugFile gcmHAL2D(gckOS_SetDebugFile)
++#define gckOS_Broadcast gcmHAL2D(gckOS_Broadcast)
++#define gckOS_SetGPUPower gcmHAL2D(gckOS_SetGPUPower)
++#define gckOS_CreateSemaphore gcmHAL2D(gckOS_CreateSemaphore)
++#define gckOS_DestroySemaphore gcmHAL2D(gckOS_DestroySemaphore)
++#define gckOS_AcquireSemaphore gcmHAL2D(gckOS_AcquireSemaphore)
++#define gckOS_ReleaseSemaphore gcmHAL2D(gckOS_ReleaseSemaphore)
++#define gckHEAP_Construct gcmHAL2D(gckHEAP_Construct)
++#define gckHEAP_Destroy gcmHAL2D(gckHEAP_Destroy)
++#define gckHEAP_Allocate gcmHAL2D(gckHEAP_Allocate)
++#define gckHEAP_Free gcmHAL2D(gckHEAP_Free)
++#define gckHEAP_ProfileStart gcmHAL2D(gckHEAP_ProfileStart)
++#define gckHEAP_ProfileEnd gcmHAL2D(gckHEAP_ProfileEnd)
++#define gckHEAP_Test gcmHAL2D(gckHEAP_Test)
++#define gckVIDMEM_Construct gcmHAL2D(gckVIDMEM_Construct)
++#define gckVIDMEM_Destroy gcmHAL2D(gckVIDMEM_Destroy)
++#define gckVIDMEM_Allocate gcmHAL2D(gckVIDMEM_Allocate)
++#define gckVIDMEM_AllocateLinear gcmHAL2D(gckVIDMEM_AllocateLinear)
++#define gckVIDMEM_Free gcmHAL2D(gckVIDMEM_Free)
++#define gckVIDMEM_Lock gcmHAL2D(gckVIDMEM_Lock)
++#define gckVIDMEM_Unlock gcmHAL2D(gckVIDMEM_Unlock)
++#define gckVIDMEM_ConstructVirtual gcmHAL2D(gckVIDMEM_ConstructVirtual)
++#define gckVIDMEM_DestroyVirtual gcmHAL2D(gckVIDMEM_DestroyVirtual)
++#define gckKERNEL_Construct gcmHAL2D(gckKERNEL_Construct)
++#define gckKERNEL_Destroy gcmHAL2D(gckKERNEL_Destroy)
++#define gckKERNEL_Dispatch gcmHAL2D(gckKERNEL_Dispatch)
++#define gckKERNEL_QueryVideoMemory gcmHAL2D(gckKERNEL_QueryVideoMemory)
++#define gckKERNEL_GetVideoMemoryPool gcmHAL2D(gckKERNEL_GetVideoMemoryPool)
++#define gckKERNEL_MapVideoMemory gcmHAL2D(gckKERNEL_MapVideoMemory)
++#define gckKERNEL_UnmapVideoMemory gcmHAL2D(gckKERNEL_UnmapVideoMemory)
++#define gckKERNEL_MapMemory gcmHAL2D(gckKERNEL_MapMemory)
++#define gckKERNEL_UnmapMemory gcmHAL2D(gckKERNEL_UnmapMemory)
++#define gckKERNEL_Notify gcmHAL2D(gckKERNEL_Notify)
++#define gckKERNEL_QuerySettings gcmHAL2D(gckKERNEL_QuerySettings)
++#define gckKERNEL_Recovery gcmHAL2D(gckKERNEL_Recovery)
++#define gckKERNEL_OpenUserData gcmHAL2D(gckKERNEL_OpenUserData)
++#define gckKERNEL_CloseUserData gcmHAL2D(gckKERNEL_CloseUserData)
++#define gckHARDWARE_Construct gcmHAL2D(gckHARDWARE_Construct)
++#define gckHARDWARE_Destroy gcmHAL2D(gckHARDWARE_Destroy)
++#define gckHARDWARE_QuerySystemMemory gcmHAL2D(gckHARDWARE_QuerySystemMemory)
++#define gckHARDWARE_BuildVirtualAddress gcmHAL2D(gckHARDWARE_BuildVirtualAddress)
++#define gckHARDWARE_QueryCommandBuffer gcmHAL2D(gckHARDWARE_QueryCommandBuffer)
++#define gckHARDWARE_WaitLink gcmHAL2D(gckHARDWARE_WaitLink)
++#define gckHARDWARE_Execute gcmHAL2D(gckHARDWARE_Execute)
++#define gckHARDWARE_End gcmHAL2D(gckHARDWARE_End)
++#define gckHARDWARE_Nop gcmHAL2D(gckHARDWARE_Nop)
++#define gckHARDWARE_PipeSelect gcmHAL2D(gckHARDWARE_PipeSelect)
++#define gckHARDWARE_Link gcmHAL2D(gckHARDWARE_Link)
++#define gckHARDWARE_Event gcmHAL2D(gckHARDWARE_Event)
++#define gckHARDWARE_QueryMemory gcmHAL2D(gckHARDWARE_QueryMemory)
++#define gckHARDWARE_QueryChipIdentity gcmHAL2D(gckHARDWARE_QueryChipIdentity)
++#define gckHARDWARE_QueryChipSpecs gcmHAL2D(gckHARDWARE_QueryChipSpecs)
++#define gckHARDWARE_QueryShaderCaps gcmHAL2D(gckHARDWARE_QueryShaderCaps)
++#define gckHARDWARE_ConvertFormat gcmHAL2D(gckHARDWARE_ConvertFormat)
++#define gckHARDWARE_SplitMemory gcmHAL2D(gckHARDWARE_SplitMemory)
++#define gckHARDWARE_AlignToTile gcmHAL2D(gckHARDWARE_AlignToTile)
++#define gckHARDWARE_UpdateQueueTail gcmHAL2D(gckHARDWARE_UpdateQueueTail)
++#define gckHARDWARE_ConvertLogical gcmHAL2D(gckHARDWARE_ConvertLogical)
++#define gckHARDWARE_Interrupt gcmHAL2D(gckHARDWARE_Interrupt)
++#define gckHARDWARE_SetMMU gcmHAL2D(gckHARDWARE_SetMMU)
++#define gckHARDWARE_FlushMMU gcmHAL2D(gckHARDWARE_FlushMMU)
++#define gckHARDWARE_GetIdle gcmHAL2D(gckHARDWARE_GetIdle)
++#define gckHARDWARE_Flush gcmHAL2D(gckHARDWARE_Flush)
++#define gckHARDWARE_SetFastClear gcmHAL2D(gckHARDWARE_SetFastClear)
++#define gckHARDWARE_ReadInterrupt gcmHAL2D(gckHARDWARE_ReadInterrupt)
++#define gckHARDWARE_SetPowerManagementState gcmHAL2D(gckHARDWARE_SetPowerManagementState)
++#define gckHARDWARE_QueryPowerManagementState gcmHAL2D(gckHARDWARE_QueryPowerManagementState)
++#define gckHARDWARE_ProfileEngine2D gcmHAL2D(gckHARDWARE_ProfileEngine2D)
++#define gckHARDWARE_InitializeHardware gcmHAL2D(gckHARDWARE_InitializeHardware)
++#define gckHARDWARE_Reset gcmHAL2D(gckHARDWARE_Reset)
++#define gckINTERRUPT_Construct gcmHAL2D(gckINTERRUPT_Construct)
++#define gckINTERRUPT_Destroy gcmHAL2D(gckINTERRUPT_Destroy)
++#define gckINTERRUPT_SetHandler gcmHAL2D(gckINTERRUPT_SetHandler)
++#define gckINTERRUPT_Notify gcmHAL2D(gckINTERRUPT_Notify)
++#define gckEVENT_Construct gcmHAL2D(gckEVENT_Construct)
++#define gckEVENT_Destroy gcmHAL2D(gckEVENT_Destroy)
++#define gckEVENT_AddList gcmHAL2D(gckEVENT_AddList)
++#define gckEVENT_FreeNonPagedMemory gcmHAL2D(gckEVENT_FreeNonPagedMemory)
++#define gckEVENT_FreeContiguousMemory gcmHAL2D(gckEVENT_FreeContiguousMemory)
++#define gckEVENT_FreeVideoMemory gcmHAL2D(gckEVENT_FreeVideoMemory)
++#define gckEVENT_Signal gcmHAL2D(gckEVENT_Signal)
++#define gckEVENT_Unlock gcmHAL2D(gckEVENT_Unlock)
++#define gckEVENT_Submit gcmHAL2D(gckEVENT_Submit)
++#define gckEVENT_Commit gcmHAL2D(gckEVENT_Commit)
++#define gckEVENT_Notify gcmHAL2D(gckEVENT_Notify)
++#define gckEVENT_Interrupt gcmHAL2D(gckEVENT_Interrupt)
++#define gckCOMMAND_Construct gcmHAL2D(gckCOMMAND_Construct)
++#define gckCOMMAND_Destroy gcmHAL2D(gckCOMMAND_Destroy)
++#define gckCOMMAND_EnterCommit gcmHAL2D(gckCOMMAND_EnterCommit)
++#define gckCOMMAND_ExitCommit gcmHAL2D(gckCOMMAND_ExitCommit)
++#define gckCOMMAND_Start gcmHAL2D(gckCOMMAND_Start)
++#define gckCOMMAND_Stop gcmHAL2D(gckCOMMAND_Stop)
++#define gckCOMMAND_Commit gcmHAL2D(gckCOMMAND_Commit)
++#define gckCOMMAND_Reserve gcmHAL2D(gckCOMMAND_Reserve)
++#define gckCOMMAND_Execute gcmHAL2D(gckCOMMAND_Execute)
++#define gckCOMMAND_Stall gcmHAL2D(gckCOMMAND_Stall)
++#define gckCOMMAND_Attach gcmHAL2D(gckCOMMAND_Attach)
++#define gckCOMMAND_Detach gcmHAL2D(gckCOMMAND_Detach)
++#define gckMMU_Construct gcmHAL2D(gckMMU_Construct)
++#define gckMMU_Destroy gcmHAL2D(gckMMU_Destroy)
++#define gckMMU_AllocatePages gcmHAL2D(gckMMU_AllocatePages)
++#define gckMMU_FreePages gcmHAL2D(gckMMU_FreePages)
++#define gckMMU_Test gcmHAL2D(gckMMU_Test)
++#define gckHARDWARE_QueryProfileRegisters gcmHAL2D(gckHARDWARE_QueryProfileRegisters)
++
++
++#define FindMdlMap gcmHAL2D(FindMdlMap)
++#define OnProcessExit gcmHAL2D(OnProcessExit)
++
++#define gckGALDEVICE_Destroy gcmHAL2D(gckGALDEVICE_Destroy)
++#define gckOS_Print gcmHAL2D(gckOS_Print)
++#define gckGALDEVICE_FreeMemory gcmHAL2D(gckGALDEVICE_FreeMemory)
++#define gckGALDEVICE_AllocateMemory gcmHAL2D(gckGALDEVICE_AllocateMemory)
++#define gckOS_DebugBreak gcmHAL2D(gckOS_DebugBreak)
++#define gckGALDEVICE_Release_ISR gcmHAL2D(gckGALDEVICE_Release_ISR)
++#define gckOS_Verify gcmHAL2D(gckOS_Verify)
++#define gckCOMMAND_Release gcmHAL2D(gckCOMMAND_Release)
++#define gckGALDEVICE_Stop gcmHAL2D(gckGALDEVICE_Stop)
++#define gckGALDEVICE_Construct gcmHAL2D(gckGALDEVICE_Construct)
++#define gckOS_DebugFatal gcmHAL2D(gckOS_DebugFatal)
++#define gckOS_DebugTrace gcmHAL2D(gckOS_DebugTrace)
++#define gckHARDWARE_GetBaseAddress gcmHAL2D(gckHARDWARE_GetBaseAddress)
++#define gckGALDEVICE_Setup_ISR gcmHAL2D(gckGALDEVICE_Setup_ISR)
++#define gckKERNEL_AttachProcess gcmHAL2D(gckKERNEL_AttachProcess)
++#define gckKERNEL_AttachProcessEx gcmHAL2D(gckKERNEL_AttachProcessEx)
++#define gckGALDEVICE_Start_Thread gcmHAL2D(gckGALDEVICE_Start_Thread)
++#define gckHARDWARE_QueryIdle gcmHAL2D(gckHARDWARE_QueryIdle)
++#define gckGALDEVICE_Start gcmHAL2D(gckGALDEVICE_Start)
++#define gckOS_GetKernelLogical gcmHAL2D(gckOS_GetKernelLogical)
++#define gckOS_DebugTraceZone gcmHAL2D(gckOS_DebugTraceZone)
++#define gckGALDEVICE_Stop_Thread gcmHAL2D(gckGALDEVICE_Stop_Thread)
++#define gckHARDWARE_NeedBaseAddress gcmHAL2D(gckHARDWARE_NeedBaseAddress)
++
++#endif
++
++#endif /* __gc_hal_rename_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_security_interface.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,137 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef _GC_HAL_SECURITY_INTERFACE_H_
++#define _GC_HAL_SECURITY_INTERFACE_H_
++/*!
++ @brief Command codes between kernel module and TrustZone
++ @discussion
++ Critical services must be done in TrustZone to avoid leaking sensitive content. Most of the kernel module is kept in the non-secure OS to
++ minimize the amount of code in TrustZone.
++ */
++typedef enum kernel_packet_command {
++ KERNEL_START_COMMAND,
++ KERNEL_SUBMIT,
++ KERNEL_MAP_MEMORY, /* */
++ KERNEL_UNMAP_MEMORY,
++ KERNEL_ALLOCATE_SECRUE_MEMORY, /*! Security memory management. */
++ KERNEL_FREE_SECURE_MEMORY,
++ KERNEL_EXECUTE, /* Execute a command buffer. */
++} kernel_packet_command_t;
++
++/*!
++ @brief gckCOMMAND Object requests TrustZone to start FE.
++ @discussion
++ The DMA enable register can only be written in TrustZone, to keep the GPU from jumping to hacked code.
++ The kernel module needs to use this command to ask TrustZone to start the command parser.
++ */
++struct kernel_start_command {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++};
++
++/*!
++ @brief gckCOMMAND Object requests TrustZone to submit command buffer.
++ @discussion
++ Code in TrustZone checks the content of the command buffer after copying it into TrustZone.
++ */
++struct kernel_submit {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++ gctUINT8 kernel_command; /*! Whether it is a kernel command. */
++ gctUINT32 command_buffer_handle; /*! Handle to command buffer. */
++ gctUINT32 offset; /* Offset in command buffer. */
++    gctUINT32 * command_buffer; /*! Content of the command buffer to be submitted. */
++ gctUINT32 command_buffer_length; /*! Length of command buffer. */
++};
++
++
++/*!
++ @brief gckVIDMEM Object requests TrustZone to allocate security memory.
++ @discussion
++ Allocate a buffer from security GPU memory.
++ */
++struct kernel_allocate_security_memory {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT32 bytes; /*! Requested bytes. */
++ gctUINT32 memory_handle; /*! Handle of allocated memory. */
++};
++
++/*!
++ @brief gckVIDMEM Object requests TrustZone to free security memory.
++ @discussion
++ Free a video memory buffer from security GPU memory.
++ */
++struct kernel_free_security_memory {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT32 memory_handle; /*! Handle of allocated memory. */
++};
++
++struct kernel_execute {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++ gctUINT8 kernel_command; /*! Whether it is a kernel command. */
++    gctUINT32 * command_buffer; /*! Content of the command buffer to be submitted. */
++ gctUINT32 command_buffer_length; /*! Length of command buffer. */
++};
++
++typedef struct kernel_map_scatter_gather {
++ gctUINT32 bytes;
++ gctUINT32 physical;
++ struct kernel_map_scatter_gather *next;
++}
++kernel_map_scatter_gather_t;
++
++struct kernel_map_memory {
++ kernel_packet_command_t command;
++ kernel_map_scatter_gather_t *scatter;
++ gctUINT32 *physicals;
++ gctUINT32 pageCount;
++ gctUINT32 gpuAddress;
++};
++
++struct kernel_unmap_memory {
++ gctUINT32 gpuAddress;
++ gctUINT32 pageCount;
++};
++
++typedef struct _gcsTA_INTERFACE {
++ kernel_packet_command_t command;
++ union {
++ struct kernel_submit Submit;
++ struct kernel_start_command StartCommand;
++ struct kernel_allocate_security_memory AllocateSecurityMemory;
++ struct kernel_execute Execute;
++ struct kernel_map_memory MapMemory;
++ struct kernel_unmap_memory UnmapMemory;
++ } u;
++ gceSTATUS result;
++} gcsTA_INTERFACE;
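++/*
++ * Illustrative usage sketch (added for clarity; not part of the original
++ * header). A caller in the kernel module might package a "start FE" request
++ * for TrustZone roughly as follows; the dispatch path that actually hands the
++ * structure to the trusted application is assumed to live elsewhere in the
++ * driver:
++ *
++ *     gcsTA_INTERFACE iface;
++ *
++ *     iface.command                = KERNEL_START_COMMAND;
++ *     iface.u.StartCommand.command = KERNEL_START_COMMAND;
++ *     iface.u.StartCommand.gpu     = 0;
++ *
++ *     ... dispatch iface to TrustZone ...
++ *
++ *     if (gcmIS_ERROR(iface.result))
++ *     {
++ *         ... handle the failure ...
++ *     }
++ */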
++
++enum {
++ gcvTA_COMMAND_INIT,
++ gcvTA_COMMAND_DISPATCH,
++
++ gcvTA_CALLBACK_ALLOC_SECURE_MEM,
++ gcvTA_CALLBACK_FREE_SECURE_MEM,
++};
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_statistics.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,99 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_statistics_h_
++#define __gc_hal_statistics_h_
++
++
++#define VIV_STAT_ENABLE_STATISTICS 0
++
++/* Total number of frames for which the frame time is accounted. We have storage
++ to keep frame times for the last this many frames.
++*/
++#define VIV_STAT_FRAME_BUFFER_SIZE 30
++
++
++/*
++ Total number of frames sampled for a mode. This means
++
++ # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ +
++ --------------------------------------------------------
++ : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed
++
++ IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE
++*/
++#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7
++#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2
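++/* Note added for clarity (not part of the original header): with the default
++   values above, the two sampled modes need 2 * 7 = 14 frames, which indeed
++   stays below VIV_STAT_FRAME_BUFFER_SIZE (30). */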
++
++/* Multiplication factor for previous HZ off mode. Make it more than 1.0 to advertise HZ on. */
++#define VIV_STAT_EARLY_Z_FACTOR (1.05f)
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS
++{
++ gcvFRAME_FPS = 1,
++}
++gceSTATISTICS;
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS_EARLYZ
++{
++ gctUINT switchBackCount;
++ gctUINT nextCheckPoint;
++ gctBOOL disabled;
++}
++gcsSTATISTICS_EARLYZ;
++
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS
++{
++ gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE];
++ gctUINT64 previousFrameTime;
++ gctUINT frame;
++ gcsSTATISTICS_EARLYZ earlyZ;
++}
++gcsSTATISTICS;
++
++
++/* Add frame-based data into the current statistics. */
++void
++gcfSTATISTICS_AddData(
++ IN gceSTATISTICS Key,
++ IN gctUINT Value
++ );
++
++/* Marks the frame end and triggers statistical calculations and decisions. */
++void
++gcfSTATISTICS_MarkFrameEnd (
++ void
++ );
++
++/* Sets whether the dynamic HZ is disabled or not. */
++void
++gcfSTATISTICS_DisableDynamicEarlyZ (
++ IN gctBOOL Disabled
++ );
++
++#endif /*__gc_hal_statistics_h_ */
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_types.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_types_h_
++#define __gc_hal_types_h_
++
++#include "gc_hal_version.h"
++#include "gc_hal_options.h"
++
++#if !defined(VIV_KMD)
++#if defined(__KERNEL__)
++#include "linux/version.h"
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++ typedef unsigned long uintptr_t;
++# endif
++# include "linux/types.h"
++#elif defined(UNDER_CE)
++#include <crtdefs.h>
++#elif defined(_MSC_VER) && (_MSC_VER <= 1500)
++#include <crtdefs.h>
++#include "vadefs.h"
++#elif defined(__QNXNTO__)
++#define _QNX_SOURCE
++#include <stdint.h>
++#include <stddef.h>
++#else
++#include <stdlib.h>
++#include <stddef.h>
++#include <stdint.h>
++#endif
++#endif
++
++#ifdef _WIN32
++#pragma warning(disable:4127) /* Conditional expression is constant (do { }
++ ** while(0)). */
++#pragma warning(disable:4100) /* Unreferenced formal parameter. */
++#pragma warning(disable:4204) /* Non-constant aggregate initializer (C99). */
++#pragma warning(disable:4131) /* Uses old-style declarator (for Bison and
++ ** Flex generated files). */
++#pragma warning(disable:4206) /* Translation unit is empty. */
++#pragma warning(disable:4214) /* Nonstandard extension used :
++ ** bit field types other than int. */
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++** Platform macros.
++*/
++
++#if defined(__GNUC__)
++# define gcdHAS_ELLIPSIS 1 /* GCC always has it. */
++#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
++# define gcdHAS_ELLIPSIS 1 /* C99 has it. */
++#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
++# define gcdHAS_ELLIPSIS 1 /* MSVC 2007+ has it. */
++#elif defined(UNDER_CE)
++#if UNDER_CE >= 600
++# define gcdHAS_ELLIPSIS 1
++# else
++# define gcdHAS_ELLIPSIS 0
++# endif
++#else
++# error "gcdHAS_ELLIPSIS: Platform could not be determined"
++#endif
++
++/******************************************************************************\
++************************************ Keyword ***********************************
++\******************************************************************************/
++#if defined(ANDROID) && defined(__BIONIC_FORTIFY)
++# define gcmINLINE __inline__ __attribute__ ((always_inline)) __attribute__ ((gnu_inline)) __attribute__ ((artificial))
++#elif ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || defined(__APPLE__))
++# define gcmINLINE inline /* C99 keyword. */
++#elif defined(__GNUC__)
++# define gcmINLINE __inline__ /* GNU keyword. */
++#elif defined(_MSC_VER) || defined(UNDER_CE)
++# define gcmINLINE __inline /* Internal keyword. */
++#else
++# error "gcmINLINE: Platform could not be determined"
++#endif
++
++/* Possible debug flags. */
++#define gcdDEBUG_NONE 0
++#define gcdDEBUG_ALL (1 << 0)
++#define gcdDEBUG_FATAL (1 << 1)
++#define gcdDEBUG_TRACE (1 << 2)
++#define gcdDEBUG_BREAK (1 << 3)
++#define gcdDEBUG_ASSERT (1 << 4)
++#define gcdDEBUG_CODE (1 << 5)
++#define gcdDEBUG_STACK (1 << 6)
++
++#define gcmIS_DEBUG(flag) ( gcdDEBUG & (flag | gcdDEBUG_ALL) )
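++/* For illustration (not part of the original header): gcmIS_DEBUG(gcdDEBUG_TRACE)
++   is non-zero when gcdDEBUG has the gcdDEBUG_TRACE bit set, and also whenever
++   gcdDEBUG has the gcdDEBUG_ALL bit set, since gcdDEBUG_ALL is folded into the
++   mask. */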
++
++#ifndef gcdDEBUG
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG)
++# define gcdDEBUG gcdDEBUG_ALL
++# else
++# define gcdDEBUG gcdDEBUG_NONE
++# endif
++#endif
++
++#ifdef _USRDLL
++#ifdef _MSC_VER
++#ifdef HAL_EXPORTS
++# define HALAPI __declspec(dllexport)
++# else
++# define HALAPI __declspec(dllimport)
++# endif
++# define HALDECL __cdecl
++# else
++#ifdef HAL_EXPORTS
++# define HALAPI
++# else
++# define HALAPI extern
++# endif
++# endif
++#else
++# define HALAPI
++# define HALDECL
++#endif
++
++/******************************************************************************\
++********************************** Common Types ********************************
++\******************************************************************************/
++
++#define gcvFALSE 0
++#define gcvTRUE 1
++
++#define gcvINFINITE ((gctUINT32) ~0U)
++
++#define gcvINVALID_HANDLE ((gctHANDLE) ~0U)
++
++typedef int gctBOOL;
++typedef gctBOOL * gctBOOL_PTR;
++
++typedef int gctINT;
++typedef signed char gctINT8;
++typedef signed short gctINT16;
++typedef signed int gctINT32;
++typedef signed long long gctINT64;
++
++typedef gctINT * gctINT_PTR;
++typedef gctINT8 * gctINT8_PTR;
++typedef gctINT16 * gctINT16_PTR;
++typedef gctINT32 * gctINT32_PTR;
++typedef gctINT64 * gctINT64_PTR;
++
++typedef unsigned int gctUINT;
++typedef unsigned char gctUINT8;
++typedef unsigned short gctUINT16;
++typedef unsigned int gctUINT32;
++typedef unsigned long long gctUINT64;
++typedef uintptr_t gctUINTPTR_T;
++
++typedef gctUINT * gctUINT_PTR;
++typedef gctUINT8 * gctUINT8_PTR;
++typedef gctUINT16 * gctUINT16_PTR;
++typedef gctUINT32 * gctUINT32_PTR;
++typedef gctUINT64 * gctUINT64_PTR;
++
++typedef size_t gctSIZE_T;
++typedef gctSIZE_T * gctSIZE_T_PTR;
++typedef gctUINT32 gctTRACE;
++
++#ifdef __cplusplus
++# define gcvNULL 0
++#else
++# define gcvNULL ((void *) 0)
++#endif
++
++#define gcvMAXINT8 0x7f
++#define gcvMININT8 0x80
++#define gcvMAXINT16 0x7fff
++#define gcvMININT16 0x8000
++#define gcvMAXINT32 0x7fffffff
++#define gcvMININT32 0x80000000
++#define gcvMAXINT64 0x7fffffffffffffff
++#define gcvMININT64 0x8000000000000000
++#define gcvMAXUINT8 0xff
++#define gcvMINUINT8 0x0
++#define gcvMAXUINT16 0xffff
++#define gcvMINUINT16 0x8000
++#define gcvMAXUINT32 0xffffffff
++#define gcvMINUINT32 0x80000000
++#define gcvMAXUINT64 0xffffffffffffffff
++#define gcvMINUINT64 0x8000000000000000
++#define gcvMAXUINTPTR_T (~(gctUINTPTR_T)0)
++
++typedef float gctFLOAT;
++typedef signed int gctFIXED_POINT;
++typedef float * gctFLOAT_PTR;
++
++typedef void * gctPHYS_ADDR;
++typedef void * gctHANDLE;
++typedef void * gctFILE;
++typedef void * gctSIGNAL;
++typedef void * gctWINDOW;
++typedef void * gctIMAGE;
++typedef void * gctSYNC_POINT;
++typedef void * gctSHBUF;
++
++typedef void * gctSEMAPHORE;
++
++typedef void * gctPOINTER;
++typedef const void * gctCONST_POINTER;
++
++typedef char gctCHAR;
++typedef char * gctSTRING;
++typedef const char * gctCONST_STRING;
++
++typedef struct _gcsCOUNT_STRING
++{
++ gctSIZE_T Length;
++ gctCONST_STRING String;
++}
++gcsCOUNT_STRING;
++
++typedef union _gcuFLOAT_UINT32
++{
++ gctFLOAT f;
++ gctUINT32 u;
++}
++gcuFLOAT_UINT32;
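++/* For illustration (not part of the original header): this union can be used
++   to reinterpret the bits of a float, e.g. writing 1.0f to .f and reading .u
++   yields the IEEE-754 encoding 0x3F800000. */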
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
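++/* Note added for clarity (not part of the original header): these constants
++   use a 16.16 fixed-point layout, i.e. the upper 16 bits hold the integer
++   part and the lower 16 bits the fraction, so 0x00010000 is 1.0 and
++   0x00008000 is 0.5. */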
++
++
++
++#define gcmFIXEDCLAMP_NEG1_TO_1(_x) \
++ (((_x) < gcvNEGONE_X) \
++ ? gcvNEGONE_X \
++ : (((_x) > gcvONE_X) \
++ ? gcvONE_X \
++ : (_x)))
++
++#define gcmFLOATCLAMP_NEG1_TO_1(_f) \
++ (((_f) < -1.0f) \
++ ? -1.0f \
++ : (((_f) > 1.0f) \
++ ? 1.0f \
++ : (_f)))
++
++
++#define gcmFIXEDCLAMP_0_TO_1(_x) \
++ (((_x) < 0) \
++ ? 0 \
++ : (((_x) > gcvONE_X) \
++ ? gcvONE_X \
++ : (_x)))
++
++#define gcmFLOATCLAMP_0_TO_1(_f) \
++ (((_f) < 0.0f) \
++ ? 0.0f \
++ : (((_f) > 1.0f) \
++ ? 1.0f \
++ : (_f)))
++
++
++/******************************************************************************\
++******************************* Multicast Values *******************************
++\******************************************************************************/
++
++/* Value types. */
++typedef enum _gceVALUE_TYPE
++{
++ gcvVALUE_UINT = 0x0,
++ gcvVALUE_FIXED,
++ gcvVALUE_FLOAT,
++ gcvVALUE_INT,
++
++ /*
++    ** The value needs to be unsigned denormalized; a clamp to (0.0-1.0) should be done first.
++ */
++ gcvVALUE_FLAG_UNSIGNED_DENORM = 0x00010000,
++
++ /*
++    ** The value needs to be signed denormalized; a clamp to (-1.0-1.0) should be done first.
++ */
++ gcvVALUE_FLAG_SIGNED_DENORM = 0x00020000,
++
++ /*
++    ** The value needs gamma correction.
++ */
++ gcvVALUE_FLAG_GAMMAR = 0x00040000,
++
++ /*
++    ** The value needs to be converted from float to float16.
++ */
++ gcvVALUE_FLAG_FLOAT_TO_FLOAT16 = 0x0080000,
++
++ /*
++ ** Mask for flag field.
++ */
++ gcvVALUE_FLAG_MASK = 0xFFFF0000,
++}
++gceVALUE_TYPE;
++
++/* Value unions. */
++typedef union _gcuVALUE
++{
++ gctUINT uintValue;
++ gctFIXED_POINT fixedValue;
++ gctFLOAT floatValue;
++ gctINT intValue;
++}
++gcuVALUE;
++
++
++
++
++/* Stringizing macro. */
++#define gcmSTRING(Value) #Value
++
++/******************************************************************************\
++******************************* Fixed Point Math *******************************
++\******************************************************************************/
++
++#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2)
++#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2)
++#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3)
++
++/* 2D Engine profile. */
++typedef struct _gcs2D_PROFILE
++{
++ /* Cycle count.
++ 32bit counter incremented every 2D clock cycle.
++ Wraps back to 0 when the counter overflows.
++ */
++ gctUINT32 cycleCount;
++
++ /* Pixels rendered by the 2D engine.
++ Resets to 0 every time it is read. */
++ gctUINT32 pixelsRendered;
++}
++gcs2D_PROFILE;
++
++/* Macro to combine four characters into a Character Code. */
++#define gcmCC(c1, c2, c3, c4) \
++( \
++ (char) (c1) \
++ | \
++ ((char) (c2) << 8) \
++ | \
++ ((char) (c3) << 16) \
++ | \
++ ((char) (c4) << 24) \
++)
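++/* For illustration (not part of the original header): gcmCC('A', 'B', 'C', 'D')
++   places 'A' in the least significant byte and 'D' in the most significant
++   byte, so it evaluates to 0x44434241. */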
++
++#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? (c) : ' ') : ' ')
++
++#define gcmCC_PRINT(cc) \
++ gcmPRINTABLE((char) ( (cc) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 24) & 0xFF))
++
++/******************************************************************************\
++****************************** Function Parameters *****************************
++\******************************************************************************/
++
++#define IN
++#define OUT
++#define INOUT
++#define OPTIONAL
++
++/******************************************************************************\
++********************************* Status Codes *********************************
++\******************************************************************************/
++
++typedef enum _gceSTATUS
++{
++ gcvSTATUS_OK = 0,
++ gcvSTATUS_FALSE = 0,
++ gcvSTATUS_TRUE = 1,
++ gcvSTATUS_NO_MORE_DATA = 2,
++ gcvSTATUS_CACHED = 3,
++ gcvSTATUS_MIPMAP_TOO_LARGE = 4,
++ gcvSTATUS_NAME_NOT_FOUND = 5,
++ gcvSTATUS_NOT_OUR_INTERRUPT = 6,
++ gcvSTATUS_MISMATCH = 7,
++ gcvSTATUS_MIPMAP_TOO_SMALL = 8,
++ gcvSTATUS_LARGER = 9,
++ gcvSTATUS_SMALLER = 10,
++ gcvSTATUS_CHIP_NOT_READY = 11,
++ gcvSTATUS_NEED_CONVERSION = 12,
++ gcvSTATUS_SKIP = 13,
++ gcvSTATUS_DATA_TOO_LARGE = 14,
++ gcvSTATUS_INVALID_CONFIG = 15,
++ gcvSTATUS_CHANGED = 16,
++ gcvSTATUS_NOT_SUPPORT_DITHER = 17,
++ gcvSTATUS_EXECUTED = 18,
++ gcvSTATUS_TERMINATE = 19,
++
++ gcvSTATUS_INVALID_ARGUMENT = -1,
++ gcvSTATUS_INVALID_OBJECT = -2,
++ gcvSTATUS_OUT_OF_MEMORY = -3,
++ gcvSTATUS_MEMORY_LOCKED = -4,
++ gcvSTATUS_MEMORY_UNLOCKED = -5,
++ gcvSTATUS_HEAP_CORRUPTED = -6,
++ gcvSTATUS_GENERIC_IO = -7,
++ gcvSTATUS_INVALID_ADDRESS = -8,
++ gcvSTATUS_CONTEXT_LOSSED = -9,
++ gcvSTATUS_TOO_COMPLEX = -10,
++ gcvSTATUS_BUFFER_TOO_SMALL = -11,
++ gcvSTATUS_INTERFACE_ERROR = -12,
++ gcvSTATUS_NOT_SUPPORTED = -13,
++ gcvSTATUS_MORE_DATA = -14,
++ gcvSTATUS_TIMEOUT = -15,
++ gcvSTATUS_OUT_OF_RESOURCES = -16,
++ gcvSTATUS_INVALID_DATA = -17,
++ gcvSTATUS_INVALID_MIPMAP = -18,
++ gcvSTATUS_NOT_FOUND = -19,
++ gcvSTATUS_NOT_ALIGNED = -20,
++ gcvSTATUS_INVALID_REQUEST = -21,
++ gcvSTATUS_GPU_NOT_RESPONDING = -22,
++ gcvSTATUS_TIMER_OVERFLOW = -23,
++ gcvSTATUS_VERSION_MISMATCH = -24,
++ gcvSTATUS_LOCKED = -25,
++ gcvSTATUS_INTERRUPTED = -26,
++ gcvSTATUS_DEVICE = -27,
++ gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28,
++
++ /* Linker errors. */
++ gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000,
++ gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001,
++ gcvSTATUS_TOO_MANY_UNIFORMS = -1002,
++ gcvSTATUS_TOO_MANY_VARYINGS = -1003,
++ gcvSTATUS_UNDECLARED_VARYING = -1004,
++ gcvSTATUS_VARYING_TYPE_MISMATCH = -1005,
++ gcvSTATUS_MISSING_MAIN = -1006,
++ gcvSTATUS_NAME_MISMATCH = -1007,
++ gcvSTATUS_INVALID_INDEX = -1008,
++ gcvSTATUS_UNIFORM_MISMATCH = -1009,
++ gcvSTATUS_UNSAT_LIB_SYMBOL = -1010,
++ gcvSTATUS_TOO_MANY_SHADERS = -1011,
++ gcvSTATUS_LINK_INVALID_SHADERS = -1012,
++ gcvSTATUS_CS_NO_WORKGROUP_SIZE = -1013,
++ gcvSTATUS_LINK_LIB_ERROR = -1014,
++ gcvSTATUS_SHADER_VERSION_MISMATCH = -1015,
++ gcvSTATUS_TOO_MANY_INSTRUCTION = -1016,
++ gcvSTATUS_SSBO_MISMATCH = -1017,
++ gcvSTATUS_TOO_MANY_OUTPUT = -1018,
++ gcvSTATUS_TOO_MANY_INPUT = -1019,
++ gcvSTATUS_NOT_SUPPORT_CL = -1020,
++ gcvSTATUS_NOT_SUPPORT_INTEGER = -1021,
++ gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1022,
++ gcvSTATUS_TOO_MANY_SAMPLER = -1023,
++
++ /* Compiler errors. */
++ gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000,
++ gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001,
++
++ /* Recompilation Errors */
++ gcvSTATUS_RECOMPILER_CONVERT_UNIMPLEMENTED = -3000,
++}
++gceSTATUS;
++
++/******************************************************************************\
++********************************* Status Macros ********************************
++\******************************************************************************/
++
++#define gcmIS_ERROR(status) (status < 0)
++#define gcmNO_ERROR(status) (status >= 0)
++#define gcmIS_SUCCESS(status) (status == gcvSTATUS_OK)
++
++/******************************************************************************\
++********************************* Field Macros *********************************
++\******************************************************************************/
++
++#define __gcmSTART(reg_field) \
++ (0 ? reg_field)
++
++#define __gcmEND(reg_field) \
++ (1 ? reg_field)
++
++#define __gcmGETSIZE(reg_field) \
++ (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1)
++
++#define __gcmALIGN(data, reg_field) \
++ (((gctUINT32) (data)) << __gcmSTART(reg_field))
++
++#define __gcmMASK(reg_field) \
++ ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg_field)))))
++
++/*******************************************************************************
++**
++** gcmFIELDMASK
++**
++** Get aligned field mask.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMASK(reg, field) \
++( \
++ __gcmALIGN(__gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETFIELD
++**
++** Extract the value of a field from specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETFIELD(data, reg, field) \
++( \
++ ((((gctUINT32) (data)) >> __gcmSTART(reg##_##field)) \
++ & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELD
++**
++** Set the value of a field within specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETFIELD(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN((gctUINT32) (value) \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
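++
++/* For illustration, with a hypothetical field defined MSB : LSB as
++       #define MY_REG_WIDTH 11 : 8
++   gcmGETFIELD(data, MY_REG, WIDTH) extracts bits 11..8 of data, and
++   gcmSETFIELD(data, MY_REG, WIDTH, 5) returns data with those bits
++   replaced by 5. */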
++
++/*******************************************************************************
++**
++** gcmSETFIELDVALUE
++**
++** Set the value of a field within specified data with a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmSETFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN(reg##_##field##_##value \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETMASKEDFIELDMASK
++**
++** Determine field mask of a masked field.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETMASKEDFIELDMASK(reg, field) \
++( \
++ gcmSETFIELD(0, reg, field, ~0) | \
++ gcmSETFIELD(0, reg, MASK_ ## field, ~0) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELD
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELD(reg, field, value) \
++( \
++ gcmSETFIELD (~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELDVALUE
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELDVALUE(reg, field, value) \
++( \
++ gcmSETFIELDVALUE(~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDVALUE
++**
++** Verify if the value of a field within specified data equals a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmVERIFYFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) >> __gcmSTART(reg##_##field) & \
++ __gcmMASK(reg##_##field)) \
++ == \
++ (reg##_##field##_##value & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++** Bit field macros.
++*/
++
++#define __gcmSTARTBIT(Field) \
++ ( 1 ? Field )
++
++#define __gcmBITSIZE(Field) \
++ ( 0 ? Field )
++
++#define __gcmBITMASK(Field) \
++( \
++ (1 << __gcmBITSIZE(Field)) - 1 \
++)
++
++#define gcmGETBITS(Value, Type, Field) \
++( \
++ ( ((Type) (Value)) >> __gcmSTARTBIT(Field) ) \
++ & \
++ __gcmBITMASK(Field) \
++)
++
++#define gcmSETBITS(Value, Type, Field, NewValue) \
++( \
++ ( ((Type) (Value)) \
++ & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field)) \
++ ) \
++ | \
++ ( ( ((Type) (NewValue)) \
++ & __gcmBITMASK(Field) \
++ ) << __gcmSTARTBIT(Field) \
++ ) \
++)
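++
++/* For illustration, with a hypothetical bit field described as
++   StartBit : BitCount, e.g.
++       #define MY_FLAGS 4 : 3
++   gcmGETBITS(value, gctUINT32, MY_FLAGS) returns bits 6..4 of value, and
++   gcmSETBITS(value, gctUINT32, MY_FLAGS, 7) sets those three bits. */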
++
++/*******************************************************************************
++**
++** gcmISINREGRANGE
++**
++** Verify whether the specified address is in the register range.
++**
++** ARGUMENTS:
++**
++** Address Address to be verified.
++** Name Name of a register.
++*/
++
++#define gcmISINREGRANGE(Address, Name) \
++( \
++ ((Address & (~0U << Name ## _LSB)) == (Name ## _Address >> 2)) \
++)
++
++/******************************************************************************\
++******************************** Ceiling Macro ********************************
++\******************************************************************************/
++#define gcmCEIL(x) ((x - (gctUINT32)x) == 0 ? (gctUINT32)x : (gctUINT32)x + 1)
++
++/******************************************************************************\
++******************************** Min/Max Macros ********************************
++\******************************************************************************/
++
++#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y))
++#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y))
++#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : \
++ ((x) > (max)) ? (max) : (x))
++#define gcmABS(x) (((x) < 0) ? -(x) : (x))
++#define gcmNEG(x) (((x) < 0) ? (x) : -(x))
++
++/******************************************************************************\
++******************************** Bit Macro ********************************
++\******************************************************************************/
++#define gcmBITSET(x, y) ((x) & (y))
++/*******************************************************************************
++**
++** gcmPTR2INT
++**
++** Convert a pointer to an integer value.
++**
++** ARGUMENTS:
++**
++** p Pointer value.
++*/
++#define gcmPTR2INT(p) \
++( \
++ (gctUINTPTR_T) (p) \
++)
++
++#define gcmPTR2INT32(p) \
++( \
++ (gctUINT32)(gctUINTPTR_T) (p) \
++)
++
++/*******************************************************************************
++**
++** gcmINT2PTR
++**
++** Convert an integer value into a pointer.
++**
++** ARGUMENTS:
++**
++** v Integer value.
++*/
++
++#define gcmINT2PTR(i) \
++( \
++ (gctPOINTER) (gctUINTPTR_T)(i) \
++)
++
++/*******************************************************************************
++**
++** gcmOFFSETOF
++**
++** Compute the byte offset of a field inside a structure.
++**
++** ARGUMENTS:
++**
++** s Structure name.
++** field Field name.
++*/
++#define gcmOFFSETOF(s, field) \
++( \
++ gcmPTR2INT32(& (((struct s *) 0)->field)) \
++)
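++
++/* For illustration, given a hypothetical
++       struct _myrec { gctUINT32 first; gctUINT32 second; };
++   gcmOFFSETOF(_myrec, second) evaluates to 4. */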
++
++/*******************************************************************************
++**
++** gcmSWAB32
++**
++** Return a value with all bytes in the 32 bit argument swapped.
++*/
++#define gcmSWAB32(x) ((gctUINT32)( \
++ (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \
++ (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24)))
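++
++/* For illustration: gcmSWAB32(0x12345678) yields 0x78563412. */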
++
++/*******************************************************************************
++***** Database ****************************************************************/
++
++typedef struct _gcsDATABASE_COUNTERS
++{
++ /* Number of currently allocated bytes. */
++ gctUINT64 bytes;
++
++ /* Maximum number of bytes allocated (memory footprint). */
++ gctUINT64 maxBytes;
++
++ /* Total number of bytes allocated. */
++ gctUINT64 totalBytes;
++}
++gcsDATABASE_COUNTERS;
++
++typedef struct _gcuDATABASE_INFO
++{
++ /* Counters. */
++ gcsDATABASE_COUNTERS counters;
++
++ /* Time value. */
++ gctUINT64 time;
++}
++gcuDATABASE_INFO;
++
++/*******************************************************************************
++***** Frame database **********************************************************/
++
++/* gcsHAL_FRAME_INFO */
++typedef struct _gcsHAL_FRAME_INFO
++{
++ /* Current timer tick. */
++ OUT gctUINT64 ticks;
++
++ /* Bandwidth counters. */
++ OUT gctUINT readBytes8[8];
++ OUT gctUINT writeBytes8[8];
++
++ /* Counters. */
++ OUT gctUINT cycles[8];
++ OUT gctUINT idleCycles[8];
++ OUT gctUINT mcCycles[8];
++ OUT gctUINT readRequests[8];
++ OUT gctUINT writeRequests[8];
++
++ /* 3D counters. */
++ OUT gctUINT vertexCount;
++ OUT gctUINT primitiveCount;
++ OUT gctUINT rejectedPrimitives;
++ OUT gctUINT culledPrimitives;
++ OUT gctUINT clippedPrimitives;
++ OUT gctUINT outPrimitives;
++ OUT gctUINT inPrimitives;
++ OUT gctUINT culledQuadCount;
++ OUT gctUINT totalQuadCount;
++ OUT gctUINT quadCount;
++ OUT gctUINT totalPixelCount;
++
++ /* PE counters. */
++ OUT gctUINT colorKilled[8];
++ OUT gctUINT colorDrawn[8];
++ OUT gctUINT depthKilled[8];
++ OUT gctUINT depthDrawn[8];
++
++ /* Shader counters. */
++ OUT gctUINT shaderCycles;
++ OUT gctUINT vsInstructionCount;
++ OUT gctUINT vsTextureCount;
++ OUT gctUINT psInstructionCount;
++ OUT gctUINT psTextureCount;
++
++ /* Texture counters. */
++ OUT gctUINT bilinearRequests;
++ OUT gctUINT trilinearRequests;
++ OUT gctUINT txBytes8;
++ OUT gctUINT txHitCount;
++ OUT gctUINT txMissCount;
++}
++gcsHAL_FRAME_INFO;
++
++#if gcdLINK_QUEUE_SIZE
++typedef struct _gckLINKDATA * gckLINKDATA;
++struct _gckLINKDATA
++{
++ gctUINT32 start;
++ gctUINT32 end;
++ gctUINT32 pid;
++};
++
++typedef struct _gckLINKQUEUE * gckLINKQUEUE;
++struct _gckLINKQUEUE
++{
++ struct _gckLINKDATA data[gcdLINK_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++#endif
++
++#define gcdENTRY_QUEUE_SIZE 256
++typedef struct _gckENTRYDATA * gckENTRYDATA;
++struct _gckENTRYDATA
++{
++ gctUINT32 physical;
++ gctUINT32 bytes;
++};
++
++typedef struct _gckENTRYQUEUE * gckENTRYQUEUE;
++struct _gckENTRYQUEUE
++{
++ struct _gckENTRYDATA data[gcdENTRY_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++
++typedef enum _gceTRACEMODE
++{
++ gcvTRACEMODE_NONE = 0,
++ gcvTRACEMODE_FULL = 1,
++ gcvTRACEMODE_LOGGER = 2,
++ gcvTRACEMODE_PRE = 3,
++ gcvTRACEMODE_POST = 4,
++ gcvTRACEMODE_SYSTRACE = 5,
++
++} gceTRACEMODE;
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_types_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_version.h 2015-05-01 14:57:59.591427001 -0500
+@@ -0,0 +1,39 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_version_h_
++#define __gc_hal_version_h_
++
++#define gcvVERSION_MAJOR 5
++
++#define gcvVERSION_MINOR 0
++
++#define gcvVERSION_PATCH 11
++
++#define gcvVERSION_BUILD 25762
++
++#define gcvVERSION_STRING "5.0.11.p4.25762"
++
++#define gcvVERSION_DATE __DATE__
++
++#define gcvVERSION_TIME __TIME__
++
++#endif /* __gc_hal_version_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/kernel/inc/gc_hal_vg.h 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,896 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_vg_h_
++#define __gc_hal_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++
++#if gcdENABLE_VG
++
++/* Thread routine type. */
++#if defined(LINUX)
++ typedef gctINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#elif defined(WIN32)
++ typedef gctUINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE __stdcall
++#elif defined(__QNXNTO__)
++ typedef void * gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#endif
++
++typedef gctTHREADFUNCRESULT (gctTHREADFUNCTYPE * gctTHREADFUNC) (
++ gctTHREADFUNCPARAMETER ThreadParameter
++ );
++
++
++#if defined(gcvDEBUG)
++# undef gcvDEBUG
++#endif
++
++#define gcdFORCE_DEBUG 0
++#define gcdFORCE_MESSAGES 0
++
++
++#if DBG || defined(DEBUG) || defined(_DEBUG) || gcdFORCE_DEBUG
++# define gcvDEBUG 1
++#else
++# define gcvDEBUG 0
++#endif
++
++#define _gcmERROR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++
++#define gcmERROR_RETURN(func) _gcmERROR_RETURN(gcm, func)
++
++#define gcmLOG_LOCATION()
++
++#define gcmkIS_ERROR(status) (status < 0)
++
++#define gcmALIGNDOWN(n, align) \
++( \
++ (n) & ~((align) - 1) \
++)
++
++#define gcmIS_VALID_INDEX(Index, Array) \
++ (((gctUINT) (Index)) < gcmCOUNTOF(Array))
++
++
++#define gcmIS_NAN(x) \
++( \
++ ((* (gctUINT32_PTR) &(x)) & 0x7FFFFFFF) == 0x7FFFFFFF \
++)
++
++#define gcmLERP(v1, v2, w) \
++ ((v1) * (w) + (v2) * (1.0f - (w)))
++
++#define gcmINTERSECT(Start1, Start2, Length) \
++ (gcmABS((Start1) - (Start2)) < (Length))
++
++/*******************************************************************************
++**
++** gcmERR_GOTO
++**
++** Prints a message and jumps to the ErrorHandler label on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** Function
++** Function to evaluate.
++*/
++
++#define gcmERR_GOTO(Function) \
++ status = Function; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmERR_GOTO: status=%d @ line=%d in function %s.\n", \
++ status, __LINE__, __FUNCTION__ \
++ ); \
++ goto ErrorHandler; \
++ }
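++
++/* For illustration (assumes a local gceSTATUS status variable and an
++   ErrorHandler label, as required above; hardware and data are
++   hypothetical locals):
++
++       gcmERR_GOTO(gckVGHARDWARE_GetIdle(hardware, &data));
++       ...
++   ErrorHandler:
++       return status;
++*/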
++
++#if gcvDEBUG || gcdFORCE_MESSAGES
++# define gcmVERIFY_BOOLEAN(Expression) \
++ gcmASSERT( \
++ ( (Expression) == gcvFALSE ) || \
++ ( (Expression) == gcvTRUE ) \
++ )
++#else
++# define gcmVERIFY_BOOLEAN(Expression)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDFIT
++**
++** Verify whether the value fits in the field.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmVERIFYFIELDFIT(reg, field, value) \
++ gcmASSERT( \
++ (value) <= gcmFIELDMAX(reg, field) \
++ )
++/*******************************************************************************
++**
++** gcmFIELDMAX
++**
++** Get field maximum value.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMAX(reg, field) \
++( \
++ (gctUINT32) \
++ ( \
++ (__gcmGETSIZE(reg##_##field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg##_##field))) \
++ ) \
++)
++
++
++/* ANSI C does not have the 'f' functions; define replacements here. */
++#define gcmSINF(x) ((gctFLOAT) sin(x))
++#define gcmCOSF(x) ((gctFLOAT) cos(x))
++#define gcmASINF(x) ((gctFLOAT) asin(x))
++#define gcmACOSF(x) ((gctFLOAT) acos(x))
++#define gcmSQRTF(x) ((gctFLOAT) sqrt(x))
++#define gcmFABSF(x) ((gctFLOAT) fabs(x))
++#define gcmFMODF(x, y) ((gctFLOAT) fmod((x), (y)))
++#define gcmCEILF(x) ((gctFLOAT) ceil(x))
++#define gcmFLOORF(x) ((gctFLOAT) floor(x))
++
++
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Integer constants. */
++#define gcvMAX_POS_INT ((gctINT) 0x7FFFFFFF)
++#define gcvMAX_NEG_INT ((gctINT) 0x80000000)
++
++/* Float constants. */
++#define gcvMAX_POS_FLOAT ((gctFLOAT) 3.4028235e+038)
++#define gcvMAX_NEG_FLOAT ((gctFLOAT) -3.4028235e+038)
++
++/******************************************************************************\
++***************************** Miscellaneous Macro ******************************
++\******************************************************************************/
++
++#define gcmKB2BYTES(Kilobyte) \
++( \
++ (Kilobyte) << 10 \
++)
++
++#define gcmMB2BYTES(Megabyte) \
++( \
++ (Megabyte) << 20 \
++)
++
++#define gcmMAT(Matrix, Row, Column) \
++( \
++ (Matrix) [(Row) * 3 + (Column)] \
++)
++
++#define gcmMAKE2CHAR(Char1, Char2) \
++( \
++ ((gctUINT16) (gctUINT8) (Char1) << 0) | \
++ ((gctUINT16) (gctUINT8) (Char2) << 8) \
++)
++
++#define gcmMAKE4CHAR(Char1, Char2, Char3, Char4) \
++( \
++ ((gctUINT32)(gctUINT8) (Char1) << 0) | \
++ ((gctUINT32)(gctUINT8) (Char2) << 8) | \
++ ((gctUINT32)(gctUINT8) (Char3) << 16) | \
++ ((gctUINT32)(gctUINT8) (Char4) << 24) \
++)
++
++/* Some platforms need to fix the physical address for HW to access. */
++#define gcmFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++#define gcmkFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++/******************************************************************************\
++****************************** Kernel Debug Macro ******************************
++\******************************************************************************/
++
++/* Set signal to signaled state for specified process. */
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----------------------------- Semaphore Object -----------------------------*/
++
++/* Increment the value of a semaphore. */
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++/* Decrement the value of a semaphore (waiting might occur). */
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- Thread Object ------------------------------*/
++
++/* Start a thread. */
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ );
++
++/* Stop a thread. */
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++/* Verify whether the thread is still running. */
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++
++/* Construct a new gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ );
++
++/* Destroy a gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ );
++
++/* Allocate linear video memory. */
++gceSTATUS
++gckVGKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/******************************************************************************\
++******************************* gckVGHARDWARE Object ******************************
++\******************************************************************************/
++
++/* Construct a new gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ );
++
++/* Destroy a gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures1
++ );
++
++/* Convert an API format. */
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ );
++
++/* Split a hardware-specific address into a pool and offset. */
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Align size to tile boundary. */
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckVGHARDWARE_Flush(
++ IN gckVGHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ );
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++/******************************************************************************\
++*************************** Command Buffer Structures **************************
++\******************************************************************************/
++
++/* Vacant command buffer marker. */
++#define gcvVACANT_BUFFER ((gcsCOMPLETION_SIGNAL_PTR) ((gctSIZE_T)1))
++
++/* Command buffer header. */
++typedef struct _gcsCMDBUFFER * gcsCMDBUFFER_PTR;
++typedef struct _gcsCMDBUFFER
++{
++ /* Pointer to the completion signal. */
++ gcsCOMPLETION_SIGNAL_PTR completion;
++
++    /* The user sets this to the node of the container buffer within which
++ this particular command buffer resides. The kernel sets this to the
++ node of the internally allocated buffer. */
++ gcuVIDMEM_NODE_PTR node;
++
++ /* Command buffer hardware address. */
++ gctUINT32 address;
++
++ /* The offset of the buffer from the beginning of the header. */
++ gctUINT32 bufferOffset;
++
++ /* Size of the area allocated for the data portion of this particular
++ command buffer (headers and tail reserves are excluded). */
++ gctUINT32 size;
++
++ /* Offset into the buffer [0..size]; reflects exactly how much data has
++ been put into the command buffer. */
++ gctUINT offset;
++
++ /* The number of command units in the buffer for the hardware to
++ execute. */
++ gctUINT32 dataCount;
++
++ /* MANAGED BY : user HAL (gcoBUFFER object).
++ USED BY : user HAL (gcoBUFFER object).
++ Points to the immediate next allocated command buffer. */
++ gcsCMDBUFFER_PTR nextAllocated;
++
++ /* MANAGED BY : user layers (HAL and drivers).
++ USED BY : kernel HAL (gcoBUFFER object).
++       Points to the next subbuffer, if any. A family of subbuffers is chained
++       together and is meant to be executed inseparably as a unit, meaning
++ that context switching cannot occur while a chain of subbuffers is being
++ executed. */
++ gcsCMDBUFFER_PTR nextSubBuffer;
++}
++gcsCMDBUFFER;
++
++/* Command queue element. */
++typedef struct _gcsVGCMDQUEUE
++{
++ /* Pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Dynamic vs. static command buffer state. */
++ gctBOOL dynamic;
++}
++gcsVGCMDQUEUE;
++
++/* Context map entry. */
++typedef struct _gcsVGCONTEXT_MAP
++{
++ /* State index. */
++ gctUINT32 index;
++
++ /* New state value. */
++ gctUINT32 data;
++
++ /* Points to the next entry in the mod list. */
++ gcsVGCONTEXT_MAP_PTR next;
++}
++gcsVGCONTEXT_MAP;
++
++/* gcsVGCONTEXT structure that holds the current context. */
++typedef struct _gcsVGCONTEXT
++{
++ /* Context ID. */
++ gctUINT64 id;
++
++    /* State caching enable flag. */
++ gctBOOL stateCachingEnabled;
++
++ /* Current pipe. */
++ gctUINT32 currentPipe;
++
++ /* State map/mod buffer. */
++ gctUINT32 mapFirst;
++ gctUINT32 mapLast;
++ gcsVGCONTEXT_MAP_PTR mapContainer;
++ gcsVGCONTEXT_MAP_PTR mapPrev;
++ gcsVGCONTEXT_MAP_PTR mapCurr;
++ gcsVGCONTEXT_MAP_PTR firstPrevMap;
++ gcsVGCONTEXT_MAP_PTR firstCurrMap;
++
++ /* Main context buffer. */
++ gcsCMDBUFFER_PTR header;
++ gctUINT32_PTR buffer;
++
++ /* Completion signal. */
++ gctHANDLE process;
++ gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsVGCONTEXT;
++
++/* User space task header. */
++typedef struct _gcsTASK * gcsTASK_PTR;
++typedef struct _gcsTASK
++{
++ /* Pointer to the next task for the same interrupt in user space. */
++ gcsTASK_PTR next;
++
++ /* Size of the task data that immediately follows the structure. */
++ gctUINT size;
++
++ /* Task data starts here. */
++ /* ... */
++}
++gcsTASK;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_ENTRY * gcsTASK_MASTER_ENTRY_PTR;
++typedef struct _gcsTASK_MASTER_ENTRY
++{
++ /* Pointers to the head and to the tail of the task chain. */
++ gcsTASK_PTR head;
++ gcsTASK_PTR tail;
++}
++gcsTASK_MASTER_ENTRY;
++
++/* User space task master table. */
++typedef struct _gcsTASK_MASTER_TABLE
++{
++ /* Table with one entry per block. */
++ gcsTASK_MASTER_ENTRY table[gcvBLOCK_COUNT];
++
++    /* The total number of tasks scheduled. */
++ gctUINT count;
++
++ /* The total size of event data in bytes. */
++ gctUINT size;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsTASK_MASTER_TABLE;
++
++/******************************************************************************\
++***************************** gckVGINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVGINTERRUPT * gckVGINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckVGKERNEL Kernel
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ );
++
++#ifndef __QNXNTO__
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++#else
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ );
++
++#endif
++
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++typedef struct _gckVGCOMMAND * gckVGCOMMAND;
++
++/* Construct a new gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ );
++
++/* Destroy a gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ IN gckVGCOMMAND Command
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/* Allocate a command queue. */
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ );
++
++/* Release memory held by the command queue. */
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Schedule the command queue for execution. */
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ );
++
++/******************************************************************************\
++********************************* gckVGMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckVGMMU * gckVGMMU;
++
++/* Construct a new gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT32 MmuSize,
++ OUT gckVGMMU * Mmu
++ );
++
++/* Destroy a gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++/* Flush MMU */
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ );
++
++#endif /* gcdENABLE_VG */
++
++#ifdef __cplusplus
++} /* extern "C" */
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,34 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++extern gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++
++gcsALLOCATOR_DESC allocatorArray[] =
++{
++ /* Default allocator. */
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++};
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,45 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++extern gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++
++#if LINUX_CMA_FSL
++gceSTATUS
++_CMAFSLAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++#endif
++
++gcsALLOCATOR_DESC allocatorArray[] =
++{
++#if LINUX_CMA_FSL
++ gcmkDEFINE_ALLOCATOR_DESC("cmafsl", _CMAFSLAlloctorInit),
++#endif
++ /* Default allocator. */
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++};
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,412 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_allocator.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsCMA_PRIV * gcsCMA_PRIV_PTR;
++typedef struct _gcsCMA_PRIV {
++ gctUINT32 cmasize;
++}
++gcsCMA_PRIV;
++
++struct mdl_cma_priv {
++ gctPOINTER kvaddr;
++ dma_addr_t physical;
++};
++
++int gc_cma_usage_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckALLOCATOR Allocator = node->device;
++ gcsCMA_PRIV_PTR priv = Allocator->privateData;
++
++ seq_printf(m, "cma: %u bytes\n", priv->cmasize);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"cmausage", gc_cma_usage_show},
++};
++
++static void
++_DefaultAllocatorDebugfsInit(
++ IN gckALLOCATOR Allocator,
++ IN gckDEBUGFS_DIR Root
++ )
++{
++ gcmkVERIFY_OK(
++ gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "cma"));
++
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList),
++ Allocator
++ ));
++}
++
++static void
++_DefaultAllocatorDebugfsCleanup(
++ IN gckALLOCATOR Allocator
++ )
++{
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList)
++ ));
++
++ gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir);
++}
++
++static gceSTATUS
++_CMAFSLAlloc(
++ IN gckALLOCATOR Allocator,
++ INOUT PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flags
++ )
++{
++ gceSTATUS status;
++ gcsCMA_PRIV_PTR priv = (gcsCMA_PRIV_PTR)Allocator->privateData;
++
++ struct mdl_cma_priv *mdl_priv=gcvNULL;
++ gckOS os = Allocator->os;
++
++ gcmkHEADER_ARG("Mdl=%p NumPages=%d", Mdl, NumPages);
++
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct mdl_cma_priv), (gctPOINTER *)&mdl_priv));
++ mdl_priv->kvaddr = gcvNULL;
++
++ mdl_priv->kvaddr = dma_alloc_writecombine(gcvNULL,
++ NumPages * PAGE_SIZE,
++ &mdl_priv->physical,
++ GFP_KERNEL | gcdNOWARN);
++
++ if (mdl_priv->kvaddr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ Mdl->priv = mdl_priv;
++ priv->cmasize += NumPages * PAGE_SIZE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if(mdl_priv)
++ gckOS_Free(os, mdl_priv);
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_CMAFSLFree(
++ IN gckALLOCATOR Allocator,
++ IN OUT PLINUX_MDL Mdl
++ )
++{
++ gckOS os = Allocator->os;
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ gcsCMA_PRIV_PTR priv = (gcsCMA_PRIV_PTR)Allocator->privateData;
++ dma_free_writecombine(gcvNULL,
++ Mdl->numPages * PAGE_SIZE,
++ mdl_priv->kvaddr,
++ mdl_priv->physical);
++ gckOS_Free(os, mdl_priv);
++ priv->cmasize -= Mdl->numPages * PAGE_SIZE;
++}
++
++gctINT
++_CMAFSLMapUser(
++ gckALLOCATOR Allocator,
++ PLINUX_MDL Mdl,
++ PLINUX_MDL_MAP MdlMap,
++ gctBOOL Cacheable
++ )
++{
++
++ PLINUX_MDL mdl = Mdl;
++ PLINUX_MDL_MAP mdlMap = MdlMap;
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++
++ gcmkHEADER_ARG("Allocator=%p Mdl=%p MdlMap=%p gctBOOL=%d", Allocator, Mdl, MdlMap, Cacheable);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++    /* Map the CMA allocation into the user address space. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (dma_mmap_writecombine(gcvNULL,
++ mdlMap->vma,
++ mdl_priv->kvaddr,
++ mdl_priv->physical,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_attrs error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ gckOS_Print("incorrect mdl:conti%d\n",mdl->contiguous);
++ }
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_CMAUnmapUser(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ )
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++gceSTATUS
++_CMAMapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ )
++{
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ *Logical =mdl_priv->kvaddr;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_CMAUnmapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++extern gceSTATUS
++_DefaultLogicalToPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ );
++
++extern gceSTATUS
++_DefaultCache(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++gceSTATUS
++_CMAPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ gcmkASSERT(!Offset);
++ *Physical = mdl_priv->physical;
++
++ return gcvSTATUS_OK;
++}
++
++
++extern void
++_DefaultAllocatorDestructor(
++ IN void* PrivateData
++ );
++
++/* Default allocator operations. */
++gcsALLOCATOR_OPERATIONS CMAFSLAllocatorOperations = {
++ .Alloc = _CMAFSLAlloc,
++ .Free = _CMAFSLFree,
++ .MapUser = _CMAFSLMapUser,
++ .UnmapUser = _CMAUnmapUser,
++ .MapKernel = _CMAMapKernel,
++ .UnmapKernel = _CMAUnmapKernel,
++ .LogicalToPhysical = _DefaultLogicalToPhysical,
++ .Cache = _DefaultCache,
++ .Physical = _CMAPhysical,
++};
++
++/* Default allocator entry. */
++gceSTATUS
++_CMAFSLAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++ gcsCMA_PRIV_PTR priv = gcvNULL;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &CMAFSLAllocatorOperations, &allocator));
++
++ priv = kzalloc(gcmSIZEOF(gcsCMA_PRIV), GFP_KERNEL | gcdNOWARN);
++
++ if (!priv)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Register private data. */
++ allocator->privateData = priv;
++ allocator->privateDataDestructor = _DefaultAllocatorDestructor;
++
++ allocator->debugfsInit = _DefaultAllocatorDebugfsInit;
++ allocator->debugfsCleanup = _DefaultAllocatorDebugfsCleanup;
++
++ allocator->capability = gcvALLOC_FLAG_CONTIGUOUS;
++
++ *Allocator = allocator;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.c 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,938 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_allocator.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "gc_hal_kernel_allocator_array.h"
++#include "gc_hal_kernel_platform.h"
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsDEFAULT_PRIV * gcsDEFAULT_PRIV_PTR;
++typedef struct _gcsDEFAULT_PRIV {
++ gctUINT32 low;
++ gctUINT32 high;
++}
++gcsDEFAULT_PRIV;
++
++/******************************************************************************\
++************************** Default Allocator Debugfs ***************************
++\******************************************************************************/
++
++int gc_usage_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckALLOCATOR Allocator = node->device;
++ gcsDEFAULT_PRIV_PTR priv = Allocator->privateData;
++
++ seq_printf(m, "low: %u bytes\n", priv->low);
++ seq_printf(m, "high: %u bytes\n", priv->high);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"lowHighUsage", gc_usage_show},
++};
++
++static void
++_DefaultAllocatorDebugfsInit(
++ IN gckALLOCATOR Allocator,
++ IN gckDEBUGFS_DIR Root
++ )
++{
++ gcmkVERIFY_OK(
++ gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "default"));
++
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList),
++ Allocator
++ ));
++}
++
++static void
++_DefaultAllocatorDebugfsCleanup(
++ IN gckALLOCATOR Allocator
++ )
++{
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList)
++ ));
++
++ gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir);
++}
++
++
++static void
++_NonContiguousFree(
++ IN struct page ** Pages,
++ IN gctUINT32 NumPages
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Pages=0x%X, NumPages=%d", Pages, NumPages);
++
++ gcmkASSERT(Pages != gcvNULL);
++
++ for (i = 0; i < NumPages; i++)
++ {
++ __free_page(Pages[i]);
++ }
++
++ if (is_vmalloc_addr(Pages))
++ {
++ vfree(Pages);
++ }
++ else
++ {
++ kfree(Pages);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++static struct page **
++_NonContiguousAlloc(
++ IN gctUINT32 NumPages
++ )
++{
++ struct page ** pages;
++ struct page *p;
++ gctINT i, size;
++
++ gcmkHEADER_ARG("NumPages=%lu", NumPages);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
++ if (NumPages > totalram_pages)
++#else
++ if (NumPages > num_physpages)
++#endif
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ size = NumPages * sizeof(struct page *);
++
++ pages = kmalloc(size, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ pages = vmalloc(size);
++
++ if (!pages)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ }
++
++ for (i = 0; i < NumPages; i++)
++ {
++ p = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++
++ if (!p)
++ {
++ _NonContiguousFree(pages, i);
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ pages[i] = p;
++ }
++
++ gcmkFOOTER_ARG("pages=0x%X", pages);
++ return pages;
++}
++
++gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ )
++{
++ gctSTRING addr = 0;
++ gctINT numPages = Mdl->numPages;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ if (Mdl->contiguous)
++ {
++ addr = page_address(Mdl->u.contiguousPages);
++ }
++ else
++ {
++ addr = vmap(Mdl->u.nonContiguousPages,
++ numPages,
++ 0,
++ PAGE_KERNEL);
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++ }
++#else
++ struct page ** pages;
++ gctBOOL free = gcvFALSE;
++ gctINT i;
++
++ if (Mdl->contiguous)
++ {
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ return gcvNULL;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(Mdl->u.contiguousPages, i);
++ }
++
++ free = gcvTRUE;
++ }
++ else
++ {
++ pages = Mdl->u.nonContiguousPages;
++ }
++
++ /* ioremap() can't work on system memory since 2.6.38. */
++ addr = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ if (free)
++ {
++ kfree(pages);
++ }
++
++#endif
++
++ return addr;
++}
++
++void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ )
++{
++#if !gcdNONPAGED_MEMORY_CACHEABLE
++ vunmap(Addr);
++#endif
++}
++
++void
++_UnmapUserLogical(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++)
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++/***************************************************************************\
++************************ Default Allocator **********************************
++\***************************************************************************/
++#define C_MAX_PAGENUM (50*1024)
++static gceSTATUS
++_DefaultAlloc(
++ IN gckALLOCATOR Allocator,
++ INOUT PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flags
++ )
++{
++ gceSTATUS status;
++ gctUINT32 order;
++ gctSIZE_T bytes;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctPOINTER addr = gcvNULL;
++#endif
++ gctUINT32 numPages;
++ gctUINT i = 0;
++ gctBOOL contiguous = Flags & gcvALLOC_FLAG_CONTIGUOUS;
++ struct sysinfo temsysinfo;
++ gcsDEFAULT_PRIV_PTR priv = (gcsDEFAULT_PRIV_PTR)Allocator->privateData;
++
++ gcmkHEADER_ARG("Mdl=%p NumPages=%d", Mdl, NumPages);
++
++ numPages = NumPages;
++ bytes = NumPages * PAGE_SIZE;
++ order = get_order(bytes);
++
++ si_meminfo(&temsysinfo);
++
++ if (Flags & gcvALLOC_FLAG_MEMLIMIT)
++ {
++ if ( (temsysinfo.freeram < NumPages) || ((temsysinfo.freeram-NumPages) < C_MAX_PAGENUM) )
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (contiguous)
++ {
++ if (order >= MAX_ORDER)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ addr =
++ alloc_pages_exact(bytes, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
++
++ Mdl->u.contiguousPages = addr
++ ? virt_to_page(addr)
++ : gcvNULL;
++
++ Mdl->exact = gcvTRUE;
++#else
++ Mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
++#endif
++
++ if (Mdl->u.contiguousPages == gcvNULL)
++ {
++ Mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ Mdl->exact = gcvFALSE;
++#endif
++ }
++ }
++ else
++ {
++ Mdl->u.nonContiguousPages = _NonContiguousAlloc(numPages);
++ }
++
++ if (Mdl->u.contiguousPages == gcvNULL && Mdl->u.nonContiguousPages == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ struct page *page;
++
++ if (contiguous)
++ {
++ page = nth_page(Mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(Mdl->u.nonContiguousPages, i);
++ }
++
++ SetPageReserved(page);
++
++ if (!PageHighMem(page) && page_to_phys(page))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CacheFlush(Allocator->os, _GetProcessID(), gcvNULL,
++ page_to_phys(page),
++ page_address(page),
++ PAGE_SIZE));
++
++ priv->low += PAGE_SIZE;
++ }
++ else
++ {
++ flush_dcache_page(page);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE) && gcdENABLE_OUTER_CACHE_PATCH
++ if (page_to_phys(page))
++ {
++ _HandleOuterCache(
++ Allocator->os,
++ page_to_phys(page),
++ gcvNULL,
++ PAGE_SIZE,
++ gcvCACHE_FLUSH
++ );
++ }
++#endif
++
++ priv->high += PAGE_SIZE;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_DefaultFree(
++ IN gckALLOCATOR Allocator,
++ IN OUT PLINUX_MDL Mdl
++ )
++{
++ gctINT i;
++ struct page * page;
++ gcsDEFAULT_PRIV_PTR priv = (gcsDEFAULT_PRIV_PTR)Allocator->privateData;
++
++ for (i = 0; i < Mdl->numPages; i++)
++ {
++ if (Mdl->contiguous)
++ {
++ page = nth_page(Mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(Mdl->u.nonContiguousPages, i);
++ }
++
++ ClearPageReserved(page);
++
++ if (PageHighMem(page))
++ {
++ priv->high -= PAGE_SIZE;
++ }
++ else
++ {
++ priv->low -= PAGE_SIZE;
++ }
++ }
++
++ if (Mdl->contiguous)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ if (Mdl->exact == gcvTRUE)
++ {
++ free_pages_exact(page_address(Mdl->u.contiguousPages), Mdl->numPages * PAGE_SIZE);
++ }
++ else
++#endif
++ {
++ __free_pages(Mdl->u.contiguousPages, get_order(Mdl->numPages * PAGE_SIZE));
++ }
++ }
++ else
++ {
++ _NonContiguousFree(Mdl->u.nonContiguousPages, Mdl->numPages);
++ }
++}
++
++gctINT
++_DefaultMapUser(
++ gckALLOCATOR Allocator,
++ PLINUX_MDL Mdl,
++ PLINUX_MDL_MAP MdlMap,
++ gctBOOL Cacheable
++ )
++{
++
++ gctSTRING addr;
++ unsigned long start;
++ unsigned long pfn;
++ gctINT i;
++ gckOS os = Allocator->os;
++ gcsPLATFORM * platform = os->device->platform;
++
++ PLINUX_MDL mdl = Mdl;
++ PLINUX_MDL_MAP mdlMap = MdlMap;
++
++ gcmkHEADER_ARG("Allocator=%p Mdl=%p MdlMap=%p Cacheable=%d", Allocator, Mdl, MdlMap, Cacheable);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++
++ if (Cacheable == gcvFALSE)
++ {
++ /* Make this mapping non-cached. */
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ }
++
++ if (platform && platform->ops->adjustProt)
++ {
++ platform->ops->adjustProt(mdlMap->vma);
++ }
++
++ addr = mdl->addr;
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* Map kernel memory to user space. */
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ page_to_pfn(mdl->u.contiguousPages),
++ mdlMap->vma->vm_end - mdlMap->vma->vm_start,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): unable to mmap ret",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ start = mdlMap->vma->vm_start;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ pfn = _NonContiguousToPfn(mdl->u.nonContiguousPages, i);
++
++ if (remap_pfn_range(mdlMap->vma,
++ start,
++ pfn,
++ PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ start += PAGE_SIZE;
++ addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_DefaultUnmapUser(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ )
++{
++ _UnmapUserLogical(Logical, Size);
++}
++
++gceSTATUS
++_DefaultMapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ )
++{
++ *Logical = _CreateKernelVirtualMapping(Mdl);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultUnmapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ )
++{
++ _DestoryKernelVirtualMapping(Logical);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultLogicalToPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ return _ConvertLogical2Physical(
++ Allocator->os, Logical, ProcessID, Mdl, Physical);
++}
++
++gceSTATUS
++_DefaultCache(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gcmkASSERT(Mdl->pagedMem && !Mdl->contiguous);
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, Offset);
++
++ return gcvSTATUS_OK;
++}
++
++void
++_DefaultAllocatorDestructor(
++ IN void* PrivateData
++ )
++{
++ kfree(PrivateData);
++}
++
++/* Default allocator operations. */
++gcsALLOCATOR_OPERATIONS DefaultAllocatorOperations = {
++ .Alloc = _DefaultAlloc,
++ .Free = _DefaultFree,
++ .MapUser = _DefaultMapUser,
++ .UnmapUser = _DefaultUnmapUser,
++ .MapKernel = _DefaultMapKernel,
++ .UnmapKernel = _DefaultUnmapKernel,
++ .LogicalToPhysical = _DefaultLogicalToPhysical,
++ .Cache = _DefaultCache,
++ .Physical = _DefaultPhysical,
++};
++
++/* Default allocator entry. */
++gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++ gcsDEFAULT_PRIV_PTR priv = gcvNULL;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &DefaultAllocatorOperations, &allocator));
++
++ priv = kzalloc(gcmSIZEOF(gcsDEFAULT_PRIV), GFP_KERNEL | gcdNOWARN);
++
++ if (!priv)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Register private data. */
++ allocator->privateData = priv;
++ allocator->privateDataDestructor = _DefaultAllocatorDestructor;
++
++ allocator->debugfsInit = _DefaultAllocatorDebugfsInit;
++ allocator->debugfsCleanup = _DefaultAllocatorDebugfsCleanup;
++
++ *Allocator = allocator;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/***************************************************************************\
++************************ Allocator helper ***********************************
++\***************************************************************************/
++
++gceSTATUS
++gckALLOCATOR_Construct(
++ IN gckOS Os,
++ IN gcsALLOCATOR_OPERATIONS * Operations,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=%p, Operations=%p, Allocator=%p",
++ Os, Operations, Allocator);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Allocator != gcvNULL);
++ gcmkVERIFY_ARGUMENT
++ ( Operations
++ && Operations->Alloc
++ && Operations->Free
++ && Operations->MapUser
++ && Operations->UnmapUser
++ && Operations->MapKernel
++ && Operations->UnmapKernel
++ && Operations->LogicalToPhysical
++ && Operations->Cache
++ && Operations->Physical
++ );
++
++ gcmkONERROR(
++ gckOS_Allocate(Os, gcmSIZEOF(gcsALLOCATOR), (gctPOINTER *)&allocator));
++
++ gckOS_ZeroMemory(allocator, gcmSIZEOF(gcsALLOCATOR));
++
++ /* Record os. */
++ allocator->os = Os;
++
++ /* Set operations. */
++ allocator->ops = Operations;
++
++ allocator->capability = gcvALLOC_FLAG_CONTIGUOUS
++ | gcvALLOC_FLAG_NON_CONTIGUOUS
++ | gcvALLOC_FLAG_CACHEABLE
++ | gcvALLOC_FLAG_MEMLIMIT;
++
++ *Allocator = allocator;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/******************************************************************************\
++******************************** Debugfs Support *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocatorDebugfsInit(
++ IN gckOS Os
++ )
++{
++ gceSTATUS status;
++ gckGALDEVICE device = Os->device;
++
++ gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir;
++
++ gcmkONERROR(gckDEBUGFS_DIR_Init(dir, device->debugfsDir.root, "allocators"));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++static void
++_AllocatorDebugfsCleanup(
++ IN gckOS Os
++ )
++{
++ gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir;
++
++ gckDEBUGFS_DIR_Deinit(dir);
++}
++
++/***************************************************************************\
++************************ Allocator management *******************************
++\***************************************************************************/
++
++gceSTATUS
++gckOS_ImportAllocators(
++ gckOS Os
++ )
++{
++ gceSTATUS status;
++ gctUINT i;
++ gckALLOCATOR allocator;
++
++ _AllocatorDebugfsInit(Os);
++
++ INIT_LIST_HEAD(&Os->allocatorList);
++
++ for (i = 0; i < gcmCOUNTOF(allocatorArray); i++)
++ {
++ if (allocatorArray[i].construct)
++ {
++ /* Construct allocator. */
++ status = allocatorArray[i].construct(Os, &allocator);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("["DEVICE_NAME"]: Can't construct allocator(%s)",
++ allocatorArray[i].name);
++
++ continue;
++ }
++
++ allocator->name = allocatorArray[i].name;
++
++ if (allocator->debugfsInit)
++ {
++ /* Init allocator's debugfs. */
++ allocator->debugfsInit(allocator, &Os->allocatorDebugfsDir);
++ }
++
++ list_add_tail(&allocator->head, &Os->allocatorList);
++ }
++ }
++
++#if gcdDEBUG
++ list_for_each_entry(allocator, &Os->allocatorList, head)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d) Allocator: %s",
++ __FUNCTION__, __LINE__,
++ allocator->name
++ );
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_FreeAllocators(
++ gckOS Os
++ )
++{
++ gckALLOCATOR allocator;
++ gckALLOCATOR temp;
++
++ list_for_each_entry_safe(allocator, temp, &Os->allocatorList, head)
++ {
++ list_del(&allocator->head);
++
++ if (allocator->debugfsCleanup)
++ {
++ /* Clean up allocator's debugfs. */
++ allocator->debugfsCleanup(allocator);
++ }
++
++ /* Free private data. */
++ if (allocator->privateDataDestructor && allocator->privateData)
++ {
++ allocator->privateDataDestructor(allocator->privateData);
++ }
++
++ gckOS_Free(Os, allocator);
++ }
++
++ _AllocatorDebugfsCleanup(Os);
++
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_allocator.h 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,400 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_allocator_h_
++#define __gc_hal_kernel_allocator_h_
++
++#include "gc_hal_kernel_linux.h"
++
++typedef struct _gcsALLOCATOR * gckALLOCATOR;
++
++typedef struct _gcsALLOCATOR_OPERATIONS
++{
++ /**************************************************************************
++ **
++ ** Alloc
++ **
++ ** Allocate memory; the requested size is page aligned.
++ **
++ ** INPUT:
++ **
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to the Mdl which stores information
++ ** about allocated memory.
++ **
++ ** gctSIZE_T NumPages
++ ** Number of pages to allocate.
++ **
++ ** gctUINT32 Flag
++ ** Allocation option.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS
++ (*Alloc)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flag
++ );
++
++ /**************************************************************************
++ **
++ ** Free
++ **
++ ** Free memory.
++ **
++ ** INPUT:
++ **
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Mdl which stores information.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ void
++ (*Free)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl
++ );
++
++ /**************************************************************************
++ **
++ ** MapUser
++ **
++ ** Map memory to user space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl.
++ **
++ ** PLINUX_MDL_MAP MdlMap
++ ** Pointer to a MdlMap; the mapped address is stored
++ ** in MdlMap->vmaAddr.
++ **
++ ** gctBOOL Cacheable
++ ** Whether this mapping is cacheable.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gctINT
++ (*MapUser)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap,
++ IN gctBOOL Cacheable
++ );
++
++ /**************************************************************************
++ **
++ ** UnmapUser
++ **
++ ** Unmap address from user address space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** gctPOINTER Logical
++ ** Address to be unmapped.
++ **
++ ** gctUINT32 Size
++ ** Size of address space
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ void
++ (*UnmapUser)(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ );
++
++ /**************************************************************************
++ **
++ ** MapKernel
++ **
++ ** Map memory to kernel space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** OUTPUT:
++ ** gctPOINTER * Logical
++ ** Mapped kernel address.
++ */
++ gceSTATUS
++ (*MapKernel)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ );
++
++ /**************************************************************************
++ **
++ ** UnmapKernel
++ **
++ ** Unmap memory from kernel space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Mapped kernel address.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS
++ (*UnmapKernel)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ );
++
++ /**************************************************************************
++ **
++ ** LogicalToPhysical
++ **
++ ** Get the physical address from a logical address; the logical
++ ** address can be a user virtual address or a kernel
++ ** virtual address.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Logical address to convert; may be a user or kernel virtual address.
++ **
++ ** gctUINT32 ProcessID
++ ** pid of current process.
++ ** OUTPUT:
++ **
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ */
++ gceSTATUS
++ (*LogicalToPhysical)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ );
++
++ /**************************************************************************
++ **
++ ** Cache
++ **
++ ** Maintain cache coherency.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Logical address, could be user address or kernel address
++ **
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ ** gctUINT32 Bytes
++ ** Size of memory region.
++ **
++ ** gceCACHEOPERATION Operation
++ ** Cache operation.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS (*Cache)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++ /**************************************************************************
++ **
++ ** Physical
++ **
++ ** Get the physical address from an offset in the memory region.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++ ** Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctUINT32 Offset
++ ** Offset in this memory region.
++ **
++ ** OUTPUT:
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ */
++ gceSTATUS (*Physical)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ );
++}
++gcsALLOCATOR_OPERATIONS;
++
++typedef struct _gcsALLOCATOR
++{
++ /* Pointer to gckOS Object. */
++ gckOS os;
++
++ /* Name. */
++ gctSTRING name;
++
++ /* Operations. */
++ gcsALLOCATOR_OPERATIONS* ops;
++
++ /* Capability of this allocator. */
++ gctUINT32 capability;
++
++ struct list_head head;
++
++ /* Debugfs entry of this allocator. */
++ gcsDEBUGFS_DIR debugfsDir;
++
++ /* Init allocator debugfs. */
++ void (*debugfsInit)(gckALLOCATOR, gckDEBUGFS_DIR);
++
++ /* Cleanup allocator debugfs. */
++ void (*debugfsCleanup)(gckALLOCATOR);
++
++ /* Private data used by customer allocator. */
++ void * privateData;
++
++ /* Private data destructor. */
++ void (*privateDataDestructor)(void *);
++}
++gcsALLOCATOR;
++
++typedef struct _gcsALLOCATOR_DESC
++{
++ /* Name of an allocator. */
++ char * name;
++
++ /* Entry function to construct an allocator. */
++ gceSTATUS (*construct)(gckOS, gckALLOCATOR *);
++}
++gcsALLOCATOR_DESC;
++
++/*
++* Helpers
++*/
++
++/* Fill a gcsALLOCATOR_DESC structure. */
++#define gcmkDEFINE_ALLOCATOR_DESC(Name, Construct) \
++ { \
++ .name = Name, \
++ .construct = Construct, \
++ }
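++
++/* Usage sketch (illustration only): an allocatorArray entry built with the
++ helper above might look like the following. The binding of the name
++ "default" to the _DefaultAlloctorInit constructor implemented in
++ gc_hal_kernel_allocator.c is an assumption about how the array in
++ gc_hal_kernel_allocator_array.h is populated.
++
++ gcsALLOCATOR_DESC allocatorArray[] =
++ {
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++ };
++*/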
++
++/* Construct an allocator. */
++gceSTATUS
++gckALLOCATOR_Construct(
++ IN gckOS Os,
++ IN gcsALLOCATOR_OPERATIONS * Operations,
++ OUT gckALLOCATOR * Allocator
++ );
++
++/*
++ How to implement customer allocator
++
++ Building in a customer allocator
++
++ It is recommended that a customer allocator be implemented in independent
++ source file(s) specified by CUSTOMER_ALLOCATOR_OBJS in Kbuild.
++
++ Register gcsALLOCATOR
++
++ For each customer-specified allocator, a description entry must be added
++ to allocatorArray defined in gc_hal_kernel_allocator_array.h.
++
++ An entry in allocatorArray is a gcsALLOCATOR_DESC structure which describes
++ name and constructor of a gckALLOCATOR object.
++
++
++ Implement gcsALLOCATOR_DESC.construct()
++
++ In gcsALLOCATOR_DESC.construct(), gckALLOCATOR_Construct should be called
++ to create a gckALLOCATOR object; customer-specified private data can
++ be put in gcsALLOCATOR.privateData.
++
++
++ Implement gcsALLOCATOR_OPERATIONS
++
++ When calling gckALLOCATOR_Construct to create a gckALLOCATOR object, a
++ gcsALLOCATOR_OPERATIONS structure must be provided with all of its members
++ implemented.
++
++*/
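++
++/*
++ Constructor sketch (illustration only). Everything prefixed with "Example"
++ is hypothetical; the pattern simply mirrors _DefaultAlloctorInit in
++ gc_hal_kernel_allocator.c.
++
++ static gceSTATUS
++ _ExampleAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++ {
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &ExampleAllocatorOperations, &allocator));
++
++ allocator->privateData = gcvNULL;
++ allocator->privateDataDestructor = gcvNULL;
++
++ *Allocator = allocator;
++ return gcvSTATUS_OK;
++
++ OnError:
++ return status;
++ }
++
++ The matching allocatorArray entry would then be
++ gcmkDEFINE_ALLOCATOR_DESC("example", _ExampleAlloctorInit).
++*/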
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,1166 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++#include <linux/init.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++#ifdef MODVERSIONS
++#include <linux/modversions.h>
++#endif
++#include <linux/stddef.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/mutex.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <linux/completion.h>
++#include <linux/seq_file.h>
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel.h"
++
++/*
++ Prerequisite:
++
++ 1) Debugfs feature must be enabled in the kernel.
++ 1.a) To enable it when compiling the uImage, run "make menuconfig" and
++ enable debugfs under the "Kernel hacking" menu.
++
++ HOW TO USE:
++ 1) insert the driver with the following option logFileSize, Ex: insmod galcore.ko ...... logFileSize=10240
++ This gives a circular buffer of 10 MB
++
++ 2) Usually after inserting the driver, the debug file system is mounted under /sys/kernel/debug/
++
++ 2.a) If the debugfs is not mounted, you must do "mount -t debugfs none /sys/kernel/debug"
++
++ 3) To read what is being printed in the debugfs file system:
++ Ex : cat /sys/kernel/debug/gc/galcore_trace
++
++ 4) To write into the debug file system from the user side:
++ Ex: echo "hello" > /sys/kernel/debug/gc/galcore_trace
++
++ 5) To write into debugfs from the kernel side, use the function gckDEBUGFS_Print
++
++ How to Get Video Memory Usage:
++ 1) Select the process whose video memory usage should be dumped; it does not need to be set again until a different <pid> is wanted.
++ echo <pid> > /sys/kernel/debug/gc/vidmem
++
++ 2) Get video memory usage.
++ cat /sys/kernel/debug/gc/vidmem
++
++ USECASE Kernel Dump:
++
++ 1) Go to /hal/inc/gc_hal_options.h, and enable the following flags:
++ - # define gcdDUMP 1
++ - # define gcdDUMP_IN_KERNEL 1
++ - # define gcdDUMP_COMMAND 1
++
++ 2) Go to /hal/kernel/gc_hal_kernel_command.c and disable the following flag
++ -#define gcdSIMPLE_COMMAND_DUMP 0
++
++ 3) Compile the driver
++ 4) insmod it with the logFileSize option
++ 5) Run an application
++ 6) You can get the dump by cat /sys/kernel/debug/gpu/galcore_trace
++
++ */
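++
++/*
++ Kernel-side usage sketch (illustration only; the format string and the
++ values commit and pid are made up). gckDEBUGFS_Print takes printf-style
++ arguments and appends the formatted line to the current node's circular
++ buffer:
++
++ gckDEBUGFS_Print("commit %u from pid %d\n", commit, pid);
++*/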
++
++/**/
++typedef va_list gctDBGARGS ;
++#define gcmkARGS_START(argument, pointer) va_start(argument, pointer)
++#define gcmkARGS_END(argument) va_end(argument)
++
++#define gcmkDEBUGFS_PRINT(ArgumentSize, Message) \
++ { \
++ gctDBGARGS __arguments__; \
++ gcmkARGS_START(__arguments__, Message); \
++ _debugfs_res = _DebugFSPrint(ArgumentSize, Message, &__arguments__);\
++ gcmkARGS_END(__arguments__); \
++ }
++
++/* Debug File System Node Struct. */
++struct _gcsDEBUGFS_Node
++{
++ /*wait queues for read and write operations*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ wait_queue_head_t read_q , write_q ;
++#else
++ struct wait_queue *read_q , *write_q ;
++#endif
++ struct dentry *parent ; /*parent directory*/
++ struct dentry *filen ; /*filename*/
++ struct dentry *vidmem;
++ struct semaphore sem ; /* mutual exclusion semaphore */
++ char *data ; /* The circular buffer data */
++ int size ; /* Size of the buffer pointed to by 'data' */
++ int refcount ; /* Files that have this buffer open */
++ int read_point ; /* Offset in circ. buffer of oldest data */
++ int write_point ; /* Offset in circ. buffer of newest data */
++ int offset ; /* Byte number of read_point in the stream */
++ struct _gcsDEBUGFS_Node *next ;
++};
++
++/* amount of data in the queue */
++#define gcmkNODE_QLEN(node) ( (node)->write_point >= (node)->read_point ? \
++ (node)->write_point - (node)->read_point : \
++ (node)->size - (node)->read_point + (node)->write_point)
++
++/* byte number of the last byte in the queue */
++#define gcmkNODE_FIRST_EMPTY_BYTE(node) ((node)->offset + gcmkNODE_QLEN(node))
++
++/*Synchronization primitives*/
++#define gcmkNODE_READQ(node) (&((node)->read_q))
++#define gcmkNODE_WRITEQ(node) (&((node)->write_q))
++#define gcmkNODE_SEM(node) (&((node)->sem))
++
++/*Utilities*/
++#define gcmkMIN(x, y) ((x) < (y) ? (x) : y)
++
++/*Debug File System Struct*/
++typedef struct _gcsDEBUGFS_
++{
++ gcsDEBUGFS_Node* linkedlist ;
++ gcsDEBUGFS_Node* currentNode ;
++ int isInited ;
++} gcsDEBUGFS_ ;
++
++/*debug file system*/
++static gcsDEBUGFS_ gc_dbgfs ;
++
++static int gc_debugfs_open(struct inode *inode, struct file *file)
++{
++ gcsINFO_NODE *node = inode->i_private;
++
++ return single_open(file, node->info->show, node);
++}
++
++static const struct file_operations gc_debugfs_operations = {
++ .owner = THIS_MODULE,
++ .open = gc_debugfs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++gceSTATUS
++gckDEBUGFS_DIR_Init(
++ IN gckDEBUGFS_DIR Dir,
++ IN struct dentry *root,
++ IN gctCONST_STRING Name
++ )
++{
++ Dir->root = debugfs_create_dir(Name, root);
++
++ if (!Dir->root)
++ {
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ INIT_LIST_HEAD(&Dir->nodeList);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDEBUGFS_DIR_CreateFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count,
++ IN gctPOINTER Data
++ )
++{
++ int i;
++ gcsINFO_NODE * node;
++ gceSTATUS status;
++
++ for (i = 0; i < count; i++)
++ {
++ /* Create a node. */
++ node = (gcsINFO_NODE *)kzalloc(sizeof(gcsINFO_NODE), GFP_KERNEL);
++
++ node->info = &List[i];
++ node->device = Data;
++
++ /* Bind to a file. TODO: clean up on failure. */
++ node->entry = debugfs_create_file(
++ List[i].name, S_IRUGO|S_IWUSR, Dir->root, node, &gc_debugfs_operations);
++
++ if (!node->entry)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ list_add(&(node->head), &(Dir->nodeList));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(Dir, List, count));
++ return status;
++}
++
++gceSTATUS
++gckDEBUGFS_DIR_RemoveFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count
++ )
++{
++ int i;
++ gcsINFO_NODE * node;
++ gcsINFO_NODE * temp;
++
++ for (i = 0; i < count; i++)
++ {
++ list_for_each_entry_safe(node, temp, &Dir->nodeList, head)
++ {
++ if (node->info == &List[i])
++ {
++ debugfs_remove(node->entry);
++ list_del(&node->head);
++ kfree(node);
++ }
++ }
++ }
++
++ return gcvSTATUS_OK;
++}
++
++void
++gckDEBUGFS_DIR_Deinit(
++ IN gckDEBUGFS_DIR Dir
++ )
++{
++ if (Dir->root != NULL)
++ {
++ debugfs_remove(Dir->root);
++ Dir->root = NULL;
++ }
++}
++
++/*******************************************************************************
++ **
++ ** READ & WRITE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _ReadFromNode
++ **
++ ** 1) reading bytes out of a circular buffer with wraparound.
++ ** 2)returns caddr_t, pointer to data read, which the caller must free.
++ ** 3) length is (a pointer to) the number of bytes to be read, which will be set by this function to
++ ** be the number of bytes actually returned
++ **
++ *******************************************************************************/
++static caddr_t
++_ReadFromNode (
++ gcsDEBUGFS_Node* Node ,
++ size_t *Length ,
++ loff_t *Offset
++ )
++{
++ caddr_t retval ;
++ int bytes_copied = 0 , n , start_point , remaining ;
++
++ /* is the user trying to read data that has already scrolled off? */
++ if ( *Offset < Node->offset )
++ {
++ *Offset = Node->offset ;
++ }
++
++ /* is the user trying to read past EOF? */
++ if ( *Offset >= gcmkNODE_FIRST_EMPTY_BYTE ( Node ) )
++ {
++ return NULL ;
++ }
++
++ /* find the smaller of the total bytes we have available and what
++ * the user is asking for */
++
++ *Length = gcmkMIN ( *Length , gcmkNODE_FIRST_EMPTY_BYTE ( Node ) - *Offset ) ;
++
++ remaining = * Length ;
++
++ /* figure out where to start based on user's Offset */
++ start_point = Node->read_point + ( *Offset - Node->offset ) ;
++
++ start_point = start_point % Node->size ;
++
++ /* allocate memory to return */
++ if ( ( retval = kmalloc ( sizeof (char ) * remaining , GFP_KERNEL ) ) == NULL )
++ return NULL ;
++
++ /* copy the (possibly noncontiguous) data to our buffer */
++ while ( remaining )
++ {
++ n = gcmkMIN ( remaining , Node->size - start_point ) ;
++ memcpy ( retval + bytes_copied , Node->data + start_point , n ) ;
++ bytes_copied += n ;
++ remaining -= n ;
++ start_point = ( start_point + n ) % Node->size ;
++ }
++
++ /* advance user's file pointer */
++ *Offset += * Length ;
++
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ ** _WriteToNode
++ **
++ ** 1) writes to a circular buffer with wraparound.
++ ** 2)in case of an overflow, it overwrites the oldest unread data.
++ **
++ *********************************************************************************/
++static void
++_WriteToNode (
++ gcsDEBUGFS_Node* Node ,
++ caddr_t Buf ,
++ int Length
++ )
++{
++ int bytes_copied = 0 ;
++ int overflow = 0 ;
++ int n ;
++
++ if ( Length + gcmkNODE_QLEN ( Node ) >= ( Node->size - 1 ) )
++ {
++ overflow = 1 ;
++
++ /* in case of overflow, figure out where the new buffer will
++ * begin. we start by figuring out where the current buffer ENDS:
++ * node->parent->offset + gcmkNODE_QLEN. we then advance the end-offset
++ * by the Length of the current write, and work backwards to
++ * figure out what the oldest unoverwritten data will be (i.e.,
++ * size of the buffer). */
++ Node->offset = Node->offset + gcmkNODE_QLEN ( Node ) + Length
++ - Node->size + 1 ;
++ }
++
++ while ( Length )
++ {
++ /* how many contiguous bytes are available from the write point to
++ * the end of the circular buffer? */
++ n = gcmkMIN ( Length , Node->size - Node->write_point ) ;
++ memcpy ( Node->data + Node->write_point , Buf + bytes_copied , n ) ;
++ bytes_copied += n ;
++ Length -= n ;
++ Node->write_point = ( Node->write_point + n ) % Node->size ;
++ }
++
++ /* if there is an overflow, reset the read point to read whatever is
++ * the oldest data that we have, that has not yet been
++ * overwritten. */
++ if ( overflow )
++ {
++ Node->read_point = ( Node->write_point + 1 ) % Node->size ;
++ }
++}
++
++/*******************************************************************************
++ **
++ ** PRINTING UTILITY (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _GetArgumentSize
++ **
++ **
++ *******************************************************************************/
++static gctINT
++_GetArgumentSize (
++ IN gctCONST_STRING Message
++ )
++{
++ gctINT i , count ;
++
++ for ( i = 0 , count = 0 ; Message[i] ; i += 1 )
++ {
++ if ( Message[i] == '%' )
++ {
++ count += 1 ;
++ }
++ }
++ return count * sizeof (unsigned int ) ;
++}
++
++/*******************************************************************************
++ **
++ ** _AppendString
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_AppendString (
++ IN gcsDEBUGFS_Node* Node ,
++ IN gctCONST_STRING String ,
++ IN int Length
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( Length , Node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ return - ENOMEM ;
++
++ /* copy into our temp buffer */
++ memcpy ( message , String , n ) ;
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( Node , message , n ) ;
++ kfree ( message ) ;
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSPrint
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSPrint (
++ IN unsigned int ArgumentSize ,
++ IN const char* Message ,
++ IN gctDBGARGS * Arguments
++
++ )
++{
++ char buffer[MAX_LINE_SIZE] ;
++ int len ;
++ ssize_t res=0;
++
++ if(in_interrupt())
++ {
++ return - ERESTARTSYS ;
++ }
++
++ if(down_interruptible( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ len = vsnprintf ( buffer , sizeof (buffer ) , Message , *( va_list * ) Arguments ) ;
++ buffer[len] = '\0' ;
++
++ /* Add end-of-line if missing. */
++ if ( buffer[len - 1] != '\n' )
++ {
++ buffer[len ++] = '\n' ;
++ buffer[len] = '\0' ;
++ }
++ res = _AppendString ( gc_dbgfs.currentNode , buffer , len ) ;
++ up ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ wake_up_interruptible ( gcmkNODE_READQ ( gc_dbgfs.currentNode ) ) ; /* blocked in read*/
++ return res;
++}
++
++/*******************************************************************************
++ **
++ ** LINUX SYSTEM FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** Find the gcsDEBUGFS_Node structure associated with an inode.
++ ** Returns a pointer to the structure if found, NULL if not found.
++ **
++ *******************************************************************************/
++static gcsDEBUGFS_Node*
++_GetNodeInfo (
++ IN struct inode *Inode
++ )
++{
++ gcsDEBUGFS_Node* node ;
++
++ if ( Inode == NULL )
++ return NULL ;
++
++ for ( node = gc_dbgfs.linkedlist ; node != NULL ; node = node->next )
++ if ( node->filen->d_inode->i_ino == Inode->i_ino )
++ return node ;
++
++ return NULL ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSRead
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSRead (
++ struct file *file ,
++ char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ int retval ;
++ caddr_t data_to_return ;
++ gcsDEBUGFS_Node* node ;
++ /* get the metadata about this node */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ printk ( "debugfs_read: record not found\n" ) ;
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* wait until there's data available (unless we do nonblocking reads) */
++ while ( *offset >= gcmkNODE_FIRST_EMPTY_BYTE ( node ) )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ if ( file->f_flags & O_NONBLOCK )
++ {
++ return - EAGAIN ;
++ }
++ if ( wait_event_interruptible ( ( *( gcmkNODE_READQ ( node ) ) ) , ( *offset < gcmkNODE_FIRST_EMPTY_BYTE ( node ) ) ) )
++ {
++ return - ERESTARTSYS ; /* signal: tell the fs layer to handle it */
++ }
++ /* otherwise loop, but first reacquire the lock */
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ }
++ data_to_return = _ReadFromNode ( node , &length , offset ) ;
++ if ( data_to_return == NULL )
++ {
++ retval = 0 ;
++ goto unlock ;
++ }
++ if ( copy_to_user ( buffer , data_to_return , length ) > 0 )
++ {
++ retval = - EFAULT ;
++ }
++ else
++ {
++ retval = length ;
++ }
++ kfree ( data_to_return ) ;
++unlock:
++ up ( gcmkNODE_SEM ( node ) ) ;
++ wake_up_interruptible ( gcmkNODE_WRITEQ ( node ) ) ;
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ **_DebugFSWrite
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSWrite (
++ struct file *file ,
++ const char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++ gcsDEBUGFS_Node*node ;
++
++ /* get the metadata about this log */
++ if ( ( node = _GetNodeInfo ( file->f_dentry->d_inode ) ) == NULL )
++ {
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( length , node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ return - ENOMEM ;
++ }
++
++
++ /* copy into our temp buffer */
++ if ( copy_from_user ( message , buffer , n ) > 0 )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ kfree ( message ) ;
++ return - EFAULT ;
++ }
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( node , message , n ) ;
++
++ kfree ( message ) ;
++ up ( gcmkNODE_SEM ( node ) ) ;
++
++ /* wake up any readers that might be waiting for the data. we call
++ * schedule in the vague hope that a reader will run before the
++ * writer's next write, to avoid losing data. */
++ wake_up_interruptible ( gcmkNODE_READQ ( node ) ) ;
++
++ return n ;
++}
++
++int dumpProcess = 0;
++
++void
++_PrintCounter(
++ struct seq_file *file,
++ gcsDATABASE_COUNTERS * counter,
++ gctCONST_STRING Name
++ )
++{
++ seq_printf(file,"Counter: %s\n", Name);
++
++ seq_printf(file,"%-9s%10s","", "All");
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Current");
++
++ seq_printf(file,"%10lld", counter->bytes);
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Maximum");
++
++ seq_printf(file,"%10lld", counter->maxBytes);
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Total");
++
++ seq_printf(file,"%10lld", counter->totalBytes);
++
++ seq_printf(file, "\n");
++}
++
++void
++_ShowCounters(
++ struct seq_file *file,
++ gcsDATABASE_PTR database
++ )
++{
++ gctUINT i = 0;
++ gcsDATABASE_COUNTERS * counter;
++ gcsDATABASE_COUNTERS * nonPaged;
++
++ static gctCONST_STRING surfaceTypes[] = {
++ "UNKNOWN",
++ "Index",
++ "Vertex",
++ "Texture",
++ "RT",
++ "Depth",
++ "Bitmap",
++ "TS",
++ "Image",
++ "Mask",
++ "Scissor",
++ "HZDepth",
++ };
++
++ /* Get pointer to counters. */
++ counter = &database->vidMem;
++
++ nonPaged = &database->nonPaged;
++
++ seq_printf(file,"Counter: vidMem (for each surface type)\n");
++
++ seq_printf(file,"%-9s%10s","", "All");
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file, "%10s",surfaceTypes[i]);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Current");
++
++ seq_printf(file,"%10lld", database->vidMem.bytes);
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file,"%10lld", counter->bytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Maximum");
++
++ seq_printf(file,"%10lld", database->vidMem.maxBytes);
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file,"%10lld", counter->maxBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Total");
++
++ seq_printf(file,"%10lld", database->vidMem.totalBytes);
++
++ for (i = 1; i < gcvSURF_NUM_TYPES; i++)
++ {
++ counter = &database->vidMemType[i];
++
++ seq_printf(file,"%10lld", counter->totalBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"Counter: vidMem (for each pool)\n");
++
++ seq_printf(file,"%-9s%10s","", "All");
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ seq_printf(file, "%10d", i);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Current");
++
++ seq_printf(file,"%10lld", database->vidMem.bytes);
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ counter = &database->vidMemPool[i];
++
++ seq_printf(file,"%10lld", counter->bytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Maximum");
++
++ seq_printf(file,"%10lld", database->vidMem.maxBytes);
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ counter = &database->vidMemPool[i];
++
++ seq_printf(file,"%10lld", counter->maxBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ seq_printf(file,"%-9s","Total");
++
++ seq_printf(file,"%10lld", database->vidMem.totalBytes);
++
++ for (i = 1; i < gcvPOOL_NUMBER_OF_POOLS; i++)
++ {
++ counter = &database->vidMemPool[i];
++
++ seq_printf(file,"%10lld", counter->totalBytes);
++ }
++
++ seq_printf(file, "\n");
++
++ /* Print nonPaged. */
++ _PrintCounter(file, &database->nonPaged, "nonPaged");
++ _PrintCounter(file, &database->contiguous, "contiguous");
++ _PrintCounter(file, &database->mapUserMemory, "mapUserMemory");
++ _PrintCounter(file, &database->mapMemory, "mapMemory");
++}
++
++gckKERNEL
++_GetValidKernel(
++ gckGALDEVICE Device
++);
++static int vidmem_show(struct seq_file *file, void *unused)
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gckGALDEVICE device = file->private;
++
++ gckKERNEL kernel = _GetValidKernel(device);
++ if(kernel == gcvNULL)
++ {
++ return 0;
++ }
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(kernel, dumpProcess, gcvFALSE, &database));
++
++ seq_printf(file, "VidMem Usage (Process %d):\n", dumpProcess);
++
++ _ShowCounters(file, database);
++
++ return 0;
++
++OnError:
++ return 0;
++}
++
++static int
++vidmem_open(
++ struct inode *inode,
++ struct file *file
++ )
++{
++ return single_open(file, vidmem_show, inode->i_private);
++}
++
++static ssize_t
++vidmem_write(
++ struct file *file,
++ const char __user *buf,
++ size_t count,
++ loff_t *pos
++ )
++{
++ dumpProcess = simple_strtol(buf, NULL, 0);
++ return count;
++}
++
++/*******************************************************************************
++ **
++ ** File Operations Table
++ **
++ *******************************************************************************/
++static const struct file_operations debugfs_operations = {
++ .owner = THIS_MODULE ,
++ .read = _DebugFSRead ,
++ .write = _DebugFSWrite ,
++} ;
++
++static const struct file_operations vidmem_operations = {
++ .owner = THIS_MODULE ,
++ .open = vidmem_open,
++ .read = seq_read,
++ .write = vidmem_write,
++ .llseek = seq_lseek,
++} ;
++
++/*******************************************************************************
++ **
++ ** INTERFACE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_IsEnabled
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++
++gctINT
++gckDEBUGFS_IsEnabled ( void )
++{
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_Initialize
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDEBUGFS_Initialize ( void )
++{
++ if ( ! gc_dbgfs.isInited )
++ {
++ gc_dbgfs.linkedlist = gcvNULL ;
++ gc_dbgfs.currentNode = gcvNULL ;
++ gc_dbgfs.isInited = 1 ;
++ }
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_Terminate
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDEBUGFS_Terminate ( void )
++{
++ gcsDEBUGFS_Node * next = gcvNULL ;
++ gcsDEBUGFS_Node * temp = gcvNULL ;
++ if ( gc_dbgfs.isInited )
++ {
++ temp = gc_dbgfs.linkedlist ;
++ while ( temp != gcvNULL )
++ {
++ next = temp->next ;
++ gckDEBUGFS_FreeNode ( temp ) ;
++ kfree ( temp ) ;
++ temp = next ;
++ }
++ gc_dbgfs.isInited = 0 ;
++ }
++ return 0 ;
++}
++
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_CreateNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ ** gcsDEBUGFS_Node ** Node
++ ** Pointer to a variable receiving the gcsDEBUGFS_Node object pointer on
++ ** success.
++ *********************************************************************************/
++
++gctINT
++gckDEBUGFS_CreateNode (
++ IN gctPOINTER Device,
++ IN gctINT SizeInKB ,
++ IN struct dentry * Root ,
++ IN gctCONST_STRING NodeName ,
++ OUT gcsDEBUGFS_Node **Node
++ )
++{
++ gcsDEBUGFS_Node*node ;
++ /* allocate space for our metadata and initialize it */
++ if ( ( node = kmalloc ( sizeof (gcsDEBUGFS_Node ) , GFP_KERNEL ) ) == NULL )
++ goto struct_malloc_failed ;
++
++ /*Zero it out*/
++ memset ( node , 0 , sizeof (gcsDEBUGFS_Node ) ) ;
++
++ /*Init the sync primitives*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_READQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_READQ ( node ) ) ;
++#endif
++
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ init_waitqueue_head ( gcmkNODE_WRITEQ ( node ) ) ;
++#else
++ init_waitqueue ( gcmkNODE_WRITEQ ( node ) ) ;
++#endif
++ sema_init ( gcmkNODE_SEM ( node ) , 1 ) ;
++ /*End the sync primitives*/
++
++ /*creating the debug file system*/
++ node->parent = Root;
++
++ if (SizeInKB)
++ {
++ /* figure out how much of a buffer this should be and allocate the buffer */
++ node->size = 1024 * SizeInKB ;
++ if ( ( node->data = ( char * ) vmalloc ( sizeof (char ) * node->size ) ) == NULL )
++ goto data_malloc_failed ;
++
++ /*creating the file*/
++ node->filen = debugfs_create_file(NodeName, S_IRUGO|S_IWUSR, node->parent, NULL,
++ &debugfs_operations);
++ }
++
++ node->vidmem
++ = debugfs_create_file("vidmem", S_IRUGO|S_IWUSR, node->parent, Device, &vidmem_operations);
++
++ /* add it to our linked list */
++ node->next = gc_dbgfs.linkedlist ;
++ gc_dbgfs.linkedlist = node ;
++
++
++ /* pass the struct back */
++ *Node = node ;
++ return 0 ;
++
++
++data_malloc_failed:
++ kfree ( node ) ;
++struct_malloc_failed:
++ return - ENOMEM ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_FreeNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDEBUGFS_FreeNode (
++ IN gcsDEBUGFS_Node * Node
++ )
++{
++
++ gcsDEBUGFS_Node **ptr ;
++
++ if ( Node == NULL )
++ {
++ printk ( "null passed to free_vinfo\n" ) ;
++ return ;
++ }
++
++ down ( gcmkNODE_SEM ( Node ) ) ;
++ /*free data*/
++ vfree ( Node->data ) ;
++
++ /*Close Debug fs*/
++ if (Node->vidmem)
++ {
++ debugfs_remove(Node->vidmem);
++ }
++
++ if ( Node->filen )
++ {
++ debugfs_remove ( Node->filen ) ;
++ }
++
++ /* now delete the node from the linked list */
++ ptr = & ( gc_dbgfs.linkedlist ) ;
++ while ( *ptr != Node )
++ {
++ if ( ! *ptr )
++ {
++ printk ( "corrupt info list!\n" ) ;
++ break ;
++ }
++ else
++ ptr = & ( ( **ptr ).next ) ;
++ }
++ *ptr = Node->next ;
++ up ( gcmkNODE_SEM ( Node ) ) ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_SetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDEBUGFS_SetCurrentNode (
++ IN gcsDEBUGFS_Node * Node
++ )
++{
++ gc_dbgfs.currentNode = Node ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_GetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDEBUGFS_GetCurrentNode (
++ OUT gcsDEBUGFS_Node ** Node
++ )
++{
++ *Node = gc_dbgfs.currentNode ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDEBUGFS_Print
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++ssize_t
++gckDEBUGFS_Print (
++ IN gctCONST_STRING Message ,
++ ...
++ )
++{
++ ssize_t _debugfs_res;
++ gcmkDEBUGFS_PRINT ( _GetArgumentSize ( Message ) , Message ) ;
++ return _debugfs_res;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,135 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <stdarg.h>
++
++#ifndef __gc_hal_kernel_debugfs_h_
++#define __gc_hal_kernel_debugfs_h_
++
++ #define MAX_LINE_SIZE 768 /* Max bytes for a line of debug info */
++
++
++ typedef struct _gcsDEBUGFS_Node gcsDEBUGFS_Node;
++
++typedef struct _gcsDEBUGFS_DIR *gckDEBUGFS_DIR;
++typedef struct _gcsDEBUGFS_DIR
++{
++ struct dentry * root;
++ struct list_head nodeList;
++}
++gcsDEBUGFS_DIR;
++
++typedef struct _gcsINFO
++{
++ const char * name;
++ int (*show)(struct seq_file*, void*);
++}
++gcsINFO;
++
++typedef struct _gcsINFO_NODE
++{
++ gcsINFO * info;
++ gctPOINTER device;
++ struct dentry * entry;
++ struct list_head head;
++}
++gcsINFO_NODE;
++
++gceSTATUS
++gckDEBUGFS_DIR_Init(
++ IN gckDEBUGFS_DIR Dir,
++ IN struct dentry *root,
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gckDEBUGFS_DIR_CreateFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count,
++ IN gctPOINTER Data
++ );
++
++gceSTATUS
++gckDEBUGFS_DIR_RemoveFiles(
++ IN gckDEBUGFS_DIR Dir,
++ IN gcsINFO * List,
++ IN int count
++ );
++
++void
++gckDEBUGFS_DIR_Deinit(
++ IN gckDEBUGFS_DIR Dir
++ );
++
++/*******************************************************************************
++ **
++ ** System Related
++ **
++ *******************************************************************************/
++
++gctINT gckDEBUGFS_IsEnabled(void);
++
++gctINT gckDEBUGFS_Initialize(void);
++
++gctINT gckDEBUGFS_Terminate(void);
++
++
++/*******************************************************************************
++ **
++ ** Node Related
++ **
++ *******************************************************************************/
++
++gctINT
++gckDEBUGFS_CreateNode(
++ IN gctPOINTER Device,
++ IN gctINT SizeInKB,
++ IN struct dentry * Root,
++ IN gctCONST_STRING NodeName,
++ OUT gcsDEBUGFS_Node **Node
++ );
++
++void gckDEBUGFS_FreeNode(
++ IN gcsDEBUGFS_Node * Node
++ );
++
++
++
++void gckDEBUGFS_SetCurrentNode(
++ IN gcsDEBUGFS_Node * Node
++ );
++
++
++
++void gckDEBUGFS_GetCurrentNode(
++ OUT gcsDEBUGFS_Node ** Node
++ );
++
++
++ssize_t gckDEBUGFS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#endif
++
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_debug.h 2015-05-01 14:57:59.595427001 -0500
+@@ -0,0 +1,113 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_debug_h_
++#define __gc_hal_kernel_debug_h_
++
++#include <gc_hal_kernel_linux.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <stdarg.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** OS-dependent Macros *****************************
++\******************************************************************************/
++
++typedef va_list gctARGUMENTS;
++
++#define gcmkARGUMENTS_START(Arguments, Pointer) \
++ va_start(Arguments, Pointer)
++
++#define gcmkARGUMENTS_END(Arguments) \
++ va_end(Arguments)
++
++#define gcmkARGUMENTS_ARG(Arguments, Type) \
++ va_arg(Arguments, Type)
++
++#define gcmkDECLARE_LOCK(__spinLock__) \
++ static DEFINE_SPINLOCK(__spinLock__); \
++ unsigned long __spinLock__##flags = 0;
++
++#define gcmkLOCKSECTION(__spinLock__) \
++ spin_lock_irqsave(&__spinLock__, __spinLock__##flags)
++
++#define gcmkUNLOCKSECTION(__spinLock__) \
++ spin_unlock_irqrestore(&__spinLock__, __spinLock__##flags)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETPROCESSID() \
++ task_tgid_vnr(current)
++#else
++# define gcmkGETPROCESSID() \
++ current->tgid
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETTHREADID() \
++ task_pid_vnr(current)
++#else
++# define gcmkGETTHREADID() \
++ current->pid
++#endif
++
++#define gcmkOUTPUT_STRING(String) \
++ if(gckDEBUGFS_IsEnabled()) {\
++ while(-ERESTARTSYS == gckDEBUGFS_Print(String));\
++ }else{\
++ printk(String); \
++ }\
++ touch_softlockup_watchdog()
++
++
++#define gcmkSPRINTF(Destination, Size, Message, Value) \
++ snprintf(Destination, Size, Message, Value)
++
++#define gcmkSPRINTF2(Destination, Size, Message, Value1, Value2) \
++ snprintf(Destination, Size, Message, Value1, Value2)
++
++#define gcmkSPRINTF3(Destination, Size, Message, Value1, Value2, Value3) \
++ snprintf(Destination, Size, Message, Value1, Value2, Value3)
++
++#define gcmkVSPRINTF(Destination, Size, Message, Arguments) \
++ vsnprintf(Destination, Size, Message, *((va_list*)Arguments))
++
++#define gcmkSTRCAT(Destination, Size, String) \
++ strncat(Destination, String, Size)
++
++#define gcmkMEMCPY(Destination, Source, Size) \
++ memcpy(Destination, Source, Size)
++
++#define gcmkSTRLEN(String) \
++ strlen(String)
++
++/* If not zero, forces data alignment in the variable argument list
++ by its individual size. */
++#define gcdALIGNBYSIZE 1
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_debug_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.c 2015-05-01 14:57:59.599427001 -0500
+@@ -0,0 +1,2760 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <linux/slab.h>
++
++#define _GC_OBJ_ZONE gcvZONE_DEVICE
++
++#define DEBUG_FILE "galcore_trace"
++#define PARENT_FILE "gpu"
++
++
++#ifdef FLAREON
++ static struct dove_gpio_irq_handler gc500_handle;
++#endif
++
++gckKERNEL
++_GetValidKernel(
++ gckGALDEVICE Device
++ )
++{
++ if (Device->kernels[gcvCORE_MAJOR])
++ {
++ return Device->kernels[gcvCORE_MAJOR];
++ }
++ else
++ if (Device->kernels[gcvCORE_2D])
++ {
++ return Device->kernels[gcvCORE_2D];
++ }
++ else
++ if (Device->kernels[gcvCORE_VG])
++ {
++ return Device->kernels[gcvCORE_VG];
++ }
++ else
++ {
++ return gcvNULL;
++ }
++}
++
++/******************************************************************************\
++******************************** Debugfs Support *******************************
++\******************************************************************************/
++
++/******************************************************************************\
++***************************** DEBUG SHOW FUNCTIONS *****************************
++\******************************************************************************/
++
++int gc_info_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ int i = 0;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ chipModel = device->kernels[i]->vg->hardware->chipModel;
++ chipRevision = device->kernels[i]->vg->hardware->chipRevision;
++ }
++ else
++#endif
++ {
++ chipModel = device->kernels[i]->hardware->identity.chipModel;
++ chipRevision = device->kernels[i]->hardware->identity.chipRevision;
++ }
++
++ seq_printf(m, "gpu : %d\n", i);
++ seq_printf(m, "model : %4x\n", chipModel);
++ seq_printf(m, "revision : %4x\n", chipRevision);
++ seq_printf(m, "\n");
++ }
++ }
++
++ return 0;
++}
++
++int gc_clients_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++
++ gckKERNEL kernel = _GetValidKernel(device);
++
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ seq_printf(m, "%-8s%s\n", "PID", "NAME");
++ seq_printf(m, "------------------------\n");
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE));
++
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ for (database = kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ seq_printf(m, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex));
++
++ /* Success. */
++ return 0;
++}
++
++static void
++_CounterAdd(
++ gcsDATABASE_COUNTERS * Dest,
++ gcsDATABASE_COUNTERS * Src
++ )
++{
++ Dest->bytes += Src->bytes;
++ Dest->maxBytes += Src->maxBytes;
++ Dest->totalBytes += Src->totalBytes;
++}
++
++static void
++_CounterPrint(
++ gcsDATABASE_COUNTERS * Counter,
++ gctCONST_STRING Name,
++ struct seq_file* m
++ )
++{
++ seq_printf(m, " %s:\n", Name);
++ seq_printf(m, " Used : %10llu B\n", Counter->bytes);
++}
++
++int gc_meminfo_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ gckKERNEL kernel = _GetValidKernel(device);
++ gckVIDMEM memory;
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctUINT32 i;
++
++ gctUINT32 free = 0, used = 0, total = 0;
++
++ gcsDATABASE_COUNTERS contiguousCounter = {0, 0, 0};
++ gcsDATABASE_COUNTERS virtualCounter = {0, 0, 0};
++ gcsDATABASE_COUNTERS nonPagedCounter = {0, 0, 0};
++
++ status = gckKERNEL_GetVideoMemoryPool(kernel, gcvPOOL_SYSTEM, &memory);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
++
++ free = memory->freeBytes;
++ used = memory->bytes - memory->freeBytes;
++ total = memory->bytes;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
++ }
++
++ seq_printf(m, "VIDEO MEMORY:\n");
++ seq_printf(m, " gcvPOOL_SYSTEM:\n");
++ seq_printf(m, " Free : %10u B\n", free);
++ seq_printf(m, " Used : %10u B\n", used);
++ seq_printf(m, " Total : %10u B\n", total);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(kernel->os, kernel->db->dbMutex, gcvINFINITE));
++
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ for (database = kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ gcsDATABASE_COUNTERS * counter = &database->vidMemPool[gcvPOOL_CONTIGUOUS];
++ _CounterAdd(&contiguousCounter, counter);
++
++ counter = &database->vidMemPool[gcvPOOL_VIRTUAL];
++ _CounterAdd(&virtualCounter, counter);
++
++
++ counter = &database->nonPaged;
++ _CounterAdd(&nonPagedCounter, counter);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(kernel->os, kernel->db->dbMutex));
++
++ _CounterPrint(&contiguousCounter, "gcvPOOL_CONTIGUOUS", m);
++ _CounterPrint(&virtualCounter, "gcvPOOL_VIRTUAL", m);
++
++ seq_printf(m, "\n");
++
++ seq_printf(m, "NON PAGED MEMORY:\n");
++ seq_printf(m, " Used : %10llu B\n", nonPagedCounter.bytes);
++
++ return 0;
++}
++
++static int
++_ShowRecord(
++ IN struct seq_file *file,
++ IN gcsDATABASE_RECORD_PTR record
++ )
++{
++ seq_printf(file, "%4d%8d%16p%16p%16zu\n",
++ record->type,
++ record->kernel->core,
++ record->data,
++ record->physical,
++ record->bytes
++ );
++
++ return 0;
++}
++
++static int
++_ShowRecords(
++ IN struct seq_file *File,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gctUINT i;
++
++ seq_printf(File, "Records:\n");
++
++ seq_printf(File, "%s%8s%16s%16s%16s\n",
++ "Type", "GPU", "Data", "Physical", "Bytes");
++
++ for (i = 0; i < gcmCOUNTOF(Database->list); i++)
++ {
++ gcsDATABASE_RECORD_PTR record = Database->list[i];
++
++ while (record != NULL)
++ {
++ _ShowRecord(File, record);
++ record = record->next;
++ }
++ }
++
++ return 0;
++}
++
++void
++_ShowCounters(
++ struct seq_file *File,
++ gcsDATABASE_PTR Database
++ );
++
++static void
++_ShowProcess(
++ IN struct seq_file *File,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gctINT pid;
++ gctUINT8 name[24];
++
++ /* Process ID and name */
++ pid = Database->processID;
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ seq_printf(File, "--------------------------------------------------------------------------------\n");
++ seq_printf(File, "Process: %-8d %s\n", pid, name);
++
++ /* Detailed records */
++ _ShowRecords(File, Database);
++
++ seq_printf(File, "Counters:\n");
++
++ _ShowCounters(File, Database);
++}
++
++static void
++_ShowProcesses(
++ IN struct seq_file * file,
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i;
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ /* Idle time since last call */
++ seq_printf(file, "GPU Idle: %llu ns\n", Kernel->db->idleTime);
++ Kernel->db->idleTime = 0;
++
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ _ShowProcess(file, database);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++}
++
++static int
++gc_db_show(struct seq_file *m, void *data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ gckKERNEL kernel = _GetValidKernel(device);
++ _ShowProcesses(m, kernel);
++ return 0 ;
++}
++
++static int
++gc_version_show(struct seq_file *m, void *data)
++{
++ seq_printf(m, "%s\n", gcvVERSION_STRING);
++
++ return 0 ;
++}
++
++int gc_idle_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckGALDEVICE device = node->device;
++ gckKERNEL kernel = _GetValidKernel(device);
++ gcuDATABASE_INFO info;
++
++ gckKERNEL_QueryProcessDB(kernel, 0, gcvFALSE, gcvDB_IDLE, &info);
++
++ seq_printf(m, "GPU idle time since last query: %llu ns\n", info.time);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"info", gc_info_show},
++ {"clients", gc_clients_show},
++ {"meminfo", gc_meminfo_show},
++ {"idle", gc_idle_show},
++ {"database", gc_db_show},
++ {"version", gc_version_show},
++};
++
++static gceSTATUS
++_DebugfsInit(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gckDEBUGFS_DIR dir = &Device->debugfsDir;
++
++ gcmkONERROR(gckDEBUGFS_DIR_Init(dir, gcvNULL, "gc"));
++
++ gcmkONERROR(gckDEBUGFS_DIR_CreateFiles(dir, InfoList, gcmCOUNTOF(InfoList), Device));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++static void
++_DebugfsCleanup(
++ IN gckGALDEVICE Device
++ )
++{
++ gckDEBUGFS_DIR dir = &Device->debugfsDir;
++
++ if (Device->debugfsDir.root)
++ {
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(dir, InfoList, gcmCOUNTOF(InfoList)));
++
++ gckDEBUGFS_DIR_Deinit(dir);
++ }
++}
++
++
++/******************************************************************************\
++*************************** Memory Allocation Wrappers *************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocateMemory(
++ IN gckGALDEVICE Device,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER *Logical,
++ OUT gctPHYS_ADDR *Physical,
++ OUT gctUINT32 *PhysAddr
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++ gcmkVERIFY_ARGUMENT(Physical != NULL);
++ gcmkVERIFY_ARGUMENT(PhysAddr != NULL);
++
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Device->os, gcvFALSE, &Bytes, Physical, Logical
++ ));
++
++ *PhysAddr = ((PLINUX_MDL)*Physical)->dmaHandle;
++
++ /* Success. */
++ gcmkFOOTER_ARG(
++ "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x",
++ *Logical, *Physical, *PhysAddr
++ );
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_FreeMemory(
++ IN gckGALDEVICE Device,
++ IN gctPOINTER Logical,
++ IN gctPHYS_ADDR Physical)
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Logical=0x%x Physical=0x%x",
++ Device, Logical, Physical);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ status = gckOS_FreeContiguous(
++ Device->os, Physical, Logical,
++ ((PLINUX_MDL) Physical)->numPages * PAGE_SIZE
++ );
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++
++/******************************************************************************\
++******************************* Interrupt Handler ******************************
++\******************************************************************************/
++#if gcdMULTI_GPU
++static irqreturn_t isrRoutine3D0(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_0_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Wake up the threadRoutine to process events. */
++ device->dataReady3D[gcvCORE_3D_0_ID] = gcvTRUE;
++ wake_up_interruptible(&device->intrWaitQueue3D[gcvCORE_3D_0_ID]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D0(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++        /* Sleep until awakened by the interrupt handler. */
++ wait_event_interruptible(device->intrWaitQueue3D[gcvCORE_3D_0_ID],
++ device->dataReady3D[gcvCORE_3D_0_ID] == gcvTRUE);
++ device->dataReady3D[gcvCORE_3D_0_ID] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_0_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++#if gcdMULTI_GPU > 1
++static irqreturn_t isrRoutine3D1(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_1_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Wake up the worker thread to process events. */
++ device->dataReady3D[gcvCORE_3D_1_ID] = gcvTRUE;
++ wake_up_interruptible(&device->intrWaitQueue3D[gcvCORE_3D_1_ID]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D1(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++        /* Sleep until awakened by the interrupt handler. */
++ wait_event_interruptible(device->intrWaitQueue3D[gcvCORE_3D_1_ID],
++ device->dataReady3D[gcvCORE_3D_1_ID] == gcvTRUE);
++ device->dataReady3D[gcvCORE_3D_1_ID] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvCORE_3D_1_ID,
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++#endif
++#elif gcdMULTI_GPU_AFFINITY
++static irqreturn_t isrRoutine3D0(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D0(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++        if (down); /*To make gcc 4.6 happy*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutine3D1(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_OCL], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_OCL]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine3D1(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_OCL]);
++        if (down); /*To make gcc 4.6 happy*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_OCL],
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++#else
++static irqreturn_t isrRoutine(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++        if (down); /*To make gcc 4.6 happy*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR],
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++#endif
++
++static irqreturn_t isrRoutine2D(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_2D],
++#if gcdMULTI_GPU
++ 0,
++#endif
++ gcvNOTIFY_INTERRUPT,
++ gcvTRUE);
++ if (gcmIS_SUCCESS(status))
++ {
++ up(&device->semas[gcvCORE_2D]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine2D(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_2D]);
++        if (down); /*To make gcc 4.6 happy*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++ gckKERNEL_Notify(device->kernels[gcvCORE_2D],
++#if gcdMULTI_GPU
++ 0,
++#endif
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutineVG(int irq, void *ctxt)
++{
++#if gcdENABLE_VG
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Serve the interrupt. */
++ status = gckVGINTERRUPT_Enque(device->kernels[gcvCORE_VG]->vg->interrupt);
++
++ /* Determine the return value. */
++ return (status == gcvSTATUS_NOT_OUR_INTERRUPT)
++ ? IRQ_RETVAL(0)
++ : IRQ_RETVAL(1);
++#else
++ return IRQ_NONE;
++#endif
++}
++
++static int threadRoutineVG(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++ static int down;
++
++ down = down_interruptible(&device->semas[gcvCORE_VG]);
++        if (down); /*To make gcc 4.6 happy*/
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++ gckKERNEL_Notify(device->kernels[gcvCORE_VG],
++#if gcdMULTI_GPU
++ 0,
++#endif
++ gcvNOTIFY_INTERRUPT,
++ gcvFALSE);
++ }
++}
++
++/******************************************************************************\
++******************************* gckGALDEVICE Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Construct
++**
++** Constructor.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gckGALDEVICE * Device
++** Pointer to a variable receiving the gckGALDEVICE object pointer on
++** success.
++*/
++gceSTATUS
++gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ IN gctINT IrqLine3D0,
++ IN gctUINT32 RegisterMemBase3D0,
++ IN gctSIZE_T RegisterMemSize3D0,
++ IN gctINT IrqLine3D1,
++ IN gctUINT32 RegisterMemBase3D1,
++ IN gctSIZE_T RegisterMemSize3D1,
++#else
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++#endif
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ IN gcsDEVICE_CONSTRUCT_ARGS * Args,
++ OUT gckGALDEVICE *Device
++ )
++{
++ gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
++ gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
++ gctUINT32 horizontalTileSize, verticalTileSize;
++ struct resource* mem_region;
++ gctUINT32 physAddr;
++ gctUINT32 physical;
++ gckGALDEVICE device;
++ gceSTATUS status;
++ gctINT32 i;
++#if gcdMULTI_GPU
++ gctINT32 j;
++#endif
++ gceHARDWARE_TYPE type;
++ gckDB sharedDB = gcvNULL;
++ gckKERNEL kernel = gcvNULL;
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gcmkHEADER_ARG("IrqLine3D0=%d RegisterMemBase3D0=0x%08x RegisterMemSize3D0=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine3D0, RegisterMemBase3D0, RegisterMemSize3D0,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++#else
++ gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine, RegisterMemBase, RegisterMemSize,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++#endif
++
++#if gcdDISABLE_CORES_2D3D
++ IrqLine = -1;
++ IrqLine2D = -1;
++#endif
++
++ /* Allocate device structure. */
++ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN);
++
++ if (!device)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ memset(device, 0, sizeof(struct _gckGALDEVICE));
++
++ device->dbgNode = gcvNULL;
++
++ device->platform = Args->platform;
++
++ gcmkONERROR(_DebugfsInit(device));
++
++ if (gckDEBUGFS_CreateNode(
++ device, LogFileSize, device->debugfsDir.root ,DEBUG_FILE, &(device->dbgNode)))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the debug file system %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE
++ );
++ }
++ else if (LogFileSize)
++ {
++ gckDEBUGFS_SetCurrentNode(device->dbgNode);
++ }
++
++#if gcdMULTI_GPU
++ if (IrqLine3D0 != -1)
++ {
++ device->requestedRegisterMemBase3D[gcvCORE_3D_0_ID] = RegisterMemBase3D0;
++ device->requestedRegisterMemSize3D[gcvCORE_3D_0_ID] = RegisterMemSize3D0;
++ }
++
++ if (IrqLine3D1 != -1)
++ {
++ device->requestedRegisterMemBase3D[gcvCORE_3D_1_ID] = RegisterMemBase3D1;
++ device->requestedRegisterMemSize3D[gcvCORE_3D_1_ID] = RegisterMemSize3D1;
++ }
++#elif gcdMULTI_GPU_AFFINITY
++ if (IrqLine3D0 != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase3D0;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize3D0;
++ }
++
++ if (IrqLine3D1 != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_OCL] = RegisterMemBase3D1;
++ device->requestedRegisterMemSizes[gcvCORE_OCL] = RegisterMemSize3D1;
++ }
++#else
++ if (IrqLine != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize;
++ }
++#endif
++
++ if (IrqLine2D != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D;
++ device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D;
++ }
++
++ if (IrqLineVG != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG;
++ device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG;
++ }
++
++ device->requestedContiguousBase = 0;
++ device->requestedContiguousSize = 0;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ physical = device->requestedRegisterMemBase3D[j];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(physical,
++ device->requestedRegisterMemSize3D[j],
++ "galcore register region");
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSize3D[j]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBase3D[j] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSize3D[j]);
++
++ if (device->registerBase3D[j] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSize3D[j]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSize3D[j];
++ }
++ else
++ {
++ device->registerBase3D[j] = gcvNULL;
++ }
++ }
++ }
++ else
++#endif
++ {
++ physical = device->requestedRegisterMemBases[i];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(physical,
++ device->requestedRegisterMemSizes[i],
++ "galcore register region");
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBases[i] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSizes[i]);
++
++ if (device->registerBases[i] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSizes[i];
++ }
++ }
++ }
++
++ /* Set the base address */
++ device->baseAddress = device->physBase = PhysBaseAddr;
++ device->physSize = PhysSize;
++ device->mmu = Args->mmu;
++
++ /* Construct the gckOS object. */
++ gcmkONERROR(gckOS_Construct(device, &device->os));
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ if (IrqLine3D0 != -1)
++#else
++ if (IrqLine != -1)
++#endif
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_MAJOR, device,
++ gcvNULL, &device->kernels[gcvCORE_MAJOR]));
++
++ sharedDB = device->kernels[gcvCORE_MAJOR]->db;
++
++ /* Initialize core mapping */
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_MAJOR;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_MAJOR]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
++ ));
++
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvTRUE
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, gcvTRUE
++ ));
++ }
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gcmkONERROR(gckHARDWARE_SetMinFscaleValue(
++ device->kernels[gcvCORE_MAJOR]->hardware, Args->gpu3DMinClock
++ ));
++#endif
++
++ gcmkONERROR(gckHARDWARE_SetGpuProfiler(
++ device->kernels[gcvCORE_MAJOR]->hardware, GpuProfiler
++ ));
++
++ gcmkVERIFY_OK(gckKERNEL_SetRecovery(
++ device->kernels[gcvCORE_MAJOR], Args->recovery, Args->stuckDump
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_MAJOR] = gcvNULL;
++ }
++
++#if gcdMULTI_GPU_AFFINITY
++ if (IrqLine3D1 != -1)
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_OCL, device,
++ gcvNULL, &device->kernels[gcvCORE_OCL]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_OCL]->db;
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_OCL;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_OCL] = gcvCORE_OCL;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_OCL]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_OCL]->hardware, FastClear, Compression
++ ));
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gcmkONERROR(gckHARDWARE_SetMinFscaleValue(
++ device->kernels[gcvCORE_OCL]->hardware, Args->gpu3DMinClock
++ ));
++#endif
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_OCL]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_OCL]->hardware, PowerManagement
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_OCL]->hardware, gcvTRUE
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_OCL]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_OCL]->hardware, gcvTRUE
++ ));
++ }
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_OCL]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_OCL] = gcvNULL;
++ }
++#endif
++
++ if (IrqLine2D != -1)
++ {
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_2D, device,
++ sharedDB, &device->kernels[gcvCORE_2D]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;
++
++ /* Verify the hardware type */
++ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));
++
++ if (type != gcvHARDWARE_2D)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unexpected hardware type: %d\n",
++ __FUNCTION__, __LINE__,
++ type
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++#if gcdMULTI_GPU_AFFINITY
++ && device->kernels[gcvCORE_OCL] == gcvNULL
++#endif
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_2D;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_2D]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D,
++ device
++ ));
++
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_2D]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, PowerManagement
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_2D]->hardware, gcvTRUE
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckHARDWARE_SetPowerManagementLock(
++ device->kernels[gcvCORE_2D]->hardware, gcvFALSE
++ ));
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, gcvTRUE
++ ));
++ }
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gcmkONERROR(gckHARDWARE_SetMinFscaleValue(
++ device->kernels[gcvCORE_2D]->hardware, 1
++ ));
++#endif
++
++ gcmkVERIFY_OK(gckKERNEL_SetRecovery(
++ device->kernels[gcvCORE_2D], Args->recovery, Args->stuckDump
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_2D] = gcvNULL;
++ }
++
++ if (IrqLineVG != -1)
++ {
++#if gcdENABLE_VG
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_VG, device,
++ sharedDB, &device->kernels[gcvCORE_VG]));
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++ && device->kernels[gcvCORE_2D] == gcvNULL
++#if gcdMULTI_GPU_AFFINITY
++ && device->kernels[gcvCORE_OCL] == gcvNULL
++#endif
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_VG;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
++ }
++
++ if(PowerManagement != -1)
++ {
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ PowerManagement
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ gcvTRUE
++ ));
++ }
++
++
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_VG] = gcvNULL;
++ }
++
++ /* Initialize the ISR. */
++#if gcdMULTI_GPU
++ device->irqLine3D[gcvCORE_3D_0_ID] = IrqLine3D0;
++#if gcdMULTI_GPU > 1
++ device->irqLine3D[gcvCORE_3D_1_ID] = IrqLine3D1;
++#endif
++#elif gcdMULTI_GPU_AFFINITY
++ device->irqLines[gcvCORE_MAJOR] = IrqLine3D0;
++ device->irqLines[gcvCORE_OCL] = IrqLine3D1;
++#else
++ device->irqLines[gcvCORE_MAJOR] = IrqLine;
++#endif
++ device->irqLines[gcvCORE_2D] = IrqLine2D;
++ device->irqLines[gcvCORE_VG] = IrqLineVG;
++
++ /* Initialize the kernel thread semaphores. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (device->irqLine3D[j] != -1) init_waitqueue_head(&device->intrWaitQueue3D[j]);
++ }
++ }
++ else
++#endif
++ {
++ if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
++ }
++ }
++
++ device->signal = Signal;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL) break;
++ }
++
++ if (i == gcdMAX_GPU_COUNT)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
++ device->kernels[i]->vg->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++ /* query the amount of video memory */
++ gcmkONERROR(gckVGHARDWARE_QueryMemory(
++ device->kernels[i]->vg->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++ else
++#endif
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckHARDWARE_QuerySystemMemory(
++ device->kernels[i]->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++
++ /* query the amount of video memory */
++ gcmkONERROR(gckHARDWARE_QueryMemory(
++ device->kernels[i]->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++
++
++    /* Grab the first available kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (device->irqLine3D[j] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ if (device->irqLines[i] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++ }
++
++ /* Set up the internal memory region. */
++ if (device->internalSize > 0)
++ {
++ status = gckVIDMEM_Construct(
++ device->os,
++ internalBaseAddress, device->internalSize, internalAlignment,
++ 0, &device->internalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->internalSize = 0;
++ }
++ else
++ {
++ /* Map internal memory. */
++ device->internalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->internalSize);
++
++ if (device->internalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
++ physical += device->internalSize;
++ }
++ }
++
++ if (device->externalSize > 0)
++ {
++ /* create the external memory heap */
++ status = gckVIDMEM_Construct(
++ device->os,
++ externalBaseAddress, device->externalSize, externalAlignment,
++ 0, &device->externalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++            /* Error, disable external heap. */
++ device->externalSize = 0;
++ }
++ else
++ {
++ /* Map external memory. */
++ device->externalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->externalSize);
++
++ if (device->externalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
++ physical += device->externalSize;
++ }
++ }
++
++ /* set up the contiguous memory */
++ device->contiguousSize = ContiguousSize;
++
++ if (ContiguousSize > 0)
++ {
++ if (ContiguousBase == 0)
++ {
++ while (device->contiguousSize > 0)
++ {
++ /* Allocate contiguous memory. */
++ status = _AllocateMemory(
++ device,
++ device->contiguousSize,
++ &device->contiguousBase,
++ &device->contiguousPhysical,
++ &physAddr
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
++ status = gckVIDMEM_Construct(
++ device->os,
++ physAddr | device->systemMemoryBaseAddress,
++ device->contiguousSize,
++ 64,
++ BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++
++ gcmkONERROR(_FreeMemory(
++ device,
++ device->contiguousBase,
++ device->contiguousPhysical
++ ));
++
++ gcmRELEASE_NAME(device->contiguousPhysicalName);
++ device->contiguousBase = gcvNULL;
++ device->contiguousPhysical = gcvNULL;
++ }
++
++ if (device->contiguousSize <= (4 << 20))
++ {
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ device->contiguousSize -= (4 << 20);
++ }
++ }
++ }
++ else
++ {
++ /* Create the contiguous memory heap. */
++ status = gckVIDMEM_Construct(
++ device->os,
++ ContiguousBase | device->systemMemoryBaseAddress,
++ ContiguousSize,
++ 64, BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable contiguous memory pool. */
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ if (Args->contiguousRequested == gcvFALSE)
++ {
++ mem_region = request_mem_region(
++ ContiguousBase, ContiguousSize, "galcore managed memory"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ ContiguousSize, ContiguousBase
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ device->requestedContiguousBase = ContiguousBase;
++ device->requestedContiguousSize = ContiguousSize;
++ device->contiguousRequested = Args->contiguousRequested;
++
++ device->contiguousPhysical = gcvNULL;
++ device->contiguousPhysicalName = 0;
++ device->contiguousSize = ContiguousSize;
++ device->contiguousMapped = gcvTRUE;
++ }
++ }
++ }
++
++ /* Return pointer to the device. */
++ *Device = device;
++
++ gcmkFOOTER_ARG("*Device=0x%x", * Device);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Destroy
++**
++** Class destructor.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Destroy(
++ gckGALDEVICE Device)
++{
++ gctINT i;
++#if gcdMULTI_GPU
++ gctINT j;
++#endif
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ if (Device != gcvNULL)
++ {
++        /* Grab the first available kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (Device->irqLine3D[j] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ if (Device->irqLines[i] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ }
++
++ if (Device->internalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->internalPhysicalName);
++ Device->internalPhysicalName = 0;
++ }
++ if (Device->externalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->externalPhysicalName);
++ Device->externalPhysicalName = 0;
++ }
++ if (Device->contiguousPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->contiguousPhysicalName);
++ Device->contiguousPhysicalName = 0;
++ }
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->kernels[i] != gcvNULL)
++ {
++ /* Destroy the gckKERNEL object. */
++ gcmkVERIFY_OK(gckKERNEL_Destroy(Device->kernels[i]));
++ Device->kernels[i] = gcvNULL;
++ }
++ }
++
++ if (Device->internalLogical != gcvNULL)
++ {
++ /* Unmap the internal memory. */
++ iounmap(Device->internalLogical);
++ Device->internalLogical = gcvNULL;
++ }
++
++ if (Device->internalVidMem != gcvNULL)
++ {
++ /* Destroy the internal heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->internalVidMem));
++ Device->internalVidMem = gcvNULL;
++ }
++
++ if (Device->externalLogical != gcvNULL)
++ {
++ /* Unmap the external memory. */
++ iounmap(Device->externalLogical);
++ Device->externalLogical = gcvNULL;
++ }
++
++ if (Device->externalVidMem != gcvNULL)
++ {
++ /* destroy the external heap */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->externalVidMem));
++ Device->externalVidMem = gcvNULL;
++ }
++
++ if (Device->contiguousBase != gcvNULL)
++ {
++ if (Device->contiguousMapped == gcvFALSE)
++ {
++ gcmkVERIFY_OK(_FreeMemory(
++ Device,
++ Device->contiguousBase,
++ Device->contiguousPhysical
++ ));
++ }
++
++ Device->contiguousBase = gcvNULL;
++ Device->contiguousPhysical = gcvNULL;
++ }
++
++ if (Device->requestedContiguousBase != 0
++ && Device->contiguousRequested == gcvFALSE
++ )
++ {
++ release_mem_region(Device->requestedContiguousBase, Device->requestedContiguousSize);
++ Device->requestedContiguousBase = 0;
++ Device->requestedContiguousSize = 0;
++ }
++
++ if (Device->contiguousVidMem != gcvNULL)
++ {
++ /* Destroy the contiguous heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->contiguousVidMem));
++ Device->contiguousVidMem = gcvNULL;
++ }
++
++ if (Device->dbgNode)
++ {
++ gckDEBUGFS_FreeNode(Device->dbgNode);
++
++ if(Device->dbgNode != gcvNULL)
++ {
++ kfree(Device->dbgNode);
++ Device->dbgNode = gcvNULL;
++ }
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ if (Device->registerBase3D[j] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBase3D[j]);
++ if (Device->requestedRegisterMemBase3D[j] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBase3D[j],
++ Device->requestedRegisterMemSize3D[j]);
++ }
++
++ Device->registerBase3D[j] = gcvNULL;
++ Device->requestedRegisterMemBase3D[j] = 0;
++ Device->requestedRegisterMemSize3D[j] = 0;
++ }
++ }
++ }
++ else
++#endif
++ {
++ if (Device->registerBases[i] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBases[i]);
++ if (Device->requestedRegisterMemBases[i] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBases[i],
++ Device->requestedRegisterMemSizes[i]);
++ }
++
++ Device->registerBases[i] = gcvNULL;
++ Device->requestedRegisterMemBases[i] = 0;
++ Device->requestedRegisterMemSizes[i] = 0;
++ }
++ }
++ }
++
++ /* Destroy the gckOS object. */
++ if (Device->os != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Destroy(Device->os));
++ Device->os = gcvNULL;
++ }
++
++ _DebugfsCleanup(Device);
++
++ /* Free the device. */
++ kfree(Device);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Setup_ISR
++**
++** Start the ISR routine.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++**          Pointer to a gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Setup successfully.
++** gcvSTATUS_GENERIC_IO
++** Setup failed.
++*/
++gceSTATUS
++gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ gctINT ret = 0;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[gcvCORE_MAJOR] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ gc500_handle.handler = isrRoutine;
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++#if gcdMULTI_GPU
++ ret = request_irq(
++ Device->irqLine3D[gcvCORE_3D_0_ID], isrRoutine3D0, IRQF_DISABLED,
++ "galcore_3d_0", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLine3D[gcvCORE_3D_0_ID], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitialized3D[gcvCORE_3D_0_ID] = gcvTRUE;
++
++#if gcdMULTI_GPU > 1
++ ret = request_irq(
++ Device->irqLine3D[gcvCORE_3D_1_ID], isrRoutine3D1, IRQF_DISABLED,
++ "galcore_3d_1", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLine3D[gcvCORE_3D_1_ID], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitialized3D[gcvCORE_3D_1_ID] = gcvTRUE;
++#endif
++#elif gcdMULTI_GPU_AFFINITY
++ ret = request_irq(
++ Device->irqLines[gcvCORE_MAJOR], isrRoutine3D0, IRQF_DISABLED,
++ "galcore_3d_0", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_MAJOR], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++
++ ret = request_irq(
++ Device->irqLines[gcvCORE_OCL], isrRoutine3D1, IRQF_DISABLED,
++ "galcore_3d_1", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_OCL], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_OCL] = gcvTRUE;
++#else
++ ret = request_irq(
++ Device->irqLines[gcvCORE_MAJOR], isrRoutine, IRQF_DISABLED,
++ "galcore interrupt service", Device
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_MAJOR], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++#endif
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Setup_ISR_2D(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ gctINT ret;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[gcvCORE_2D] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ gc500_handle.handler = isrRoutine2D;
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ ret = request_irq(
++ Device->irqLines[gcvCORE_2D], isrRoutine2D, IRQF_DISABLED,
++ "galcore interrupt service for 2D", Device
++ );
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_2D], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_2D] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Setup_ISR_VG(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ gctINT ret;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[gcvCORE_VG] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ gc500_handle.handler = isrRoutineVG;
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ ret = request_irq(
++ Device->irqLines[gcvCORE_VG], isrRoutineVG, IRQF_DISABLED,
++ "galcore interrupt service for 2D", Device
++ );
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[gcvCORE_VG], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[gcvCORE_VG] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Release_ISR
++**
++** Release the irq line.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++#if gcdMULTI_GPU
++ /* release the irq */
++ if (Device->isrInitialized3D[gcvCORE_3D_0_ID])
++ {
++ free_irq(Device->irqLine3D[gcvCORE_3D_0_ID], Device);
++ Device->isrInitialized3D[gcvCORE_3D_0_ID] = gcvFALSE;
++ }
++#if gcdMULTI_GPU > 1
++ /* release the irq */
++ if (Device->isrInitialized3D[gcvCORE_3D_1_ID])
++ {
++ free_irq(Device->irqLine3D[gcvCORE_3D_1_ID], Device);
++ Device->isrInitialized3D[gcvCORE_3D_1_ID] = gcvFALSE;
++ }
++#endif
++#else
++ /* release the irq */
++ if (Device->isrInitializeds[gcvCORE_MAJOR])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[gcvCORE_MAJOR], Device);
++#endif
++ Device->isrInitializeds[gcvCORE_MAJOR] = gcvFALSE;
++ }
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Release_ISR_2D(
++ IN gckGALDEVICE Device
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[gcvCORE_2D])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[gcvCORE_2D], Device);
++#endif
++
++ Device->isrInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Release_ISR_VG(
++ IN gckGALDEVICE Device
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[gcvCORE_VG])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[gcvCORE_VG], Device);
++#endif
++
++ Device->isrInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start_Threads
++**
++** Start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++** gcvSTATUS_GENERIC_IO
++** Start failed.
++*/
++gceSTATUS
++gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ struct task_struct * task;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++#if gcdMULTI_GPU
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D0, Device, "galcore_3d_0");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxt3D[gcvCORE_3D_0_ID] = task;
++ Device->threadInitialized3D[gcvCORE_3D_0_ID] = gcvTRUE;
++
++#if gcdMULTI_GPU > 1
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D1, Device, "galcore_3d_1");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxt3D[gcvCORE_3D_1_ID] = task;
++ Device->threadInitialized3D[gcvCORE_3D_1_ID] = gcvTRUE;
++#endif
++ }
++#elif gcdMULTI_GPU_AFFINITY
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D0, Device, "galcore_3d_0");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++
++ if (Device->kernels[gcvCORE_OCL] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine3D1, Device, "galcore_3d_1");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_OCL] = task;
++ Device->threadInitializeds[gcvCORE_OCL] = gcvTRUE;
++ }
++#else
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine, Device, "galcore daemon thread");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++#endif
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine2D, Device, "galcore daemon thread for 2D");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_2D] = task;
++ Device->threadInitializeds[gcvCORE_2D] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutineVG, Device, "galcore daemon thread for VG");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_VG] = task;
++ Device->threadInitializeds[gcvCORE_VG] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop_Threads
++**
++**  Stop the daemon threads started by gckGALDEVICE_Start_Threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ )
++{
++ gctINT i;
++#if gcdMULTI_GPU
++ gctINT j;
++#endif
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdMULTI_GPU
++ if (i == gcvCORE_MAJOR)
++ {
++ for (j = 0; j < gcdMULTI_GPU; j++)
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitialized3D[j])
++ {
++ Device->killThread = gcvTRUE;
++ Device->dataReady3D[j] = gcvTRUE;
++ wake_up_interruptible(&Device->intrWaitQueue3D[j]);
++
++ kthread_stop(Device->threadCtxt3D[j]);
++ Device->threadCtxt3D[j] = gcvNULL;
++ Device->threadInitialized3D[j] = gcvFALSE;
++ }
++ }
++ }
++ else
++#endif
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitializeds[i])
++ {
++ Device->killThread = gcvTRUE;
++ up(&Device->semas[i]);
++
++ kthread_stop(Device->threadCtxts[i]);
++ Device->threadCtxts[i] = gcvNULL;
++ Device->threadInitializeds[i] = gcvFALSE;
++ }
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start
++**
++**  Start the gal device, including the following actions: start the daemon
++**  threads and set up the isr routines.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++*/
++gceSTATUS
++gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ /* Start the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Start_Threads(Device));
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device));
++
++        /* Switch to OFF power state (broadcast). */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR_2D(Device));
++
++        /* Switch to OFF power state (broadcast). */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR_VG(Device));
++
++#if gcdENABLE_VG
++        /* Switch to OFF power state (broadcast). */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++#endif
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF
++ ));
++
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++        /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR_2D(Device));
++
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++        /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR_VG(Device));
++
++#if gcdENABLE_VG
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF
++ ));
++#endif
++ }
++
++ /* Stop the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Stop_Threads(Device));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
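++
++/*
++ * A minimal lifecycle sketch (an assumption for illustration, not code taken
++ * from the driver sources): gckGALDEVICE_Start() is expected to run once the
++ * device has been constructed, and gckGALDEVICE_Stop() before it is
++ * destroyed.  The helper name below is hypothetical and the block is
++ * compiled out on purpose.
++ */
++#if 0
++static gceSTATUS
++_ExampleDeviceLifecycle(
++    IN gckGALDEVICE Device
++    )
++{
++    gceSTATUS status;
++
++    /* Start the daemon threads, hook up the ISRs and broadcast power off. */
++    status = gckGALDEVICE_Start(Device);
++
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* ... normal operation ... */
++
++    /* Switch the cores off, release the ISRs and stop the daemon threads. */
++    return gckGALDEVICE_Stop(Device);
++}
++#endif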
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_device.h 2015-05-01 14:57:59.599427001 -0500
+@@ -0,0 +1,215 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_device_h_
++#define __gc_hal_kernel_device_h_
++
++#include "gc_hal_kernel_debugfs.h"
++
++/******************************************************************************\
++******************************* gckGALDEVICE Structure *******************************
++\******************************************************************************/
++
++typedef struct _gckGALDEVICE
++{
++ /* Objects. */
++ gckOS os;
++ gckKERNEL kernels[gcdMAX_GPU_COUNT];
++
++ gcsPLATFORM* platform;
++
++ /* Attributes. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctUINT32 internalPhysicalName;
++ gctPOINTER internalLogical;
++ gckVIDMEM internalVidMem;
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctUINT32 externalPhysicalName;
++ gctPOINTER externalLogical;
++ gckVIDMEM externalVidMem;
++ gckVIDMEM contiguousVidMem;
++ gctPOINTER contiguousBase;
++ gctPHYS_ADDR contiguousPhysical;
++ gctUINT32 contiguousPhysicalName;
++ gctSIZE_T contiguousSize;
++ gctBOOL contiguousMapped;
++ gctPOINTER contiguousMappedUser;
++ gctBOOL contiguousRequested;
++ gctSIZE_T systemMemorySize;
++ gctUINT32 systemMemoryBaseAddress;
++#if gcdMULTI_GPU
++ gctPOINTER registerBase3D[gcdMULTI_GPU];
++ gctSIZE_T registerSize3D[gcdMULTI_GPU];
++#endif
++ gctPOINTER registerBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T registerSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 baseAddress;
++ gctUINT32 physBase;
++ gctUINT32 physSize;
++ gctBOOL mmu;
++#if gcdMULTI_GPU
++ gctUINT32 requestedRegisterMemBase3D[gcdMULTI_GPU];
++ gctSIZE_T requestedRegisterMemSize3D[gcdMULTI_GPU];
++#endif
++ gctUINT32 requestedRegisterMemBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T requestedRegisterMemSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 requestedContiguousBase;
++ gctSIZE_T requestedContiguousSize;
++
++ /* IRQ management. */
++#if gcdMULTI_GPU
++ gctINT irqLine3D[gcdMULTI_GPU];
++ gctBOOL isrInitialized3D[gcdMULTI_GPU];
++ gctBOOL dataReady3D[gcdMULTI_GPU];
++#endif
++ gctINT irqLines[gcdMAX_GPU_COUNT];
++ gctBOOL isrInitializeds[gcdMAX_GPU_COUNT];
++
++ /* Thread management. */
++#if gcdMULTI_GPU
++ struct task_struct *threadCtxt3D[gcdMULTI_GPU];
++ wait_queue_head_t intrWaitQueue3D[gcdMULTI_GPU];
++ gctBOOL threadInitialized3D[gcdMULTI_GPU];
++#endif
++ struct task_struct *threadCtxts[gcdMAX_GPU_COUNT];
++ struct semaphore semas[gcdMAX_GPU_COUNT];
++ gctBOOL threadInitializeds[gcdMAX_GPU_COUNT];
++ gctBOOL killThread;
++
++ /* Signal management. */
++ gctINT signal;
++
++ /* Core mapping */
++ gceCORE coreMapping[8];
++
++ /* States before suspend. */
++ gceCHIPPOWERSTATE statesStored[gcdMAX_GPU_COUNT];
++
++ /* Device Debug File System Entry in kernel. */
++ struct _gcsDEBUGFS_Node * dbgNode;
++
++ gcsDEBUGFS_DIR debugfsDir;
++}
++* gckGALDEVICE;
++
++typedef struct _gcsHAL_PRIVATE_DATA
++{
++ gckGALDEVICE device;
++ gctPOINTER mappedMemory;
++ gctPOINTER contiguousLogical;
++ /* The process opening the device may not be the same as the one that closes it. */
++ gctUINT32 pidOpen;
++}
++gcsHAL_PRIVATE_DATA, * gcsHAL_PRIVATE_DATA_PTR;
++
++typedef struct _gcsDEVICE_CONSTRUCT_ARGS
++{
++ gctBOOL recovery;
++ gctUINT stuckDump;
++ gctUINT gpu3DMinClock;
++
++ gctBOOL contiguousRequested;
++ gcsPLATFORM* platform;
++ gctBOOL mmu;
++}
++gcsDEVICE_CONSTRUCT_ARGS;
++
++gceSTATUS gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR_2D(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR_VG(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR_2D(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR_VG(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ IN gctINT IrqLine3D0,
++ IN gctUINT32 RegisterMemBase3D0,
++ IN gctSIZE_T RegisterMemSize3D0,
++ IN gctINT IrqLine3D1,
++ IN gctUINT32 RegisterMemBase3D1,
++ IN gctSIZE_T RegisterMemSize3D1,
++#else
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++#endif
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ IN gcsDEVICE_CONSTRUCT_ARGS * Args,
++ OUT gckGALDEVICE *Device
++ );
++
++gceSTATUS gckGALDEVICE_Destroy(
++ IN gckGALDEVICE Device
++ );
++
++#endif /* __gc_hal_kernel_device_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_iommu.c 2015-05-01 14:57:59.599427001 -0500
+@@ -0,0 +1,216 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_device.h"
++
++#include <linux/iommu.h>
++#include <linux/platform_device.h>
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsIOMMU
++{
++ struct iommu_domain * domain;
++ struct device * device;
++}
++gcsIOMMU;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static int
++_IOMMU_Fault_Handler(
++ struct iommu_domain * Domain,
++ struct device * Dev,
++ unsigned long DomainAddress,
++ int flags,
++ void * args
++ )
++#else
++static int
++_IOMMU_Fault_Handler(
++ struct iommu_domain * Domain,
++ struct device * Dev,
++ unsigned long DomainAddress,
++ int flags
++ )
++#endif
++{
++ return 0;
++}
++
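++/*
++ * Sizing note (assuming the usual 4 KiB PAGE_SIZE): the flat mapping below
++ * identity-maps the first 2 GiB of physical address space, i.e.
++ * 0x80000000 / 0x1000 = 524288 calls to gckIOMMU_Map() of one page each.
++ */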
++static gceSTATUS
++_FlatMapping(
++ IN gckIOMMU Iommu
++ )
++{
++ gceSTATUS status;
++ gctUINT32 physical;
++
++ for (physical = 0; physical < 0x80000000; physical += PAGE_SIZE)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "Map %x => %x bytes = %d",
++ physical, physical, PAGE_SIZE
++ );
++
++ gcmkONERROR(gckIOMMU_Map(Iommu, physical, physical, PAGE_SIZE));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++void
++gckIOMMU_Destory(
++ IN gckOS Os,
++ IN gckIOMMU Iommu
++ )
++{
++    gcmkHEADER();
++
++    /* The construct error path may pass gcvNULL; tolerate it. */
++    if (Iommu)
++    {
++        if (Iommu->domain && Iommu->device)
++        {
++            /* Detach the device before freeing the domain. */
++            iommu_detach_device(Iommu->domain, Iommu->device);
++        }
++
++        if (Iommu->domain)
++        {
++            iommu_domain_free(Iommu->domain);
++        }
++
++        gcmkOS_SAFE_FREE(Os, Iommu);
++    }
++
++ gcmkFOOTER_NO();
++}
++
++gceSTATUS
++gckIOMMU_Construct(
++ IN gckOS Os,
++ OUT gckIOMMU * Iommu
++ )
++{
++ gceSTATUS status;
++ gckIOMMU iommu = gcvNULL;
++ struct device *dev;
++ int ret;
++
++ gcmkHEADER();
++
++ dev = &Os->device->platform->device->dev;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu));
++
++ gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU));
++
++ iommu->domain = iommu_domain_alloc(&platform_bus_type);
++
++ if (!iommu->domain)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail");
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev);
++#else
++ iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler);
++#endif
++
++ ret = iommu_attach_device(iommu->domain, dev);
++
++ if (ret)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret);
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ iommu->device = dev;
++
++    gcmkONERROR(_FlatMapping(iommu));
++
++ *Iommu = iommu;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ gckIOMMU_Destory(Os, iommu);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckIOMMU_Map(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes
++ )
++{
++ gceSTATUS status;
++ int ret;
++
++ gcmkHEADER_ARG("DomainAddress=%#X, Physical=%#X, Bytes=%d",
++ DomainAddress, Physical, Bytes);
++
++ ret = iommu_map(Iommu->domain, DomainAddress, Physical, Bytes, 0);
++
++ if (ret)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ gcmkFOOTER();
++ return status;
++
++}
++
++gceSTATUS
++gckIOMMU_Unmap(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Bytes
++ )
++{
++ gcmkHEADER();
++
++ iommu_unmap(Iommu->domain, DomainAddress, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.c 2015-05-01 14:57:59.599427001 -0500
+@@ -0,0 +1,497 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryVideoMemory
++**
++** Query the amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to an gcsHAL_INTERFACE structure that will be filled in with
++** the memory information.
++*/
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Get internal memory size and physical address. */
++ Interface->u.QueryVideoMemory.internalSize = device->internalSize;
++ Interface->u.QueryVideoMemory.internalPhysical = device->internalPhysicalName;
++
++ /* Get external memory size and physical address. */
++ Interface->u.QueryVideoMemory.externalSize = device->externalSize;
++ Interface->u.QueryVideoMemory.externalPhysical = device->externalPhysicalName;
++
++ /* Get contiguous memory size and physical address. */
++ Interface->u.QueryVideoMemory.contiguousSize = device->contiguousSize;
++ Interface->u.QueryVideoMemory.contiguousPhysical = device->contiguousPhysicalName;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_GetVideoMemoryPool
++**
++** Get the gckVIDMEM object belonging to the specified pool.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcePOOL Pool
++** Pool to query gckVIDMEM object for.
++**
++** OUTPUT:
++**
++** gckVIDMEM * VideoMemory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object belonging to the requested pool.
++*/
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ )
++{
++ gckGALDEVICE device;
++ gckVIDMEM videoMemory;
++
++ gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(VideoMemory != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Dispatch on pool. */
++ switch (Pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ videoMemory = device->internalVidMem;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ videoMemory = device->externalVidMem;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ videoMemory = device->contiguousVidMem;
++ break;
++
++ default:
++ /* Unknown pool. */
++ videoMemory = NULL;
++ }
++
++ /* Return pointer to the gckVIDMEM object. */
++ *VideoMemory = videoMemory;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*VideoMemory=%p", *VideoMemory);
++ return (videoMemory == NULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapMemory
++**
++** Map video memory into the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the base address of the mapped
++** memory region.
++*/
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_MapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnmapMemory
++**
++** Unmap video memory from the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** gctPOINTER Logical
++** Base address of the mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_UnmapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++**  gckKERNEL_MapVideoMemoryEx
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckGALDEVICE device = gcvNULL;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gcePOOL pool = gcvPOOL_UNKNOWN;
++ gctUINT32 offset = 0;
++ gctUINT32 base = 0;
++ gceSTATUS status;
++ gctPOINTER logical = gcvNULL;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
++ Kernel, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware, Address, &pool, &offset));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckHARDWARE_SplitMemory(Kernel->hardware, Address, &pool, &offset));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ if (device->contiguousMapped)
++ {
++ logical = device->contiguousBase;
++ }
++ else
++ {
++ gctINT processID;
++ gckOS_GetProcessID(&processID);
++
++ mdl = (PLINUX_MDL) device->contiguousPhysical;
++
++ mdlMap = FindMdlMap(mdl, processID);
++ gcmkASSERT(mdlMap);
++
++ logical = (gctPOINTER) mdlMap->vmaAddr;
++ }
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
++ device->contiguousVidMem->baseAddress,
++ &pool,
++ &base));
++ }
++ else
++#endif
++ {
++ gctUINT32 systemBaseAddress = 0;
++
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &systemBaseAddress));
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_CPUPhysicalToGPUPhysical(
++ Kernel->os,
++ device->contiguousVidMem->baseAddress - systemBaseAddress,
++ &baseAddress
++ ));
++
++ gcmkVERIFY_OK(
++ gckHARDWARE_SplitMemory(Kernel->hardware,
++ baseAddress,
++ &pool,
++ &base));
++ }
++ offset -= base;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Build logical address of specified address. */
++ *Logical = (gctPOINTER) ((gctUINT8_PTR) logical + offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=%p", *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++    /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ return gckKERNEL_MapVideoMemoryEx(Kernel, gcvCORE_MAJOR, InUserSpace, Address, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Notify
++**
++**  This function is called by clients to notify the gckKERNEL object of an event.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gceNOTIFY Notification
++** Notification event.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=%p Notification=%d Data=%d",
++ Kernel, Notification, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++    /* Dispatch on notification. */
++ switch (Notification)
++ {
++ case gcvNOTIFY_INTERRUPT:
++ /* Process the interrupt. */
++#if COMMAND_PROCESSOR_VERSION > 1
++ status = gckINTERRUPT_Notify(Kernel->interrupt, Data);
++#else
++ status = gckHARDWARE_Interrupt(Kernel->hardware,
++#if gcdMULTI_GPU
++ CoreId,
++#endif
++ Data);
++#endif
++ break;
++
++ default:
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Settings != gcvNULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Fill in signal. */
++ Settings->signal = device->signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("Settings->signal=%d", Settings->signal);
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_linux.h 2015-05-01 14:57:59.599427001 -0500
+@@ -0,0 +1,399 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_linux_h_
++#define __gc_hal_kernel_linux_h_
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#ifdef FLAREON
++# include <asm/arch-realview/dove_gpio_irq.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++
++#include <linux/idr.h>
++
++#ifdef MODVERSIONS
++# include <linux/modversions.h>
++#endif
++#include <asm/io.h>
++#include <asm/uaccess.h>
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++#include <linux/clk.h>
++#endif
++
++#define NTSTRSAFE_NO_CCH_FUNCTIONS
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_platform.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_kernel_os.h"
++#include "gc_hal_kernel_debugfs.h"
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
++#define FIND_TASK_BY_PID(x) pid_task(find_vpid(x), PIDTYPE_PID)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#define FIND_TASK_BY_PID(x) find_task_by_vpid(x)
++#else
++#define FIND_TASK_BY_PID(x) find_task_by_pid(x)
++#endif
++
++#define _WIDE(string) L##string
++#define WIDE(string) _WIDE(string)
++
++#define countof(a) (sizeof(a) / sizeof(a[0]))
++
++#ifndef DEVICE_NAME
++#ifdef CONFIG_DOVE_GPU
++# define DEVICE_NAME "dove_gpu"
++#else
++# define DEVICE_NAME "galcore"
++#endif
++#endif
++
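++/*
++ * Worked example (assuming the common 4096-byte PAGE_CACHE_SIZE): a buffer of
++ * 10000 bytes starting at page offset 100 needs
++ * GetPageCount(10000, 100) = ((10000 + 100) + 4095) >> 12 = 3 pages.
++ */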
++#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_CACHE_MASK)) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION (3,7,0)
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)
++#else
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
++#endif
++
++/* Protection bits used when mapping memory to user space. */
++#define gcmkPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE
++#define gcmkIOREMAP ioremap_wc
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++#elif !gcdNONPAGED_MEMORY_CACHEABLE
++#define gcmkIOREMAP ioremap_nocache
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_noncached(x)
++#endif
++
++#define gcdSUPPRESS_OOM_MESSAGE 1
++
++#if gcdSUPPRESS_OOM_MESSAGE
++#define gcdNOWARN __GFP_NOWARN
++#else
++#define gcdNOWARN 0
++#endif
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++typedef struct _gcsIOMMU * gckIOMMU;
++
++typedef struct _gcsUSER_MAPPING * gcsUSER_MAPPING_PTR;
++typedef struct _gcsUSER_MAPPING
++{
++ /* Pointer to next mapping structure. */
++ gcsUSER_MAPPING_PTR next;
++
++ /* Physical address of this mapping. */
++ gctUINT32 physical;
++
++ /* Logical address of this mapping. */
++ gctPOINTER logical;
++
++ /* Number of bytes of this mapping. */
++ gctSIZE_T bytes;
++
++ /* Starting address of this mapping. */
++ gctINT8_PTR start;
++
++ /* Ending address of this mapping. */
++ gctINT8_PTR end;
++}
++gcsUSER_MAPPING;
++
++typedef struct _gcsINTEGER_DB * gcsINTEGER_DB_PTR;
++typedef struct _gcsINTEGER_DB
++{
++ struct idr idr;
++ spinlock_t lock;
++ gctINT curr;
++}
++gcsINTEGER_DB;
++
++struct _gckOS
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to device */
++ gckGALDEVICE device;
++
++ /* Memory management */
++ gctPOINTER memoryLock;
++ gctPOINTER memoryMapLock;
++
++ struct _LINUX_MDL *mdlHead;
++ struct _LINUX_MDL *mdlTail;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* Signal management. */
++
++ /* Lock. */
++ gctPOINTER signalMutex;
++
++ /* signal id database. */
++ gcsINTEGER_DB signalDB;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /* Lock. */
++ gctPOINTER syncPointMutex;
++
++ /* sync point id database. */
++ gcsINTEGER_DB syncPointDB;
++#endif
++
++ gcsUSER_MAPPING_PTR userMap;
++ gctPOINTER debugLock;
++
++ /* workqueue for os timer. */
++ struct workqueue_struct * workqueue;
++
++ /* Allocate extra page to avoid cache overflow */
++ struct page* paddingPage;
++
++ /* Detect unfreed allocation. */
++ atomic_t allocateCount;
++
++ struct list_head allocatorList;
++
++ gcsDEBUGFS_DIR allocatorDebugfsDir;
++
++ /* Lock for register access check. */
++ struct mutex registerAccessLocks[gcdMAX_GPU_COUNT];
++
++ /* External power states. */
++ gctBOOL powerStates[gcdMAX_GPU_COUNT];
++
++ /* External clock states. */
++ gctBOOL clockStates[gcdMAX_GPU_COUNT];
++
++ /* IOMMU. */
++ gckIOMMU iommu;
++};
++
++typedef struct _gcsSIGNAL * gcsSIGNAL_PTR;
++typedef struct _gcsSIGNAL
++{
++ /* Kernel sync primitive. */
++ struct completion obj;
++
++ /* Manual reset flag. */
++ gctBOOL manualReset;
++
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* The owner of the signal. */
++ gctHANDLE process;
++
++ gckHARDWARE hardware;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSIGNAL;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++typedef struct _gcsSYNC_POINT * gcsSYNC_POINT_PTR;
++typedef struct _gcsSYNC_POINT
++{
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* State. */
++ atomic_t state;
++
++ /* timeline. */
++ struct sync_timeline * timeline;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSYNC_POINT;
++#endif
++
++typedef struct _gcsPageInfo * gcsPageInfo_PTR;
++typedef struct _gcsPageInfo
++{
++ struct page **pages;
++ gctUINT32_PTR pageTable;
++ gctUINT32 extraPage;
++ gctUINT32 address;
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++}
++gcsPageInfo;
++
++typedef struct _gcsOSTIMER * gcsOSTIMER_PTR;
++typedef struct _gcsOSTIMER
++{
++ struct delayed_work work;
++ gctTIMERFUNCTION function;
++ gctPOINTER data;
++} gcsOSTIMER;
++
++gceSTATUS
++gckOS_ImportAllocators(
++ gckOS Os
++ );
++
++gceSTATUS
++gckOS_FreeAllocators(
++ gckOS Os
++ );
++
++gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ );
++
++gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ );
++
++gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ );
++
++void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ );
++
++void
++_UnmapUserLogical(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ );
++
++static inline gctINT
++_GetProcessID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_tgid_vnr(current);
++#else
++ return current->tgid;
++#endif
++}
++
++static inline struct page *
++_NonContiguousToPage(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return Pages[Index];
++}
++
++static inline unsigned long
++_NonContiguousToPfn(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_pfn(_NonContiguousToPage(Pages, Index));
++}
++
++static inline unsigned long
++_NonContiguousToPhys(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_phys(_NonContiguousToPage(Pages, Index));
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++static inline int
++is_vmalloc_addr(
++ void *Addr
++ )
++{
++ unsigned long addr = (unsigned long)Addr;
++
++ return addr >= VMALLOC_START && addr < VMALLOC_END;
++}
++#endif
++
++#ifdef CONFIG_IOMMU_SUPPORT
++void
++gckIOMMU_Destory(
++ IN gckOS Os,
++ IN gckIOMMU Iommu
++ );
++
++gceSTATUS
++gckIOMMU_Construct(
++ IN gckOS Os,
++ OUT gckIOMMU * Iommu
++ );
++
++gceSTATUS
++gckIOMMU_Map(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes
++ );
++
++gceSTATUS
++gckIOMMU_Unmap(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++#endif /* __gc_hal_kernel_linux_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_math.c 2015-05-01 14:57:59.599427001 -0500
+@@ -0,0 +1,32 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ )
++{
++    if (Y == 0)
++    {
++        return 0;
++    }
++
++    return X % Y;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.c 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,8740 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++#include <linux/irqflags.h>
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++#include <linux/math64.h>
++#endif
++#include <linux/delay.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++#include <linux/anon_inodes.h>
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++#include <linux/file.h>
++#include "gc_hal_kernel_sync.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++#include "gc_hal_kernel_allocator.h"
++
++#define MEMORY_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryLock, \
++ gcvINFINITE))
++
++#define MEMORY_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryLock))
++
++#define MEMORY_MAP_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryMapLock, \
++ gcvINFINITE))
++
++#define MEMORY_MAP_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryMapLock))
++
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++static gctINT
++_GetThreadID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_pid_vnr(current);
++#else
++ return current->pid;
++#endif
++}
++
++static PLINUX_MDL
++_CreateMdl(
++ void
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER();
++
++ mdl = (PLINUX_MDL)kzalloc(sizeof(struct _LINUX_MDL), GFP_KERNEL | gcdNOWARN);
++
++ gcmkFOOTER_ARG("0x%X", mdl);
++ return mdl;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ );
++
++static gceSTATUS
++_DestroyMdl(
++ IN PLINUX_MDL Mdl
++ )
++{
++ PLINUX_MDL_MAP mdlMap, next;
++
++ gcmkHEADER_ARG("Mdl=0x%X", Mdl);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Mdl != gcvNULL);
++
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ next = mdlMap->next;
++
++ gcmkVERIFY_OK(_DestroyMdlMap(Mdl, mdlMap));
++
++ mdlMap = next;
++ }
++
++ kfree(Mdl);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static PLINUX_MDL_MAP
++_CreateMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++
++ mdlMap = (PLINUX_MDL_MAP)kmalloc(sizeof(struct _LINUX_MDL_MAP), GFP_KERNEL | gcdNOWARN);
++ if (mdlMap == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdlMap->pid = ProcessID;
++ mdlMap->vmaAddr = gcvNULL;
++ mdlMap->vma = gcvNULL;
++ mdlMap->count = 0;
++
++ mdlMap->next = Mdl->maps;
++ Mdl->maps = mdlMap;
++
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ )
++{
++ PLINUX_MDL_MAP prevMdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X MdlMap=0x%X", Mdl, MdlMap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(MdlMap != gcvNULL);
++ gcmkASSERT(Mdl->maps != gcvNULL);
++
++ if (Mdl->maps == MdlMap)
++ {
++ Mdl->maps = MdlMap->next;
++ }
++ else
++ {
++ prevMdlMap = Mdl->maps;
++
++ while (prevMdlMap->next != MdlMap)
++ {
++ prevMdlMap = prevMdlMap->next;
++
++ gcmkASSERT(prevMdlMap != gcvNULL);
++ }
++
++ prevMdlMap->next = MdlMap->next;
++ }
++
++ kfree(MdlMap);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++
++    if (Mdl == gcvNULL)
++    {
++        gcmkFOOTER_NO();
++        return gcvNULL;
++    }
++
++    mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->pid == ProcessID)
++ {
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvNULL;
++}
++
++/*******************************************************************************
++** Integer Id Management.
++*/
++gceSTATUS
++_AllocateIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctPOINTER KernelPointer,
++ OUT gctUINT32 *Id
++ )
++{
++ int result;
++ gctINT next;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ idr_preload(GFP_KERNEL | gcdNOWARN);
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++ result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC);
++
++ /* ID allocated should not be 0. */
++ gcmkASSERT(result != 0);
++
++ if (result > 0)
++ {
++ Database->curr = *Id = result;
++ }
++
++ spin_unlock(&Database->lock);
++
++ idr_preload_end();
++
++ if (result < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++again:
++ if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0)
++ {
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++ /* Try to get a id greater than 0. */
++ result = idr_get_new_above(&Database->idr, KernelPointer, next, Id);
++
++ if (!result)
++ {
++ Database->curr = *Id;
++ }
++
++ spin_unlock(&Database->lock);
++
++ if (result == -EAGAIN)
++ {
++ goto again;
++ }
++
++ if (result != 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_QueryIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER pointer;
++
++ spin_lock(&Database->lock);
++
++ pointer = idr_find(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++    if (pointer)
++ {
++ *KernelPointer = pointer;
++ return gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_OS,
++ "%s(%d) Id = %d is not found",
++ __FUNCTION__, __LINE__, Id);
++
++ return gcvSTATUS_NOT_FOUND;
++ }
++}
++
++gceSTATUS
++_DestroyIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id
++ )
++{
++ spin_lock(&Database->lock);
++
++ idr_remove(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ return gcvSTATUS_OK;
++}
++
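++/*
++ * A minimal round-trip sketch (illustration only, helper name hypothetical)
++ * of the integer-ID helpers above.  It assumes the database was initialized
++ * elsewhere with idr_init() and spin_lock_init(); the block is compiled out.
++ */
++#if 0
++static gceSTATUS
++_ExampleIntegerIdRoundTrip(
++    IN gcsINTEGER_DB_PTR Database,
++    IN gctPOINTER Object
++    )
++{
++    gceSTATUS status;
++    gctUINT32 id;
++    gctPOINTER found = gcvNULL;
++
++    /* Map the kernel pointer to a small positive integer id. */
++    status = _AllocateIntegerId(Database, Object, &id);
++
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Look the pointer back up by its id. */
++    status = _QueryIntegerId(Database, id, &found);
++
++    /* Release the id in all cases. */
++    _DestroyIntegerId(Database, id);
++
++    return status;
++}
++#endif
++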
++gceSTATUS
++_QueryProcessPageTable(
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ spinlock_t *lock;
++ gctUINTPTR_T logical = (gctUINTPTR_T)Logical;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (!current->mm)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pgd = pgd_offset(current->mm, logical);
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pud = pud_offset(pgd, logical);
++ if (pud_none(*pud) || pud_bad(*pud))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pmd = pmd_offset(pud, logical);
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &lock);
++ if (!pte)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ if (!pte_present(*pte))
++ {
++ pte_unmap_unlock(pte, lock);
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ *Address = (pte_pfn(*pte) << PAGE_SHIFT) | (logical & ~PAGE_MASK);
++ pte_unmap_unlock(pte, lock);
++
++ return gcvSTATUS_OK;
++}
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE)
++static inline gceSTATUS
++outer_func(
++ gceCACHEOPERATION Type,
++ unsigned long Start,
++ unsigned long End
++ )
++{
++ switch (Type)
++ {
++ case gcvCACHE_CLEAN:
++ outer_clean_range(Start, End);
++ break;
++ case gcvCACHE_INVALIDATE:
++ outer_inv_range(Start, End);
++ break;
++ case gcvCACHE_FLUSH:
++ outer_flush_range(Start, End);
++ break;
++ default:
++ return gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_OUTER_CACHE_PATCH
++/*******************************************************************************
++** _HandleOuterCache
++**
++** Handle the outer cache for the specified addresses.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++**
++** gceOUTERCACHE_OPERATION Type
++** Operation that needs to be executed.
++*/
++gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ )
++{
++ gceSTATUS status;
++ unsigned long paddr;
++ gctPOINTER vaddr;
++ gctUINT32 offset, bytes, left;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu",
++ Os, Logical, Bytes);
++
++ if (Physical != gcvINVALID_ADDRESS)
++ {
++ /* Non paged memory or gcvPOOL_USER surface */
++ paddr = (unsigned long) Physical;
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else
++ {
++ /* Non contiguous virtual memory */
++ vaddr = Logical;
++ left = Bytes;
++
++ while (left)
++ {
++ /* Handle (part of) current page. */
++ offset = (gctUINTPTR_T)vaddr & ~PAGE_MASK;
++
++ bytes = gcmMIN(left, PAGE_SIZE - offset);
++
++ gcmkONERROR(_QueryProcessPageTable(vaddr, (gctUINT32*)&paddr));
++ gcmkONERROR(outer_func(Type, paddr, paddr + bytes));
++
++ vaddr = (gctUINT8_PTR)vaddr + bytes;
++ left -= bytes;
++ }
++ }
++
++ mb();
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++#endif
++
++gctBOOL
++_AllowAccess(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32 data;
++
++ /* Check external clock state. */
++ if (Os->clockStates[Core] == gcvFALSE)
++ {
++ gcmkPRINT("[galcore]: %s(%d) External clock off", __FUNCTION__, __LINE__);
++ return gcvFALSE;
++ }
++
++ /* Check internal clock state. */
++ if (Address == 0)
++ {
++ return gcvTRUE;
++ }
++
++#if gcdMULTI_GPU
++ if (Core == gcvCORE_MAJOR)
++ {
++ data = readl((gctUINT8 *)Os->device->registerBases[gcvCORE_3D_0_ID] + 0x0);
++ }
++ else
++#endif
++ {
++ data = readl((gctUINT8 *)Os->device->registerBases[Core] + 0x0);
++ }
++
++ if ((data & 0x3) == 0x3)
++ {
++ gcmkPRINT("[galcore]: %s(%d) Internal clock off", __FUNCTION__, __LINE__);
++ return gcvFALSE;
++ }
++
++ return gcvTRUE;
++}
++
++static gceSTATUS
++_ShrinkMemory(
++ IN gckOS Os
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->shrinkMemory)
++ {
++ platform->ops->shrinkMemory(platform);
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Construct
++**
++** Construct a new gckOS object.
++**
++** INPUT:
++**
++** gctPOINTER Context
++** Pointer to the gckGALDEVICE class.
++**
++** OUTPUT:
++**
++** gckOS * Os
++** Pointer to a variable that will hold the pointer to the gckOS object.
++*/
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gctINT i;
++
++ gcmkHEADER_ARG("Context=0x%X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Os != gcvNULL);
++
++ /* Allocate the gckOS object. */
++ os = (gckOS) kmalloc(gcmSIZEOF(struct _gckOS), GFP_KERNEL | gcdNOWARN);
++
++ if (os == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ /* Zero the memory. */
++ gckOS_ZeroMemory(os, gcmSIZEOF(struct _gckOS));
++
++ /* Initialize the gckOS object. */
++ os->object.type = gcvOBJ_OS;
++
++ /* Set the device. */
++ os->device = Context;
++
++ /* Set allocateCount to 0, gckOS_Allocate has not been used yet. */
++ atomic_set(&os->allocateCount, 0);
++
++ /* Initialize the memory lock. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryLock));
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryMapLock));
++
++ /* Create debug lock mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->debugLock));
++
++ os->mdlHead = os->mdlTail = gcvNULL;
++
++ /* Get the kernel process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&os->kernelProcessID));
++
++ /*
++ * Initialize the signal manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->signalMutex));
++
++ /* Initialize signal id database lock. */
++ spin_lock_init(&os->signalDB.lock);
++
++ /* Initialize signal id database. */
++ idr_init(&os->signalDB.idr);
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Initialize the sync point manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->syncPointMutex));
++
++ /* Initialize sync point id database lock. */
++ spin_lock_init(&os->syncPointDB.lock);
++
++ /* Initialize sync point id database. */
++ idr_init(&os->syncPointDB.idr);
++#endif
++
++ /* Create a workqueue for os timer. */
++ os->workqueue = create_singlethread_workqueue("galcore workqueue");
++
++ if (os->workqueue == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ os->paddingPage = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++ if (os->paddingPage == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ else
++ {
++ SetPageReserved(os->paddingPage);
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ mutex_init(&os->registerAccessLocks[i]);
++ }
++
++ gckOS_ImportAllocators(os);
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (((gckGALDEVICE)(os->device))->mmu == gcvFALSE)
++ {
++ /* Only use IOMMU when internal MMU is not enabled. */
++ status = gckIOMMU_Construct(os, &os->iommu);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Fail to setup IOMMU",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++#endif
++
++ /* Return pointer to the gckOS object. */
++ *Os = os;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Os=0x%X", *Os);
++ return gcvSTATUS_OK;
++
++OnError:
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (os->syncPointMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->syncPointMutex));
++ }
++#endif
++
++ if (os->signalMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->signalMutex));
++ }
++
++ if (os->memoryMapLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryMapLock));
++ }
++
++ if (os->memoryLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryLock));
++ }
++
++ if (os->debugLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->debugLock));
++ }
++
++ if (os->workqueue != gcvNULL)
++ {
++ destroy_workqueue(os->workqueue);
++ }
++
++ kfree(os);
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Destroy
++**
++** Destroy an gckOS object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ if (Os->paddingPage != gcvNULL)
++ {
++ ClearPageReserved(Os->paddingPage);
++ __free_page(Os->paddingPage);
++ Os->paddingPage = gcvNULL;
++ }
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Destroy the sync point manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->syncPointMutex));
++#endif
++
++ /*
++ * Destroy the signal manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->signalMutex));
++
++ /* Destroy the memory lock. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryMapLock));
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryLock));
++
++ /* Destroy debug lock mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->debugLock));
++
++ /* Wait for all works done. */
++ flush_workqueue(Os->workqueue);
++
++ /* Destroy the work queue. */
++ destroy_workqueue(Os->workqueue);
++
++ gckOS_FreeAllocators(Os);
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gckIOMMU_Destory(Os, Os->iommu);
++ }
++#endif
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++
++ /* Mark the gckOS object as unknown. */
++ Os->object.type = gcvOBJ_UNKNOWN;
++
++
++ /* Free the gckOS object. */
++ kfree(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ gceSTATUS status;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ gcmkHEADER();
++
++ *PageCount = mdl->numPages;
++
++ gcmkONERROR(allocator->ops->MapKernel(allocator, mdl, Logical));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ gcmkHEADER();
++
++ allocator->ops->UnmapKernel(allocator, mdl, Logical);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ return gckOS_LockPages(Os, Physical, Bytes, gcvFALSE, Logical, PageCount);
++}
++
++gceSTATUS
++gckOS_DestroyUserVirtualMapping(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ return gckOS_UnlockPages(Os, Physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckOS_Allocate
++**
++** Allocate memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ gcmkONERROR(gckOS_AllocateMemory(Os, Bytes, Memory));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Free
++**
++** Free allocated memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Memory=0x%X", Os, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ gcmkONERROR(gckOS_FreeMemory(Os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateMemory
++**
++** Allocate memory wrapper.
++**
++** INPUT:
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctPOINTER memory;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ if (Bytes > PAGE_SIZE)
++ {
++ memory = (gctPOINTER) vmalloc(Bytes);
++ }
++ else
++ {
++ memory = (gctPOINTER) kmalloc(Bytes, GFP_KERNEL | gcdNOWARN);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Increase count. */
++ atomic_inc(&Os->allocateCount);
++
++ /* Return pointer to the memory allocation. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeMemory
++**
++** Free allocated memory wrapper.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Free the memory from the OS pool. */
++ if (is_vmalloc_addr(Memory))
++ {
++ vfree(Memory);
++ }
++ else
++ {
++ kfree(Memory);
++ }
++
++ /* Decrease count. */
++ atomic_dec(&Os->allocateCount);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
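++
++/*
++ * Usage sketch (illustrative only, not part of the original driver):
++ * gckOS_Allocate / gckOS_Free pick kmalloc or vmalloc based on the request
++ * size, so callers can allocate scratch buffers without caring about the
++ * backing pool. Assuming "os" is a valid gckOS object:
++ *
++ *     gctPOINTER buffer = gcvNULL;
++ *
++ *     if (gcmIS_SUCCESS(gckOS_Allocate(os, 4096, &buffer)))
++ *     {
++ *         // ... use the buffer ...
++ *         gcmkVERIFY_OK(gckOS_Free(os, buffer));
++ *     }
++ */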
++
++/*******************************************************************************
++**
++** gckOS_MapMemory
++**
++** Map physical memory into the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** mapped memory.
++*/
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (char *)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (char *)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): mdl->numPages: %d mdl->vmaAddr: 0x%X",
++ __FUNCTION__, __LINE__,
++ mdl->numPages,
++ mdlMap->vmaAddr
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (!mdlMap->vma)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): find_vma error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ up_write(&current->mm->mmap_sem);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_writecombine(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): dma_mmap_writecombine error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++#if !gcdPAGED_MEMORY_CACHEABLE
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++# endif
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages*PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): remap_pfn_range error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ *Logical = mdlMap->vmaAddr;
++
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemory
++**
++** Unmap physical memory out of the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Logical
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemoryEx(Os, Physical, Bytes, Logical, _GetProcessID());
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
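++
++/*
++ * Usage sketch (illustrative only, not part of the original driver):
++ * gckOS_MapMemory maps an MDL into the calling process and
++ * gckOS_UnmapMemory removes that mapping again. Assuming "os" is a valid
++ * gckOS object and "physical"/"bytes" came from an earlier
++ * gckOS_AllocateNonPagedMemory call:
++ *
++ *     gctPOINTER logical = gcvNULL;
++ *
++ *     if (gcmIS_SUCCESS(gckOS_MapMemory(os, physical, bytes, &logical)))
++ *     {
++ *         // ... access the memory through "logical" ...
++ *         gcmkVERIFY_OK(gckOS_UnmapMemory(os, physical, bytes, logical));
++ *     }
++ */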
++
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemoryEx
++**
++** Unmap physical memory in the specified process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Logical
++** Pointer to a previously mapped memory region.
++**
++** gctUINT32 PID
++** Pid of the process that opened the device and mapped this memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X PID=%d",
++ Os, Physical, Bytes, Logical, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PID != 0);
++
++ MEMORY_LOCK(Os);
++
++ if (Logical)
++ {
++ mdlMap = FindMdlMap(mdl, PID);
++
++ if (mdlMap == gcvNULL || mdlMap->vmaAddr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ _UnmapUserLogical(mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserLogical
++**
++** Unmap user logical memory out of physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** gctPOINTER Logical
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemory(Os, Physical, Bytes, Logical);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateNonPagedMemory
++**
++** Allocate a number of pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that holds the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that holds the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that will hold the physical address of the
++** allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** allocation.
++*/
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctSIZE_T bytes;
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gctSTRING addr;
++ gckKERNEL kernel;
++#ifdef NO_DMA_COHERENT
++ struct page * page;
++ long size, order;
++ gctPOINTER vaddr;
++#endif
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Align number of bytes to page size. */
++ bytes = gcmALIGN(*Bytes, PAGE_SIZE);
++
++ /* Get the total number of pages. */
++ numPages = GetPageCount(bytes, 0);
++
++ /* Allocate mdl+vector structure */
++ mdl = _CreateMdl();
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->pagedMem = 0;
++ mdl->numPages = numPages;
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++#ifndef NO_DMA_COHERENT
++#ifdef CONFIG_ARM64
++ addr = dma_alloc_coherent(gcvNULL,
++#else
++ addr = dma_alloc_writecombine(gcvNULL,
++#endif
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | gcdNOWARN, order);
++
++ if (page == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ vaddr = (gctPOINTER)page_address(page);
++ mdl->contiguous = gcvTRUE;
++ mdl->u.contiguousPages = page;
++ addr = _CreateKernelVirtualMapping(mdl);
++ mdl->dmaHandle = virt_to_phys(vaddr);
++ mdl->kaddr = vaddr;
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++
++#if !defined(CONFIG_PPC)
++ /* Cache invalidate. */
++ dma_sync_single_for_device(
++ gcvNULL,
++ page_to_phys(page),
++ bytes,
++ DMA_FROM_DEVICE);
++#endif
++
++ while (size > 0)
++ {
++ SetPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++#endif
++
++ if (addr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ kernel = Os->device->kernels[gcvCORE_MAJOR] != gcvNULL ?
++ Os->device->kernels[gcvCORE_MAJOR] : Os->device->kernels[gcvCORE_2D];
++ if (((Os->device->baseAddress & 0x80000000) != (mdl->dmaHandle & 0x80000000)) &&
++ kernel->hardware->mmuVersion == 0)
++ {
++ mdl->dmaHandle = (mdl->dmaHandle & ~0x80000000)
++ | (Os->device->baseAddress & 0x80000000);
++ }
++
++ mdl->addr = addr;
++
++ if (InUserSpace)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* This will only be valid after mmap. */
++
++ /* We need to map this to user space. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING) vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING) do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_coherent error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#else
++#if !gcdSECURITY
++ mdlMap->vma->vm_page_prot = gcmkNONPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++#endif
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages * PAGE_SIZE,
++ mdlMap->vma->vm_page_prot))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): remap_pfn_range error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif /* NO_DMA_COHERENT */
++
++ up_write(&current->mm->mmap_sem);
++
++ *Logical = mdlMap->vmaAddr;
++ }
++ else
++ {
++#if gcdSECURITY
++ *Logical = (gctPOINTER)mdl->kaddr;
++#else
++ *Logical = (gctPOINTER)mdl->addr;
++#endif
++ }
++
++ /*
++ * Add this to a global list.
++ * Will be used by get physical address
++ * and map user pointer functions.
++ */
++
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to the tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return allocated memory. */
++ *Bytes = bytes;
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdlMap != gcvNULL)
++ {
++ /* Free LINUX_MDL_MAP. */
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ if (mdl != gcvNULL)
++ {
++ /* Free LINUX_MDL. */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++ }
++ *Physical = gcvNULL;
++ *Bytes = 0;
++
++ if (locked)
++ {
++ /* Unlock memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeNonPagedMemory
++**
++** Free previously allocated and mapped pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes allocated.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocated memory.
++**
++** gctPOINTER Logical
++** Logical address of the allocated memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++#ifdef NO_DMA_COHERENT
++ unsigned size;
++ gctPOINTER vaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu Physical=0x%X Logical=0x%X",
++ Os, Bytes, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert physical address into a pointer to a MDL. */
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++#ifndef NO_DMA_COHERENT
++#ifdef CONFIG_ARM64
++ dma_free_coherent(gcvNULL,
++#else
++ dma_free_writecombine(gcvNULL,
++#endif
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle);
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ vaddr = mdl->kaddr;
++
++ while (size > 0)
++ {
++ ClearPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++ free_pages((unsigned long)mdl->kaddr, get_order(mdl->numPages * PAGE_SIZE));
++
++ _DestoryKernelVirtualMapping(mdl->addr);
++#endif /* NO_DMA_COHERENT */
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ /* No mapped memory should exist when freeing non-paged memory. */
++ gcmkASSERT(mdlMap->vmaAddr == gcvNULL);
++
++ mdlMap = mdlMap->next;
++ }
++
++ /* Remove the node from the global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
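++
++/*
++ * Usage sketch (illustrative only, not part of the original driver): a
++ * non-paged allocation returns the MDL as the "physical" handle plus a
++ * logical address, and the same triple is passed back when freeing.
++ * Assuming "os" is a valid gckOS object:
++ *
++ *     gctSIZE_T bytes = 2 * PAGE_SIZE;
++ *     gctPHYS_ADDR physical = gcvNULL;
++ *     gctPOINTER logical = gcvNULL;
++ *
++ *     if (gcmIS_SUCCESS(gckOS_AllocateNonPagedMemory(
++ *             os, gcvFALSE, &bytes, &physical, &logical)))
++ *     {
++ *         // "bytes" now holds the page-aligned size actually allocated.
++ *         gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(os, bytes, physical, logical));
++ *     }
++ */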
++
++/*******************************************************************************
++**
++** gckOS_ReadRegister
++**
++** Read data from a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** OUTPUT:
++**
++** gctUINT32 * Data
++** Pointer to a variable that receives the data read from the register.
++*/
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ return gckOS_ReadRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X", Os, Core, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++#if !gcdMULTI_GPU
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++#endif
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ if (!in_interrupt())
++ {
++ mutex_lock(&Os->registerAccessLocks[Core]);
++ }
++
++ BUG_ON(!_AllowAccess(Os, Core, Address));
++
++#if gcdMULTI_GPU
++ if (Core == gcvCORE_MAJOR)
++ {
++ *Data = readl((gctUINT8 *)Os->device->registerBase3D[gcvCORE_3D_0_ID] + Address);
++ }
++ else
++#endif
++ {
++ *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address);
++ }
++
++ if (!in_interrupt())
++ {
++ mutex_unlock(&Os->registerAccessLocks[Core]);
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_ReadRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d CoreId=%d Address=0x%X",
++ Os, Core, CoreId, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ *Data = readl((gctUINT8 *)Os->device->registerBase3D[CoreId] + Address);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_WriteRegister
++**
++** Write data to a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ return gckOS_WriteRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X Data=0x%08x", Os, Core, Address, Data);
++
++#if !gcdMULTI_GPU
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++#endif
++
++ if (!in_interrupt())
++ {
++ mutex_lock(&Os->registerAccessLocks[Core]);
++ }
++
++ BUG_ON(!_AllowAccess(Os, Core, Address));
++
++#if gcdMULTI_GPU
++ if (Core == gcvCORE_MAJOR)
++ {
++ writel(Data, (gctUINT8 *)Os->device->registerBase3D[gcvCORE_3D_0_ID] + Address);
++#if gcdMULTI_GPU > 1
++ writel(Data, (gctUINT8 *)Os->device->registerBase3D[gcvCORE_3D_1_ID] + Address);
++#endif
++ }
++ else
++#endif
++ {
++ writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address);
++ }
++
++ if (!in_interrupt())
++ {
++ mutex_unlock(&Os->registerAccessLocks[Core]);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
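++
++/*
++ * Usage sketch (illustrative only, not part of the original driver):
++ * register accesses go through gckOS_ReadRegisterEx / gckOS_WriteRegisterEx
++ * so that the per-core lock and the _AllowAccess clock check are applied.
++ * A read-modify-write of register offset 0x0, assuming "os" is a valid
++ * gckOS object (the bit below is purely an example, not a documented field):
++ *
++ *     gctUINT32 value;
++ *
++ *     gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, gcvCORE_MAJOR, 0x0, &value));
++ *     value |= 0x1;
++ *     gcmkVERIFY_OK(gckOS_WriteRegisterEx(os, gcvCORE_MAJOR, 0x0, value));
++ */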
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_WriteRegisterByCoreId(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 CoreId,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d CoreId=%d Address=0x%X Data=0x%08x",
++ Os, Core, CoreId, Address, Data);
++
++ writel(Data, (gctUINT8 *)Os->device->registerBase3D[CoreId] + Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_GetPageSize
++**
++** Get the system's page size.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PageSize
++** Pointer to a variable that will receive the system's page size.
++*/
++gceSTATUS gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(PageSize != gcvNULL);
++
++ /* Return the page size. */
++ *PageSize = (gctSIZE_T) PAGE_SIZE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageSize=%lu", *PageSize);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddress
++**
++** Get the physical system address of a corresponding virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X", Os, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Query page table of current process first. */
++ status = _QueryProcessPageTable(Logical, Address);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Get current process ID. */
++ processID = _GetProcessID();
++
++ /* Route through other function. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddressProcess(Os, Logical, processID, Address));
++ }
++
++ gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, *Address, Address));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UserLogicalToPhysical
++**
++** Get the physical system address of a corresponding user virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS gckOS_UserLogicalToPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckOS_GetPhysicalAddress(Os, Logical, Address);
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++gckOS_AddMapping(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcsUSER_MAPPING),
++ (gctPOINTER *) &map));
++
++ map->next = Os->userMap;
++ map->physical = Physical - Os->device->baseAddress;
++ map->logical = Logical;
++ map->bytes = Bytes;
++ map->start = (gctINT8_PTR) Logical;
++ map->end = map->start + Bytes;
++
++ Os->userMap = map;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckOS_RemoveMapping(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map, prev;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ for (map = Os->userMap, prev = gcvNULL; map != gcvNULL; map = map->next)
++ {
++ if ((map->logical == Logical)
++ && (map->bytes == Bytes)
++ )
++ {
++ break;
++ }
++
++ prev = map;
++ }
++
++ if (map == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++
++ if (prev == gcvNULL)
++ {
++ Os->userMap = map->next;
++ }
++ else
++ {
++ prev->next = map->next;
++ }
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, map));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gctINT8_PTR base, vBase;
++ gctUINT32 offset;
++ PLINUX_MDL_MAP map;
++ gcsUSER_MAPPING_PTR userMap;
++
++#if gcdSECURITY
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->kaddr;
++#else
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->addr;
++#endif
++
++ /* Check for the logical address match. */
++ if ((base != gcvNULL)
++ && ((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - base;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ /* paged memory is not mapped to kernel space. */
++ return gcvSTATUS_INVALID_ADDRESS;
++ }
++ else
++ {
++ *Physical = gcmPTR2INT32(virt_to_phys(base)) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++
++ /* Walk user maps. */
++ for (userMap = Os->userMap; userMap != gcvNULL; userMap = userMap->next)
++ {
++ if (((gctINT8_PTR) Logical >= userMap->start)
++ && ((gctINT8_PTR) Logical < userMap->end)
++ )
++ {
++ *Physical = userMap->physical
++ + (gctUINT32) ((gctINT8_PTR) Logical - userMap->start);
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ if (ProcessID != Os->kernelProcessID)
++ {
++ map = FindMdlMap(Mdl, (gctINT) ProcessID);
++ vBase = (map == gcvNULL) ? gcvNULL : (gctINT8_PTR) map->vmaAddr;
++
++ /* Is the given address within that range. */
++ if ((vBase != gcvNULL)
++ && ((gctINT8_PTR) Logical >= vBase)
++ && ((gctINT8_PTR) Logical < vBase + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - vBase;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, offset/PAGE_SIZE);
++ }
++ else
++ {
++ *Physical = page_to_phys(Mdl->u.contiguousPages) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ /* Address not yet found. */
++ return gcvSTATUS_INVALID_ADDRESS;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddressProcess
++**
++** Get the physical system address of a corresponding virtual address for a
++** given process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** gctUINT32 ProcessID
++** Process ID.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ )
++{
++ PLINUX_MDL mdl;
++ gctINT8_PTR base;
++ gckALLOCATOR allocator = gcvNULL;
++ gceSTATUS status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X ProcessID=%d", Os, Logical, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* First try the contiguous memory pool. */
++ if (Os->device->contiguousMapped)
++ {
++ base = (gctINT8_PTR) Os->device->contiguousBase;
++
++ if (((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Os->device->contiguousSize)
++ )
++ {
++ /* Convert logical address into physical. */
++ *Address = Os->device->contiguousVidMem->baseAddress
++ + (gctINT8_PTR) Logical - base;
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Try the contiguous memory pool. */
++ mdl = (PLINUX_MDL) Os->device->contiguousPhysical;
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Walk all MDLs. */
++ for (mdl = Os->mdlHead; mdl != gcvNULL; mdl = mdl->next)
++ {
++ /* Try this MDL. */
++ allocator = mdl->allocator;
++
++ if (allocator)
++ {
++ status = allocator->ops->LogicalToPhysical(
++ allocator,
++ mdl,
++ Logical,
++ ProcessID,
++ Address
++ );
++ }
++ else
++ {
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkONERROR(status);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPhysical
++**
++** Map a physical address into kernel space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Physical
++** Physical address of the memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the base address of the mapped
++** memory.
++*/
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctPOINTER logical;
++ PLINUX_MDL mdl;
++ gctUINT32 physical = Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* Go through our mapping to see if we know this physical address already. */
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->dmaHandle != 0)
++ {
++ if ((physical >= mdl->dmaHandle)
++ && (physical < mdl->dmaHandle + mdl->numPages * PAGE_SIZE)
++ )
++ {
++ *Logical = mdl->addr + (physical - mdl->dmaHandle);
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ if (mdl == gcvNULL)
++ {
++ struct page * page = pfn_to_page(physical >> PAGE_SHIFT);
++
++ if (pfn_valid(page_to_pfn(page)))
++ {
++ gctUINT32 offset = physical & ~PAGE_MASK;
++ struct page ** pages;
++ gctUINT numPages;
++ gctINT i;
++
++ numPages = GetPageCount(PAGE_ALIGN(offset + Bytes), 0);
++
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(page, i);
++ }
++
++ logical = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ kfree(pages);
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to vmap",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ logical += offset;
++ }
++ else
++ {
++ /* Map memory as non-cached memory. */
++ request_mem_region(physical, Bytes, "MapRegion");
++ logical = (gctPOINTER) ioremap_nocache(physical, Bytes);
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to ioremap",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++ }
++
++ /* Return pointer to mapped memory. */
++ *Logical = logical;
++ }
++
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapPhysical
++**
++** Unmap a previously mapped memory region from kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Pointer to the base address of the memory to unmap.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->addr != gcvNULL)
++ {
++ if (Logical >= (gctPOINTER)mdl->addr
++ && Logical < (gctPOINTER)((gctSTRING)mdl->addr + mdl->numPages * PAGE_SIZE))
++ {
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
++ /* Unmap the memory. */
++ vunmap((void *)((unsigned long)Logical & PAGE_MASK));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
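++
++/*
++ * Usage sketch (illustrative only, not part of the original driver):
++ * gckOS_MapPhysical reuses an existing MDL mapping when it can, vmaps pages
++ * that belong to system RAM, and falls back to ioremap for device memory;
++ * gckOS_UnmapPhysical only vunmaps addresses not covered by an MDL.
++ * Assuming "os" is valid and "physAddr" is a CPU physical address outside
++ * any MDL:
++ *
++ *     gctPOINTER logical = gcvNULL;
++ *
++ *     if (gcmIS_SUCCESS(gckOS_MapPhysical(os, physAddr, PAGE_SIZE, &logical)))
++ *     {
++ *         // ... access the mapping ...
++ *         gcmkVERIFY_OK(gckOS_UnmapPhysical(os, logical, PAGE_SIZE));
++ *     }
++ */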
++
++/*******************************************************************************
++**
++** gckOS_CreateMutex
++**
++** Create a new mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Mutex
++** Pointer to a variable that will hold a pointer to the mutex.
++*/
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Allocate the mutex structure. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct mutex), Mutex));
++
++ /* Initialize the mutex. */
++ mutex_init(*Mutex);
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Mutex=0x%X", *Mutex);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DeleteMutex
++**
++** Delete a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be deleted.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%X", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Destroy the mutex. */
++ mutex_destroy((struct mutex *)Mutex);
++
++ /* Free the mutex structure. */
++ gcmkONERROR(gckOS_Free(Os, Mutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireMutex
++**
++** Acquire a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be acquired.
++**
++** gctUINT32 Timeout
++** Timeout value specified in milliseconds.
++** Specify the value of gcvINFINITE to keep the thread suspended
++** until the mutex has been acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x Timeout=%u", Os, Mutex, Timeout);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ if (Timeout == gcvINFINITE)
++ {
++ /* Lock the mutex. */
++ mutex_lock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ if (Timeout-- == 0)
++ {
++ break;
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++
++ /* Timeout. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_TIMEOUT);
++ return gcvSTATUS_TIMEOUT;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseMutex
++**
++** Release an acquired mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Release the mutex. */
++ mutex_unlock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
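++
++/*
++ * Usage sketch (illustrative only, not part of the original driver): the
++ * mutex wrappers follow a create/acquire/release/delete lifecycle.
++ * gcvINFINITE blocks until the mutex is available; any other timeout polls
++ * mutex_trylock roughly once per millisecond. Assuming "os" is a valid
++ * gckOS object:
++ *
++ *     gctPOINTER mutex = gcvNULL;
++ *
++ *     gcmkVERIFY_OK(gckOS_CreateMutex(os, &mutex));
++ *
++ *     if (gcmIS_SUCCESS(gckOS_AcquireMutex(os, mutex, gcvINFINITE)))
++ *     {
++ *         // ... critical section ...
++ *         gcmkVERIFY_OK(gckOS_ReleaseMutex(os, mutex));
++ *     }
++ *
++ *     gcmkVERIFY_OK(gckOS_DeleteMutex(os, mutex));
++ */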
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchange
++**
++** Atomically exchange a pair of 32-bit values.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctINT32_PTR Target
++** Pointer to the 32-bit value to exchange.
++**
++** IN gctINT32 NewValue
++** Specifies a new value for the 32-bit value pointed to by Target.
++**
++** OUT gctINT32_PTR OldValue
++** The old value of the 32-bit value pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=%u", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(OldValue != gcvNULL);
++
++ /* Exchange the pair of 32-bit values. */
++ *OldValue = (gctUINT32) atomic_xchg((atomic_t *) Target, (int) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=%u", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchangePtr
++**
++** Atomically exchange a pair of pointers.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctPOINTER * Target
++** Pointer to the 32-bit value to exchange.
++**
++** IN gctPOINTER NewValue
++** Specifies a new value for the pointer pointed to by Target.
++**
++** OUT gctPOINTER * OldValue
++** The old value of the pointer pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=0x%X", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(OldValue != gcvNULL);
++
++ /* Exchange the pair of pointers. */
++ *OldValue = (gctPOINTER)(gctUINTPTR_T) atomic_xchg((atomic_t *) Target, (int)(gctUINTPTR_T) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=0x%X", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomSetMask
++**
++** Atomically set mask to Atom
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to set.
++**
++** IN gctUINT32 Mask
++** Mask to set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval | Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomClearMask
++**
++** Atomically clear mask from Atom
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to clear.
++**
++** IN gctUINT32 Mask
++** Mask to clear.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval & ~Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
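++
++/*
++ * Usage sketch (illustrative only, not part of the original driver):
++ * gckOS_AtomSetMask / gckOS_AtomClearMask implement lock-free bit updates
++ * with an atomic compare-and-swap loop, so independent flag bits in the
++ * same atom can be flipped concurrently without a mutex. Assuming "atom"
++ * was created with gckOS_AtomConstruct and FLAG is a caller-defined bit:
++ *
++ *     gcmkVERIFY_OK(gckOS_AtomSetMask(atom, FLAG));
++ *     // ... the bit is now visible to other threads ...
++ *     gcmkVERIFY_OK(gckOS_AtomClearMask(atom, FLAG));
++ */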
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Allocate the atom. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(atomic_t), Atom));
++
++ /* Initialize the atom. */
++ atomic_set((atomic_t *) *Atom, 0);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Atom=0x%X", *Atom);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Free the atom. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, Atom));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Return the current value of atom. */
++ *Value = atomic_read((atomic_t *) Atom);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x Value=%d", Os, Atom, Value);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Set the current value of atom. */
++ atomic_set((atomic_t *) Atom, Value);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Increment the atom and return its value before the increment. */
++ *Value = atomic_inc_return((atomic_t *) Atom) - 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Decrement the atom and return its value before the decrement. */
++ *Value = atomic_dec_return((atomic_t *) Atom) + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
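++/*
++    Example (informal sketch, not part of the driver): typical reference-count
++    style use of the atom helpers above. The names "os" and "atom" are
++    illustrative only.
++
++        gctPOINTER atom;
++        gctINT32 previous;
++
++        gcmkONERROR(gckOS_AtomConstruct(os, &atom));
++        gcmkONERROR(gckOS_AtomSet(os, atom, 1));
++        gcmkONERROR(gckOS_AtomIncrement(os, atom, &previous));  previous receives 1
++        gcmkONERROR(gckOS_AtomDecrement(os, atom, &previous));  previous receives 2
++        gcmkVERIFY_OK(gckOS_AtomDestroy(os, atom));
++*/
++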
++/*******************************************************************************
++**
++** gckOS_Delay
++**
++** Delay execution of the current thread for a number of milliseconds.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctUINT32 Delay
++** Delay to sleep, specified in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Delay=%u", Os, Delay);
++
++ if (Delay > 0)
++ {
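++ /* On kernels that provide schedule_hrtimeout() (2.6.28 and later), sleep on a
++ high-resolution timer; msleep() rounds up to whole jiffies and can oversleep
++ short delays considerably. */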
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ ktime_t delay = ktime_set((Delay / MSEC_PER_SEC), (Delay % MSEC_PER_SEC) * NSEC_PER_MSEC);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
++#else
++ msleep(Delay);
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTicks
++**
++** Get the number of milliseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT32_PTR Time
++** Pointer to a variable that receives the tick count, in milliseconds.
++**
++*/
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ )
++{
++ gcmkHEADER();
++
++ *Time = jiffies_to_msecs(jiffies);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_TicksAfter
++**
++** Compare two time values obtained from gckOS_GetTicks.
++**
++** INPUT:
++** gctUINT32 Time1
++** First time value to be compared.
++**
++** gctUINT32 Time2
++** Second time value to be compared.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR IsAfter
++** Pointer to a variable that receives the result: gcvTRUE if Time1 is after Time2.
++**
++*/
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ )
++{
++ gcmkHEADER();
++
++ *IsAfter = time_after((unsigned long)Time1, (unsigned long)Time2);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetTime
++**
++** Get the current time, in microseconds.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT64_PTR Time
++** Pointer to a variable that receives the time, in microseconds.
++**
++*/
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ )
++{
++ struct timeval tv;
++ gcmkHEADER();
++
++ /* Return the time of day in microseconds. */
++ do_gettimeofday(&tv);
++ *Time = (tv.tv_sec * 1000000ULL) + tv.tv_usec;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MemoryBarrier
++**
++** Make sure the CPU has executed everything up to this point and that the data
++** has been written out to the specified pointer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Address
++** Address of memory that needs to be barriered.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X", Os, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE \
++ && defined (CONFIG_ARM) \
++ && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* drain write buffer */
++ dsb();
++
++ /* drain outer cache's write buffer? */
++#else
++ mb();
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemory
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ /* Allocate the memory. */
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvALLOC_FLAG_NONE, Bytes, gcvNULL, Physical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemoryEx
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctUINT32 Flag
++** Allocation attribute.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctUINT32 * Gid
++** Pointer to a variable that receives the global ID of the allocated memory.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctUINT32 Flag,
++ IN gctSIZE_T Bytes,
++ OUT gctUINT32 * Gid,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ gctSIZE_T bytes;
++ gceSTATUS status = gcvSTATUS_OUT_OF_MEMORY;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Flag=%x Bytes=%lu", Os, Flag, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
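++ /* Round the request up to a whole number of CPU pages before computing the page count. */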
++ bytes = gcmALIGN(Bytes, PAGE_SIZE);
++
++ numPages = GetPageCount(bytes, 0);
++
++ mdl = _CreateMdl();
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Walk all allocators. */
++ list_for_each_entry(allocator, &Os->allocatorList, head)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d) flag = %x allocator->capability = %x",
++ __FUNCTION__, __LINE__, Flag, allocator->capability);
++
++ if ((Flag & allocator->capability) != Flag)
++ {
++ continue;
++ }
++
++ status = allocator->ops->Alloc(allocator, mdl, numPages, Flag);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ mdl->allocator = allocator;
++ break;
++ }
++ }
++
++ /* Check status. */
++ gcmkONERROR(status);
++
++ mdl->dmaHandle = 0;
++ mdl->addr = 0;
++ mdl->numPages = numPages;
++ mdl->pagedMem = 1;
++ mdl->contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
++
++ if (Gid != gcvNULL)
++ {
++ *Gid = mdl->gid;
++ }
++
++ MEMORY_LOCK(Os);
++
++ /*
++ * Add this to a global list.
++ * It will be used by the get-physical-address and
++ * map-user-pointer functions.
++ */
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return physical address. */
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdl != gcvNULL)
++ {
++ /* Free the memory. */
++ _DestroyMdl(mdl);
++ }
++ *Physical = gcvNULL;
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("Os=0x%X Flag=%x Bytes=%lu", Os, Flag, Bytes);
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreePagedMemory
++**
++** Free memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL) Physical;
++ gckALLOCATOR allocator = (gckALLOCATOR)mdl->allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ /* Remove the node from global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ allocator->ops->Free(allocator, mdl);
++
++ /* Free the structure... */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_LockPages
++**
++** Lock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctBOOL Cacheable
++** Cache mode of mapping.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the address of the mapped
++** memory.
++**
++** gctSIZE_T * PageCount
++** Pointer to a variable that receives the number of pages required for
++** the page table according to the GPU page size.
++*/
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ gceSTATUS status;
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount != gcvNULL);
++
++ mdl = (PLINUX_MDL) Physical;
++ allocator = mdl->allocator;
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++ status = allocator->ops->MapUser(allocator, mdl, mdlMap, Cacheable);
++
++ if (gcmIS_ERROR(status))
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", status);
++ return status;
++ }
++ }
++
++ mdlMap->count++;
++
++ /* Return the user-space address of the mapping. */
++ *Logical = mdlMap->vmaAddr;
++
++ /* Return the page number according to the GPU page size. */
++ gcmkASSERT((PAGE_SIZE % 4096) == 0);
++ gcmkASSERT((PAGE_SIZE / 4096) >= 1);
++
++ *PageCount = mdl->numPages * (PAGE_SIZE / 4096);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(gckOS_CacheFlush(
++ Os,
++ _GetProcessID(),
++ Physical,
++ gcvINVALID_ADDRESS,
++ (gctPOINTER)mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE
++ ));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X *PageCount=%lu", *Logical, *PageCount);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPages
++**
++** Map paged memory into a page table.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T PageCount
++** Number of pages required for the physical address.
++**
++** gctPOINTER PageTable
++** Pointer to the page table to fill in.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ return gckOS_MapPagesEx(Os,
++ gcvCORE_MAJOR,
++ Physical,
++ PageCount,
++ 0,
++ PageTable);
++}
++
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address,
++ IN gctPOINTER PageTable
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gckMMU mmu;
++ PLINUX_MDL mmuMdl;
++ gctUINT32 bytes;
++ gctPHYS_ADDR pageTablePhysical;
++#endif
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckKERNEL kernel = Os->device->kernels[Core];
++ gckMMU mmu;
++#endif
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Physical=0x%X PageCount=%u PageTable=0x%X",
++ Os, Core, Physical, PageCount, PageTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ allocator = mdl->allocator;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Physical->0x%X PageCount->0x%X PagedMemory->?%d",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)PageCount,
++ mdl->pagedMem
++ );
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(kernel, &mmu));
++#endif
++
++ table = (gctUINT32 *)PageTable;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ mmu = Os->device->kernels[Core]->mmu;
++ bytes = PageCount * sizeof(*table);
++ mmuMdl = (PLINUX_MDL)mmu->pageTablePhysical;
++#endif
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
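++ /* PageCount was computed in 4 KB GPU pages; convert it back to CPU pages for
++ the loop below, which writes (PAGE_SIZE / 4096) entries per CPU page. */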
++ PageCount = PageCount / (PAGE_SIZE / 4096);
++
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++ gctUINT i;
++ gctUINT32 phys = ~0;
++
++ if (mdl->pagedMem && !mdl->contiguous)
++ {
++ allocator->ops->Physical(allocator, mdl, offset, &phys);
++ }
++ else
++ {
++ if (!mdl->pagedMem)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ phys = page_to_phys(nth_page(mdl->u.contiguousPages, offset));
++ }
++
++ gcmkVERIFY_OK(gckOS_CPUPhysicalToGPUPhysical(Os, phys, &phys));
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Setup mapping in IOMMU %x => %x",
++ __FUNCTION__, __LINE__,
++ Address + (offset * PAGE_SIZE), phys
++ );
++
++ /* When the IOMMU is used, the GPU uses the system PAGE_SIZE. */
++ gcmkONERROR(gckIOMMU_Map(
++ Os->iommu, Address + (offset * PAGE_SIZE), phys, PAGE_SIZE));
++ }
++ else
++#endif
++ {
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ for (i = 0; i < (PAGE_SIZE / 4096); i++)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys + (i * 4096),
++ table++));
++ }
++ }
++ else
++#endif
++ {
++ for (i = 0; i < (PAGE_SIZE / 4096); i++)
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ gctUINT32_PTR pageTableEntry;
++ gckMMU_GetPageEntry(mmu, Address + (offset * 4096), &pageTableEntry);
++ gcmkONERROR(
++ gckMMU_SetPage(mmu,
++ phys + (i * 4096),
++ pageTableEntry));
++#else
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys + (i * 4096),
++ table++));
++#endif
++ }
++ }
++ }
++
++ offset += 1;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Get physical address of pageTable */
++ pageTablePhysical = (gctPHYS_ADDR)(mmuMdl->dmaHandle +
++ ((gctUINT32 *)PageTable - mmu->pageTableLogical));
++
++ /* Flush the mmu page table cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Os,
++ _GetProcessID(),
++ gcvNULL,
++ pageTablePhysical,
++ PageTable,
++ bytes
++ ));
++#endif
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_UnmapPages(
++ IN gckOS Os,
++ IN gctSIZE_T PageCount,
++ IN gctUINT32 Address
++ )
++{
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gcmkVERIFY_OK(gckIOMMU_Unmap(
++ Os->iommu, Address, PageCount * PAGE_SIZE));
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnlockPages
++**
++** Unlock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctPOINTER Logical
++** Address of the mapped memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%u Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
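++ /* Walk every per-process mapping of this MDL and unmap the one belonging to
++ the calling process once its reference count drops to zero. */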
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if ((mdlMap->vmaAddr != gcvNULL) && (_GetProcessID() == mdlMap->pid))
++ {
++ if (--mdlMap->count == 0)
++ {
++ allocator->ops->UnmapUser(
++ allocator,
++ mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE);
++
++ mdlMap->vmaAddr = gcvNULL;
++ }
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_AllocateContiguous
++**
++** Allocate memory from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that receives the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the logical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(Os,
++ InUserSpace,
++ Bytes,
++ Physical,
++ Logical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeContiguous
++**
++** Free memory allocated from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctPOINTER Logical
++** Logical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_FreeNonPagedMemory(Os, Bytes, Physical, Logical));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++/******************************************************************************
++**
++** gckOS_GetKernelLogical
++**
++** Return the kernel logical pointer that corresponds to the specified
++** hardware address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctUINT32 Address
++** Hardware physical address.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the pointer in kernel address space.
++*/
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ return gckOS_GetKernelLogicalEx(Os, gcvCORE_MAJOR, Address, KernelPointer);
++}
++
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%08x", Os, Core, Address);
++
++ do
++ {
++ gckGALDEVICE device;
++ gckKERNEL kernel;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctPOINTER logical;
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Os->device;
++
++ /* Kernel shortcut. */
++ kernel = device->kernels[Core];
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(
++ kernel->vg->hardware, Address, &pool, &offset
++ ));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkERR_BREAK(gckHARDWARE_SplitMemory(
++ kernel->hardware, Address, &pool, &offset
++ ));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ logical = device->contiguousBase;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkFOOTER();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Build logical address of specified address. */
++ * KernelPointer = ((gctUINT8_PTR) logical) + offset;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_MapUserPointer
++**
++** Map a pointer from the user process into the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be mapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be mapped.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the mapped pointer in kernel address
++** space.
++*/
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER buf = gcvNULL;
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu", Os, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
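++ /* gckOS_QueryNeedCopy reports gcvTRUE on this OS, so "mapping" a user pointer
++ is implemented as a bounce buffer: copy the data in here and copy it back in
++ gckOS_UnmapUserPointer. */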
++ buf = kmalloc(Size, GFP_KERNEL | gcdNOWARN);
++ if (buf == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to allocate memory.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ len = copy_from_user(buf, Pointer, Size);
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data from user.",
++ __FUNCTION__, __LINE__
++ );
++
++ if (buf != gcvNULL)
++ {
++ kfree(buf);
++ }
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ *KernelPointer = buf;
++
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserPointer
++**
++** Unmap a user process pointer from the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be unmapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be unmapped.
++**
++** gctPOINTER KernelPointer
++** Pointer in kernel address space that needs to be unmapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ )
++{
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu KernelPointer=0x%X",
++ Os, Pointer, Size, KernelPointer);
++
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ len = copy_to_user(Pointer, KernelPointer, Size);
++
++ kfree(KernelPointer);
++
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data to user.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly, or whether it
++** has to be copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++** gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d", Os, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(NeedCopy != gcvNULL);
++
++ /* We need to copy data. */
++ *NeedCopy = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedCopy=%d", *NeedCopy);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data from user. */
++ if (copy_from_user(KernelPointer, Pointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data to user. */
++ if (copy_to_user(Pointer, KernelPointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteMemory
++**
++** Write data to a memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Address
++** Address of the memory to write to.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Write memory. */
++ if (access_ok(VERIFY_WRITE, Address, 4))
++ {
++ /* User address. */
++ if(put_user(Data, (gctUINT32*)Address))
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++ }
++ else
++ {
++ /* Kernel address. */
++ *(gctUINT32 *)Address = Data;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapUserMemory
++**
++** Lock down a user buffer and return a DMA'able address to be used by the
++** hardware to access it.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to lock down.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to lock down.
++**
++** OUTPUT:
++**
++** gctPOINTER * Info
++** Pointer to variable receiving the information record required by
++** gckOS_UnmapUserMemory.
++**
++** gctUINT32_PTR Address
++** Pointer to a variable that will receive the address DMA'able by the
++** hardware.
++*/
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x Core=%d Memory=0x%x Size=%lu", Os, Core, Memory, Size);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_AddMapping(Os, *Address, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctSIZE_T pageCount, i, j;
++ gctUINT32_PTR pageTable;
++ gctUINT32 address = 0, physical = ~0U;
++ gctUINTPTR_T start, end, memory;
++ gctUINT32 offset;
++ gctINT result = 0;
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU mmu;
++#endif
++
++ gcsPageInfo_PTR info = gcvNULL;
++ struct page **pages = gcvNULL;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL || Physical != ~0U);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ gctSIZE_T extraPage;
++
++ memory = (gctUINTPTR_T) Memory;
++
++ /* Get the number of required pages. */
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Allocate extra 64 bytes to avoid cache overflow */
++ extraPage = (((memory + gcmALIGN(Size + 64, 64) + PAGE_SIZE - 1) >> PAGE_SHIFT) > end) ? 1 : 0;
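++ /* extraPage is 1 when padding the buffer to a 64-byte boundary spills into one
++ more page; a shared padding page is appended to the mapping further below. */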
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageCount: %d.",
++ __FUNCTION__, __LINE__,
++ pageCount
++ );
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ MEMORY_MAP_LOCK(Os);
++
++ /* Allocate the Info struct. */
++ info = (gcsPageInfo_PTR)kmalloc(sizeof(gcsPageInfo), GFP_KERNEL | gcdNOWARN);
++
++ if (info == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ info->extraPage = 0;
++
++ /* Allocate the array of page addresses. */
++ pages = (struct page **)kmalloc((pageCount + extraPage) * sizeof(struct page *), GFP_KERNEL | gcdNOWARN);
++
++ if (pages == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ if (Physical != ~0U)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ pages[i] = pfn_to_page((Physical >> PAGE_SHIFT) + i);
++
++ if (pfn_valid(page_to_pfn(pages[i])))
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++ else
++ {
++ /* Get the user pages. */
++ down_read(&current->mm->mmap_sem);
++
++ result = get_user_pages(current,
++ current->mm,
++ memory & PAGE_MASK,
++ pageCount,
++ 1,
++ 0,
++ pages,
++ gcvNULL
++ );
++
++ up_read(&current->mm->mmap_sem);
++
++ if (result <= 0 || result < pageCount)
++ {
++ struct vm_area_struct *vma;
++
++ /* Release the pages if any. */
++ if (result > 0)
++ {
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++
++ page_cache_release(pages[i]);
++ pages[i] = gcvNULL;
++ }
++
++ result = 0;
++ }
++
++ vma = find_vma(current->mm, memory);
++
++ if (vma && (vma->vm_flags & VM_PFNMAP))
++ {
++ pte_t * pte;
++ spinlock_t * ptl;
++ gctUINTPTR_T logical = memory;
++
++ for (i = 0; i < pageCount; i++)
++ {
++ pgd_t * pgd = pgd_offset(current->mm, logical);
++ pud_t * pud = pud_offset(pgd, logical);
++
++ if (pud)
++ {
++ pmd_t * pmd = pmd_offset(pud, logical);
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
++ if (!pte)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ pages[i] = pte_page(*pte);
++ pte_unmap_unlock(pte, ptl);
++
++ /* Advance to next. */
++ logical += PAGE_SIZE;
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Check if this memory is contiguous for old mmu. */
++ if (Os->device->kernels[Core]->hardware->mmuVersion == 0)
++ {
++ for (i = 1; i < pageCount; i++)
++ {
++ if (pages[i] != nth_page(pages[0], i))
++ {
++ /* Non-contiguous. */
++ break;
++ }
++ }
++
++ if (i == pageCount)
++ {
++ /* Contiguous memory. */
++ physical = page_to_phys(pages[0]) | (memory & ~PAGE_MASK);
++
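++ /* With the old MMU, contiguous memory inside the 2 GB window above the base
++ address can be addressed by the GPU directly, so no page table is needed. */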
++ if (!((physical - Os->device->baseAddress) & 0x80000000))
++ {
++ kfree(pages);
++ pages = gcvNULL;
++
++ info->pages = gcvNULL;
++ info->pageTable = gcvNULL;
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ *Address = physical - Os->device->baseAddress;
++ *Info = info;
++
++ gcmkVERIFY_OK(
++ gckOS_CPUPhysicalToGPUPhysical(Os, *Address, Address));
++
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
++ *Info, *Address);
++
++ return gcvSTATUS_OK;
++ }
++ }
++ }
++
++ /* Reference pages. */
++ for (i = 0; i < pageCount; i++)
++ {
++ if (pfn_valid(page_to_pfn(pages[i])))
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++ }
++
++ for (i = 0; i < pageCount; i++)
++ {
++#ifdef CONFIG_ARM
++ gctUINT32 data;
++ get_user(data, (gctUINT32*)((memory & PAGE_MASK) + i * PAGE_SIZE));
++#endif
++
++ /* Flush(clean) the data cache. */
++ gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ page_to_phys(pages[i]),
++ (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
++ PAGE_SIZE));
++ }
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkONERROR(gckKERNEL_GetProcessMMU(Os->device->kernels[Core], &mmu));
++#endif
++
++ if (extraPage)
++ {
++ pages[pageCount++] = Os->paddingPage;
++ info->extraPage = 1;
++ }
++
++#if gcdSECURITY
++ {
++ gctPHYS_ADDR physicalArrayPhysical;
++ gctPOINTER physicalArrayLogical;
++ gctUINT32_PTR logical;
++ gctSIZE_T bytes = pageCount * gcmSIZEOF(gctUINT32);
++ pageTable = gcvNULL;
++
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ Os,
++ gcvFALSE,
++ &bytes,
++ &physicalArrayPhysical,
++ &physicalArrayLogical
++ ));
++
++ logical = physicalArrayLogical;
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ phys = page_to_phys(pages[i]);
++
++ logical[i] = phys;
++ }
++ j = 0;
++
++
++ gcmkONERROR(gckKERNEL_SecurityMapMemory(
++ Os->device->kernels[Core],
++ physicalArrayLogical,
++ pageCount,
++ &address
++ ));
++
++ gcmkONERROR(gckOS_FreeNonPagedMemory(
++ Os,
++ 1,
++ physicalArrayPhysical,
++ physicalArrayLogical
++ ));
++ }
++
++#else
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckVGMMU_AllocatePages(Os->device->kernels[Core]->vg->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++ else
++#endif
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++#else
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(Os->device->kernels[Core]->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++#endif
++ }
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ gctUINT32_PTR tab = pageTable + i * (PAGE_SIZE/4096);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ gckMMU_GetPageEntry(mmu, address + i * 4096, &tab);
++#endif
++ phys = page_to_phys(pages[i]);
++
++#ifdef CONFIG_IOMMU_SUPPORT
++ if (Os->iommu)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Setup mapping in IOMMU %x => %x",
++ __FUNCTION__, __LINE__,
++ Address + (i * PAGE_SIZE), phys
++ );
++
++ gcmkONERROR(gckIOMMU_Map(
++ Os->iommu, address + i * PAGE_SIZE, phys, PAGE_SIZE));
++ }
++ else
++#endif
++ {
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckOS_CPUPhysicalToGPUPhysical(Os, phys, &phys));
++
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys,
++ tab));
++ }
++ else
++#endif
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys,
++ tab));
++ }
++
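++ /* When the CPU page size is larger than the 4 KB GPU page size, fill in the
++ remaining GPU page table entries covered by this CPU page. */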
++ for (j = 1; j < (PAGE_SIZE/4096); j++)
++ {
++ pageTable[i * (PAGE_SIZE/4096) + j] = pageTable[i * (PAGE_SIZE/4096)] + 4096 * j;
++ }
++ }
++
++#if !gcdPROCESS_ADDRESS_SPACE
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageTable[%d]: 0x%X 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, phys, pageTable[i]);
++#endif
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Os->device->kernels[Core]->vg->mmu));
++ }
++ else
++#endif
++ {
++#if gcdPROCESS_ADDRESS_SPACE
++ info->mmu = mmu;
++ gcmkONERROR(gckMMU_Flush(mmu));
++#else
++ gcmkONERROR(gckMMU_Flush(Os->device->kernels[Core]->mmu, gcvSURF_TYPE_UNKNOWN));
++#endif
++ }
++#endif
++ info->address = address;
++
++ /* Save pointer to page table. */
++ info->pageTable = pageTable;
++ info->pages = pages;
++
++ *Info = (gctPOINTER) info;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info->pages: 0x%X, info->pageTable: 0x%X, info: 0x%X.",
++ __FUNCTION__, __LINE__,
++ info->pages,
++ info->pageTable,
++ info
++ );
++
++ offset = (Physical != ~0U)
++ ? (Physical & ~PAGE_MASK)
++ : (memory & ~PAGE_MASK);
++
++ /* Return address. */
++ *Address = address + offset;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Address: 0x%X.",
++ __FUNCTION__, __LINE__,
++ *Address
++ );
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++OnError:
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error occured: %d.",
++ __FUNCTION__, __LINE__,
++ status
++ );
++
++ /* Release page array. */
++ if (result > 0 && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: page table is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++ page_cache_release(pages[i]);
++ }
++ }
++
++ if (info != gcvNULL && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: pages is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page table. */
++ kfree(pages);
++ info->pages = gcvNULL;
++ }
++
++ /* Release page info struct. */
++ if (info != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: info is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page info struct. */
++ kfree(info);
++ *Info = gcvNULL;
++ }
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x", *Info, *Address);
++ }
++ else
++ {
++ gcmkFOOTER();
++ }
++
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserMemory
++**
++** Unlock a user buffer that was previously locked down by
++** gckOS_MapUserMemory.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to unlock.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to unlock.
++**
++** gctPOINTER Info
++** Information record returned by gckOS_MapUserMemory.
++**
++** gctUINT32_PTR Address
++** The address returned by gckOS_MapUserMemory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Memory=0x%X Size=%lu Info=0x%X Address=0x%08x",
++ Os, Core, Memory, Size, Info, Address);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_RemoveMapping(Os, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctUINTPTR_T memory, start, end;
++ gcsPageInfo_PTR info;
++ gctSIZE_T pageCount, i;
++ struct page **pages;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ do
++ {
++ info = (gcsPageInfo_PTR) Info;
++
++ pages = info->pages;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info=0x%X, pages=0x%X.",
++ __FUNCTION__, __LINE__,
++ info, pages
++ );
++
++ /* Invalid page array. */
++ if (pages == gcvNULL && info->pageTable == gcvNULL)
++ {
++ kfree(info);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ memory = (gctUINTPTR_T)Memory;
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): memory: 0x%X, pageCount: %d, pageTable: 0x%X.",
++ __FUNCTION__, __LINE__,
++ memory, pageCount, info->pageTable
++ );
++
++ MEMORY_MAP_LOCK(Os);
++
++#if !gcdSECURITY
++ gcmkASSERT(info->pageTable != gcvNULL);
++#endif
++
++ if (info->extraPage)
++ {
++ pageCount += 1;
++ }
++
++#if gcdSECURITY
++ if (info->address > 0x80000000)
++ {
++ gckKERNEL_SecurityUnmapMemory(
++ Os->device->kernels[Core],
++ info->address,
++ pageCount
++ );
++ }
++ else
++ {
++ gcmkPRINT("Wrong address %s(%d) %x", __FUNCTION__, __LINE__, info->address);
++ }
++#else
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckVGMMU_FreePages(Os->device->kernels[Core]->vg->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++#if gcdPROCESS_ADDRESS_SPACE
++ gcmkERR_BREAK(gckMMU_FreePagesEx(info->mmu,
++ info->address,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++
++#else
++ gcmkERR_BREAK(gckMMU_FreePages(Os->device->kernels[Core]->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++#endif
++
++ gcmkERR_BREAK(gckOS_UnmapPages(
++ Os,
++ pageCount * (PAGE_SIZE/4096),
++ info->address
++ ));
++ }
++#endif
++
++ if (info->extraPage)
++ {
++ pageCount -= 1;
++ info->extraPage = 0;
++ }
++
++ /* Release the page cache. */
++ if (pages)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pages[%d]: 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, pages[i]
++ );
++
++ if (!PageReserved(pages[i]))
++ {
++ SetPageDirty(pages[i]);
++ }
++
++ if (pfn_valid(page_to_pfn(pages[i])))
++ {
++ page_cache_release(pages[i]);
++ }
++ }
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ if (info != gcvNULL)
++ {
++ /* Free the page array. */
++ if (info->pages != gcvNULL)
++ {
++ kfree(info->pages);
++ }
++
++ kfree(info);
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_GetBaseAddress
++**
++** Get the base address for the physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR BaseAddress
++** Pointer to a variable that will receive the base address.
++*/
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Return base address. */
++ *BaseAddress = Os->device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ if (Core == gcvCORE_MAJOR)
++ {
++ disable_irq(Os->device->irqLine3D[gcvCORE_3D_0_ID]);
++ disable_irq(Os->device->irqLine3D[gcvCORE_3D_1_ID]);
++ }
++ else
++ {
++ disable_irq(Os->device->irqLines[Core]);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#else
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ disable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ if (Core == gcvCORE_MAJOR)
++ {
++ enable_irq(Os->device->irqLine3D[gcvCORE_3D_0_ID]);
++ enable_irq(Os->device->irqLine3D[gcvCORE_3D_1_ID]);
++ }
++ else
++ {
++ enable_irq(Os->device->irqLines[Core]);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#else
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ enable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Destination=0x%X Source=0x%X Bytes=%lu",
++ Destination, Source, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Destination != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Source != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memcpy(Destination, Source, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X Bytes=%lu", Memory, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memset(Memory, 0, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Cache Control ********************************
++*******************************************************************************/
++
++/*******************************************************************************
++** gckOS_CacheClean
++**
++** Clean the cache for the specified addresses. The GPU is going to need the
++** data. If the system allocates memory as non-cacheable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID to which Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctUINT32 Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheClean(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ platform = Os->device->platform;
++
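++ /* If the platform supplies its own cache maintenance callback, delegate to it
++ and skip the generic architecture-specific maintenance below. */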
++ if (platform && platform->ops->cache)
++ {
++ platform->ops->cache(
++ platform,
++ ProcessID,
++ Handle,
++ Physical,
++ Logical,
++ Bytes,
++ gcvCACHE_CLEAN
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_TO_DEVICE);
++# else
++ dmac_clean_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, Physical, Logical, Bytes, gcvCACHE_CLEAN);
++#else
++ outer_clean_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++
++ dma_cache_wback((unsigned long) Logical, Bytes);
++
++#elif defined(CONFIG_PPC)
++
++ /* TODO */
++
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_TO_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheInvalidate
++**
++** Invalidate the cache for the specified addresses. The GPU is going to need
++** the data. If the system allocates memory as non-cacheable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID to which Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheInvalidate(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->cache)
++ {
++ platform->ops->cache(
++ platform,
++ ProcessID,
++ Handle,
++ Physical,
++ Logical,
++ Bytes,
++ gcvCACHE_INVALIDATE
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_FROM_DEVICE);
++# else
++ dmac_inv_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, Physical, Logical, Bytes, gcvCACHE_INVALIDATE);
++#else
++ outer_inv_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_FROM_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheFlush
++**
++** Clean the cache for the specified addresses and invalidate the lines as
++** well. The GPU is going to need and modify the data. If the system is
++** allocating memory as non-cacheable, this function can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++**          Process ID to which Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheFlush(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->cache)
++ {
++ platform->ops->cache(
++ platform,
++ ProcessID,
++ Handle,
++ Physical,
++ Logical,
++ Bytes,
++ gcvCACHE_FLUSH
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++ /* Inner cache. */
++ dmac_flush_range(Logical, Logical + Bytes);
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, Physical, Logical, Bytes, gcvCACHE_FLUSH);
++#else
++ outer_flush_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_wback_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_BIDIRECTIONAL);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
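++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0". It shows the intended calling pattern for the cache helpers
++** above when a cacheable buffer is shared with the GPU: clean before the GPU
++** reads CPU-written data, invalidate before the CPU reads GPU-written data.
++** All argument values are placeholders supplied by the caller.
++*/
++#if 0
++static gceSTATUS
++_ExampleCacheUsage(
++    IN gckOS Os,
++    IN gctUINT32 ProcessID,
++    IN gctPHYS_ADDR Handle,
++    IN gctUINT32 Physical,
++    IN gctPOINTER Logical,
++    IN gctSIZE_T Bytes
++    )
++{
++    gceSTATUS status;
++
++    /* CPU wrote the buffer; push the dirty lines out so the GPU sees them. */
++    gcmkONERROR(gckOS_CacheClean(Os, ProcessID, Handle, Physical, Logical, Bytes));
++
++    /* ... submit GPU work that reads and then writes the buffer ... */
++
++    /* GPU wrote the buffer; drop stale CPU cache lines before reading it. */
++    gcmkONERROR(gckOS_CacheInvalidate(Os, ProcessID, Handle, Physical, Logical, Bytes));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++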
++/*******************************************************************************
++********************************* Broadcasting *********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Broadcast
++**
++** System hook for broadcast events from the kernel driver.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gceBROADCAST Reason
++** Reason for the broadcast. Can be one of the following values:
++**
++** gcvBROADCAST_GPU_IDLE
++** Broadcasted when the kernel driver thinks the GPU might be
++** idle. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_COMMIT
++** Broadcasted when any client process commits a command
++** buffer. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_STUCK
++** Broadcasted when the kernel driver hits the timeout waiting
++** for the GPU.
++**
++** gcvBROADCAST_FIRST_PROCESS
++** First process is trying to connect to the kernel.
++**
++** gcvBROADCAST_LAST_PROCESS
++** Last process has detached from the kernel.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Hardware=0x%X Reason=%d", Os, Hardware, Reason);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ switch (Reason)
++ {
++ case gcvBROADCAST_FIRST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "First process has attached");
++ break;
++
++ case gcvBROADCAST_LAST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "Last process has detached");
++
++ /* Put GPU OFF. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++ gcvPOWER_OFF_BROADCAST));
++ break;
++
++ case gcvBROADCAST_GPU_IDLE:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "GPU idle.");
++
++ /* Put GPU IDLE. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++#if gcdPOWER_SUSPEND_WHEN_IDLE
++ gcvPOWER_SUSPEND_BROADCAST));
++#else
++ gcvPOWER_IDLE_BROADCAST));
++#endif
++
++ /* Add idle process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 1,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++ break;
++
++ case gcvBROADCAST_GPU_COMMIT:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "COMMIT has arrived.");
++
++ /* Add busy process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 0,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++
++ /* Put GPU ON. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware, gcvPOWER_ON_AUTO));
++ break;
++
++ case gcvBROADCAST_GPU_STUCK:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_GPU_STUCK\n");
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_AXI_BUS_ERROR:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_AXI_BUS_ERROR\n");
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_OUT_OF_MEMORY:
++ gcmkTRACE_N(gcvLEVEL_INFO, 0, "gcvBROADCAST_OUT_OF_MEMORY\n");
++
++ status = _ShrinkMemory(Os);
++
++ if (status == gcvSTATUS_NOT_SUPPORTED)
++ {
++ goto OnError;
++ }
++
++ gcmkONERROR(status);
++
++ break;
++
++ default:
++ /* Skip unimplemented broadcast. */
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
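++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0". It shows how a caller that detected an AXI bus error could
++** route the event through gckOS_Broadcast so the handler above dumps the GPU
++** state and triggers recovery. Os and Hardware are assumed to come from the
++** surrounding driver objects.
++*/
++#if 0
++static void
++_ExampleReportAxiBusError(
++    IN gckOS Os,
++    IN gckHARDWARE Hardware
++    )
++{
++    /* Handled by the gcvBROADCAST_AXI_BUS_ERROR case above. */
++    gcmkVERIFY_OK(gckOS_Broadcast(Os, Hardware, gcvBROADCAST_AXI_BUS_ERROR));
++}
++#endif
++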
++/*******************************************************************************
++**
++** gckOS_BroadcastHurry
++**
++** The GPU is running too slow.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Urgency
++** The higher the number, the higher the urgency to speed up the GPU.
++**          The maximum value is defined by gcdDYNAMIC_EVENT_THRESHOLD.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Urgency=%u", Os, Hardware, Urgency);
++
++ /* Do whatever you need to do to speed up the GPU now. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastCalibrateSpeed
++**
++** Calibrate the speed of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Idle, Time
++**          Idle/Time gives the fraction of time the GPU has been idle, which
++**          can be used to calibrate the operating point of the GPU.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Idle=%u Time=%u",
++ Os, Hardware, Idle, Time);
++
++    /* Do whatever you need to do to calibrate the GPU speed. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************** Semaphores **********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_CreateSemaphore
++**
++** Create a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Semaphore
++** Pointer to the variable that will receive the created semaphore.
++*/
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore *sem = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Allocate the semaphore structure. */
++ sem = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (sem == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(sem, 1);
++
++ /* Return to caller. */
++ *Semaphore = (gctPOINTER) sem;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireSemaphore
++**
++** Acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%08X Semaphore=0x%08X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_interruptible((struct semaphore *) Semaphore))
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_TryAcquireSemaphore
++**
++** Try to acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_trylock((struct semaphore *) Semaphore))
++ {
++ /* Timeout. */
++ status = gcvSTATUS_TIMEOUT;
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseSemaphore
++**
++** Release a previously acquired semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Release the semaphore. */
++ up((struct semaphore *) Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySemaphore
++**
++** Destroy a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++    /* Free the semaphore structure. */
++ kfree(Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
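++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0". It shows the usual create/acquire/release/destroy cycle for
++** the semaphore wrappers above; the protected work is a placeholder.
++*/
++#if 0
++static gceSTATUS
++_ExampleSemaphoreUsage(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctPOINTER sem = gcvNULL;
++
++    gcmkONERROR(gckOS_CreateSemaphore(Os, &sem));
++
++    /* Blocks; returns gcvSTATUS_INTERRUPTED if a signal arrives first. */
++    gcmkONERROR(gckOS_AcquireSemaphore(Os, sem));
++
++    /* ... critical section ... */
++
++    gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Os, sem));
++    gcmkVERIFY_OK(gckOS_DestroySemaphore(Os, sem));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    if (sem != gcvNULL)
++    {
++        gcmkVERIFY_OK(gckOS_DestroySemaphore(Os, sem));
++    }
++
++    return status;
++}
++#endif
++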
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ )
++{
++ /* Get process ID. */
++ if (ProcessID != gcvNULL)
++ {
++ *ProcessID = _GetProcessID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ )
++{
++ /* Get thread ID. */
++ if (ThreadID != gcvNULL)
++ {
++ *ThreadID = _GetThreadID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ )
++{
++ gcsPLATFORM * platform;
++
++ gctBOOL powerChange = gcvFALSE;
++ gctBOOL clockChange = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Clock=%d Power=%d", Os, Core, Clock, Power);
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ platform = Os->device->platform;
++
++ powerChange = (Power != Os->powerStates[Core]);
++
++ clockChange = (Clock != Os->clockStates[Core]);
++
++ if (powerChange && (Power == gcvTRUE))
++ {
++ if (platform && platform->ops->setPower)
++ {
++ gcmkVERIFY_OK(platform->ops->setPower(platform, Core, Power));
++ }
++
++ Os->powerStates[Core] = Power;
++ }
++
++ if (clockChange)
++ {
++ mutex_lock(&Os->registerAccessLocks[Core]);
++
++ if (platform && platform->ops->setClock)
++ {
++ gcmkVERIFY_OK(platform->ops->setClock(platform, Core, Clock));
++ }
++
++ Os->clockStates[Core] = Clock;
++
++ mutex_unlock(&Os->registerAccessLocks[Core]);
++ }
++
++ if (powerChange && (Power == gcvFALSE))
++ {
++ if (platform && platform->ops->setPower)
++ {
++ gcmkVERIFY_OK(platform->ops->setPower(platform, Core, Power));
++ }
++
++ Os->powerStates[Core] = Power;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ResetGPU
++**
++** Reset the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++**          GPU to be reset.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++ gcsPLATFORM * platform;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->reset)
++ {
++ status = platform->ops->reset(platform, Core);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_PrepareGPUFrequency
++**
++** Prepare to set GPU frequency and voltage.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose frequency and voltage will be set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_FinishGPUFrequency
++**
++** Finish GPU frequency setting.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose frequency and voltage is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryGPUFrequency
++**
++** Query the current frequency of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++**          GPU whose frequency is queried.
++**
++** gctUINT32 * Frequency
++** Pointer to a gctUINT32 to obtain current frequency, in MHz.
++**
++** gctUINT8 * Scale
++** Pointer to a gctUINT8 to obtain current scale(1 - 64).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUFrequency
++**
++** Set frequency and voltage of the GPU.
++**
++** 1. DVFS manager gives the target scale of full frequency, BSP must find
++**     a real frequency according to this scale and the board's configuration.
++**
++** 2. BSP should find a suitable voltage for this frequency.
++**
++**  3. BSP must make sure the setting takes effect before this function returns.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++**          GPU whose frequency and voltage are set.
++**
++** gctUINT8 Scale
++** Target scale of full frequency, range is [1, 64]. 1 means 1/64 of
++** full frequency and 64 means 64/64 of full frequency.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ )
++{
++ struct timespec time;
++
++ ktime_get_ts(&time);
++
++ *Tick = time.tv_nsec + time.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ )
++{
++ struct timespec res;
++
++ hrtimer_get_res(CLOCK_MONOTONIC, &res);
++
++ *TickRate = res.tv_nsec + res.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ )
++{
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++ return div_u64(Ticks, 1000000);
++#else
++ gctUINT64 rem = Ticks;
++ gctUINT64 b = 1000000;
++ gctUINT64 res, d = 1;
++ gctUINT32 high = rem >> 32;
++
++ /* Reduce the thing a bit first */
++ res = 0;
++ if (high >= 1000000)
++ {
++ high /= 1000000;
++ res = (gctUINT64) high << 32;
++ rem -= (gctUINT64) (high * 1000000) << 32;
++ }
++
++ while (((gctINT64) b > 0) && (b < rem))
++ {
++ b <<= 1;
++ d <<= 1;
++ }
++
++ do
++ {
++ if (rem >= b)
++ {
++ rem -= b;
++ res += d;
++ }
++
++ b >>= 1;
++ d >>= 1;
++ }
++ while (d);
++
++ return (gctUINT32) res;
++#endif
++}
++
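++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0". gckOS_GetProfileTick returns a nanosecond timestamp, so the
++** difference of two ticks converted with gckOS_ProfileToMS gives the elapsed
++** time in milliseconds.
++*/
++#if 0
++static gctUINT32
++_ExampleElapsedMS(
++    void
++    )
++{
++    gctUINT64 start, end;
++
++    gcmkVERIFY_OK(gckOS_GetProfileTick(&start));
++
++    /* ... work being measured ... */
++
++    gcmkVERIFY_OK(gckOS_GetProfileTick(&end));
++
++    /* Nanosecond delta divided down to milliseconds. */
++    return gckOS_ProfileToMS(end - start);
++}
++#endif
++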
++/******************************************************************************\
++******************************* Signal Management ******************************
++\******************************************************************************/
++
++#undef _GC_OBJ_ZONE
++#define _GC_OBJ_ZONE gcvZONE_SIGNAL
++
++/*******************************************************************************
++**
++** gckOS_CreateSignal
++**
++** Create a new signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctSIGNAL * Signal
++** Pointer to a variable receiving the created gctSIGNAL.
++*/
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X ManualReset=%d", Os, ManualReset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Create an event structure. */
++ signal = (gcsSIGNAL_PTR) kmalloc(sizeof(gcsSIGNAL), GFP_KERNEL | gcdNOWARN);
++
++ if (signal == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Save the process ID. */
++ signal->process = (gctHANDLE)(gctUINTPTR_T) _GetProcessID();
++ signal->manualReset = ManualReset;
++ signal->hardware = gcvNULL;
++ init_completion(&signal->obj);
++ atomic_set(&signal->ref, 1);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &signal->id));
++
++ *Signal = (gctSIGNAL)(gctUINTPTR_T)signal->id;
++
++ gcmkFOOTER_ARG("*Signal=0x%X", *Signal);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ kfree(signal);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ *Hardware = signal->hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ signal->hardware = Hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySignal
++**
++** Destroy a signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X", Os, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (atomic_dec_and_test(&signal->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id));
++
++        /* Free the signal. */
++ kfree(signal);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Signal
++**
++** Set a state of the specified signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X State=%d", Os, Signal, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (State)
++ {
++ /* unbind the signal from hardware. */
++ signal->hardware = gcvNULL;
++
++ /* Set the event to a signaled state. */
++ complete(&signal->obj);
++ }
++ else
++ {
++ /* Set the event to an unsignaled state. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
++ reinit_completion(&signal->obj);
++#else
++ INIT_COMPLETION(signal->obj);
++#endif
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_SetSignalVG(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ printk("gckOS_SetSignalVG:0x%x\n", result);
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_UserSignal
++**
++** Set the specified signal which is owned by a process to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ )
++{
++ gceSTATUS status;
++ gctSIGNAL signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=%d",
++ Os, Signal, (gctINT32)(gctUINTPTR_T)Process);
++
++ /* Map the signal into kernel space. */
++ gcmkONERROR(gckOS_MapSignal(Os, Signal, Process, &signal));
++
++ /* Signal. */
++ status = gckOS_Signal(Os, signal, gcvTRUE);
++
++ /* Unmap the signal */
++ gcmkVERIFY_OK(gckOS_UnmapSignal(Os, Signal));
++
++ gcmkFOOTER();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitSignal
++**
++** Wait for a signal to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Wait=0x%08X", Os, Signal, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ might_sleep();
++
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ else if (Wait == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ }
++ else
++ {
++        /* Convert the wait from milliseconds to a jiffies timeout. */
++ long timeout = (Wait == gcvINFINITE)
++ ? MAX_SCHEDULE_TIMEOUT
++ : Wait * HZ / 1000;
++
++ DECLARE_WAITQUEUE(wait, current);
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&signal->obj.wait, &wait);
++
++ while (gcvTRUE)
++ {
++ if (signal_pending(current))
++ {
++ /* Interrupt received. */
++ status = gcvSTATUS_INTERRUPTED;
++ break;
++ }
++
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&signal->obj.wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ if (timeout == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ break;
++ }
++ }
++
++ __remove_wait_queue(&signal->obj.wait, &wait);
++ }
++
++ spin_unlock_irq(&signal->obj.wait.lock);
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER_ARG("Signal=0x%X status=%d", Signal, status);
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapSignal
++**
++** Map a signal into the current process space.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++**          Pointer to the gctSIGNAL to map.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** gctSIGNAL * MappedSignal
++** Pointer to a variable receiving the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=0x%X", Os, Signal, Process);
++
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(MappedSignal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++    if (atomic_inc_return(&signal->ref) <= 1)
++    {
++        /* The previous value was 0, so the signal has already been destroyed. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ *MappedSignal = (gctSIGNAL) Signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*MappedSignal=0x%X", *MappedSignal);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapSignal
++**
++** Unmap a signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++**          Pointer to the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ return gckOS_DestroySignal(Os, Signal);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateUserSignal
++**
++** Create a new signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctINT * SignalID
++** Pointer to a variable receiving the created signal's ID.
++*/
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T signal;
++
++ /* Create a new signal. */
++ gcmkONERROR(gckOS_CreateSignal(Os, ManualReset, (gctSIGNAL *) &signal));
++ *SignalID = (gctINT) signal;
++
++OnError:
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyUserSignal
++**
++** Destroy a signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctINT SignalID
++** The signal's ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ )
++{
++ return gckOS_DestroySignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID);
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitUserSignal
++**
++** Wait for a signal used in the user mode to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctINT SignalID
++** Signal ID.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ )
++{
++ return gckOS_WaitSignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, Wait);
++}
++
++/*******************************************************************************
++**
++** gckOS_SignalUserSignal
++**
++** Set a state of the specified signal to be used in the user space.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctINT SignalID
++** SignalID.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ )
++{
++ return gckOS_Signal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, State);
++}
++
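++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0". It walks the lifecycle of a user signal ID using the wrappers
++** above: create it auto-reset, signal it, wait on it, then destroy it. The
++** 100 ms timeout is an arbitrary example value.
++*/
++#if 0
++static gceSTATUS
++_ExampleUserSignalUsage(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctINT id = 0;
++
++    /* Auto-reset: a successful wait returns the signal to nonsignaled state. */
++    gcmkONERROR(gckOS_CreateUserSignal(Os, gcvFALSE, &id));
++
++    gcmkONERROR(gckOS_SignalUserSignal(Os, id, gcvTRUE));
++
++    /* Returns gcvSTATUS_TIMEOUT if the signal does not arrive within 100 ms. */
++    gcmkONERROR(gckOS_WaitUserSignal(Os, id, 100));
++
++    gcmkVERIFY_OK(gckOS_DestroyUserSignal(Os, id));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    if (id != 0)
++    {
++        gcmkVERIFY_OK(gckOS_DestroyUserSignal(Os, id));
++    }
++
++    return status;
++}
++#endif
++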
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctSEMAPHORE * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore * newSemaphore;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Allocate the semaphore structure. */
++ newSemaphore = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (newSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(newSemaphore, 0);
++
++ /* Set the handle. */
++ * Semaphore = (gctSEMAPHORE) newSemaphore;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Increment the semaphore's count. */
++ up((struct semaphore *) Semaphore);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Decrement the semaphore's count. If the count is zero, wait
++ until it gets incremented. */
++ result = down_interruptible((struct semaphore *) Semaphore);
++
++ /* Signal received? */
++ if (result != 0)
++ {
++ status = gcvSTATUS_TERMINATE;
++ break;
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetSignal
++**
++** Set the specified signal to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signals with numbers between 32 and 63 are real-time,
++ send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++******************************** Thread Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ )
++{
++ gceSTATUS status;
++ struct task_struct * thread;
++
++ gcmkHEADER_ARG("Os=0x%X ", Os);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(ThreadFunction != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ do
++ {
++ /* Create the thread. */
++ thread = kthread_create(
++ ThreadFunction,
++ ThreadParameter,
++ "Vivante Kernel Thread"
++ );
++
++ /* Failed? */
++ if (IS_ERR(thread))
++ {
++ status = gcvSTATUS_GENERIC_IO;
++ break;
++ }
++
++ /* Start the thread. */
++ wake_up_process(thread);
++
++ /* Set the thread handle. */
++ * Thread = (gctTHREAD) thread;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ /* Thread should have already been enabled to terminate. */
++ kthread_stop((struct task_struct *) Thread);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++/******************************************************************************\
++******************************** Software Timer ********************************
++\******************************************************************************/
++
++void
++_TimerFunction(
++ struct work_struct * work
++ )
++{
++ gcsOSTIMER_PTR timer = (gcsOSTIMER_PTR)work;
++
++ gctTIMERFUNCTION function = timer->function;
++
++ function(timer->data);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateTimer
++**
++** Create a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++**      gctTIMERFUNCTION Function
++**          Pointer to a callback function which will be called when the
++**          timer expires.
++**
++**      gctPOINTER Data
++**          Private data which will be passed to the callback function.
++**
++** OUTPUT:
++**
++** gctPOINTER * Timer
++** Pointer to a variable receiving the created timer.
++*/
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ )
++{
++ gceSTATUS status;
++ gcsOSTIMER_PTR pointer;
++ gcmkHEADER_ARG("Os=0x%X Function=0x%X Data=0x%X", Os, Function, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(Os, sizeof(gcsOSTIMER), (gctPOINTER)&pointer));
++
++ pointer->function = Function;
++ pointer->data = Data;
++
++ INIT_DELAYED_WORK(&pointer->work, _TimerFunction);
++
++ *Timer = pointer;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyTimer
++**
++** Destroy a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++**          Pointer to the timer to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, Timer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StartTimer
++**
++** Schedule a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be scheduled.
++**
++** gctUINT32 Delay
++** Delay in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ )
++{
++ gcsOSTIMER_PTR timer;
++
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X Delay=%u", Os, Timer, Delay);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Delay != 0);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
++ mod_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++#else
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++ }
++
++ queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StopTimer
++**
++** Cancel a scheduled timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++**          Pointer to the timer to be canceled.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ cancel_delayed_work(&timer->work);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
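++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0". It arms one of the software timers above; the callback and
++** the 500 ms delay are placeholder examples, and the private Data pointer is
++** handed back to the callback unchanged.
++*/
++#if 0
++static void
++_ExampleTimerCallback(
++    gctPOINTER Data
++    )
++{
++    /* Runs on Os->workqueue once the delay has expired. */
++}
++
++static gceSTATUS
++_ExampleTimerUsage(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctPOINTER timer = gcvNULL;
++
++    gcmkONERROR(gckOS_CreateTimer(Os, _ExampleTimerCallback, gcvNULL, &timer));
++
++    /* Schedule the callback roughly 500 ms from now. */
++    gcmkONERROR(gckOS_StartTimer(Os, timer, 500));
++
++    /* ... later: cancel the timer if it is still pending, then free it. */
++    gcmkVERIFY_OK(gckOS_StopTimer(Os, timer));
++    gcmkVERIFY_OK(gckOS_DestroyTimer(Os, timer));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    if (timer != gcvNULL)
++    {
++        gcmkVERIFY_OK(gckOS_DestroyTimer(Os, timer));
++    }
++
++    return status;
++}
++#endif
++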
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ )
++{
++ struct task_struct *task;
++
++ /* Get the task_struct of the task with pid. */
++ rcu_read_lock();
++
++ task = FIND_TASK_BY_PID(Pid);
++
++ if (task == gcvNULL)
++ {
++ rcu_read_unlock();
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ /* Get name of process. */
++ strncpy(String, task->comm, Length);
++
++ rcu_read_unlock();
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ dump_stack();
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DetectProcessByName
++**
++** task->comm may hold only part of the process name, so this function
++** should only be used for debugging.
++**
++** INPUT:
++**
++** gctCONST_POINTER Name
++**          Pointer to a string holding the name to be checked. If the name
++**          is longer than TASK_COMM_LEN (16), only the leading part is used
++**          for the match.
++**
++** OUTPUT:
++**
++** gcvSTATUS_TRUE if name of current process matches Name.
++**
++*/
++gceSTATUS
++gckOS_DetectProcessByName(
++ IN gctCONST_POINTER Name
++ )
++{
++ char comm[sizeof(current->comm)];
++
++ memset(comm, 0, sizeof(comm));
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessNameByPid(_GetProcessID(), sizeof(current->comm), comm));
++
++ return strstr(comm, Name) ? gcvSTATUS_TRUE
++ : gcvSTATUS_FALSE;
++}
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++    /* Create a sync point structure. */
++ syncPoint = (gcsSYNC_POINT_PTR) kmalloc(
++ sizeof(gcsSYNC_POINT), GFP_KERNEL | gcdNOWARN);
++
++ if (syncPoint == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the sync point. */
++ atomic_set(&syncPoint->ref, 1);
++ atomic_set(&syncPoint->state, 0);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->syncPointDB, syncPoint, &syncPoint->id));
++
++ *SyncPoint = (gctSYNC_POINT)(gctUINTPTR_T)syncPoint->id;
++
++    gcmkFOOTER_ARG("*SyncPoint=%d", syncPoint->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (syncPoint != gcvNULL)
++ {
++ kfree(syncPoint);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Initialize the sync point. */
++ atomic_inc(&syncPoint->ref);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ if (atomic_dec_and_test(&syncPoint->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->syncPointDB, syncPoint->id));
++
++        /* Free the sync point. */
++ syncPoint->timeline = gcvNULL;
++ kfree(syncPoint);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ struct sync_timeline * timeline;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Set signaled state. */
++ atomic_set(&syncPoint->state, 1);
++
++ /* Get parent timeline. */
++ timeline = syncPoint->timeline;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Signal timeline. */
++ if (timeline)
++ {
++ sync_timeline_signal(timeline);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ *State = atomic_read(&syncPoint->state);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++
++ /* Create viv sync timeline. */
++ timeline = viv_sync_timeline_create("viv timeline", Os);
++
++ if (timeline == gcvNULL)
++ {
++ /* Out of memory. */
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ *Timeline = (gctHANDLE) timeline;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++ gcmkASSERT(Timeline != gcvNULL);
++
++ /* Destroy timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++ sync_timeline_destroy(&timeline->obj);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ )
++{
++ int fd = -1;
++ struct viv_sync_timeline *timeline;
++ struct sync_pt * pt = gcvNULL;
++ struct sync_fence * fence;
++ char name[32];
++ gcsSYNC_POINT_PTR syncPoint;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Timeline=0x%X SyncPoint=%d",
++ Os, Timeline, (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Cast timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++
++ fd = get_unused_fd();
++
++ if (fd < 0)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create viv_sync_pt. */
++ pt = viv_sync_pt_create(timeline, SyncPoint);
++
++ if (pt == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Reference sync_timeline. */
++ syncPoint->timeline = &timeline->obj;
++
++ /* Build fence name. */
++ snprintf(name, 32, "viv sync_fence-%u", (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ /* Create sync_fence. */
++ fence = sync_fence_create(name, pt);
++
++ if (fence == NULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Install fence to fd. */
++ sync_fence_install(fence, fd);
++
++ *FenceFD = fd;
++ gcmkFOOTER_ARG("*FenceFD=%d", fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (pt)
++ {
++ sync_pt_free(pt);
++ }
++
++ if (fd > 0)
++ {
++ put_unused_fd(fd);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
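++/* Illustrative sketch, not part of the original Vivante sources; compiled out
++** with "#if 0" and only meaningful when gcdANDROID_NATIVE_FENCE_SYNC is
++** enabled. It shows one sync point being exported as an Android native fence
++** fd via the helpers above and then signaled when the associated GPU work
++** completes. The Timeline handle is assumed to come from
++** gckOS_CreateSyncTimeline.
++*/
++#if 0
++static gceSTATUS
++_ExampleNativeFenceUsage(
++    IN gckOS Os,
++    IN gctHANDLE Timeline
++    )
++{
++    gceSTATUS status;
++    gctSYNC_POINT syncPoint = gcvNULL;
++    gctINT fenceFD = -1;
++
++    gcmkONERROR(gckOS_CreateSyncPoint(Os, &syncPoint));
++
++    /* The returned fd can be passed to user space for fence waiting. */
++    gcmkONERROR(gckOS_CreateNativeFence(Os, Timeline, syncPoint, &fenceFD));
++
++    /* ... once the GPU work tied to this sync point has finished ... */
++    gcmkONERROR(gckOS_SignalSyncPoint(Os, syncPoint));
++
++    /* Drop the reference taken by gckOS_CreateSyncPoint. */
++    gcmkVERIFY_OK(gckOS_DestroySyncPoint(Os, syncPoint));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    if (syncPoint != gcvNULL)
++    {
++        gcmkVERIFY_OK(gckOS_DestroySyncPoint(Os, syncPoint));
++    }
++
++    return status;
++}
++#endif
++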
++#if gcdSECURITY
++gceSTATUS
++gckOS_AllocatePageArray(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageArrayLogical,
++ OUT gctPHYS_ADDR * PageArrayPhysical
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++ gctSIZE_T bytes;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X PageCount=%u",
++ Os, Physical, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ bytes = PageCount * gcmSIZEOF(gctUINT32);
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ Os,
++ gcvFALSE,
++ &bytes,
++ PageArrayPhysical,
++ PageArrayLogical
++ ));
++
++ table = *PageArrayLogical;
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ allocator = mdl->allocator;
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
++ PageCount = PageCount / (PAGE_SIZE / 4096);
++
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++ unsigned long phys = ~0;
++
++ if (mdl->pagedMem && !mdl->contiguous)
++ {
++ if (allocator)
++ {
++ gctUINT32 phys_addr;
++ allocator->ops->Physical(allocator, mdl, offset, &phys_addr);
++ phys = (unsigned long)phys_addr;
++ }
++ }
++ else
++ {
++ if (!mdl->pagedMem)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ phys = page_to_phys(nth_page(mdl->u.contiguousPages, offset));
++ }
++
++ table[offset] = phys;
++
++ offset += 1;
++ }
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckOS_CPUPhysicalToGPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 CPUPhysical,
++ IN gctUINT32_PTR GPUPhysical
++ )
++{
++ gcsPLATFORM * platform;
++ gcmkHEADER_ARG("CPUPhysical=0x%X", CPUPhysical);
++
++ platform = Os->device->platform;
++
++ if (platform && platform->ops->getGPUPhysical)
++ {
++ gcmkVERIFY_OK(
++ platform->ops->getGPUPhysical(platform, CPUPhysical, GPUPhysical));
++ }
++ else
++ {
++ *GPUPhysical = CPUPhysical;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_GPUPhysicalToCPUPhysical(
++ IN gckOS Os,
++ IN gctUINT32 GPUPhysical,
++ IN gctUINT32_PTR CPUPhysical
++ )
++{
++ gcmkHEADER_ARG("GPUPhysical=0x%X", GPUPhysical);
++
++ *CPUPhysical = GPUPhysical;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_PhysicalToPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Physical,
++ OUT gctUINT32 * PhysicalAddress
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++ gckALLOCATOR allocator = mdl->allocator;
++
++ if (allocator)
++ {
++ return allocator->ops->Physical(allocator, mdl, 0, PhysicalAddress);
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++}
++
++gceSTATUS
++gckOS_QueryOption(
++ IN gckOS Os,
++ IN gctCONST_STRING Option,
++ OUT gctUINT32 * Value
++ )
++{
++ gckGALDEVICE device = Os->device;
++
++ if (!strcmp(Option, "physBase"))
++ {
++ *Value = device->physBase;
++ return gcvSTATUS_OK;
++ }
++ else if (!strcmp(Option, "physSize"))
++ {
++ *Value = device->physSize;
++ return gcvSTATUS_OK;
++ }
++ else if (!strcmp(Option, "mmu"))
++ {
++#if gcdSECURITY
++ *Value = 0;
++#else
++ *Value = device->mmu;
++#endif
++ return gcvSTATUS_OK;
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++}
++
++static int
++fd_release(
++ struct inode *inode,
++ struct file *file
++ )
++{
++ gcsFDPRIVATE_PTR private = (gcsFDPRIVATE_PTR)file->private_data;
++
++ if (private && private->release)
++ {
++ return private->release(private);
++ }
++
++ return 0;
++}
++
++static const struct file_operations fd_fops = {
++ .release = fd_release,
++};
++
++gceSTATUS
++gckOS_GetFd(
++ IN gctSTRING Name,
++ IN gcsFDPRIVATE_PTR Private,
++ OUT gctINT *Fd
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ *Fd = anon_inode_getfd(Name, &fd_fops, Private, O_RDWR);
++
++ if (*Fd < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ return gcvSTATUS_OK;
++#else
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
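
As a rough illustration of how the two helpers above fit together, the sketch below queries the contiguous pool options and converts the base address to the GPU's view. The wrapper name _QueryPoolForGPU is invented for illustration; the option strings and every call it makes are the ones defined above.

    /* Illustrative sketch only; assumes the driver-internal headers above. */
    static gceSTATUS
    _QueryPoolForGPU(
        IN gckOS Os,
        OUT gctUINT32 * GpuBase,
        OUT gctUINT32 * Size
        )
    {
        gceSTATUS status;
        gctUINT32 cpuBase = 0;

        /* "physBase" and "physSize" are option names handled by
        ** gckOS_QueryOption(); unknown names return gcvSTATUS_NOT_SUPPORTED. */
        gcmkONERROR(gckOS_QueryOption(Os, "physBase", &cpuBase));
        gcmkONERROR(gckOS_QueryOption(Os, "physSize", Size));

        /* Apply the platform's CPU-to-GPU address translation hook, if any. */
        return gckOS_CPUPhysicalToGPUPhysical(Os, cpuBase, GpuBase);

    OnError:
        return status;
    }
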
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_os.h 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,90 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_os_h_
++#define __gc_hal_kernel_os_h_
++
++typedef struct _LINUX_MDL_MAP
++{
++ gctINT pid;
++ gctPOINTER vmaAddr;
++ gctUINT32 count;
++ struct vm_area_struct * vma;
++ struct _LINUX_MDL_MAP * next;
++}
++LINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL_MAP * PLINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL
++{
++ char * addr;
++
++ union _pages
++ {
++        /* Pointer to an array of pages. */
++ struct page * contiguousPages;
++        /* Pointer to an array of pointers to pages. */
++ struct page ** nonContiguousPages;
++ }
++ u;
++
++#ifdef NO_DMA_COHERENT
++ gctPOINTER kaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gctINT numPages;
++ gctINT pagedMem;
++ gctBOOL contiguous;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctBOOL exact;
++#endif
++ dma_addr_t dmaHandle;
++ PLINUX_MDL_MAP maps;
++ struct _LINUX_MDL * prev;
++ struct _LINUX_MDL * next;
++
++ /* Pointer to allocator which allocates memory for this mdl. */
++ void * allocator;
++
++ /* Private data used by allocator. */
++ void * priv;
++
++ uint gid;
++}
++LINUX_MDL, *PLINUX_MDL;
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT PID
++ );
++
++typedef struct _DRIVER_ARGS
++{
++ gctUINT64 InputBuffer;
++ gctUINT64 InputBufferSize;
++ gctUINT64 OutputBuffer;
++ gctUINT64 OutputBufferSize;
++}
++DRIVER_ARGS;
++
++#endif /* __gc_hal_kernel_os_h_ */
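
DRIVER_ARGS is the fixed-size envelope user space hands to the galcore ioctl; drv_ioctl() later in this patch requires both buffers to be exactly sizeof(gcsHAL_INTERFACE). A hedged user-space sketch follows, assuming the usual /dev/galcore node and that gc_hal_driver.h provides gcsHAL_INTERFACE, DRIVER_ARGS and IOCTL_GCHAL_INTERFACE; the function name is invented for illustration.

    /* Hypothetical user-space caller; the device path and header names are
     * assumptions, the field usage matches drv_ioctl() in this patch. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    int call_galcore(gcsHAL_INTERFACE *iface)
    {
        DRIVER_ARGS args;
        int ret;
        int fd = open("/dev/galcore", O_RDWR);

        if (fd < 0)
        {
            return -1;
        }

        /* Input and output must both be exactly sizeof(gcsHAL_INTERFACE),
         * otherwise drv_ioctl() rejects the request. */
        args.InputBuffer      = (uintptr_t) iface;
        args.InputBufferSize  = sizeof(gcsHAL_INTERFACE);
        args.OutputBuffer     = (uintptr_t) iface;
        args.OutputBufferSize = sizeof(gcsHAL_INTERFACE);

        ret = ioctl(fd, IOCTL_GCHAL_INTERFACE, &args);
        close(fd);
        return ret;
    }
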
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_platform.h 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,279 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef _gc_hal_kernel_platform_h_
++#define _gc_hal_kernel_platform_h_
++#include <linux/mm.h>
++
++typedef struct _gcsMODULE_PARAMETERS
++{
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gctINT irqLine3D0;
++ gctUINT registerMemBase3D0;
++ gctUINT registerMemSize3D0;
++ gctINT irqLine3D1;
++ gctUINT registerMemBase3D1;
++ gctUINT registerMemSize3D1;
++#else
++ gctINT irqLine;
++ gctUINT registerMemBase;
++ gctUINT registerMemSize;
++#endif
++ gctINT irqLine2D;
++ gctUINT registerMemBase2D;
++ gctUINT registerMemSize2D;
++ gctINT irqLineVG;
++ gctUINT registerMemBaseVG;
++ gctUINT registerMemSizeVG;
++ gctUINT contiguousSize;
++ gctUINT contiguousBase;
++ gctUINT contiguousRequested;
++ gctUINT bankSize;
++ gctINT fastClear;
++ gctINT compression;
++ gctINT powerManagement;
++ gctINT gpuProfiler;
++ gctINT signal;
++ gctUINT baseAddress;
++ gctUINT physSize;
++ gctUINT logFileSize;
++ gctUINT recovery;
++ gctUINT stuckDump;
++ gctUINT showArgs;
++ gctUINT gpu3DMinClock;
++}
++gcsMODULE_PARAMETERS;
++
++typedef struct _gcsPLATFORM * gckPLATFORM;
++
++typedef struct _gcsPLATFORM_OPERATIONS
++{
++ /*******************************************************************************
++ **
++ ** needAddDevice
++ **
++ ** Determine whether platform_device is created by initialization code.
++    **  If platform_device is created by the BSP, return gcvFALSE here.
++ */
++ gctBOOL
++ (*needAddDevice)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustParam
++ **
++    **  Override the contents of the module arguments. If an argument is not
++    **  changed here, it keeps its default value or the value set on the
++    **  insmod command line.
++ */
++ gceSTATUS
++ (*adjustParam)(
++ IN gckPLATFORM Platform,
++ OUT gcsMODULE_PARAMETERS *Args
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustDriver
++ **
++    **  Override the contents of the platform_driver that will be registered.
++ */
++ gceSTATUS
++ (*adjustDriver)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** getPower
++ **
++ ** Prepare power and clock operation.
++ */
++ gceSTATUS
++ (*getPower)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** putPower
++ **
++ ** Finish power and clock operation.
++ */
++ gceSTATUS
++ (*putPower)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** allocPriv
++ **
++ ** Construct platform private data.
++ */
++ gceSTATUS
++ (*allocPriv)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** freePriv
++ **
++    **  Free the platform private data.
++ */
++ gceSTATUS
++ (*freePriv)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** setPower
++ **
++ ** Set power state of specified GPU.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++    **          The GPU to configure.
++ **
++ ** gceBOOL Enable
++ ** Enable or disable power.
++ */
++ gceSTATUS
++ (*setPower)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ );
++
++ /*******************************************************************************
++ **
++ ** setClock
++ **
++ ** Set clock state of specified GPU.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++    **          The GPU to configure.
++ **
++ ** gceBOOL Enable
++ ** Enable or disable clock.
++ */
++ gceSTATUS
++ (*setClock)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ );
++
++ /*******************************************************************************
++ **
++ ** reset
++ **
++    **  Reset the GPU externally (e.g. through an SoC-level reset).
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++    **          The GPU to reset.
++ */
++ gceSTATUS
++ (*reset)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU
++ );
++
++ /*******************************************************************************
++ **
++ ** getGPUPhysical
++ **
++ ** Convert CPU physical address to GPU physical address if they are
++ ** different.
++ */
++ gceSTATUS
++ (*getGPUPhysical)(
++ IN gckPLATFORM Platform,
++ IN gctUINT32 CPUPhysical,
++ OUT gctUINT32_PTR GPUPhysical
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustProt
++ **
++    **  Override the protection flags when mapping paged memory to user space.
++ */
++ gceSTATUS
++ (*adjustProt)(
++ IN struct vm_area_struct * vma
++ );
++
++ /*******************************************************************************
++ **
++ ** shrinkMemory
++ **
++    **  Reclaim memory when the system is under pressure, e.g. act as an OOM killer.
++ */
++ gceSTATUS
++ (*shrinkMemory)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** cache
++ **
++ ** Cache operation.
++ */
++ gceSTATUS
++ (*cache)(
++ IN gckPLATFORM Platform,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++}
++gcsPLATFORM_OPERATIONS;
++
++typedef struct _gcsPLATFORM
++{
++ struct platform_device* device;
++ struct platform_driver* driver;
++
++ gcsPLATFORM_OPERATIONS* ops;
++
++ void* priv;
++}
++gcsPLATFORM;
++
++void
++gckPLATFORM_QueryOperations(
++ IN gcsPLATFORM_OPERATIONS ** Operations
++ );
++
++#endif
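
The i.MX6 backend added further below fills in most of these hooks. As a minimal sketch, assuming a single-GPU build and using placeholder IRQ and register values, a trivial platform only has to provide an operations table and export it through gckPLATFORM_QueryOperations():

    /* Minimal, hypothetical platform backend; for illustration only. */
    static gctBOOL
    _NeedAddDevice(gckPLATFORM Platform)
    {
        /* No BSP-created platform_device: let the driver allocate one. */
        return gcvTRUE;
    }

    static gceSTATUS
    _AdjustParam(gckPLATFORM Platform, gcsMODULE_PARAMETERS *Args)
    {
        /* Placeholder resources for a board without device-tree data. */
        Args->irqLine         = 41;
        Args->registerMemBase = 0x00130000;
        Args->registerMemSize = 0x4000;
        return gcvSTATUS_OK;
    }

    static gcsPLATFORM_OPERATIONS _Operations =
    {
        .needAddDevice = _NeedAddDevice,
        .adjustParam   = _AdjustParam,
        /* Hooks left NULL are simply skipped by the probe and init code. */
    };

    void
    gckPLATFORM_QueryOperations(gcsPLATFORM_OPERATIONS **Operations)
    {
        *Operations = &_Operations;
    }
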
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_probe.c 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,1347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <linux/device.h>
++#include <linux/slab.h>
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_driver.h"
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#ifdef CONFIG_PXA_DVFM
++# include <mach/dvfm.h>
++# include <mach/pxa3xx_dvfm.h>
++#endif
++
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_DRIVER
++
++MODULE_DESCRIPTION("Vivante Graphics Driver");
++MODULE_LICENSE("GPL");
++
++static struct class* gpuClass;
++
++static gcsPLATFORM platform;
++
++static gckGALDEVICE galDevice;
++
++static uint major = 199;
++module_param(major, uint, 0644);
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++static int irqLine3D0 = -1;
++module_param(irqLine3D0, int, 0644);
++
++static ulong registerMemBase3D0 = 0;
++module_param(registerMemBase3D0, ulong, 0644);
++
++static ulong registerMemSize3D0 = 2 << 10;
++module_param(registerMemSize3D0, ulong, 0644);
++
++static int irqLine3D1 = -1;
++module_param(irqLine3D1, int, 0644);
++
++static ulong registerMemBase3D1 = 0;
++module_param(registerMemBase3D1, ulong, 0644);
++
++static ulong registerMemSize3D1 = 2 << 10;
++module_param(registerMemSize3D1, ulong, 0644);
++#else
++static int irqLine = -1;
++module_param(irqLine, int, 0644);
++
++static ulong registerMemBase = 0x80000000;
++module_param(registerMemBase, ulong, 0644);
++
++static ulong registerMemSize = 2 << 10;
++module_param(registerMemSize, ulong, 0644);
++#endif
++
++static int irqLine2D = -1;
++module_param(irqLine2D, int, 0644);
++
++static ulong registerMemBase2D = 0x00000000;
++module_param(registerMemBase2D, ulong, 0644);
++
++static ulong registerMemSize2D = 2 << 10;
++module_param(registerMemSize2D, ulong, 0644);
++
++static int irqLineVG = -1;
++module_param(irqLineVG, int, 0644);
++
++static ulong registerMemBaseVG = 0x00000000;
++module_param(registerMemBaseVG, ulong, 0644);
++
++static ulong registerMemSizeVG = 2 << 10;
++module_param(registerMemSizeVG, ulong, 0644);
++
++#ifndef gcdDEFAULT_CONTIGUOUS_SIZE
++#define gcdDEFAULT_CONTIGUOUS_SIZE (4 << 20)
++#endif
++static ulong contiguousSize = gcdDEFAULT_CONTIGUOUS_SIZE;
++module_param(contiguousSize, ulong, 0644);
++
++static ulong contiguousBase = 0;
++module_param(contiguousBase, ulong, 0644);
++
++static ulong bankSize = 0;
++module_param(bankSize, ulong, 0644);
++
++static int fastClear = -1;
++module_param(fastClear, int, 0644);
++
++static int compression = -1;
++module_param(compression, int, 0644);
++
++static int powerManagement = -1;
++module_param(powerManagement, int, 0644);
++
++static int gpuProfiler = 0;
++module_param(gpuProfiler, int, 0644);
++
++static int signal = 48;
++module_param(signal, int, 0644);
++
++static ulong baseAddress = 0;
++module_param(baseAddress, ulong, 0644);
++
++static ulong physSize = 0;
++module_param(physSize, ulong, 0644);
++
++static uint logFileSize = 0;
++module_param(logFileSize,uint, 0644);
++
++static uint recovery = 1;
++module_param(recovery, uint, 0644);
++MODULE_PARM_DESC(recovery, "Recover GPU from stuck (1: Enable, 0: Disable)");
++
++/* Middle needs about 40KB buffer, Maximal may need more than 200KB buffer. */
++static uint stuckDump = 1;
++module_param(stuckDump, uint, 0644);
++MODULE_PARM_DESC(stuckDump, "Level of stuck dump content (1: Minimal, 2: Middle, 3: Maximal)");
++
++static int showArgs = 0;
++module_param(showArgs, int, 0644);
++
++static int mmu = 1;
++module_param(mmu, int, 0644);
++
++static int gpu3DMinClock = 1;
++
++static int contiguousRequested = 0;
++
++static int drv_open(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static int drv_release(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ );
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ );
++
++static struct file_operations driver_fops =
++{
++ .owner = THIS_MODULE,
++ .open = drv_open,
++ .release = drv_release,
++ .unlocked_ioctl = drv_ioctl,
++#ifdef HAVE_COMPAT_IOCTL
++ .compat_ioctl = drv_ioctl,
++#endif
++ .mmap = drv_mmap,
++};
++
++void
++_UpdateModuleParam(
++ gcsMODULE_PARAMETERS *Param
++ )
++{
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++#else
++ irqLine = Param->irqLine ;
++ registerMemBase = Param->registerMemBase;
++ registerMemSize = Param->registerMemSize;
++#endif
++ irqLine2D = Param->irqLine2D ;
++ registerMemBase2D = Param->registerMemBase2D;
++ registerMemSize2D = Param->registerMemSize2D;
++ irqLineVG = Param->irqLineVG;
++ registerMemBaseVG = Param->registerMemBaseVG;
++ registerMemSizeVG = Param->registerMemSizeVG;
++ contiguousSize = Param->contiguousSize;
++ contiguousBase = Param->contiguousBase;
++ bankSize = Param->bankSize;
++ fastClear = Param->fastClear;
++ compression = Param->compression;
++ powerManagement = Param->powerManagement;
++ gpuProfiler = Param->gpuProfiler;
++ signal = Param->signal;
++ baseAddress = Param->baseAddress;
++ physSize = Param->physSize;
++ logFileSize = Param->logFileSize;
++ recovery = Param->recovery;
++ stuckDump = Param->stuckDump;
++ showArgs = Param->showArgs;
++ contiguousRequested = Param->contiguousRequested;
++ gpu3DMinClock = Param->gpu3DMinClock;
++}
++
++void
++gckOS_DumpParam(
++ void
++ )
++{
++ printk("Galcore options:\n");
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ printk(" irqLine3D0 = %d\n", irqLine3D0);
++ printk(" registerMemBase3D0 = 0x%08lX\n", registerMemBase3D0);
++ printk(" registerMemSize3D0 = 0x%08lX\n", registerMemSize3D0);
++
++ if (irqLine3D1 != -1)
++ {
++ printk(" irqLine3D1 = %d\n", irqLine3D1);
++ printk(" registerMemBase3D1 = 0x%08lX\n", registerMemBase3D1);
++ printk(" registerMemSize3D1 = 0x%08lX\n", registerMemSize3D1);
++ }
++#else
++ printk(" irqLine = %d\n", irqLine);
++ printk(" registerMemBase = 0x%08lX\n", registerMemBase);
++ printk(" registerMemSize = 0x%08lX\n", registerMemSize);
++#endif
++
++ if (irqLine2D != -1)
++ {
++ printk(" irqLine2D = %d\n", irqLine2D);
++ printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
++ printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
++ }
++
++ if (irqLineVG != -1)
++ {
++ printk(" irqLineVG = %d\n", irqLineVG);
++ printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
++ printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
++ }
++
++ printk(" contiguousSize = %ld\n", contiguousSize);
++ printk(" contiguousBase = 0x%08lX\n", contiguousBase);
++ printk(" bankSize = 0x%08lX\n", bankSize);
++ printk(" fastClear = %d\n", fastClear);
++ printk(" compression = %d\n", compression);
++ printk(" signal = %d\n", signal);
++ printk(" powerManagement = %d\n", powerManagement);
++ printk(" baseAddress = 0x%08lX\n", baseAddress);
++ printk(" physSize = 0x%08lX\n", physSize);
++ printk(" logFileSize = %d KB \n", logFileSize);
++ printk(" recovery = %d\n", recovery);
++ printk(" stuckDump = %d\n", stuckDump);
++ printk(" gpuProfiler = %d\n", gpuProfiler);
++}
++
++int drv_open(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gctBOOL attached = gcvFALSE;
++ gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL | __GFP_NOWARN);
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ data->device = galDevice;
++ data->mappedMemory = gcvNULL;
++ data->contiguousLogical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));
++
++    /* Attach the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
++ }
++ }
++ attached = gcvTRUE;
++
++ if (!galDevice->contiguousMapped)
++ {
++ if (galDevice->contiguousPhysical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_MapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ &data->contiguousLogical
++ ));
++ }
++ }
++
++ filp->private_data = data;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ if (data != gcvNULL)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical
++ ));
++ }
++
++ kfree(data);
++ }
++
++ if (attached)
++ {
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
++ }
++ }
++ }
++
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++int drv_release(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (!device->contiguousMapped)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_UnmapMemoryEx(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical,
++ data->pidOpen
++ ));
++
++ data->contiguousLogical = gcvNULL;
++ }
++ }
++
++ /* A process gets detached. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
++ }
++ }
++
++ kfree(data);
++ filp->private_data = NULL;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gctUINT32 copyLen;
++ DRIVER_ARGS drvArgs;
++ gckGALDEVICE device;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gctINT32 i, count;
++ gckVIDMEM_NODE nodeObject;
++
++ gcmkHEADER_ARG(
++ "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
++ filp, ioctlCode, arg
++ );
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
++ && (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown command %d\n",
++ __FUNCTION__, __LINE__,
++ ioctlCode
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get the drvArgs. */
++ copyLen = copy_from_user(
++ &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++            "%s(%d): error copying the input arguments.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Now bring in the gcsHAL_INTERFACE structure. */
++ if ((drvArgs.InputBufferSize != sizeof(gcsHAL_INTERFACE))
++ || (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++            "%s(%d): input and/or output structures are invalid.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ copyLen = copy_from_user(
++ &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++            "%s(%d): error copying the input HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (iface.command == gcvHAL_CHIP_INFO)
++ {
++ count = 0;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
++ &iface.u.ChipInfo.types[count]));
++ }
++ count++;
++ }
++ }
++
++ iface.u.ChipInfo.count = count;
++ iface.status = status = gcvSTATUS_OK;
++ }
++ else
++ {
++ if (iface.hardwareType > 7)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown hardwareType %d\n",
++ __FUNCTION__, __LINE__,
++ iface.hardwareType
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
++ {
++ status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ else
++#endif
++ {
++ status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ }
++
++ /* Redo system call after pending signal is handled. */
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkFOOTER();
++ return -ERESTARTSYS;
++ }
++
++ if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
++ {
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 processID;
++
++ gckOS_GetProcessID(&processID);
++
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(device->kernels[device->coreMapping[iface.hardwareType]],
++ processID,
++ (gctUINT32)iface.u.LockVideoMemory.node,
++ &nodeObject));
++ node = nodeObject->node;
++
++ /* Special case for mapped memory. */
++ if ((data->mappedMemory != gcvNULL)
++ && (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ )
++ {
++ /* Compute offset into mapped memory. */
++ gctUINT32 offset
++ = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
++ - (gctUINT8 *) device->contiguousBase;
++
++ /* Compute offset into user-mapped region. */
++ iface.u.LockVideoMemory.memory =
++ gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
++ }
++ }
++
++ /* Copy data back to the user. */
++ copyLen = copy_to_user(
++ gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++            "%s(%d): error copying the output HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if !gcdPAGED_MEMORY_CACHEABLE
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_flags |= gcdVM_FLAGS;
++#endif
++ vma->vm_pgoff = 0;
++
++ if (device->contiguousMapped)
++ {
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret = 0;
++
++ if (size > device->contiguousSize)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Invalid mapping size.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ ret = io_remap_pfn_range(
++ vma,
++ vma->vm_start,
++ device->requestedContiguousBase >> PAGE_SHIFT,
++ size,
++ vma->vm_page_prot
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): io_remap_pfn_range failed %d\n",
++ __FUNCTION__, __LINE__,
++ ret
++ );
++
++ data->mappedMemory = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ data->mappedMemory = (gctPOINTER) vma->vm_start;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++ }
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++
++#if !USE_PLATFORM_DRIVER
++static int __init drv_init(void)
++#else
++static int drv_init(void)
++#endif
++{
++ int ret;
++ int result = -EINVAL;
++ gceSTATUS status;
++ gckGALDEVICE device = gcvNULL;
++ struct class* device_class = gcvNULL;
++
++ gcsDEVICE_CONSTRUCT_ARGS args = {
++ .recovery = recovery,
++ .stuckDump = stuckDump,
++ .gpu3DMinClock = gpu3DMinClock,
++ .contiguousRequested = contiguousRequested,
++ .platform = &platform,
++ .mmu = mmu,
++ };
++
++ gcmkHEADER();
++
++ printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++
++#if !VIVANTE_PROFILER_PM
++    /* When the GPU profiler is enabled, GPU power management must be turned off. */
++ if (gpuProfiler)
++ {
++ powerManagement = 0;
++ }
++#endif
++
++ if (showArgs)
++ {
++ gckOS_DumpParam();
++ }
++
++ if (logFileSize != 0)
++ {
++ gckDEBUGFS_Initialize();
++ }
++
++ /* Create the GAL device. */
++ status = gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ irqLine3D0,
++ registerMemBase3D0, registerMemSize3D0,
++ irqLine3D1,
++ registerMemBase3D1, registerMemSize3D1,
++#else
++ irqLine,
++ registerMemBase, registerMemSize,
++#endif
++ irqLine2D,
++ registerMemBase2D, registerMemSize2D,
++ irqLineVG,
++ registerMemBaseVG, registerMemSizeVG,
++ contiguousBase, contiguousSize,
++ bankSize, fastClear, compression, baseAddress, physSize, signal,
++ logFileSize,
++ powerManagement,
++ gpuProfiler,
++ &args,
++ &device
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the GAL device: status=%d\n",
++ __FUNCTION__, __LINE__, status);
++
++ goto OnError;
++ }
++
++ /* Start the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Start(device));
++
++ if ((physSize != 0)
++ && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
++ {
++ /* Reset the base address */
++ device->baseAddress = 0;
++ }
++
++ /* Register the character device. */
++ ret = register_chrdev(major, DEVICE_NAME, &driver_fops);
++
++ if (ret < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not allocate major number for mmap.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (major == 0)
++ {
++ major = ret;
++ }
++
++ /* Create the device class. */
++ device_class = class_create(THIS_MODULE, "graphics_class");
++
++ if (IS_ERR(device_class))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the class.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++ device_create(device_class, NULL, MKDEV(major, 0), NULL, DEVICE_NAME);
++#else
++ device_create(device_class, NULL, MKDEV(major, 0), DEVICE_NAME);
++#endif
++
++ galDevice = device;
++ gpuClass = device_class;
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine3D0=%d, contiguousSize=%lu, memBase3D0=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine3D0, contiguousSize, registerMemBase3D0
++ );
++#else
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine, contiguousSize, registerMemBase
++ );
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ /* Roll back. */
++ if (device_class != gcvNULL)
++ {
++ device_destroy(device_class, MKDEV(major, 0));
++ class_destroy(device_class);
++ }
++
++ if (device != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ }
++
++ gcmkFOOTER();
++ return result;
++}
++
++#if !USE_PLATFORM_DRIVER
++static void __exit drv_exit(void)
++#else
++static void drv_exit(void)
++#endif
++{
++ gcmkHEADER();
++
++ gcmkASSERT(gpuClass != gcvNULL);
++ device_destroy(gpuClass, MKDEV(major, 0));
++ class_destroy(gpuClass);
++
++ unregister_chrdev(major, DEVICE_NAME);
++
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));
++
++ if(gckDEBUGFS_IsEnabled())
++ {
++ gckDEBUGFS_Terminate();
++ }
++
++ gcmkFOOTER_NO();
++}
++
++#if !USE_PLATFORM_DRIVER
++ module_init(drv_init);
++ module_exit(drv_exit);
++#else
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_probe(struct platform_device *pdev)
++#else
++static int __devinit gpu_probe(struct platform_device *pdev)
++#endif
++{
++ int ret = -ENODEV;
++ gcsMODULE_PARAMETERS moduleParam = {
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++#else
++ .irqLine = irqLine,
++ .registerMemBase = registerMemBase,
++ .registerMemSize = registerMemSize,
++#endif
++ .irqLine2D = irqLine2D,
++ .registerMemBase2D = registerMemBase2D,
++ .registerMemSize2D = registerMemSize2D,
++ .irqLineVG = irqLineVG,
++ .registerMemBaseVG = registerMemBaseVG,
++ .registerMemSizeVG = registerMemSizeVG,
++ .contiguousSize = contiguousSize,
++ .contiguousBase = contiguousBase,
++ .bankSize = bankSize,
++ .fastClear = fastClear,
++ .compression = compression,
++ .powerManagement = powerManagement,
++ .gpuProfiler = gpuProfiler,
++ .signal = signal,
++ .baseAddress = baseAddress,
++ .physSize = physSize,
++ .logFileSize = logFileSize,
++ .recovery = recovery,
++ .stuckDump = stuckDump,
++ .showArgs = showArgs,
++ .gpu3DMinClock = gpu3DMinClock,
++ };
++
++ gcmkHEADER();
++
++ platform.device = pdev;
++
++ if (platform.ops->getPower)
++ {
++ if (gcmIS_ERROR(platform.ops->getPower(&platform)))
++ {
++ gcmkFOOTER_NO();
++ return ret;
++ }
++ }
++
++ if (platform.ops->adjustParam)
++ {
++ /* Override default module param. */
++ platform.ops->adjustParam(&platform, &moduleParam);
++
++        /* Update the module params because drv_init() uses them directly. */
++ _UpdateModuleParam(&moduleParam);
++ }
++
++ ret = drv_init();
++
++ if (!ret)
++ {
++ platform_set_drvdata(pdev, galDevice);
++
++ gcmkFOOTER_NO();
++ return ret;
++ }
++
++ gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
++ return ret;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_remove(struct platform_device *pdev)
++#else
++static int __devexit gpu_remove(struct platform_device *pdev)
++#endif
++{
++ gcmkHEADER();
++
++ drv_exit();
++
++ if (platform.ops->putPower)
++ {
++ platform.ops->putPower(&platform);
++ }
++
++ gcmkFOOTER_NO();
++ return 0;
++}
++
++static int gpu_suspend(struct platform_device *dev, pm_message_t state)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++
++ device = platform_get_drvdata(dev);
++
++ if (!device)
++ {
++ return -1;
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++ /* Store states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_QueryPowerManagementState(device->kernels[i]->vg->hardware, &device->statesStored[i]);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_OFF);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ }
++ }
++
++ return 0;
++}
++
++static int gpu_resume(struct platform_device *dev)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++ gceCHIPPOWERSTATE statesStored;
++
++ device = platform_get_drvdata(dev);
++
++ if (!device)
++ {
++ return -1;
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_ON);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++            /* Convert the global state to the corresponding internal state. */
++ switch(device->statesStored[i])
++ {
++ case gcvPOWER_OFF:
++ statesStored = gcvPOWER_OFF_BROADCAST;
++ break;
++ case gcvPOWER_IDLE:
++ statesStored = gcvPOWER_IDLE_BROADCAST;
++ break;
++ case gcvPOWER_SUSPEND:
++ statesStored = gcvPOWER_SUSPEND_BROADCAST;
++ break;
++ case gcvPOWER_ON:
++ statesStored = gcvPOWER_ON_AUTO;
++ break;
++ default:
++ statesStored = device->statesStored[i];
++ break;
++ }
++
++ /* Restore states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, statesStored);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
++#ifdef CONFIG_PM_SLEEP
++static int gpu_system_suspend(struct device *dev)
++{
++ pm_message_t state={0};
++ return gpu_suspend(to_platform_device(dev), state);
++}
++
++static int gpu_system_resume(struct device *dev)
++{
++ return gpu_resume(to_platform_device(dev));
++}
++#endif
++
++static const struct dev_pm_ops gpu_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
++};
++#endif
++
++static struct platform_driver gpu_driver = {
++ .probe = gpu_probe,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ .remove = gpu_remove,
++#else
++ .remove = __devexit_p(gpu_remove),
++#endif
++
++ .suspend = gpu_suspend,
++ .resume = gpu_resume,
++
++ .driver = {
++ .name = DEVICE_NAME,
++#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
++ .pm = &gpu_pm_ops,
++#endif
++ }
++};
++
++static int __init gpu_init(void)
++{
++ int ret = 0;
++
++ memset(&platform, 0, sizeof(gcsPLATFORM));
++
++ gckPLATFORM_QueryOperations(&platform.ops);
++
++ if (platform.ops == gcvNULL)
++ {
++ printk(KERN_ERR "galcore: No platform specific operations.\n");
++ ret = -ENODEV;
++ goto out;
++ }
++
++ if (platform.ops->allocPriv)
++ {
++ /* Allocate platform private data. */
++ if (gcmIS_ERROR(platform.ops->allocPriv(&platform)))
++ {
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++
++ if (platform.ops->needAddDevice
++ && platform.ops->needAddDevice(&platform))
++ {
++ /* Allocate device */
++ platform.device = platform_device_alloc(DEVICE_NAME, -1);
++ if (!platform.device)
++ {
++ printk(KERN_ERR "galcore: platform_device_alloc failed.\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* Add device */
++ ret = platform_device_add(platform.device);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add failed.\n");
++ goto put_dev;
++ }
++ }
++
++ platform.driver = &gpu_driver;
++
++ if (platform.ops->adjustDriver)
++ {
++ /* Override default platform_driver struct. */
++ platform.ops->adjustDriver(&platform);
++ }
++
++ ret = platform_driver_register(&gpu_driver);
++ if (!ret)
++ {
++ goto out;
++ }
++
++ platform_device_del(platform.device);
++put_dev:
++ platform_device_put(platform.device);
++
++out:
++ return ret;
++}
++
++static void __exit gpu_exit(void)
++{
++ platform_driver_unregister(&gpu_driver);
++
++ if (platform.ops->needAddDevice
++ && platform.ops->needAddDevice(&platform))
++ {
++ platform_device_unregister(platform.device);
++ }
++
++ if (platform.priv)
++ {
++ /* Free platform private data. */
++ platform.ops->freePriv(&platform);
++ }
++}
++
++module_init(gpu_init);
++module_exit(gpu_exit);
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_security_channel.c 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,385 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/slab.h>
++
++#include "tee_client_api.h"
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++#define GPU3D_UUID { 0xcc9f80ea, 0xa836, 0x11e3, { 0x9b, 0x07, 0x78, 0x2b, 0xcb, 0x5c, 0xf3, 0xe3 } }
++
++static const TEEC_UUID gpu3d_uuid = GPU3D_UUID;
++TEEC_Context teecContext;
++
++typedef struct _gcsSecurityChannel {
++ gckOS os;
++ TEEC_Session session;
++ int * virtual;
++ TEEC_SharedMemory inputBuffer;
++ gctUINT32 bytes;
++ gctPOINTER mutex;
++} gcsSecurityChannel;
++
++TEEC_SharedMemory *
++gpu3d_allocate_secure_mem(
++ gckOS Os,
++ unsigned int size
++ )
++{
++ TEEC_Result result;
++ TEEC_Context *context = &teecContext;
++ TEEC_SharedMemory *shm = NULL;
++ void *handle = NULL;
++ unsigned int phyAddr = 0xFFFFFFFF;
++ gceSTATUS status;
++ gctSIZE_T bytes = size;
++
++ shm = kmalloc(sizeof(TEEC_SharedMemory), GFP_KERNEL);
++
++ if (NULL == shm)
++ {
++ return NULL;
++ }
++
++ memset(shm, 0, sizeof(TEEC_SharedMemory));
++
++ status = gckOS_AllocatePagedMemoryEx(
++ Os,
++ gcvALLOC_FLAG_SECURITY,
++ bytes,
++ gcvNULL,
++ (gctPHYS_ADDR *)&handle);
++
++ if (gcmIS_ERROR(status))
++ {
++ kfree(shm);
++ return NULL;
++ }
++
++ status = gckOS_PhysicalToPhysicalAddress(
++ Os,
++ handle,
++ &phyAddr);
++
++ if (gcmIS_ERROR(status))
++ {
++ kfree(shm);
++ return NULL;
++ }
++
++    /* Record the handle in shm->userdata. */
++ shm->userdata = handle;
++
++ /* [b] Bulk input buffer. */
++ shm->size = size;
++ shm->flags = TEEC_MEM_INPUT;
++
++ /* Use TEE Client API to register the underlying memory buffer. */
++ shm->phyAddr = (void *)phyAddr;
++
++ result = TEEC_RegisterSharedMemory(
++ context,
++ shm);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size);
++ kfree(shm);
++ return NULL;
++ }
++
++ return shm;
++}
++
++void gpu3d_release_secure_mem(
++ gckOS Os,
++ void *shm_handle
++ )
++{
++ TEEC_SharedMemory *shm = shm_handle;
++ void * handle;
++
++ if (!shm)
++ {
++ return;
++ }
++
++ handle = shm->userdata;
++
++ TEEC_ReleaseSharedMemory(shm);
++ gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size);
++
++ kfree(shm);
++
++ return;
++}
++
++static TEEC_Result gpu3d_session_callback(
++ TEEC_Session* session,
++ uint32_t commandID,
++ TEEC_Operation* operation,
++ void* userdata
++ )
++{
++ gcsSecurityChannel *channel = userdata;
++
++ if (channel == gcvNULL)
++ {
++ return TEEC_ERROR_BAD_PARAMETERS;
++ }
++
++ switch(commandID)
++ {
++ case gcvTA_CALLBACK_ALLOC_SECURE_MEM:
++ {
++ uint32_t size = operation->params[0].value.a;
++ TEEC_SharedMemory *shm = NULL;
++
++            shm = gpu3d_allocate_secure_mem(channel->os, size);
++
++            if (shm == NULL)
++            {
++                return TEEC_ERROR_OUT_OF_MEMORY;
++            }
++
++            /* Use the value fields to pass the buffer back to the client side. */
++            operation->params[0].value.a = (uint32_t)shm;
++            operation->params[0].value.b = (uint32_t)shm->phyAddr;
++
++ break;
++ }
++ case gcvTA_CALLBACK_FREE_SECURE_MEM:
++ {
++ TEEC_SharedMemory *shm = (TEEC_SharedMemory *)operation->params[0].value.a;
++
++ gpu3d_release_secure_mem(channel->os, shm);
++ break;
++ }
++ default:
++ break;
++ }
++
++ return TEEC_SUCCESS;
++}
++
++gceSTATUS
++gckOS_OpenSecurityChannel(
++ IN gckOS Os,
++ IN gceCORE GPU,
++ OUT gctUINT32 *Channel
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ static bool initialized = gcvFALSE;
++ gcsSecurityChannel *channel = gcvNULL;
++
++ TEEC_Operation operation = {0};
++
++ /* Connect to TEE. */
++ if (initialized == gcvFALSE)
++ {
++ result = TEEC_InitializeContext(NULL, &teecContext);
++
++ if (result != TEEC_SUCCESS) {
++ gcmkONERROR(gcvSTATUS_CHIP_NOT_READY);
++ }
++
++ initialized = gcvTRUE;
++ }
++
++ /* Construct channel. */
++ gcmkONERROR(
++ gckOS_Allocate(Os, gcmSIZEOF(*channel), (gctPOINTER *)&channel));
++
++ gckOS_ZeroMemory(channel, gcmSIZEOF(gcsSecurityChannel));
++
++ channel->os = Os;
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &channel->mutex));
++
++ /* Allocate shared memory for passing gcTA_INTERFACE. */
++ channel->bytes = gcmSIZEOF(gcsTA_INTERFACE);
++ channel->virtual = kmalloc(channel->bytes, GFP_KERNEL | __GFP_NOWARN);
++
++ if (!channel->virtual)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ channel->inputBuffer.size = channel->bytes;
++ channel->inputBuffer.flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT;
++ channel->inputBuffer.phyAddr = (void *)virt_to_phys(channel->virtual);
++
++ result = TEEC_RegisterSharedMemory(&teecContext, &channel->inputBuffer);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_VALUE_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ operation.params[0].value.a = GPU;
++
++ /* Open session with TEE application. */
++ result = TEEC_OpenSession(
++ &teecContext,
++ &channel->session,
++ &gpu3d_uuid,
++ TEEC_LOGIN_USER,
++ NULL,
++ &operation,
++ NULL);
++
++ /* Prepare callback. */
++ TEEC_RegisterCallback(&channel->session, gpu3d_session_callback, channel);
++
++ *Channel = (gctUINT32)channel;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (channel)
++ {
++        if (channel->virtual)
++        {
++            kfree(channel->virtual);
++        }
++
++ if (channel->mutex)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, channel->mutex));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, channel));
++ }
++
++ return status;
++}
++
++gceSTATUS
++gckOS_CloseSecurityChannel(
++ IN gctUINT32 Channel
++ )
++{
++ /* TODO . */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CallSecurityService(
++ IN gctUINT32 Channel,
++ IN gcsTA_INTERFACE *Interface
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel;
++ TEEC_Operation operation = {0};
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(Channel != 0);
++
++ gckOS_AcquireMutex(channel->os, channel->mutex, gcvINFINITE);
++
++ gckOS_MemCopy(channel->virtual, Interface, channel->bytes);
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_MEMREF_PARTIAL_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ /* Note: we use the updated size in the MemRef output by the encryption. */
++ operation.params[0].memref.parent = &channel->inputBuffer;
++ operation.params[0].memref.offset = 0;
++ operation.params[0].memref.size = sizeof(gcsTA_INTERFACE);
++ operation.started = true;
++
++ /* Start the commit command within the TEE application. */
++ result = TEEC_InvokeCommand(
++ &channel->session,
++ gcvTA_COMMAND_DISPATCH,
++ &operation,
++ NULL);
++
++ gckOS_MemCopy(Interface, channel->virtual, channel->bytes);
++
++ gckOS_ReleaseMutex(channel->os, channel->mutex);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_InitSecurityChannel(
++ IN gctUINT32 Channel
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel;
++ TEEC_Operation operation = {0};
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(Channel != 0);
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_MEMREF_PARTIAL_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ /* Note: we use the updated size in the MemRef output by the encryption. */
++ operation.params[0].memref.parent = &channel->inputBuffer;
++ operation.params[0].memref.offset = 0;
++ operation.params[0].memref.size = gcmSIZEOF(gcsTA_INTERFACE);
++ operation.started = true;
++
++ /* Start the commit command within the TEE application. */
++ result = TEEC_InvokeCommand(
++ &channel->session,
++ gcvTA_COMMAND_INIT,
++ &operation,
++ NULL);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
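
Putting the three channel entry points above together, the expected call order is open, init, then dispatch. A sketch follows; error handling is trimmed and the wrapper name is invented for illustration.

    /* Illustrative only: one channel per core, opened and initialized once,
     * then reused for every secure dispatch. */
    static gceSTATUS
    example_secure_dispatch(gckOS Os, gcsTA_INTERFACE *Interface)
    {
        gceSTATUS status;
        gctUINT32 channel;

        gcmkONERROR(gckOS_OpenSecurityChannel(Os, gcvCORE_MAJOR, &channel));
        gcmkONERROR(gckOS_InitSecurityChannel(channel));

        /* Marshals *Interface through the registered shared memory and
         * invokes gcvTA_COMMAND_DISPATCH in the TEE application. */
        gcmkONERROR(gckOS_CallSecurityService(channel, Interface));

        return gcvSTATUS_OK;

    OnError:
        return status;
    }
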
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.c 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,177 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++#include <linux/kernel.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/uaccess.h>
++
++#include "gc_hal_kernel_sync.h"
++
++static struct sync_pt *
++viv_sync_pt_dup(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt *pt;
++ struct viv_sync_pt *src;
++ struct viv_sync_timeline *obj;
++
++ src = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ /* Create the new sync_pt. */
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = src->stamp;
++ pt->sync = src->sync;
++
++ /* Reference sync point. */
++ status = gckOS_ReferenceSyncPoint(obj->os, pt->sync);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *)pt;
++}
++
++static int
++viv_sync_pt_has_signaled(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ gctBOOL state;
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *)sync_pt;
++ obj = (struct viv_sync_timeline *)sync_pt->parent;
++
++ status = gckOS_QuerySyncPoint(obj->os, pt->sync, &state);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error. */
++ return -1;
++ }
++
++ return state;
++}
++
++static int
++viv_sync_pt_compare(
++ struct sync_pt * a,
++ struct sync_pt * b
++ )
++{
++ int ret;
++ struct viv_sync_pt * pt1 = (struct viv_sync_pt *) a;
++ struct viv_sync_pt * pt2 = (struct viv_sync_pt *) b;
++
++ ret = (pt1->stamp < pt2->stamp) ? -1
++ : (pt1->stamp == pt2->stamp) ? 0
++ : 1;
++
++ return ret;
++}
++
++static void
++viv_sync_pt_free(
++ struct sync_pt * sync_pt
++ )
++{
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ gckOS_DestroySyncPoint(obj->os, pt->sync);
++}
++
++static struct sync_timeline_ops viv_timeline_ops =
++{
++ .driver_name = "viv_sync",
++ .dup = viv_sync_pt_dup,
++ .has_signaled = viv_sync_pt_has_signaled,
++ .compare = viv_sync_pt_compare,
++ .free_pt = viv_sync_pt_free,
++};
++
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * name,
++ gckOS os
++ )
++{
++ struct viv_sync_timeline * obj;
++
++ obj = (struct viv_sync_timeline *)
++ sync_timeline_create(&viv_timeline_ops, sizeof(struct viv_sync_timeline), name);
++
++ obj->os = os;
++ obj->stamp = 0;
++
++ return obj;
++}
++
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * obj,
++ gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt * pt;
++
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = obj->stamp++;
++ pt->sync = SyncPoint;
++
++    /* Take a reference on the sync point. */
++ status = gckOS_ReferenceSyncPoint(obj->os, SyncPoint);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *) pt;
++}
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/gc_hal_kernel_sync.h 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,72 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_sync_h_
++#define __gc_hal_kernel_sync_h_
++
++#include <linux/types.h>
++
++/* sync.h is in drivers/staging/android/ for now. */
++#include <sync.h>
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++struct viv_sync_timeline
++{
++ /* Parent object. */
++ struct sync_timeline obj;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++
++ /* Pointer to os struct. */
++ gckOS os;
++};
++
++
++struct viv_sync_pt
++{
++ /* Parent object. */
++ struct sync_pt pt;
++
++    /* Reference sync point. */
++ gctSYNC_POINT sync;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++};
++
++/* Create viv_sync_timeline object. */
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * Name,
++ gckOS Os
++ );
++
++/* Create viv_sync_pt object. */
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * Obj,
++ gctSYNC_POINT SyncPoint
++ );
++
++#endif /* __gc_hal_kernel_sync_h_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,880 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_platform.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_driver.h"
++#include <linux/slab.h>
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#include <mach/viv_gpu.h>
++#else
++#include <linux/pm_runtime.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <mach/busfreq.h>
++#else
++#include <linux/busfreq-imx6.h>
++#include <linux/reset.h>
++#endif
++#endif
++
++#include <linux/clk.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/pm_runtime.h>
++
++#include <linux/regulator/consumer.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/device_cooling.h>
++#define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a);
++#else
++extern int register_thermal_notifier(struct notifier_block *nb);
++extern int unregister_thermal_notifier(struct notifier_block *nb);
++#define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a);
++#endif
++
++static int initgpu3DMinClock = 1;
++module_param(initgpu3DMinClock, int, 0644);
++
++struct platform_device *pdevice;
++
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++# include <linux/kernel.h>
++# include <linux/mm.h>
++# include <linux/oom.h>
++# include <linux/sched.h>
++
++struct task_struct *lowmem_deathpending;
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data);
++
++static struct notifier_block task_nb = {
++ .notifier_call = task_notify_func,
++};
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct task_struct *task = data;
++
++ if (task == lowmem_deathpending)
++ lowmem_deathpending = NULL;
++
++ return NOTIFY_OK;
++}
++
++extern struct task_struct *lowmem_deathpending;
++static unsigned long lowmem_deathpending_timeout;
++
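++/*
++ * Note (added for clarity): loosely modelled on the Android low-memory
++ * killer. Walk all processes, account their Vivante video/contiguous
++ * allocations via gckKERNEL_QueryProcessDB(), and SIGKILL the task with
++ * the highest oom_score_adj (ties broken by the largest GPU footprint).
++ */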
++static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
++{
++ struct task_struct *p;
++ struct task_struct *selected = NULL;
++ int tasksize;
++ int ret = -1;
++ int min_adj = 0;
++ int selected_tasksize = 0;
++ int selected_oom_adj;
++ /*
++ * If we already have a death outstanding, then
++ * bail out right away; indicating to vmscan
++ * that we have nothing further to offer on
++ * this pass.
++ *
++ */
++ if (lowmem_deathpending &&
++ time_before_eq(jiffies, lowmem_deathpending_timeout))
++ return 0;
++ selected_oom_adj = min_adj;
++
++ rcu_read_lock();
++ for_each_process(p) {
++ struct mm_struct *mm;
++ struct signal_struct *sig;
++ gcuDATABASE_INFO info;
++ int oom_adj;
++
++ task_lock(p);
++ mm = p->mm;
++ sig = p->signal;
++ if (!mm || !sig) {
++ task_unlock(p);
++ continue;
++ }
++ oom_adj = sig->oom_score_adj;
++ if (oom_adj < min_adj) {
++ task_unlock(p);
++ continue;
++ }
++
++ tasksize = 0;
++ task_unlock(p);
++ rcu_read_unlock();
++
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++
++ rcu_read_lock();
++
++ if (tasksize <= 0)
++ continue;
++
++ gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);
++
++ if (selected) {
++ if (oom_adj < selected_oom_adj)
++ continue;
++ if (oom_adj == selected_oom_adj &&
++ tasksize <= selected_tasksize)
++ continue;
++ }
++ selected = p;
++ selected_tasksize = tasksize;
++ selected_oom_adj = oom_adj;
++ }
++ if (selected) {
++ gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
++ selected->pid, selected->comm,
++ selected_oom_adj, selected_tasksize);
++ lowmem_deathpending = selected;
++ lowmem_deathpending_timeout = jiffies + HZ;
++ force_sig(SIGKILL, selected);
++ ret = 0;
++ }
++ rcu_read_unlock();
++ return ret;
++}
++
++
++gceSTATUS
++_ShrinkMemory(
++ IN gckPLATFORM Platform
++ )
++{
++ struct platform_device *pdev;
++ gckGALDEVICE galDevice;
++ gckKERNEL kernel;
++
++ pdev = Platform->device;
++
++ galDevice = platform_get_drvdata(pdev);
++
++ kernel = galDevice->kernels[gcvCORE_MAJOR];
++
++ if (kernel != gcvNULL)
++ {
++ force_contiguous_lowmem_shrink(kernel);
++ }
++ else
++ {
++ gcmkPRINT("%s(%d) can't find kernel! ", __FUNCTION__, __LINE__);
++ }
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ static gctUINT orgFscale, minFscale, maxFscale;
++ static gctBOOL bAlreadyTooHot = gcvFALSE;
++ gckHARDWARE hardware;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if (!galDevice)
++ {
++ /* GPU is not ready, so it is meaningless to change GPU freq. */
++ return NOTIFY_OK;
++ }
++
++ if (!galDevice->kernels[gcvCORE_MAJOR])
++ {
++ return NOTIFY_OK;
++ }
++
++ hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware;
++
++ if (!hardware)
++ {
++ return NOTIFY_OK;
++ }
++
++ if (event && !bAlreadyTooHot) {
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, minFscale);
++ bAlreadyTooHot = gcvTRUE;
++ gckOS_Print("System is too hot. GPU3D will work at %d/64 clock.\n", minFscale);
++ } else if (!event && bAlreadyTooHot) {
++ gckHARDWARE_SetFscaleValue(hardware, orgFscale);
++ gckOS_Print("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale);
++ bAlreadyTooHot = gcvFALSE;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block thermal_hot_pm_notifier = {
++ .notifier_call = thermal_hot_pm_notify,
++ };
++
++static ssize_t show_gpu3DMinClock(struct device_driver *dev, char *buf)
++{
++    gctUINT currentf = 0, minf = 0, maxf = 0;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ {
++ gckHARDWARE_GetFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,
++ &currentf, &minf, &maxf);
++ }
++ snprintf(buf, PAGE_SIZE, "%d\n", minf);
++ return strlen(buf);
++}
++
++static ssize_t update_gpu3DMinClock(struct device_driver *dev, const char *buf, size_t count)
++{
++
++ gctINT fields;
++ gctUINT MinFscaleValue;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ {
++ fields = sscanf(buf, "%d", &MinFscaleValue);
++ if (fields < 1)
++ return -EINVAL;
++
++ gckHARDWARE_SetMinFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,MinFscaleValue);
++ }
++
++ return count;
++}
++
++static DRIVER_ATTR(gpu3DMinClock, S_IRUGO | S_IWUSR, show_gpu3DMinClock, update_gpu3DMinClock);
++#endif
++
++
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static const struct of_device_id mxs_gpu_dt_ids[] = {
++ { .compatible = "fsl,imx6q-gpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++struct contiguous_mem_pool {
++ struct dma_attrs attrs;
++ dma_addr_t phys;
++ void *virt;
++ size_t size;
++};
++#endif
++
++struct imx_priv {
++ /* Clock management.*/
++ struct clk *clk_3d_core;
++ struct clk *clk_3d_shader;
++ struct clk *clk_3d_axi;
++ struct clk *clk_2d_core;
++ struct clk *clk_2d_axi;
++ struct clk *clk_vg_axi;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ /*Power management.*/
++ struct regulator *gpu_regulator;
++#endif
++#endif
++ /*Run time pm*/
++ struct device *pmdev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc[gcdMAX_GPU_COUNT];
++#endif
++};
++
++static struct imx_priv imxPriv;
++
++gceSTATUS
++gckPLATFORM_AdjustParam(
++ IN gckPLATFORM Platform,
++ OUT gcsMODULE_PARAMETERS *Args
++ )
++{
++ struct resource* res;
++ struct platform_device* pdev = Platform->device;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct device_node *dn =pdev->dev.of_node;
++ const u32 *prop;
++#else
++ struct viv_gpu_platform_data *pdata;
++#endif
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
++ if (res)
++ Args->baseAddress = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
++ if (res)
++ Args->irqLine = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
++ if (res)
++ {
++ Args->registerMemBase = res->start;
++ Args->registerMemSize = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
++ if (res)
++ Args->irqLine2D = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
++ if (res)
++ {
++ Args->registerMemBase2D = res->start;
++ Args->registerMemSize2D = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
++ if (res)
++ Args->irqLineVG = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
++ if (res)
++ {
++ Args->registerMemBaseVG = res->start;
++ Args->registerMemSizeVG = res->end - res->start + 1;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ Args->contiguousBase = 0;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ prop = of_get_property(dn, "contiguousbase", NULL);
++ if(prop)
++ Args->contiguousBase = *prop;
++ of_property_read_u32(dn,"contiguoussize", (u32 *)&contiguousSize);
++#else
++ pdata = pdev->dev.platform_data;
++ if (pdata) {
++ Args->contiguousBase = pdata->reserved_mem_base;
++ Args->contiguousSize = pdata->reserved_mem_size;
++ }
++#endif
++ if (Args->contiguousSize == 0)
++        gckOS_Print("Warning: No contiguous memory is reserved for gpu!\n");
++
++ Args->gpu3DMinClock = initgpu3DMinClock;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_AllocPriv(
++ IN gckPLATFORM Platform
++ )
++{
++ Platform->priv = &imxPriv;
++
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ task_free_register(&task_nb);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_FreePriv(
++ IN gckPLATFORM Platform
++ )
++{
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ task_free_unregister(&task_nb);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_GetPower(
++ IN gckPLATFORM Platform
++ )
++{
++ struct device* pdev = &Platform->device->dev;
++ struct imx_priv *priv = Platform->priv;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct reset_control *rstc;
++#endif
++
++#ifdef CONFIG_PM
++ /*Init runtime pm for gpu*/
++ pm_runtime_enable(pdev);
++ priv->pmdev = pdev;
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ rstc = devm_reset_control_get(pdev, "gpu3d");
++ priv->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
++ rstc = devm_reset_control_get(pdev, "gpu2d");
++ priv->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
++ rstc = devm_reset_control_get(pdev, "gpuvg");
++ priv->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ /*get gpu regulator*/
++ priv->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ priv->gpu_regulator = devm_regulator_get(pdev, "pu");
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if (IS_ERR(priv->gpu_regulator)) {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to get gpu regulator \n",
++ __FUNCTION__, __LINE__);
++ return gcvSTATUS_NOT_FOUND;
++ }
++#endif
++#endif
++
++ /*Initialize the clock structure*/
++ priv->clk_3d_core = clk_get(pdev, "gpu3d_clk");
++ if (!IS_ERR(priv->clk_3d_core)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (cpu_is_mx6q()) {
++ priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(priv->clk_3d_shader)) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ priv->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++ }
++#else
++ priv->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
++ priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(priv->clk_3d_shader)) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ priv->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++#endif
++ } else {
++ priv->clk_3d_core = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
++ }
++
++ priv->clk_2d_core = clk_get(pdev, "gpu2d_clk");
++ if (IS_ERR(priv->clk_2d_core)) {
++ priv->clk_2d_core = NULL;
++ gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
++ } else {
++ priv->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
++ if (IS_ERR(priv->clk_2d_axi)) {
++ priv->clk_2d_axi = NULL;
++ gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
++ }
++
++ priv->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
++ if (IS_ERR(priv->clk_vg_axi)) {
++ priv->clk_vg_axi = NULL;
++ gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
++ }
++ }
++
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ pdevice = Platform->device;
++ REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++ {
++ int ret = 0;
++ ret = driver_create_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
++ if(ret)
++ dev_err(&pdevice->dev, "create gpu3DMinClock attr failed (%d)\n", ret);
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_PutPower(
++ IN gckPLATFORM Platform
++ )
++{
++ struct imx_priv *priv = Platform->priv;
++
++ /*Disable clock*/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (priv->clk_3d_axi) {
++ clk_put(priv->clk_3d_axi);
++ priv->clk_3d_axi = NULL;
++ }
++#endif
++ if (priv->clk_3d_core) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ }
++ if (priv->clk_3d_shader) {
++ clk_put(priv->clk_3d_shader);
++ priv->clk_3d_shader = NULL;
++ }
++ if (priv->clk_2d_core) {
++ clk_put(priv->clk_2d_core);
++ priv->clk_2d_core = NULL;
++ }
++ if (priv->clk_2d_axi) {
++ clk_put(priv->clk_2d_axi);
++ priv->clk_2d_axi = NULL;
++ }
++ if (priv->clk_vg_axi) {
++ clk_put(priv->clk_vg_axi);
++ priv->clk_vg_axi = NULL;
++ }
++
++#ifdef CONFIG_PM
++ if(priv->pmdev)
++ pm_runtime_disable(priv->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (priv->gpu_regulator) {
++ regulator_put(priv->gpu_regulator);
++ priv->gpu_regulator = NULL;
++ }
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++
++ driver_remove_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_SetPower(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ )
++{
++ struct imx_priv* priv = Platform->priv;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ int ret;
++#endif
++#endif
++
++ if (Enable)
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(priv->gpu_regulator)) {
++ ret = regulator_enable(priv->gpu_regulator);
++ if (ret != 0)
++ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
++ __FUNCTION__, __LINE__, ret);
++ }
++#else
++ imx_gpc_power_up_pu(true);
++#endif
++#endif
++
++#ifdef CONFIG_PM
++ pm_runtime_get_sync(priv->pmdev);
++#endif
++ }
++ else
++ {
++#ifdef CONFIG_PM
++ pm_runtime_put_sync(priv->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(priv->gpu_regulator))
++ regulator_disable(priv->gpu_regulator);
++#else
++ imx_gpc_power_up_pu(false);
++#endif
++#endif
++
++ }
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_SetClock(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ )
++{
++ struct imx_priv* priv = Platform->priv;
++ struct clk *clk_3dcore = priv->clk_3d_core;
++ struct clk *clk_3dshader = priv->clk_3d_shader;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct clk *clk_3d_axi = priv->clk_3d_axi;
++#endif
++ struct clk *clk_2dcore = priv->clk_2d_core;
++ struct clk *clk_2d_axi = priv->clk_2d_axi;
++ struct clk *clk_vg_axi = priv->clk_vg_axi;
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Enable) {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_enable(clk_3dcore);
++ if (cpu_is_mx6q())
++ clk_enable(clk_3dshader);
++ break;
++ case gcvCORE_2D:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ } else {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ if (cpu_is_mx6q())
++ clk_disable(clk_3dshader);
++ clk_disable(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++#else
++ if (Enable) {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_prepare(clk_3dcore);
++ clk_enable(clk_3dcore);
++ clk_prepare(clk_3dshader);
++ clk_enable(clk_3dshader);
++ clk_prepare(clk_3d_axi);
++ clk_enable(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_prepare(clk_2dcore);
++ clk_enable(clk_2dcore);
++ clk_prepare(clk_2d_axi);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_prepare(clk_2dcore);
++ clk_enable(clk_2dcore);
++ clk_prepare(clk_vg_axi);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ } else {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_disable(clk_3dshader);
++ clk_unprepare(clk_3dshader);
++ clk_disable(clk_3dcore);
++ clk_unprepare(clk_3dcore);
++ clk_disable(clk_3d_axi);
++ clk_unprepare(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_unprepare(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ clk_unprepare(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_unprepare(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ clk_unprepare(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++#ifdef CONFIG_PM
++static int gpu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int gpu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static struct dev_pm_ops gpu_pm_ops;
++#endif
++#endif
++
++gceSTATUS
++_AdjustDriver(
++ IN gckPLATFORM Platform
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct platform_driver * driver = Platform->driver;
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ driver->driver.of_match_table = mxs_gpu_dt_ids;
++#endif
++
++ /* Override PM callbacks to add runtime PM callbacks. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ /* Fill local structure with original value. */
++ memcpy(&gpu_pm_ops, driver->driver.pm, sizeof(struct dev_pm_ops));
++
++ /* Add runtime PM callback. */
++#ifdef CONFIG_PM_RUNTIME
++ gpu_pm_ops.runtime_suspend = gpu_runtime_suspend;
++ gpu_pm_ops.runtime_resume = gpu_runtime_resume;
++ gpu_pm_ops.runtime_idle = NULL;
++#endif
++
++ /* Replace callbacks. */
++ driver->driver.pm = &gpu_pm_ops;
++#endif
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_Reset(
++ IN gckPLATFORM Platform,
++ gceCORE GPU
++ )
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#define SRC_SCR_OFFSET 0
++#define BP_SRC_SCR_GPU3D_RST 1
++#define BP_SRC_SCR_GPU2D_RST 4
++ void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
++ gctUINT32 bit_offset,val;
++
++ if(GPU == gcvCORE_MAJOR) {
++ bit_offset = BP_SRC_SCR_GPU3D_RST;
++ } else if((GPU == gcvCORE_VG)
++ ||(GPU == gcvCORE_2D)) {
++ bit_offset = BP_SRC_SCR_GPU2D_RST;
++ } else {
++ return gcvSTATUS_INVALID_CONFIG;
++ }
++ val = __raw_readl(src_base + SRC_SCR_OFFSET);
++ val &= ~(1 << (bit_offset));
++ val |= (1 << (bit_offset));
++ __raw_writel(val, src_base + SRC_SCR_OFFSET);
++
++ while ((__raw_readl(src_base + SRC_SCR_OFFSET) &
++ (1 << (bit_offset))) != 0) {
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct imx_priv* priv = Platform->priv;
++ struct reset_control *rstc = priv->rstc[GPU];
++ if (rstc)
++ reset_control_reset(rstc);
++#else
++ imx_src_reset_gpu((int)GPU);
++#endif
++ return gcvSTATUS_OK;
++}
++
++gcsPLATFORM_OPERATIONS platformOperations = {
++ .adjustParam = gckPLATFORM_AdjustParam,
++ .allocPriv = _AllocPriv,
++ .freePriv = _FreePriv,
++ .getPower = _GetPower,
++ .putPower = _PutPower,
++ .setPower = _SetPower,
++ .setClock = _SetClock,
++ .adjustDriver = _AdjustDriver,
++ .reset = _Reset,
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ .shrinkMemory = _ShrinkMemory,
++#endif
++};
++
++void
++gckPLATFORM_QueryOperations(
++ IN gcsPLATFORM_OPERATIONS ** Operations
++ )
++{
++ *Operations = &platformOperations;
++}
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,15 @@
++EXTRA_CFLAGS += -DgcdDEFAULT_CONTIGUOUS_SIZE=134217728
++
++ifneq ($(CONFIG_ANDROID),)
++# build for android
++EXTRA_CFLAGS += -DgcdANDROID_NATIVE_FENCE_SYNC=3
++
++ifeq ($(CONFIG_SYNC),)
++$(warning CONFIG_SYNC is not set in kernel config)
++$(warning Android native fence sync needs CONFIG_SYNC)
++endif
++endif
++
++EXTRA_CFLAGS += -DLINUX_CMA_FSL=1
++ALLOCATOR_ARRAY_H_LOCATION := $(OS_KERNEL_DIR)/allocator/freescale
++CUSTOMER_ALLOCATOR_OBJS := $(ALLOCATOR_ARRAY_H_LOCATION)/gc_hal_kernel_allocator_cma.o
+diff -Nur linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/Kbuild linux-3.14.40/drivers/mxc/gpu-viv/v5/Kbuild
+--- linux-3.14.40.orig/drivers/mxc/gpu-viv/v5/Kbuild 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/gpu-viv/v5/Kbuild 2015-05-01 14:57:59.603427001 -0500
+@@ -0,0 +1,272 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2014 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Linux build file for kernel HAL driver.
++#
++
++AQROOT := $(srctree)/drivers/mxc/gpu-viv/v5
++
++include $(AQROOT)/config
++
++KERNEL_DIR ?= $(TOOL_DIR)/kernel
++
++OS_KERNEL_DIR := hal/os/linux/kernel
++ARCH_KERNEL_DIR := hal/kernel/arch
++ARCH_VG_KERNEL_DIR := hal/kernel/archvg
++HAL_KERNEL_DIR := hal/kernel
++
++# Check and include platform config.
++ifneq ($(PLATFORM),)
++
++# Get platform config path.
++PLATFORM_CONFIG ?= $(AQROOT)/$(OS_KERNEL_DIR)/platform/$(PLATFORM).config
++
++# Check whether it exists.
++PLATFORM_CONFIG := $(wildcard $(PLATFORM_CONFIG))
++
++# Include it if exists.
++ifneq ($(PLATFORM_CONFIG),)
++include $(PLATFORM_CONFIG)
++endif
++
++endif
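++
++# Example (assumption, matching the platform file added by this patch):
++# building with PLATFORM=freescale/gc_hal_kernel_platform_imx6q14 pulls in
++# .../platform/freescale/gc_hal_kernel_platform_imx6q14.config above and
++# links the corresponding platform object selected further below.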
++
++MODULE_NAME ?= galcore
++CUSTOMER_ALLOCATOR_OBJS ?=
++ALLOCATOR_ARRAY_H_LOCATION ?= $(OS_KERNEL_DIR)/allocator/default/
++
++EXTRA_CFLAGS += -Werror
++
++OBJS := $(OS_KERNEL_DIR)/gc_hal_kernel_device.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_linux.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_math.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_os.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_debugfs.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_allocator.o \
++
++ifneq ($(CONFIG_IOMMU_SUPPORT),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_iommu.o
++endif
++
++ifneq ($(PLATFORM),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_probe.o
++OBJS += $(OS_KERNEL_DIR)/platform/$(PLATFORM).o
++else
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_driver.o
++endif
++
++OBJS += $(HAL_KERNEL_DIR)/gc_hal_kernel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_db.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_debug.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_event.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_heap.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_video_memory.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_power.o
++
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_context.o \
++ $(ARCH_KERNEL_DIR)/gc_hal_kernel_hardware.o
++
++ifeq ($(VIVANTE_ENABLE_3D), 1)
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_recorder.o
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++OBJS +=\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_interrupt_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_command_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_vg.o
++endif
++
++ifneq ($(CONFIG_SYNC),)
++EXTRA_CFLAGS += -Idrivers/staging/android
++
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o
++endif
++
++ifeq ($(SECURITY), 1)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_security_channel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_security.o
++endif
++
++ifneq ($(CUSTOMER_ALLOCATOR_OBJS),)
++OBJS += $(CUSTOMER_ALLOCATOR_OBJS)
++endif
++
++ifeq ($(KERNELRELEASE), )
++
++.PHONY: all clean install
++
++# Define targets.
++all:
++ @make V=$(V) ARCH=$(ARCH_TYPE) -C $(KERNEL_DIR) SUBDIRS=`pwd` modules
++
++clean:
++ @rm -rf $(OBJS)
++ @rm -rf modules.order Module.symvers
++ @find $(AQROOT) -name ".gc_*.cmd" | xargs rm -f
++
++install: all
++ @mkdir -p $(SDK_DIR)/drivers
++
++else
++
++
++EXTRA_CFLAGS += -DLINUX -DDRIVER
++
++ifeq ($(FLAREON),1)
++EXTRA_CFLAGS += -DFLAREON
++endif
++
++ifeq ($(DEBUG), 1)
++EXTRA_CFLAGS += -DDBG=1 -DDEBUG -D_DEBUG
++else
++EXTRA_CFLAGS += -DDBG=0
++endif
++
++ifeq ($(NO_DMA_COHERENT), 1)
++EXTRA_CFLAGS += -DNO_DMA_COHERENT
++endif
++
++ifeq ($(CONFIG_DOVE_GPU), 1)
++EXTRA_CFLAGS += -DCONFIG_DOVE_GPU=1
++endif
++
++ifneq ($(USE_PLATFORM_DRIVER), 0)
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=1
++else
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=0
++endif
++
++EXTRA_CFLAGS += -DVIVANTE_PROFILER=1
++EXTRA_CFLAGS += -DVIVANTE_PROFILER_CONTEXT=1
++
++ifeq ($(ENABLE_GPU_CLOCK_BY_DRIVER), 1)
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=1
++else
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=0
++endif
++
++ifeq ($(USE_NEW_LINUX_SIGNAL), 1)
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=1
++else
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=0
++endif
++
++ifeq ($(FORCE_ALL_VIDEO_MEMORY_CACHED), 1)
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_CACHEABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_BUFFERABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=0
++endif
++
++ifeq ($(CACHE_FUNCTION_UNIMPLEMENTED), 1)
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=1
++else
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=0
++endif
++
++ifeq ($(CONFIG_SMP), y)
++EXTRA_CFLAGS += -DgcdSMP=1
++else
++EXTRA_CFLAGS += -DgcdSMP=0
++endif
++
++ifeq ($(VIVANTE_ENABLE_3D),0)
++EXTRA_CFLAGS += -DgcdENABLE_3D=0
++else
++EXTRA_CFLAGS += -DgcdENABLE_3D=1
++endif
++
++ifeq ($(VIVANTE_ENABLE_2D),0)
++EXTRA_CFLAGS += -DgcdENABLE_2D=0
++else
++EXTRA_CFLAGS += -DgcdENABLE_2D=1
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG),0)
++EXTRA_CFLAGS += -DgcdENABLE_VG=0
++else
++EXTRA_CFLAGS += -DgcdENABLE_VG=1
++endif
++
++ifeq ($(ENABLE_OUTER_CACHE_PATCH), 1)
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=0
++endif
++
++ifeq ($(USE_BANK_ALIGNMENT), 1)
++ EXTRA_CFLAGS += -DgcdENABLE_BANK_ALIGNMENT=1
++ ifneq ($(BANK_BIT_START), 0)
++ ifneq ($(BANK_BIT_END), 0)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_START=$(BANK_BIT_START)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_END=$(BANK_BIT_END)
++ endif
++ endif
++
++ ifneq ($(BANK_CHANNEL_BIT), 0)
++ EXTRA_CFLAGS += -DgcdBANK_CHANNEL_BIT=$(BANK_CHANNEL_BIT)
++ endif
++endif
++
++ifeq ($(gcdFPGA_BUILD), 1)
++EXTRA_CFLAGS += -DgcdFPGA_BUILD=1
++else
++EXTRA_CFLAGS += -DgcdFPGA_BUILD=0
++endif
++
++ifeq ($(SECURITY), 1)
++EXTRA_CFLAGS += -DgcdSECURITY=1
++endif
++
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/arch
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/os/linux/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/$(ALLOCATOR_ARRAY_H_LOCATION)
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/archvg
++endif
++
++obj-$(CONFIG_MXC_GPU_VIV) += galcore.o
++
++galcore-objs := $(OBJS)
++
++endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/hdmi-cec/Kconfig linux-3.14.40/drivers/mxc/hdmi-cec/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/hdmi-cec/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/hdmi-cec/Kconfig 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1,11 @@
++
++menu "MXC HDMI CEC (Consumer Electronics Control) support"
++
++config MXC_HDMI_CEC
++ tristate "Support for MXC HDMI CEC (Consumer Electronics Control)"
++ depends on MFD_MXC_HDMI
++ depends on FB_MXC_HDMI
++ help
++	  The HDMI CEC device implements the low-level CEC protocol on i.MX6 platforms.
++
++endmenu
+diff -Nur linux-3.14.40.orig/drivers/mxc/hdmi-cec/Makefile linux-3.14.40/drivers/mxc/hdmi-cec/Makefile
+--- linux-3.14.40.orig/drivers/mxc/hdmi-cec/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/hdmi-cec/Makefile 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1 @@
++obj-$(CONFIG_MXC_HDMI_CEC) += mxc_hdmi-cec.o
+diff -Nur linux-3.14.40.orig/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c linux-3.14.40/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c
+--- linux-3.14.40.orig/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1,629 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_hdmi-cec.c
++ *
++ * @brief HDMI CEC system initialization and file operation implementation
++ *
++ * @ingroup HDMI
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/workqueue.h>
++#include <linux/sizes.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/pinctrl/consumer.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "mxc_hdmi-cec.h"
++
++
++#define MAX_MESSAGE_LEN 17
++
++#define MESSAGE_TYPE_RECEIVE_SUCCESS 1
++#define MESSAGE_TYPE_NOACK 2
++#define MESSAGE_TYPE_DISCONNECTED 3
++#define MESSAGE_TYPE_CONNECTED 4
++#define MESSAGE_TYPE_SEND_SUCCESS 5
++
++#define CEC_TX_INPROGRESS -1
++#define CEC_TX_AVAIL 0
++
++struct hdmi_cec_priv {
++ int receive_error;
++ int send_error;
++ u8 Logical_address;
++ bool cec_state;
++ u8 last_msg[MAX_MESSAGE_LEN];
++ u8 msg_len;
++ int tx_answer;
++ u16 latest_cec_stat;
++ u8 link_status;
++ spinlock_t irq_lock;
++ struct delayed_work hdmi_cec_work;
++ struct mutex lock;
++};
++
++struct hdmi_cec_event {
++ int event_type;
++ int msg_len;
++ u8 msg[MAX_MESSAGE_LEN];
++ struct list_head list;
++};
++
++
++static LIST_HEAD(head);
++
++static int hdmi_cec_ready = 0;
++static int hdmi_cec_started;
++static int hdmi_cec_major;
++static struct class *hdmi_cec_class;
++static struct hdmi_cec_priv hdmi_cec_data;
++static u8 open_count;
++
++static wait_queue_head_t hdmi_cec_queue;
++static wait_queue_head_t tx_cec_queue;
++
++static irqreturn_t mxc_hdmi_cec_isr(int irq, void *data)
++{
++ struct hdmi_cec_priv *hdmi_cec = data;
++ u16 cec_stat = 0;
++ unsigned long flags;
++ u8 phy_stat0;
++ irqreturn_t ret = IRQ_HANDLED;
++
++ spin_lock_irqsave(&hdmi_cec->irq_lock, flags);
++
++ hdmi_writeb(0x7f, HDMI_IH_MUTE_CEC_STAT0);
++
++ cec_stat = hdmi_readb(HDMI_IH_CEC_STAT0);
++ hdmi_writeb(cec_stat, HDMI_IH_CEC_STAT0);
++ phy_stat0 = hdmi_readb(HDMI_PHY_STAT0) & 0x02;
++
++ if ((cec_stat & (HDMI_IH_CEC_STAT0_ERROR_INIT | \
++ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | \
++ HDMI_IH_CEC_STAT0_DONE)) == 0) {
++ ret = IRQ_NONE;
++ cec_stat = 0;
++ }
++ if (hdmi_cec->link_status ^ phy_stat0) {
++ /* HPD value changed */
++ hdmi_cec->link_status = phy_stat0;
++ if (hdmi_cec->link_status)
++ cec_stat |= 0x80; /* Connected */
++ else
++ cec_stat |= 0x100; /* Disconnected */
++ }
++ pr_debug("HDMI CEC interrupt received\n");
++ hdmi_cec->latest_cec_stat = cec_stat ;
++ hdmi_cec->latest_cec_stat = cec_stat;
++ schedule_delayed_work(&(hdmi_cec->hdmi_cec_work), msecs_to_jiffies(20));
++
++ spin_unlock_irqrestore(&hdmi_cec->irq_lock, flags);
++
++ return ret;
++}
++
++void mxc_hdmi_cec_handle(u16 cec_stat)
++{
++ u8 val = 0, i = 0;
++ struct hdmi_cec_event *event = NULL;
++ if (!open_count)
++  return;
++
++ /* The current transmission is successful (for initiator only). */
++ if (cec_stat & HDMI_IH_CEC_STAT0_DONE) {
++ hdmi_cec_data.tx_answer = cec_stat;
++ wake_up(&tx_cec_queue);
++ }
++ /*EOM is detected so that the received data is ready in the receiver data buffer*/
++ if (cec_stat & HDMI_IH_CEC_STAT0_EOM) {
++ hdmi_writeb(0x02, HDMI_IH_CEC_STAT0);
++ event = vmalloc(sizeof(struct hdmi_cec_event));
++ if (NULL == event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ memset(event, 0, sizeof(struct hdmi_cec_event));
++ event->msg_len = hdmi_readb(HDMI_CEC_RX_CNT);
++ if (!event->msg_len) {
++   pr_err("%s: Invalid CEC message length!\n", __func__);
++   vfree(event); /* don't leak the event we just allocated */
++   return;
++ }
++ event->event_type = MESSAGE_TYPE_RECEIVE_SUCCESS;
++ for (i = 0; i < event->msg_len; i++)
++ event->msg[i] = hdmi_readb(HDMI_CEC_RX_DATA0+i);
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++ mutex_lock(&hdmi_cec_data.lock);
++ list_add_tail(&event->list, &head);
++ mutex_unlock(&hdmi_cec_data.lock);
++ wake_up(&hdmi_cec_queue);
++ }
++ /*An error is detected on cec line (for initiator only). */
++ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_INIT) {
++ mutex_lock(&hdmi_cec_data.lock);
++ hdmi_cec_data.send_error++;
++ if (hdmi_cec_data.send_error > 2) {
++   pr_err("%s: Re-transmission attempted more than 2 times!\n", __func__);
++ hdmi_cec_data.send_error = 0;
++ mutex_unlock(&hdmi_cec_data.lock);
++ hdmi_cec_data.tx_answer = cec_stat;
++ wake_up(&tx_cec_queue);
++ return;
++ }
++ for (i = 0; i < hdmi_cec_data.msg_len; i++)
++ hdmi_writeb(hdmi_cec_data.last_msg[i], HDMI_CEC_TX_DATA0+i);
++ hdmi_writeb(hdmi_cec_data.msg_len, HDMI_CEC_TX_CNT);
++ val = hdmi_readb(HDMI_CEC_CTRL);
++ val |= 0x01;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ mutex_unlock(&hdmi_cec_data.lock);
++ }
++ /*A frame is not acknowledged in a directly addressed message. Or a frame is negatively acknowledged in
++ a broadcast message (for initiator only).*/
++ if (cec_stat & HDMI_IH_CEC_STAT0_NACK) {
++ hdmi_cec_data.tx_answer = cec_stat;
++ wake_up(&tx_cec_queue);
++ }
++ /*An error is notified by a follower. Abnormal logic data bit error (for follower).*/
++ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_FOLL) {
++ hdmi_cec_data.receive_error++;
++ }
++ /*HDMI cable connected*/
++ if (cec_stat & 0x80) {
++ pr_info("HDMI link connected\n");
++ event = vmalloc(sizeof(struct hdmi_cec_event));
++ if (NULL == event) {
++ pr_err("%s: Not enough memory\n", __func__);
++ return;
++ }
++ memset(event, 0, sizeof(struct hdmi_cec_event));
++ event->event_type = MESSAGE_TYPE_CONNECTED;
++ mutex_lock(&hdmi_cec_data.lock);
++ list_add_tail(&event->list, &head);
++ mutex_unlock(&hdmi_cec_data.lock);
++ wake_up(&hdmi_cec_queue);
++ }
++ /*HDMI cable disconnected*/
++ if (cec_stat & 0x100) {
++ pr_info("HDMI link disconnected\n");
++ event = vmalloc(sizeof(struct hdmi_cec_event));
++ if (NULL == event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ memset(event, 0, sizeof(struct hdmi_cec_event));
++ event->event_type = MESSAGE_TYPE_DISCONNECTED;
++ mutex_lock(&hdmi_cec_data.lock);
++ list_add_tail(&event->list, &head);
++ mutex_unlock(&hdmi_cec_data.lock);
++ wake_up(&hdmi_cec_queue);
++ }
++ return;
++}
++EXPORT_SYMBOL(mxc_hdmi_cec_handle);
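++/*
++ * Note (added for clarity): the ISR above only latches the CEC status
++ * bits and mutes further CEC interrupts; the actual event handling runs
++ * in this delayed work item, which forwards the latched status to
++ * mxc_hdmi_cec_handle() and then re-enables all sources except the unused
++ * WAKEUP/ERROR_FOLL/ARB_LOST ones.
++ */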
++static void mxc_hdmi_cec_worker(struct work_struct *work)
++{
++ u8 val;
++ mxc_hdmi_cec_handle(hdmi_cec_data.latest_cec_stat);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ARB_LOST;
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++}
++
++/*!
++ * @brief open function for cec file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int hdmi_cec_open(struct inode *inode, struct file *filp)
++{
++ mutex_lock(&hdmi_cec_data.lock);
++ if (open_count) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EBUSY;
++ }
++ open_count = 1;
++ filp->private_data = (void *)(&hdmi_cec_data);
++ hdmi_cec_data.Logical_address = 15;
++ hdmi_cec_data.cec_state = false;
++ mutex_unlock(&hdmi_cec_data.lock);
++ return 0;
++}
++
++static ssize_t hdmi_cec_read(struct file *file, char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct hdmi_cec_event *event = NULL;
++ pr_debug("function : %s\n", __func__);
++
++ if (!open_count)
++ return -ENODEV;
++ mutex_lock(&hdmi_cec_data.lock);
++ if (false == hdmi_cec_data.cec_state) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EACCES;
++ }
++
++ if (list_empty(&head)) {
++ if (file->f_flags & O_NONBLOCK) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EAGAIN;
++ } else {
++ do {
++ mutex_unlock(&hdmi_cec_data.lock);
++ if (wait_event_interruptible(hdmi_cec_queue, (!list_empty(&head))))
++ return -ERESTARTSYS;
++ mutex_lock(&hdmi_cec_data.lock);
++ } while (list_empty(&head));
++ }
++ }
++
++ event = list_first_entry(&head, struct hdmi_cec_event, list);
++ list_del(&event->list);
++ mutex_unlock(&hdmi_cec_data.lock);
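++ /* Copies everything except the trailing list_head; this relies on
++    'list' being the last member of struct hdmi_cec_event. */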
++ if (copy_to_user(buf, event,
++ sizeof(struct hdmi_cec_event) - sizeof(struct list_head))) {
++ vfree(event);
++ return -EFAULT;
++ }
++ vfree(event);
++ return (sizeof(struct hdmi_cec_event) - sizeof(struct list_head));
++}
++
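++/*
++ * Write path (summary, added for clarity): the caller supplies a raw CEC
++ * frame (header byte plus payload, at most MAX_MESSAGE_LEN bytes). The
++ * frame is copied into the HDMI_CEC_TX_* registers, transmission is
++ * kicked off via bit 0 of HDMI_CEC_CTRL, and the writer then sleeps on
++ * tx_cec_queue for up to one second until the interrupt worker reports
++ * completion (DONE), NACK, or a transmit error.
++ */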
++static ssize_t hdmi_cec_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int ret = 0 , i = 0;
++ u8 msg[MAX_MESSAGE_LEN];
++ u8 msg_len = 0, val = 0;
++
++ pr_debug("function : %s\n", __func__);
++
++ if (!open_count)
++ return -ENODEV;
++ mutex_lock(&hdmi_cec_data.lock);
++ if (false == hdmi_cec_data.cec_state) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EACCES;
++ }
++ /* Ensure that there is only one writer who is the only listener of tx_cec_queue */
++ if (hdmi_cec_data.tx_answer != CEC_TX_AVAIL) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ return -EBUSY;
++ }
++ mutex_unlock(&hdmi_cec_data.lock);
++ if (count > MAX_MESSAGE_LEN)
++ return -EINVAL;
++ memset(&msg, 0, MAX_MESSAGE_LEN);
++ ret = copy_from_user(&msg, buf, count);
++ if (ret)
++ return -EACCES;
++ mutex_lock(&hdmi_cec_data.lock);
++ hdmi_cec_data.send_error = 0;
++ hdmi_cec_data.tx_answer = CEC_TX_INPROGRESS;
++ msg_len = count;
++ hdmi_writeb(msg_len, HDMI_CEC_TX_CNT);
++ for (i = 0; i < msg_len; i++)
++ hdmi_writeb(msg[i], HDMI_CEC_TX_DATA0+i);
++ val = hdmi_readb(HDMI_CEC_CTRL);
++ val |= 0x01;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ memcpy(hdmi_cec_data.last_msg, msg, msg_len);
++ hdmi_cec_data.msg_len = msg_len;
++ mutex_unlock(&hdmi_cec_data.lock);
++
++ ret = wait_event_interruptible_timeout(tx_cec_queue, hdmi_cec_data.tx_answer != CEC_TX_INPROGRESS, HZ);
++
++ if (ret < 0) {
++ ret = -ERESTARTSYS;
++ goto tx_out;
++ }
++
++ if (hdmi_cec_data.tx_answer & HDMI_IH_CEC_STAT0_DONE)
++ /* msg correctly sent */
++ ret = msg_len;
++ else
++ ret = -EIO;
++
++ tx_out:
++ hdmi_cec_data.tx_answer = CEC_TX_AVAIL;
++ return ret;
++}
++
++void hdmi_cec_start_device(void)
++{
++ u8 val;
++
++ if (!hdmi_cec_ready || hdmi_cec_started)
++ return;
++
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++ hdmi_writeb(0x02, HDMI_CEC_CTRL);
++ /* Force read unlock */
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++ val = HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_POLARITY);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ARB_LOST;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_cec_data.link_status = hdmi_readb(HDMI_PHY_STAT0) & 0x02;
++ mutex_lock(&hdmi_cec_data.lock);
++ hdmi_cec_data.cec_state = true;
++ mutex_unlock(&hdmi_cec_data.lock);
++
++ hdmi_cec_started = 1;
++}
++EXPORT_SYMBOL(hdmi_cec_start_device);
++
++void hdmi_cec_stop_device(void)
++{
++ u8 val;
++
++ if (!hdmi_cec_ready || !hdmi_cec_started)
++ return;
++
++ hdmi_writeb(0x10, HDMI_CEC_CTRL);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_ARB_LOST | \
++ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0x0, HDMI_CEC_POLARITY);
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++ mutex_lock(&hdmi_cec_data.lock);
++ hdmi_cec_data.cec_state = false;
++ mutex_unlock(&hdmi_cec_data.lock);
++
++ hdmi_cec_started = 0;
++}
++EXPORT_SYMBOL(hdmi_cec_stop_device);
++
++/*!
++ * @brief IO ctrl function for HDMI CEC file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long hdmi_cec_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0, status = 0;
++ u8 val = 0, msg = 0;
++ struct mxc_edid_cfg hdmi_edid_cfg;
++ pr_debug("function : %s\n", __func__);
++ if (!open_count)
++ return -ENODEV;
++ switch (cmd) {
++ case HDMICEC_IOC_SETLOGICALADDRESS:
++ mutex_lock(&hdmi_cec_data.lock);
++ if (false == hdmi_cec_data.cec_state) {
++ mutex_unlock(&hdmi_cec_data.lock);
++ pr_err("Trying to set logical address while not started\n");
++ return -EACCES;
++ }
++ hdmi_cec_data.Logical_address = (u8)arg;
++ if (hdmi_cec_data.Logical_address <= 7) {
++ val = 1 << hdmi_cec_data.Logical_address;
++ hdmi_writeb(val, HDMI_CEC_ADDR_L);
++ hdmi_writeb(0, HDMI_CEC_ADDR_H);
++ } else if (hdmi_cec_data.Logical_address > 7 && hdmi_cec_data.Logical_address <= 15) {
++ val = 1 << (hdmi_cec_data.Logical_address - 8);
++ hdmi_writeb(val, HDMI_CEC_ADDR_H);
++ hdmi_writeb(0, HDMI_CEC_ADDR_L);
++ } else
++ ret = -EINVAL;
++ /*Send Polling message with same source and destination address*/
++ if (0 == ret && 15 != hdmi_cec_data.Logical_address) {
++ msg = (hdmi_cec_data.Logical_address << 4)|hdmi_cec_data.Logical_address;
++ hdmi_writeb(1, HDMI_CEC_TX_CNT);
++ hdmi_writeb(msg, HDMI_CEC_TX_DATA0);
++ val = hdmi_readb(HDMI_CEC_CTRL);
++ val |= 0x01;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ }
++ mutex_unlock(&hdmi_cec_data.lock);
++ break;
++ case HDMICEC_IOC_STARTDEVICE:
++ hdmi_cec_start_device();
++ break;
++ case HDMICEC_IOC_STOPDEVICE:
++ hdmi_cec_stop_device();
++ break;
++ case HDMICEC_IOC_GETPHYADDRESS:
++ hdmi_get_edid_cfg(&hdmi_edid_cfg);
++ status = copy_to_user((void __user *)arg,
++ &hdmi_edid_cfg.physical_address,
++ 4*sizeof(u8));
++ if (status)
++ ret = -EFAULT;
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
++
++/*!
++ * @brief Release function for HDMI CEC file operation
++ * @return 0 on success or negative error code on error
++ */
++static int hdmi_cec_release(struct inode *inode, struct file *filp)
++{
++ mutex_lock(&hdmi_cec_data.lock);
++ if (open_count) {
++ open_count = 0;
++ hdmi_cec_data.cec_state = false;
++ hdmi_cec_data.Logical_address = 15;
++ }
++ mutex_unlock(&hdmi_cec_data.lock);
++
++ return 0;
++}
++
++static unsigned int hdmi_cec_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = 0;
++
++ pr_debug("function : %s\n", __func__);
++
++ poll_wait(file, &hdmi_cec_queue, wait);
++
++ mutex_lock(&hdmi_cec_data.lock);
++ if (hdmi_cec_data.tx_answer == CEC_TX_AVAIL)
++ mask = (POLLOUT | POLLWRNORM);
++ if (!list_empty(&head))
++ mask |= (POLLIN | POLLRDNORM);
++ mutex_unlock(&hdmi_cec_data.lock);
++ return mask;
++}
++
++
++const struct file_operations hdmi_cec_fops = {
++ .owner = THIS_MODULE,
++ .read = hdmi_cec_read,
++ .write = hdmi_cec_write,
++ .open = hdmi_cec_open,
++ .unlocked_ioctl = hdmi_cec_ioctl,
++ .release = hdmi_cec_release,
++ .poll = hdmi_cec_poll,
++};
++
++static int hdmi_cec_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ struct pinctrl *pinctrl;
++ int irq = platform_get_irq(pdev, 0);
++
++ hdmi_cec_major = register_chrdev(hdmi_cec_major, "mxc_hdmi_cec", &hdmi_cec_fops);
++ if (hdmi_cec_major < 0) {
++ dev_err(&pdev->dev, "hdmi_cec: unable to get a major for HDMI CEC\n");
++ err = -EBUSY;
++ goto out;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (unlikely(res == NULL)) {
++  dev_err(&pdev->dev, "hdmi_cec: No HDMI irq line provided\n");
++ goto err_out_chrdev;
++ }
++ spin_lock_init(&hdmi_cec_data.irq_lock);
++
++ err = devm_request_irq(&pdev->dev, irq, mxc_hdmi_cec_isr, IRQF_SHARED,
++ dev_name(&pdev->dev), &hdmi_cec_data);
++ if (err < 0) {
++  dev_err(&pdev->dev, "hdmi_cec: Unable to request irq: %d\n", err);
++ goto err_out_chrdev;
++ }
++
++ hdmi_cec_class = class_create(THIS_MODULE, "mxc_hdmi_cec");
++ if (IS_ERR(hdmi_cec_class)) {
++ err = PTR_ERR(hdmi_cec_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_cec_class, NULL, MKDEV(hdmi_cec_major, 0),
++ NULL, "mxc_hdmi_cec");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "can't get/select CEC pinctrl\n");
++ goto err_out_class;
++ }
++
++ init_waitqueue_head(&hdmi_cec_queue);
++ init_waitqueue_head(&tx_cec_queue);
++
++ INIT_LIST_HEAD(&head);
++
++ mutex_init(&hdmi_cec_data.lock);
++ hdmi_cec_data.Logical_address = 15;
++ hdmi_cec_data.tx_answer = CEC_TX_AVAIL;
++ platform_set_drvdata(pdev, &hdmi_cec_data);
++ INIT_DELAYED_WORK(&hdmi_cec_data.hdmi_cec_work, mxc_hdmi_cec_worker);
++
++ dev_info(&pdev->dev, "HDMI CEC initialized\n");
++ hdmi_cec_ready = 1;
++ goto out;
++
++err_out_class:
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++out:
++ return err;
++}
++
++static int hdmi_cec_dev_remove(struct platform_device *pdev)
++{
++ if (hdmi_cec_data.cec_state)
++ hdmi_cec_stop_device();
++ if (hdmi_cec_major > 0) {
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++ hdmi_cec_major = 0;
++}
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_cec_match[] = {
++ { .compatible = "fsl,imx6q-hdmi-cec", },
++ { .compatible = "fsl,imx6dl-hdmi-cec", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_cec_driver = {
++ .probe = hdmi_cec_dev_probe,
++ .remove = hdmi_cec_dev_remove,
++ .driver = {
++ .name = "mxc_hdmi_cec",
++ .of_match_table = imx_hdmi_cec_match,
++ },
++};
++
++module_platform_driver(mxc_hdmi_cec_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux HDMI CEC driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_hdmi_cec");
++
+diff -Nur linux-3.14.40.orig/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h linux-3.14.40/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h
+--- linux-3.14.40.orig/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef _HDMICEC_H_
++#define _HDMICEC_H_
++#include <linux/ioctl.h>
++
++/*
++ * Ioctl definitions
++ */
++
++/* Use 'k' as magic number */
++#define HDMICEC_IOC_MAGIC 'H'
++/*
++ * S means "Set" through a ptr,
++ * T means "Tell" directly with the argument value
++ * G means "Get": reply by setting through a pointer
++ * Q means "Query": response is on the return value
++ * X means "eXchange": G and S atomically
++ * H means "sHift": T and Q atomically
++ */
++#define HDMICEC_IOC_SETLOGICALADDRESS \
++ _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char)
++#define HDMICEC_IOC_STARTDEVICE _IO(HDMICEC_IOC_MAGIC, 2)
++#define HDMICEC_IOC_STOPDEVICE _IO(HDMICEC_IOC_MAGIC, 3)
++#define HDMICEC_IOC_GETPHYADDRESS \
++ _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4])
++
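++/*
++ * Hypothetical userspace usage (assuming udev exposes the chardev as
++ * /dev/mxc_hdmi_cec; device node name and values below are illustrative):
++ *
++ *   int fd = open("/dev/mxc_hdmi_cec", O_RDWR);
++ *   ioctl(fd, HDMICEC_IOC_STARTDEVICE);
++ *   ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4);   // e.g. "Playback 1"
++ *   unsigned char frame[] = { 0x40, 0x36 };        // 4 -> TV, <Standby>
++ *   write(fd, frame, sizeof(frame));               // blocks up to 1s
++ */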
++#endif /* !_HDMICEC_H_ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c linux-3.14.40/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1,495 @@
++/*
++ * Copyright 2009-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_calc_stripes_sizes.c
++ *
++ * @brief IPU IC functions
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/math64.h>
++
++#define BPP_32 0
++#define BPP_16 3
++#define BPP_8 5
++#define BPP_24 1
++#define BPP_12 4
++#define BPP_18 2
++
++static u32 truncate(u32 up, /* 0: down; else: up */
++ u64 a, /* must be non-negative */
++ u32 b)
++{
++ u32 d;
++ u64 div;
++ div = div_u64(a, b);
++ d = b * (div >> 32);
++ if (up && (a > (((u64)d) << 32)))
++ return d+b;
++ else
++ return d;
++}
++
++static unsigned int f_calc(unsigned int pfs, unsigned int bpp, unsigned int *write)
++{/* return input_f */
++ unsigned int f_calculated = 0;
++ switch (pfs) {
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV444P:
++ f_calculated = 16;
++ break;
++
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ f_calculated = 8;
++ break;
++
++ case IPU_PIX_FMT_NV12:
++ f_calculated = 8;
++ break;
++
++ default:
++ f_calculated = 0;
++ break;
++
++ }
++ if (!f_calculated) {
++ switch (bpp) {
++ case BPP_32:
++ f_calculated = 2;
++ break;
++
++ case BPP_16:
++ f_calculated = 4;
++ break;
++
++ case BPP_8:
++ case BPP_24:
++ f_calculated = 8;
++ break;
++
++ case BPP_12:
++ f_calculated = 16;
++ break;
++
++ case BPP_18:
++ f_calculated = 32;
++ break;
++
++ default:
++ f_calculated = 0;
++ break;
++ }
++ }
++ return f_calculated;
++}
++
++
++static unsigned int m_calc(unsigned int pfs)
++{
++ unsigned int m_calculated = 0;
++ switch (pfs) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV444P:
++ m_calculated = 16;
++ break;
++
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ m_calculated = 8;
++ break;
++
++ default:
++ m_calculated = 8;
++ break;
++
++ }
++ return m_calculated;
++}
++
++static int calc_split_resize_coeffs(unsigned int inSize, unsigned int outSize,
++ unsigned int *resizeCoeff,
++ unsigned int *downsizeCoeff)
++{
++ uint32_t tempSize;
++ uint32_t tempDownsize;
++
++ if (inSize > 4096) {
++ pr_debug("IC input size(%d) cannot exceed 4096\n",
++ inSize);
++ return -EINVAL;
++ }
++
++ if (outSize > 1024) {
++ pr_debug("IC output size(%d) cannot exceed 1024\n",
++ outSize);
++ return -EINVAL;
++ }
++
++ if ((outSize << 3) < inSize) {
++ pr_debug("IC cannot downsize more than 8:1\n");
++ return -EINVAL;
++ }
++
++ /* Compute downsizing coefficient */
++ /* Output of downsizing unit cannot be more than 1024 */
++ tempDownsize = 0;
++ tempSize = inSize;
++ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
++ (tempDownsize < 2)) {
++ tempSize >>= 1;
++ tempDownsize++;
++ }
++ *downsizeCoeff = tempDownsize;
++
++ /* compute resizing coefficient using the following equation:
++ resizeCoeff = M*(SI -1)/(SO - 1)
++ where M = 2^13, SI - input size, SO - output size */
++ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
++ if (*resizeCoeff >= 16384L) {
++ pr_debug("Overflow on IC resize coefficient.\n");
++ return -EINVAL;
++ }
++
++ pr_debug("resizing from %u -> %u pixels, "
++ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
++ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
++ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
++
++ return 0;
++}
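++
++/*
++ * Worked example (illustrative): for inSize = 1920 and outSize = 1024 the
++ * downsizer halves the input once (1920 -> 960, downsizeCoeff = 1), since
++ * 1920 >= 2 * 1024.  The remaining ratio gives
++ *   resizeCoeff = 8192 * (960 - 1) / (1024 - 1) = 7679,
++ * roughly 0.94 with 13 fractional bits, safely below the 16384 overflow
++ * limit checked above.
++ */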
++
++/* Stripe parameters calculator */
++/**************************************************************************
++Notes:
++MSW = the maximal width allowed for a stripe
++ i.MX31: 720, i.MX35: 800, i.MX37/51/53: 1024
++cirr = the maximal inverse resizing ratio for which overlap in the input
++ is requested; typically cirr~2
++flags
++ bit 0 - equal_stripes
++ 0 each stripe is allowed to have independent parameters
++ for maximal image quality
++ 1 the stripes are requested to have identical parameters
++ (except the base address), for maximal performance
++ bit 1 - vertical/horizontal
++ 0 horizontal
++ 1 vertical
++
++If performance is the top priority (above image quality)
++ Avoid overlap, by setting CIRR = 0
++		This will also effectively force identical_stripes = 1
++	Choose IF & OF that correspond to the same IOX/SX for both stripes
++ Choose IFW & OFW such that
++ IFW/IM, IFW/IF, OFW/OM, OFW/OF are even integers
++ The function returns an error status:
++ 0: no error
++ 1: invalid input parameters -> aborted without result
++ Valid parameters should satisfy the following conditions
++ IFW <= OFW, otherwise downsizing is required
++ - which is not supported yet
++ 4 <= IFW,OFW, so some interpolation may be needed even without overlap
++ IM, OM, IF, OF should not vanish
++ 2*IF <= IFW
++ so the frame can be split to two equal stripes, even without overlap
++ 2*(OF+IF/irr_opt) <= OFW
++ so a valid positive INW exists even for equal stripes
++ OF <= MSW, otherwise, the left stripe cannot be sufficiently large
++ MSW < OFW, so splitting to stripes is required
++ OFW <= 2*MSW, so two stripes are sufficient
++ (this also implies that 2<=MSW)
++ 2: OF is not a multiple of OM - not fully-supported yet
++	Output is produced but OW is not guaranteed to be a multiple of OM
++ 4: OFW reduced to be a multiple of OM
++ 8: CIRR > 1: truncated to 1
++	Overlap is not supported (and not needed) for upsizing
++**************************************************************************/
++int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
++ /* input frame width;>1 */
++ unsigned int output_frame_width, /* output frame width; >1 */
++ const unsigned int maximal_stripe_width,
++ /* the maximal width allowed for a stripe */
++ const unsigned long long cirr, /* see above */
++ const unsigned int flags, /* see above */
++			   u32 input_pixelformat,/* pixel format of the read channel */
++			   u32 output_pixelformat,/* pixel format of the write channel */
++ struct stripe_param *left,
++ struct stripe_param *right)
++{
++ const unsigned int irr_frac_bits = 13;
++ const unsigned long irr_steps = 1 << irr_frac_bits;
++ const u64 dirr = ((u64)1) << (32 - 2);
++ /* The maximum relative difference allowed between the irrs */
++ const u64 cr = ((u64)4) << 32;
++ /* The importance ratio between the two terms in the cost function below */
++
++ unsigned int status;
++ unsigned int temp;
++ unsigned int onw_min;
++ unsigned int inw = 0, onw = 0, inw_best = 0;
++ /* number of pixels in the left stripe NOT hidden by the right stripe */
++ u64 irr_opt; /* the optimal inverse resizing ratio */
++ u64 rr_opt; /* the optimal resizing ratio = 1/irr_opt*/
++ u64 dinw; /* the misalignment between the stripes */
++ /* (measured in units of input columns) */
++ u64 difwl, difwr = 0;
++ /* The number of input columns not reflected in the output */
++ /* the resizing ratio used for the right stripe is */
++ /* left->irr and right->irr respectively */
++ u64 cost, cost_min;
++ u64 div; /* result of division */
++ bool equal_stripes = (flags & 0x1) != 0;
++ bool vertical = (flags & 0x2) != 0;
++
++ unsigned int input_m, input_f, output_m, output_f; /* parameters for upsizing by stripes */
++ unsigned int resize_coeff;
++ unsigned int downsize_coeff;
++
++ status = 0;
++
++ if (vertical) {
++ input_f = 2;
++ input_m = 8;
++ output_f = 8;
++ output_m = 2;
++ } else {
++ input_f = f_calc(input_pixelformat, 0, NULL);
++ input_m = m_calc(input_pixelformat);
++ output_f = input_m;
++ output_m = m_calc(output_pixelformat);
++ }
++ if ((input_frame_width < 4) || (output_frame_width < 4))
++ return 1;
++
++ irr_opt = div_u64((((u64)(input_frame_width - 1)) << 32),
++ (output_frame_width - 1));
++ rr_opt = div_u64((((u64)(output_frame_width - 1)) << 32),
++ (input_frame_width - 1));
++
++ if ((input_m == 0) || (output_m == 0) || (input_f == 0) || (output_f == 0)
++ || (input_frame_width < (2 * input_f))
++ || ((((u64)output_frame_width) << 32) <
++ (2 * ((((u64)output_f) << 32) + (input_f * rr_opt))))
++ || (maximal_stripe_width < output_f)
++ || ((output_frame_width <= maximal_stripe_width)
++ && (equal_stripes == 0))
++ || ((2 * maximal_stripe_width) < output_frame_width))
++ return 1;
++
++ if (output_f % output_m)
++ status += 2;
++
++ temp = truncate(0, (((u64)output_frame_width) << 32), output_m);
++ if (temp < output_frame_width) {
++ output_frame_width = temp;
++ status += 4;
++ }
++
++ pr_debug("---------------->\n"
++ "if = %d\n"
++ "im = %d\n"
++ "of = %d\n"
++ "om = %d\n"
++ "irr_opt = %llu\n"
++ "rr_opt = %llu\n"
++ "cirr = %llu\n"
++ "pixel in = %08x\n"
++ "pixel out = %08x\n"
++ "ifw = %d\n"
++ "ofwidth = %d\n",
++ input_f,
++ input_m,
++ output_f,
++ output_m,
++ irr_opt,
++ rr_opt,
++ cirr,
++ input_pixelformat,
++ output_pixelformat,
++ input_frame_width,
++ output_frame_width
++ );
++
++ if (equal_stripes) {
++ if ((irr_opt > cirr) /* overlap in the input is not requested */
++ && ((input_frame_width % (input_m << 1)) == 0)
++ && ((input_frame_width % (input_f << 1)) == 0)
++ && ((output_frame_width % (output_m << 1)) == 0)
++ && ((output_frame_width % (output_f << 1)) == 0)) {
++ /* without overlap */
++ left->input_width = right->input_width = right->input_column =
++ input_frame_width >> 1;
++ left->output_width = right->output_width = right->output_column =
++ output_frame_width >> 1;
++ left->input_column = 0;
++ left->output_column = 0;
++ div = div_u64(((((u64)irr_steps) << 32) *
++ (right->input_width - 1)), (right->output_width - 1));
++ left->irr = right->irr = truncate(0, div, 1);
++ } else { /* with overlap */
++ onw = truncate(0, (((u64)output_frame_width - 1) << 32) >> 1,
++ output_f);
++ inw = truncate(0, onw * irr_opt, input_f);
++ /* this is the maximal inw which allows the same resizing ratio */
++ /* in both stripes */
++ onw = truncate(1, (inw * rr_opt), output_f);
++ div = div_u64((((u64)(irr_steps * inw)) <<
++ 32), onw);
++ left->irr = right->irr = truncate(0, div, 1);
++ left->output_width = right->output_width =
++ output_frame_width - onw;
++ /* These are valid assignments for output_width, */
++ /* assuming output_f is a multiple of output_m */
++ div = (((u64)(left->output_width-1) * (left->irr)) << 32);
++ div = (((u64)1) << 32) + div_u64(div, irr_steps);
++
++ left->input_width = right->input_width = truncate(1, div, input_m);
++
++ div = div_u64((((u64)((right->output_width - 1) * right->irr)) <<
++ 32), irr_steps);
++ difwr = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
++ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
++ left->input_column = truncate(0, div, input_f);
++
++
++ /* This splits the truncated input columns evenly */
++ /* between the left and right margins */
++ right->input_column = left->input_column + inw;
++ left->output_column = 0;
++ right->output_column = onw;
++ }
++ if (left->input_width > left->output_width) {
++ if (calc_split_resize_coeffs(left->input_width,
++ left->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++
++ if (downsize_coeff > 0) {
++ left->irr = right->irr =
++ (downsize_coeff << 14) | resize_coeff;
++ }
++ }
++ pr_debug("inw %d, onw %d, ilw %d, ilc %d, olw %d,"
++ " irw %d, irc %d, orw %d, orc %d, "
++ "difwr %llu, lirr %u\n",
++ inw, onw, left->input_width,
++ left->input_column, left->output_width,
++ right->input_width, right->input_column,
++ right->output_width,
++ right->output_column, difwr, left->irr);
++ } else { /* independent stripes */
++ onw_min = output_frame_width - maximal_stripe_width;
++ /* onw is a multiple of output_f, in the range */
++ /* [max(output_f,output_frame_width-maximal_stripe_width),*/
++ /*min(output_frame_width-2,maximal_stripe_width)] */
++ /* definitely beyond the cost of any valid setting */
++ cost_min = (((u64)input_frame_width) << 32) + cr;
++ onw = truncate(0, ((u64)maximal_stripe_width), output_f);
++ if (output_frame_width - onw == 1)
++ onw -= output_f; /* => onw and output_frame_width-1-onw are positive */
++ inw = truncate(0, onw * irr_opt, input_f);
++ /* this is the maximal inw which allows the same resizing ratio */
++ /* in both stripes */
++ onw = truncate(1, inw * rr_opt, output_f);
++ do {
++ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
++ left->irr = truncate(0, div, 1);
++ div = div_u64((((u64)(onw * left->irr)) << 32),
++ irr_steps);
++ dinw = (((u64)inw) << 32) - div;
++
++ div = div_u64((((u64)((output_frame_width - 1 - onw) * left->irr)) <<
++ 32), irr_steps);
++
++ difwl = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
++
++ cost = difwl + (((u64)(cr * dinw)) >> 32);
++
++ if (cost < cost_min) {
++ inw_best = inw;
++ cost_min = cost;
++ }
++
++ inw -= input_f;
++ onw = truncate(1, inw * rr_opt, output_f);
++ /* This is the minimal onw which allows the same resizing ratio */
++ /* in both stripes */
++ } while (onw >= onw_min);
++
++ inw = inw_best;
++ onw = truncate(1, inw * rr_opt, output_f);
++ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
++ left->irr = truncate(0, div, 1);
++
++ left->output_width = onw;
++ right->output_width = output_frame_width - onw;
++ /* These are valid assignments for output_width, */
++ /* assuming output_f is a multiple of output_m */
++ left->input_width = truncate(1, ((u64)(inw + 1)) << 32, input_m);
++ right->input_width = truncate(1, ((u64)(input_frame_width - inw)) <<
++ 32, input_m);
++
++ div = div_u64((((u64)(irr_steps * (input_frame_width - 1 - inw))) <<
++ 32), (right->output_width - 1));
++ right->irr = truncate(0, div, 1);
++ temp = truncate(0, ((u64)left->irr) * ((((u64)1) << 32) + dirr), 1);
++ if (temp < right->irr)
++ right->irr = temp;
++ div = div_u64(((u64)((right->output_width - 1) * right->irr) <<
++ 32), irr_steps);
++ difwr = (u64)(input_frame_width - 1 - inw) - div;
++
++
++ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
++ left->input_column = truncate(0, div, input_f);
++
++ /* This splits the truncated input columns evenly */
++ /* between the left and right margins */
++ right->input_column = left->input_column + inw;
++ left->output_column = 0;
++ right->output_column = onw;
++ if (left->input_width > left->output_width) {
++ if (calc_split_resize_coeffs(left->input_width,
++ left->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++ left->irr = (downsize_coeff << 14) | resize_coeff;
++ }
++ if (right->input_width > right->output_width) {
++ if (calc_split_resize_coeffs(right->input_width,
++ right->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++ right->irr = (downsize_coeff << 14) | resize_coeff;
++ }
++ }
++ return status;
++}
++EXPORT_SYMBOL(ipu_calc_stripes_sizes);
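++
++/*
++ * Usage note (illustrative): in the flags argument bit 0 selects
++ * equal_stripes and bit 1 selects a vertical split, so flags = 0x1
++ * requests two identical horizontal stripes.  cirr is compared directly
++ * against irr_opt, which is computed in 32.32 fixed point, so the typical
++ * "cirr ~ 2" mentioned above corresponds to passing ((u64)2) << 32.
++ */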
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_capture.c linux-3.14.40/drivers/mxc/ipu3/ipu_capture.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_capture.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_capture.c 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1,816 @@
++/*
++ * Copyright 2008-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_capture.c
++ *
++ * @brief IPU capture base functions
++ *
++ * @ingroup IPU
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include "ipu_prv.h"
++#include "ipu_regs.h"
++
++/*!
++ * _ipu_csi_mclk_set
++ *
++ * @param ipu ipu handler
++ * @param pixel_clk desired pixel clock frequency in Hz
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_mclk_set(struct ipu_soc *ipu, uint32_t pixel_clk, uint32_t csi)
++{
++ uint32_t temp;
++ uint32_t div_ratio;
++
++ div_ratio = (clk_get_rate(ipu->ipu_clk) / pixel_clk) - 1;
++
++ if (div_ratio > 0xFF || div_ratio < 0) {
++		dev_dbg(ipu->dev, "value of pixel_clk exceeds normal range\n");
++ return -EINVAL;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
++ temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
++ ipu_csi_write(ipu, csi, temp |
++ (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
++ CSI_SENS_CONF);
++
++ return 0;
++}
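++
++/*
++ * Worked example (illustrative): assuming an IPU HSP clock of 264 MHz and
++ * a requested pixel_clk of 24 MHz,
++ *   div_ratio = 264000000 / 24000000 - 1 = 10,
++ * so DIV_RATIO is programmed to 10 and the sensor master clock becomes
++ * ipu_clk / (DIV_RATIO + 1) = 24 MHz.  Ratios above 0xFF are rejected.
++ */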
++
++/*!
++ * ipu_csi_init_interface
++ * Sets initial values for the CSI registers.
++ * The width and height of the sensor and the actual frame size will be
++ * set to the same values.
++ * @param ipu ipu handler
++ * @param width Sensor width
++ * @param height Sensor height
++ * @param pixel_fmt pixel format
++ * @param cfg_param ipu_csi_signal_cfg_t structure
++ * @param csi csi 0 or csi 1
++ *
++ * @return 0 for success, -EINVAL for error
++ */
++int32_t
++ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
++ uint32_t pixel_fmt, ipu_csi_signal_cfg_t cfg_param)
++{
++ uint32_t data = 0;
++ uint32_t csi = cfg_param.csi;
++
++	/* Set SENS_DATA_FORMAT bits (8, 9 and 10).
++	   RGB or YUV444 is 0, which is the current value in data, so it is
++	   not set explicitly.  This is also the default value if attempts
++	   are made to set it to something invalid. */
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_YUYV:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
++ break;
++ case IPU_PIX_FMT_UYVY:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
++ break;
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_BGR24:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
++ break;
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_GENERIC_16:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++ break;
++ case IPU_PIX_FMT_RGB565:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
++ break;
++ case IPU_PIX_FMT_RGB555:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Set the CSI_SENS_CONF register remaining fields */
++ data |= cfg_param.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
++ cfg_param.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
++ cfg_param.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
++ cfg_param.Vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
++ cfg_param.Hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
++ cfg_param.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
++ cfg_param.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
++ cfg_param.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
++ cfg_param.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
++ cfg_param.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
++ cfg_param.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu_csi_write(ipu, csi, data, CSI_SENS_CONF);
++
++ /* Setup sensor frame size */
++ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_SENS_FRM_SIZE);
++
++ /* Set CCIR registers */
++ if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE) {
++ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ } else if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_INTERLACED) {
++ if (width == 720 && height == 625) {
++ /* PAL case */
++ /*
++ * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
++ * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
++ */
++ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_1);
++ /*
++ * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
++ * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
++ */
++ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_2);
++
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++
++ } else if (width == 720 && height == 525) {
++ /* NTSC case */
++ /*
++ * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
++ * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
++ */
++ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_1);
++ /*
++ * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
++ * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
++ */
++ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_2);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ } else {
++ dev_err(ipu->dev, "Unsupported CCIR656 interlaced "
++ "video mode\n");
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return -EINVAL;
++ }
++ _ipu_csi_ccir_err_detection_enable(ipu, csi);
++ } else if ((cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR)) {
++ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ _ipu_csi_ccir_err_detection_enable(ipu, csi);
++ } else if ((cfg_param.clk_mode == IPU_CSI_CLK_MODE_GATED_CLK) ||
++ (cfg_param.clk_mode == IPU_CSI_CLK_MODE_NONGATED_CLK)) {
++ _ipu_csi_ccir_err_detection_disable(ipu, csi);
++ }
++
++ dev_dbg(ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
++ ipu_csi_read(ipu, csi, CSI_SENS_CONF));
++ dev_dbg(ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
++ ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_csi_init_interface);
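++
++/*
++ * Worked example (illustrative): CSI_SENS_FRM_SIZE packs (width - 1) in
++ * the low half-word and (height - 1) in the high half-word, so a 640x480
++ * sensor window is written as (480 - 1) << 16 | (640 - 1) = 0x01DF027F.
++ * CSI_ACT_FRM_SIZE uses the same encoding (see ipu_csi_set_window_size).
++ */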
++
++/*!
++ * ipu_csi_get_sensor_protocol
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns sensor protocol
++ */
++int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi)
++{
++ int ret;
++ _ipu_get(ipu);
++ ret = (ipu_csi_read(ipu, csi, CSI_SENS_CONF) &
++ CSI_SENS_CONF_SENS_PRTCL_MASK) >>
++ CSI_SENS_CONF_SENS_PRTCL_SHIFT;
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_csi_get_sensor_protocol);
++
++/*!
++ * ipu_csi_enable_mclk
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ * @param flag true to enable mclk, false to disable mclk
++ * @param	wait	true to wait 10ms for the clock to stabilize, false not to wait
++ *
++ * @return Returns 0 on success
++ */
++int ipu_csi_enable_mclk(struct ipu_soc *ipu, int csi, bool flag, bool wait)
++{
++ /* Return immediately if there is no csi_clk to manage */
++ if (ipu->csi_clk[csi] == NULL)
++ return 0;
++
++ if (flag) {
++ clk_enable(ipu->csi_clk[csi]);
++ if (wait == true)
++ msleep(10);
++ } else {
++ clk_disable(ipu->csi_clk[csi]);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_csi_enable_mclk);
++
++/*!
++ * ipu_csi_get_window_size
++ *
++ * @param ipu ipu handler
++ * @param width pointer to window width
++ * @param height pointer to window height
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi)
++{
++ uint32_t reg;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ reg = ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE);
++ *width = (reg & 0xFFFF) + 1;
++ *height = (reg >> 16 & 0xFFFF) + 1;
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_get_window_size);
++
++/*!
++ * ipu_csi_set_window_size
++ *
++ * @param ipu ipu handler
++ * @param width window width
++ * @param height window height
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi)
++{
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_ACT_FRM_SIZE);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_set_window_size);
++
++/*!
++ * ipu_csi_set_window_pos
++ *
++ * @param ipu ipu handler
++ * @param left uint32 window x start
++ * @param top uint32 window y start
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi)
++{
++ uint32_t temp;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
++ temp |= ((top << CSI_VSC_SHIFT) | (left << CSI_HSC_SHIFT));
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_set_window_pos);
++
++/*!
++ * _ipu_csi_horizontal_downsize_enable
++ * Enable horizontal downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_horizontal_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp |= CSI_HORI_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_horizontal_downsize_disable
++ * Disable horizontal downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_horizontal_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~CSI_HORI_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_vertical_downsize_enable
++ * Enable vertical downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_vertical_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp |= CSI_VERT_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_vertical_downsize_disable
++ * Disable vertical downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_vertical_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~CSI_VERT_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_set_test_generator
++ *
++ * @param ipu ipu handler
++ * @param active 1 for active and 0 for inactive
++ * @param r_value red value for the generated pattern of even pixel
++ * @param g_value green value for the generated pattern of even
++ * pixel
++ * @param b_value blue value for the generated pattern of even pixel
++ * @param pixel_clk desired pixel clock frequency in Hz
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
++ uint32_t g_value, uint32_t b_value, uint32_t pix_clk, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_TST_CTRL);
++
++ if (active == false) {
++ temp &= ~CSI_TEST_GEN_MODE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
++ } else {
++ /* Set sensb_mclk div_ratio*/
++ _ipu_csi_mclk_set(ipu, pix_clk, csi);
++
++ temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
++ CSI_TEST_GEN_B_MASK);
++ temp |= CSI_TEST_GEN_MODE_EN;
++ temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
++ (g_value << CSI_TEST_GEN_G_SHIFT) |
++ (b_value << CSI_TEST_GEN_B_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
++ }
++}
++
++/*!
++ * _ipu_csi_ccir_err_detection_enable
++ * Enable error detection and correction for
++ * CCIR interlaced mode with protection bit.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
++ temp |= CSI_CCIR_ERR_DET_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
++
++}
++
++/*!
++ * _ipu_csi_ccir_err_detection_disable
++ * Disable error detection and correction for
++ * CCIR interlaced mode with protection bit.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
++ temp &= ~CSI_CCIR_ERR_DET_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
++
++}
++
++/*!
++ * _ipu_csi_set_mipi_di
++ *
++ * @param ipu ipu handler
++ * @param num MIPI data identifier 0-3 handled by CSI
++ * @param di_val data identifier value
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (di_val > 0xFFL) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_MIPI_DI);
++
++ switch (num) {
++ case IPU_CSI_MIPI_DI0:
++ temp &= ~CSI_MIPI_DI0_MASK;
++ temp |= (di_val << CSI_MIPI_DI0_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI1:
++ temp &= ~CSI_MIPI_DI1_MASK;
++ temp |= (di_val << CSI_MIPI_DI1_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI2:
++ temp &= ~CSI_MIPI_DI2_MASK;
++ temp |= (di_val << CSI_MIPI_DI2_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI3:
++ temp &= ~CSI_MIPI_DI3_MASK;
++ temp |= (di_val << CSI_MIPI_DI3_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_csi_set_skip_isp
++ *
++ * @param ipu ipu handler
++ * @param skip select frames to be skipped and set the
++ *				corresponding bits to 1
++ * @param max_ratio number of frames in a skipping set and the
++ * maximum value of max_ratio is 5
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_skip_isp(struct ipu_soc *ipu, uint32_t skip, uint32_t max_ratio, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (max_ratio > 5) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
++ temp &= ~(CSI_MAX_RATIO_SKIP_ISP_MASK | CSI_SKIP_ISP_MASK);
++ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_ISP_SHIFT) |
++ (skip << CSI_SKIP_ISP_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_csi_set_skip_smfc
++ *
++ * @param ipu ipu handler
++ * @param skip select frames to be skipped and set the
++ *				corresponding bits to 1
++ * @param max_ratio number of frames in a skipping set and the
++ * maximum value of max_ratio is 5
++ * @param id csi to smfc skipping id
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_skip_smfc(struct ipu_soc *ipu, uint32_t skip,
++ uint32_t max_ratio, uint32_t id, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (max_ratio > 5 || id > 3) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
++ temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
++ CSI_SKIP_SMFC_MASK);
++ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
++ (id << CSI_ID_2_SKIP_SHIFT) |
++ (skip << CSI_SKIP_SMFC_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_smfc_init
++ * Map CSI frames to IDMAC channels.
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param mipi_id mipi id number 0-3
++ * @param csi csi0 or csi1
++ */
++void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_MAP);
++
++ switch (channel) {
++ case CSI_MEM0:
++ temp &= ~SMFC_MAP_CH0_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH0_SHIFT;
++ break;
++ case CSI_MEM1:
++ temp &= ~SMFC_MAP_CH1_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH1_SHIFT;
++ break;
++ case CSI_MEM2:
++ temp &= ~SMFC_MAP_CH2_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH2_SHIFT;
++ break;
++ case CSI_MEM3:
++ temp &= ~SMFC_MAP_CH3_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH3_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_MAP);
++}
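++
++/*
++ * Worked example (illustrative): each SMFC_MAP channel field holds
++ * ((csi << 2) | mipi_id), so mapping CSI_MEM1 to CSI1 with MIPI ID 2
++ * writes the value 0b110 (6) into the CH1 field of SMFC_MAP.
++ */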
++
++/*!
++ * _ipu_smfc_set_wmc
++ * Caution: The number of required channels, the enabled channels
++ * and the FIFO size per channel are configured restrictedly.
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param set set 1 or clear 0
++ * @param level water mark level when FIFO is on the
++ * relative size
++ */
++void _ipu_smfc_set_wmc(struct ipu_soc *ipu, ipu_channel_t channel, bool set, uint32_t level)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_WMC);
++
++ switch (channel) {
++ case CSI_MEM0:
++ if (set == true) {
++ temp &= ~SMFC_WM0_SET_MASK;
++ temp |= level << SMFC_WM0_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM0_CLR_MASK;
++ temp |= level << SMFC_WM0_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM1:
++ if (set == true) {
++ temp &= ~SMFC_WM1_SET_MASK;
++ temp |= level << SMFC_WM1_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM1_CLR_MASK;
++ temp |= level << SMFC_WM1_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM2:
++ if (set == true) {
++ temp &= ~SMFC_WM2_SET_MASK;
++ temp |= level << SMFC_WM2_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM2_CLR_MASK;
++ temp |= level << SMFC_WM2_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM3:
++ if (set == true) {
++ temp &= ~SMFC_WM3_SET_MASK;
++ temp |= level << SMFC_WM3_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM3_CLR_MASK;
++ temp |= level << SMFC_WM3_CLR_SHIFT;
++ }
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_WMC);
++}
++
++/*!
++ * _ipu_smfc_set_burst_size
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param bs burst size of IDMAC channel,
++ *				the value programmed here should be BURST_SIZE-1
++ */
++void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_BS);
++
++ switch (channel) {
++ case CSI_MEM0:
++ temp &= ~SMFC_BS0_MASK;
++ temp |= bs << SMFC_BS0_SHIFT;
++ break;
++ case CSI_MEM1:
++ temp &= ~SMFC_BS1_MASK;
++ temp |= bs << SMFC_BS1_SHIFT;
++ break;
++ case CSI_MEM2:
++ temp &= ~SMFC_BS2_MASK;
++ temp |= bs << SMFC_BS2_SHIFT;
++ break;
++ case CSI_MEM3:
++ temp &= ~SMFC_BS3_MASK;
++ temp |= bs << SMFC_BS3_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_BS);
++}
++
++/*!
++ * _ipu_csi_init
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi)
++{
++ uint32_t csi_sens_conf, csi_dest;
++ int retval = 0;
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ csi_dest = CSI_DATA_DEST_IDMAC;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case CSI_PRP_VF_MEM:
++ csi_dest = CSI_DATA_DEST_IC;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ csi_sens_conf = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
++ csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
++ ipu_csi_write(ipu, csi, csi_sens_conf | (csi_dest <<
++ CSI_SENS_CONF_DATA_DEST_SHIFT), CSI_SENS_CONF);
++err:
++ return retval;
++}
++
++/*!
++ * csi_irq_handler
++ *
++ * @param irq interrupt id
++ * @param dev_id pointer to ipu handler
++ *
++ * @return Returns if irq is handled
++ */
++static irqreturn_t csi_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_soc *ipu = dev_id;
++ struct completion *comp = &ipu->csi_comp;
++
++ complete(comp);
++ return IRQ_HANDLED;
++}
++
++/*!
++ * _ipu_csi_wait4eof
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel
++ *
++ */
++void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int ret;
++ int irq = 0;
++
++ if (channel == CSI_MEM0)
++ irq = IPU_IRQ_CSI0_OUT_EOF;
++ else if (channel == CSI_MEM1)
++ irq = IPU_IRQ_CSI1_OUT_EOF;
++ else if (channel == CSI_MEM2)
++ irq = IPU_IRQ_CSI2_OUT_EOF;
++ else if (channel == CSI_MEM3)
++ irq = IPU_IRQ_CSI3_OUT_EOF;
++ else if (channel == CSI_PRP_ENC_MEM)
++ irq = IPU_IRQ_PRP_ENC_OUT_EOF;
++ else if (channel == CSI_PRP_VF_MEM)
++ irq = IPU_IRQ_PRP_VF_OUT_EOF;
++ else{
++ dev_err(ipu->dev, "Not a CSI channel\n");
++ return;
++ }
++
++ init_completion(&ipu->csi_comp);
++ ret = ipu_request_irq(ipu, irq, csi_irq_handler, 0, NULL, ipu);
++ if (ret < 0) {
++ dev_err(ipu->dev, "CSI irq %d in use\n", irq);
++ return;
++ }
++ ret = wait_for_completion_timeout(&ipu->csi_comp, msecs_to_jiffies(500));
++ ipu_free_irq(ipu, irq, ipu);
++ dev_dbg(ipu->dev, "CSI stop timeout - %d * 10ms\n", 5 - ret);
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_common.c linux-3.14.40/drivers/mxc/ipu3/ipu_common.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_common.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_common.c 2015-05-01 14:57:59.607427001 -0500
+@@ -0,0 +1,3134 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_common.c
++ *
++ * @brief This file contains the IPU driver common API functions.
++ *
++ * @ingroup IPU
++ */
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
++#include <linux/reset.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include <asm/cacheflush.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
++int g_ipu_hw_rev;
++
++/* Static functions */
++static irqreturn_t ipu_sync_irq_handler(int irq, void *desc);
++static irqreturn_t ipu_err_irq_handler(int irq, void *desc);
++
++static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
++{
++ return ((uint32_t) ch >> (6 * type)) & 0x3F;
++};
++
++static inline int _ipu_is_ic_chan(uint32_t dma_chan)
++{
++ return (((dma_chan >= 11) && (dma_chan <= 22) && (dma_chan != 17) &&
++ (dma_chan != 18)));
++}
++
++static inline int _ipu_is_vdi_out_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 5);
++}
++
++static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 14 || dma_chan == 15);
++}
++
++/* Either DP BG or DP FG can be graphic window */
++static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 23 || dma_chan == 27);
++}
++
++static inline int _ipu_is_irt_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 45) && (dma_chan <= 50));
++}
++
++static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 23) && (dma_chan <= 29));
++}
++
++static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 0) && (dma_chan <= 3));
++}
++
++static inline int _ipu_is_trb_chan(uint32_t dma_chan)
++{
++ return (((dma_chan == 8) || (dma_chan == 9) ||
++ (dma_chan == 10) || (dma_chan == 13) ||
++ (dma_chan == 21) || (dma_chan == 23) ||
++ (dma_chan == 27) || (dma_chan == 28)) &&
++ (g_ipu_hw_rev >= IPU_V3DEX));
++}
++
++/*
++ * We usually use IDMAC 23 as full plane and IDMAC 27 as partial
++ * plane.
++ * IDMAC 23/24/28/41 can each drive a display - primary.
++ * IDMAC 27 depends on IDMAC 23 - nonprimary.
++ */
++static inline int _ipu_is_primary_disp_chan(uint32_t dma_chan)
++{
++ return ((dma_chan == 23) || (dma_chan == 24) ||
++ (dma_chan == 28) || (dma_chan == 41));
++}
++
++static inline int _ipu_is_sync_irq(uint32_t irq)
++{
++ /* sync interrupt register number */
++ int reg_num = irq / 32 + 1;
++
++ return ((reg_num == 1) || (reg_num == 2) || (reg_num == 3) ||
++ (reg_num == 4) || (reg_num == 7) || (reg_num == 8) ||
++ (reg_num == 11) || (reg_num == 12) || (reg_num == 13) ||
++ (reg_num == 14) || (reg_num == 15));
++}
++
++#define idma_is_valid(ch) (ch != NO_DMA)
++#define idma_mask(ch) (idma_is_valid(ch) ? (1UL << (ch & 0x1F)) : 0)
++#define idma_is_set(ipu, reg, dma) (ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
++#define tri_cur_buf_mask(ch) (idma_mask(ch*2) * 3)
++#define tri_cur_buf_shift(ch) (ffs(idma_mask(ch*2)) - 1)
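++
++/*
++ * Illustrative note: an ipu_channel_t packs one 6-bit IDMAC channel number
++ * per buffer type, which channel_2_dma() extracts as
++ * (ch >> (6 * type)) & 0x3F.  idma_mask() then selects the bit inside one
++ * of the 32-channel register banks, e.g. DMA channel 41 maps to bit
++ * (41 & 0x1F) = 9 of the second bank (the IDMAC_CHA_*(32) registers).
++ */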
++
++static int ipu_clk_setup_enable(struct ipu_soc *ipu,
++ struct ipu_pltfm_data *pdata)
++{
++ char pixel_clk_0[] = "ipu1_pclk_0";
++ char pixel_clk_1[] = "ipu1_pclk_1";
++ char pixel_clk_0_sel[] = "ipu1_pclk0_sel";
++ char pixel_clk_1_sel[] = "ipu1_pclk1_sel";
++ char pixel_clk_0_div[] = "ipu1_pclk0_div";
++ char pixel_clk_1_div[] = "ipu1_pclk1_div";
++ char *ipu_pixel_clk_sel[] = { "ipu1", "ipu1_di0", "ipu1_di1", };
++ char *pclk_sel;
++ struct clk *clk;
++ int ret;
++ int i;
++
++ pixel_clk_0[3] += pdata->id;
++ pixel_clk_1[3] += pdata->id;
++ pixel_clk_0_sel[3] += pdata->id;
++ pixel_clk_1_sel[3] += pdata->id;
++ pixel_clk_0_div[3] += pdata->id;
++ pixel_clk_1_div[3] += pdata->id;
++ for (i = 0; i < ARRAY_SIZE(ipu_pixel_clk_sel); i++) {
++ pclk_sel = ipu_pixel_clk_sel[i];
++ pclk_sel[3] += pdata->id;
++ }
++ dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));
++
++ clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_0_sel,
++ (const char **)ipu_pixel_clk_sel,
++ ARRAY_SIZE(ipu_pixel_clk_sel),
++ 0, pdata->id, 0, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk_register mux di0 failed");
++ return PTR_ERR(clk);
++ }
++ ipu->pixel_clk_sel[0] = clk;
++ clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_1_sel,
++ (const char **)ipu_pixel_clk_sel,
++ ARRAY_SIZE(ipu_pixel_clk_sel),
++ 0, pdata->id, 1, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk_register mux di1 failed");
++ return PTR_ERR(clk);
++ }
++ ipu->pixel_clk_sel[1] = clk;
++
++ clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_0_div,
++ pixel_clk_0_sel, 0, pdata->id, 0, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk register di0 div failed");
++ return PTR_ERR(clk);
++ }
++ clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_1_div,
++ pixel_clk_1_sel, CLK_SET_RATE_PARENT, pdata->id, 1, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk register di1 div failed");
++ return PTR_ERR(clk);
++ }
++
++ ipu->pixel_clk[0] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_0,
++ pixel_clk_0_div, CLK_SET_RATE_PARENT,
++ pdata->id, 0, 0);
++ if (IS_ERR(ipu->pixel_clk[0])) {
++ dev_err(ipu->dev, "clk register di0 gate failed");
++ return PTR_ERR(ipu->pixel_clk[0]);
++ }
++ ipu->pixel_clk[1] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_1,
++ pixel_clk_1_div, CLK_SET_RATE_PARENT,
++ pdata->id, 1, 0);
++ if (IS_ERR(ipu->pixel_clk[1])) {
++ dev_err(ipu->dev, "clk register di1 gate failed");
++ return PTR_ERR(ipu->pixel_clk[1]);
++ }
++
++ ret = clk_set_parent(ipu->pixel_clk_sel[0], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "clk set parent failed");
++ return ret;
++ }
++
++ ret = clk_set_parent(ipu->pixel_clk_sel[1], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "clk set parent failed");
++ return ret;
++ }
++
++ ipu->di_clk[0] = devm_clk_get(ipu->dev, "di0");
++ if (IS_ERR(ipu->di_clk[0])) {
++ dev_err(ipu->dev, "clk_get di0 failed");
++ return PTR_ERR(ipu->di_clk[0]);
++ }
++ ipu->di_clk[1] = devm_clk_get(ipu->dev, "di1");
++ if (IS_ERR(ipu->di_clk[1])) {
++ dev_err(ipu->dev, "clk_get di1 failed");
++ return PTR_ERR(ipu->di_clk[1]);
++ }
++
++ ipu->di_clk_sel[0] = devm_clk_get(ipu->dev, "di0_sel");
++ if (IS_ERR(ipu->di_clk_sel[0])) {
++ dev_err(ipu->dev, "clk_get di0_sel failed");
++ return PTR_ERR(ipu->di_clk_sel[0]);
++ }
++ ipu->di_clk_sel[1] = devm_clk_get(ipu->dev, "di1_sel");
++ if (IS_ERR(ipu->di_clk_sel[1])) {
++ dev_err(ipu->dev, "clk_get di1_sel failed");
++ return PTR_ERR(ipu->di_clk_sel[1]);
++ }
++
++ return 0;
++}
++
++static int ipu_mem_reset(struct ipu_soc *ipu)
++{
++ int timeout = 1000;
++
++ ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
++
++ while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
++ if (!timeout--)
++ return -ETIME;
++ msleep(1);
++ }
++
++ return 0;
++}
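++
++/*
++ * Illustrative note: IPU_MEM_RST is written with bit 31 set to start the
++ * reset together with what appears to be a mask of sub-module memories to
++ * clear (0x007FFFFF); the loop then polls until the hardware clears bit 31,
++ * giving up after roughly one second (1000 iterations of msleep(1)).
++ */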
++
++struct ipu_soc *ipu_get_soc(int id)
++{
++ if (id >= MXC_IPU_MAX_NUM)
++ return ERR_PTR(-ENODEV);
++ else if (!ipu_array[id].online)
++ return ERR_PTR(-ENODEV);
++ else
++ return &(ipu_array[id]);
++}
++EXPORT_SYMBOL_GPL(ipu_get_soc);
++
++void _ipu_get(struct ipu_soc *ipu)
++{
++ int ret;
++
++ ret = clk_enable(ipu->ipu_clk);
++ if (ret < 0)
++ BUG();
++}
++
++void _ipu_put(struct ipu_soc *ipu)
++{
++ clk_disable(ipu->ipu_clk);
++}
++
++void ipu_disable_hsp_clk(struct ipu_soc *ipu)
++{
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_disable_hsp_clk);
++
++static struct platform_device_id imx_ipu_type[] = {
++ {
++ .name = "ipu-imx6q",
++ .driver_data = IPU_V3H,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_ipu_type);
++
++static const struct of_device_id imx_ipuv3_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ipu", .data = &imx_ipu_type[IMX6Q_IPU], },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_ipuv3_dt_ids);
++
++/*!
++ * This function is called by the driver framework to initialize the IPU
++ * hardware.
++ *
++ * @param dev The device structure for the IPU passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int ipu_probe(struct platform_device *pdev)
++{
++ struct ipu_soc *ipu;
++ struct resource *res;
++ unsigned long ipu_base;
++ const struct of_device_id *of_id =
++ of_match_device(imx_ipuv3_dt_ids, &pdev->dev);
++ struct ipu_pltfm_data *pltfm_data;
++ int ret = 0;
++ u32 bypass_reset;
++
++ dev_dbg(&pdev->dev, "<%s>\n", __func__);
++
++ pltfm_data = devm_kzalloc(&pdev->dev, sizeof(struct ipu_pltfm_data),
++ GFP_KERNEL);
++ if (!pltfm_data)
++ return -ENOMEM;
++
++ ret = of_property_read_u32(pdev->dev.of_node,
++ "bypass_reset", &bypass_reset);
++ if (ret < 0) {
++ dev_dbg(&pdev->dev, "can not get bypass_reset\n");
++ return ret;
++ }
++ pltfm_data->bypass_reset = (bool)bypass_reset;
++
++ pltfm_data->id = of_alias_get_id(pdev->dev.of_node, "ipu");
++ if (pltfm_data->id < 0) {
++ dev_dbg(&pdev->dev, "can not get alias id\n");
++ return pltfm_data->id;
++ }
++
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ pltfm_data->devtype = pdev->id_entry->driver_data;
++ g_ipu_hw_rev = pltfm_data->devtype;
++
++ ipu = &ipu_array[pltfm_data->id];
++ memset(ipu, 0, sizeof(struct ipu_soc));
++ ipu->dev = &pdev->dev;
++ ipu->pdata = pltfm_data;
++ dev_dbg(ipu->dev, "IPU rev:%d\n", g_ipu_hw_rev);
++ spin_lock_init(&ipu->int_reg_spin_lock);
++ spin_lock_init(&ipu->rdy_reg_spin_lock);
++ mutex_init(&ipu->mutex_lock);
++
++ ipu->irq_sync = platform_get_irq(pdev, 0);
++ ipu->irq_err = platform_get_irq(pdev, 1);
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
++ dev_err(&pdev->dev, "can't get device resources\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ resource_size(res), pdev->name))
++ return -EBUSY;
++
++ ret = devm_request_irq(&pdev->dev, ipu->irq_sync,
++ ipu_sync_irq_handler, 0, pdev->name, ipu);
++ if (ret) {
++ dev_err(ipu->dev, "request SYNC interrupt failed\n");
++ return ret;
++ }
++ ret = devm_request_irq(&pdev->dev, ipu->irq_err,
++ ipu_err_irq_handler, 0, pdev->name, ipu);
++ if (ret) {
++ dev_err(ipu->dev, "request ERR interrupt failed\n");
++ return ret;
++ }
++
++ ipu_base = res->start;
++ /* base fixup */
++ if (g_ipu_hw_rev == IPU_V3H) /* IPUv3H */
++ ipu_base += IPUV3H_REG_BASE;
++ else if (g_ipu_hw_rev == IPU_V3M) /* IPUv3M */
++ ipu_base += IPUV3M_REG_BASE;
++ else /* IPUv3D, v3E, v3EX */
++ ipu_base += IPUV3DEX_REG_BASE;
++
++ ipu->cm_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
++ ipu->ic_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
++ ipu->idmac_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
++ /* DP Registers are accessed thru the SRM */
++ ipu->dp_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
++ ipu->dc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
++ ipu->dmfc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
++ ipu->di_reg[0] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
++ ipu->di_reg[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
++ ipu->smfc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
++ ipu->csi_reg[0] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
++ ipu->csi_reg[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
++ ipu->cpmem_base = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
++ ipu->tpmem_base = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_TPM_REG_BASE, SZ_64K);
++ ipu->dc_tmpl_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
++ ipu->vdi_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
++ ipu->disp_base[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DISP1_BASE, SZ_4K);
++ if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
++ !ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
++ !ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
++ !ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
++ !ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
++ || !ipu->vdi_reg)
++ return -ENOMEM;
++
++ dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
++ dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
++ dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
++ dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
++ dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
++ dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
++ dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
++ dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
++ dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
++ dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
++ dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
++ dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
++ dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
++ dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
++ dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
++ dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
++
++ ipu->ipu_clk = devm_clk_get(ipu->dev, "bus");
++ if (IS_ERR(ipu->ipu_clk)) {
++ dev_err(ipu->dev, "clk_get ipu failed");
++ return PTR_ERR(ipu->ipu_clk);
++ }
++
++ /* ipu_clk is always prepared */
++ ret = clk_prepare_enable(ipu->ipu_clk);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ipu clk enable failed\n");
++ return ret;
++ }
++
++ ipu->online = true;
++
++ platform_set_drvdata(pdev, ipu);
++
++ if (!pltfm_data->bypass_reset) {
++ ret = device_reset(&pdev->dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++ return ret;
++ }
++
++ ipu_mem_reset(ipu);
++
++ ipu_disp_init(ipu);
++
++ /* Set MCU_T to divide MCU access window into 2 */
++ ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
++ IPU_DISP_GEN);
++ }
++
++ /* setup ipu clk tree after ipu reset */
++ ret = ipu_clk_setup_enable(ipu, pltfm_data);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ipu clk setup failed\n");
++ ipu->online = false;
++ return ret;
++ }
++
++ /* Set sync refresh channels and CSI->mem channel as high priority */
++ ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
++
++ /* Enable error interrupts by default */
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
++
++ if (!pltfm_data->bypass_reset)
++ clk_disable(ipu->ipu_clk);
++
++ register_ipu_device(ipu, ipu->pdata->id);
++
++ pm_runtime_enable(&pdev->dev);
++
++ return ret;
++}
++
++int ipu_remove(struct platform_device *pdev)
++{
++ struct ipu_soc *ipu = platform_get_drvdata(pdev);
++
++ unregister_ipu_device(ipu, ipu->pdata->id);
++
++ clk_put(ipu->ipu_clk);
++
++ return 0;
++}
++
++void ipu_dump_registers(struct ipu_soc *ipu)
++{
++ dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
++ dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
++ dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
++ dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
++ dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
++ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
++ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
++ if (g_ipu_hw_rev >= IPU_V3DEX) {
++ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
++ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
++ }
++ dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_WR_CHAN));
++ dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
++ dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_DP_CHAN));
++ dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
++ dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_IC_CTRL));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
++ dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
++ dev_dbg(ipu->dev, "IPU_VDIC_VDI_FSIZE = \t0x%08X\n",
++ ipu_vdi_read(ipu, VDI_FSIZE));
++ dev_dbg(ipu->dev, "IPU_VDIC_VDI_C = \t0x%08X\n",
++ ipu_vdi_read(ipu, VDI_C));
++ dev_dbg(ipu->dev, "IPU_IC_CONF = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_CONF));
++}
++
++/*!
++ * This function is called to initialize a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID to init.
++ *
++ * @param params Input parameter containing union of channel
++ * initialization parameters.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
++{
++ int ret = 0;
++ bool bad_pixfmt;
++ uint32_t ipu_conf, reg, in_g_pixel_fmt, sec_dma;
++
++ dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
++
++ ret = pm_runtime_get_sync(ipu->dev);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ch = %d, pm_runtime_get failed:%d!\n",
++ IPU_CHAN_ID(channel), ret);
++ dump_stack();
++ return ret;
++ }
++ /*
++ * Here, ret could be 1 if the device's runtime PM status was
++ * already 'active', so clear it to be 0.
++ */
++ ret = 0;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* Re-enable error interrupts every time a channel is initialized */
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
++
++ if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
++ dev_warn(ipu->dev, "Warning: channel already initialized %d\n",
++ IPU_CHAN_ID(channel));
++ }
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ if (params->csi_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (params->csi_mem.interlaced)
++ ipu->chan_is_interlaced[channel_2_dma(channel,
++ IPU_OUTPUT_BUFFER)] = true;
++ else
++ ipu->chan_is_interlaced[channel_2_dma(channel,
++ IPU_OUTPUT_BUFFER)] = false;
++
++ ipu->smfc_use_count++;
++ ipu->csi_channel[params->csi_mem.csi] = channel;
++
++ /*SMFC setting*/
++ if (params->csi_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_mem.csi));
++ _ipu_smfc_init(ipu, channel, params->csi_mem.mipi_vc,
++ params->csi_mem.csi);
++ _ipu_csi_set_mipi_di(ipu, params->csi_mem.mipi_vc,
++ params->csi_mem.mipi_id, params->csi_mem.csi);
++ } else {
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_mem.csi));
++ _ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
++ }
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_mem.csi);
++ break;
++ case CSI_PRP_ENC_MEM:
++ if (params->csi_prp_enc_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
++
++ ipu->ic_use_count++;
++ ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
++
++ if (params->csi_prp_enc_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_enc_mem.csi));
++ _ipu_csi_set_mipi_di(ipu,
++ params->csi_prp_enc_mem.mipi_vc,
++ params->csi_prp_enc_mem.mipi_id,
++ params->csi_prp_enc_mem.csi);
++ } else
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_enc_mem.csi));
++
++ /*CSI0/1 feed into IC*/
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ if (params->csi_prp_enc_mem.csi)
++ ipu_conf |= IPU_CONF_CSI_SEL;
++ else
++ ipu_conf &= ~IPU_CONF_CSI_SEL;
++
++ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
++ _ipu_ic_init_prpenc(ipu, params, true);
++ break;
++ case CSI_PRP_VF_MEM:
++ if (params->csi_prp_vf_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
++
++ ipu->ic_use_count++;
++ ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
++
++ if (params->csi_prp_vf_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_vf_mem.csi));
++ _ipu_csi_set_mipi_di(ipu,
++ params->csi_prp_vf_mem.mipi_vc,
++ params->csi_prp_vf_mem.mipi_id,
++ params->csi_prp_vf_mem.csi);
++ } else
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_vf_mem.csi));
++
++ /*CSI0/1 feed into IC*/
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ if (params->csi_prp_vf_mem.csi)
++ ipu_conf |= IPU_CONF_CSI_SEL;
++ else
++ ipu_conf &= ~IPU_CONF_CSI_SEL;
++
++ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
++ _ipu_ic_init_prpvf(ipu, params, true);
++ break;
++ case MEM_PRP_VF_MEM:
++ if (params->mem_prp_vf_mem.graphics_combine_en) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ in_g_pixel_fmt = params->mem_prp_vf_mem.in_g_pixel_fmt;
++ bad_pixfmt =
++ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
++
++ if (params->mem_prp_vf_mem.alpha_chan_en) {
++ if (bad_pixfmt) {
++ dev_err(ipu->dev, "bad pixel format "
++ "for graphics plane from "
++ "ch%d\n", sec_dma);
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ _ipu_ic_init_prpvf(ipu, params, false);
++ ipu->ic_use_count++;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM) ||
++ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
++ ipu->ic_use_count++;
++ ipu->vdi_use_count++;
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ reg &= ~FS_VDI_SRC_SEL_MASK;
++ ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
++
++ if (params->mem_prp_vf_mem.graphics_combine_en)
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ _ipu_ic_init_prpvf(ipu, params, false);
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ case MEM_VDI_PRP_VF_MEM_N:
++ case MEM_VDI_MEM_P:
++ case MEM_VDI_MEM_N:
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_VDI_MEM:
++ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = MEM_VDI_MEM;
++ ipu->ic_use_count++;
++ ipu->vdi_use_count++;
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_ROT_VF_MEM:
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ _ipu_ic_init_rotate_vf(ipu, params);
++ break;
++ case MEM_PRP_ENC_MEM:
++ ipu->ic_use_count++;
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++ _ipu_ic_init_prpenc(ipu, params, false);
++ break;
++ case MEM_ROT_ENC_MEM:
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ _ipu_ic_init_rotate_enc(ipu, params);
++ break;
++ case MEM_PP_MEM:
++ if (params->mem_pp_mem.graphics_combine_en) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ in_g_pixel_fmt = params->mem_pp_mem.in_g_pixel_fmt;
++ bad_pixfmt =
++ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
++
++ if (params->mem_pp_mem.alpha_chan_en) {
++ if (bad_pixfmt) {
++ dev_err(ipu->dev, "bad pixel format "
++ "for graphics plane from "
++ "ch%d\n", sec_dma);
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ _ipu_ic_init_pp(ipu, params);
++ ipu->ic_use_count++;
++ break;
++ case MEM_ROT_PP_MEM:
++ _ipu_ic_init_rotate_pp(ipu, params);
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ break;
++ case MEM_DC_SYNC:
++ if (params->mem_dc_sync.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
++ _ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
++ params->mem_dc_sync.interlaced,
++ params->mem_dc_sync.out_pixel_fmt);
++ ipu->di_use_count[params->mem_dc_sync.di]++;
++ ipu->dc_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case MEM_BG_SYNC:
++ if (params->mem_dp_bg_sync.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (params->mem_dp_bg_sync.alpha_chan_en)
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++
++ ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
++ _ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
++ params->mem_dp_bg_sync.out_pixel_fmt);
++ _ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
++ params->mem_dp_bg_sync.interlaced,
++ params->mem_dp_bg_sync.out_pixel_fmt);
++ ipu->di_use_count[params->mem_dp_bg_sync.di]++;
++ ipu->dc_use_count++;
++ ipu->dp_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case MEM_FG_SYNC:
++ _ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
++ params->mem_dp_fg_sync.out_pixel_fmt);
++
++ if (params->mem_dp_fg_sync.alpha_chan_en)
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++
++ ipu->dc_use_count++;
++ ipu->dp_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case DIRECT_ASYNC0:
++ if (params->direct_async.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[8] = params->direct_async.di;
++ _ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
++ ipu->di_use_count[params->direct_async.di]++;
++ ipu->dc_use_count++;
++ break;
++ case DIRECT_ASYNC1:
++ if (params->direct_async.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[9] = params->direct_async.di;
++ _ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
++ ipu->di_use_count[params->direct_async.di]++;
++ ipu->dc_use_count++;
++ break;
++ default:
++ dev_err(ipu->dev, "Missing channel initialization\n");
++ break;
++ }
++
++ ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
++
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_init_channel);
++
++/*!
++ * This function is called to uninitialize a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID to uninit.
++ */
++void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t in_dma, out_dma = 0;
++ uint32_t ipu_conf;
++ uint32_t dc_chan = 0;
++ int ret;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
++ dev_dbg(ipu->dev, "Channel already uninitialized %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return;
++ }
++
++ /* Make sure channel is disabled */
++ /* Get input and output dma channels */
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++
++ if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
++ idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
++ dev_err(ipu->dev,
++ "Channel %d is not disabled, disable first\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return;
++ }
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++
++ /* Reset the double buffer */
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
++
++ /* Reset the triple buffer */
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));
++
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
++ }
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ ipu->smfc_use_count--;
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case CSI_PRP_ENC_MEM:
++ ipu->ic_use_count--;
++ if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpenc(ipu);
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case CSI_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpvf(ipu);
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case MEM_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_prpvf(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ ipu->vdi_use_count--;
++ if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpvf(ipu);
++ _ipu_vdi_uninit(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_MEM:
++ ipu->ic_use_count--;
++ ipu->vdi_use_count--;
++ if (ipu->using_ic_dirct_ch == MEM_VDI_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_vdi_uninit(ipu);
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ case MEM_VDI_PRP_VF_MEM_N:
++ case MEM_VDI_MEM_P:
++ case MEM_VDI_MEM_N:
++ break;
++ case MEM_ROT_VF_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_vf(ipu);
++ break;
++ case MEM_PRP_ENC_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_prpenc(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_ROT_ENC_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_enc(ipu);
++ break;
++ case MEM_PP_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_pp(ipu);
++ break;
++ case MEM_ROT_PP_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_pp(ipu);
++ break;
++ case MEM_DC_SYNC:
++ dc_chan = 1;
++ _ipu_dc_uninit(ipu, 1);
++ ipu->di_use_count[ipu->dc_di_assignment[1]]--;
++ ipu->dc_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case MEM_BG_SYNC:
++ dc_chan = 5;
++ _ipu_dp_uninit(ipu, channel);
++ _ipu_dc_uninit(ipu, 5);
++ ipu->di_use_count[ipu->dc_di_assignment[5]]--;
++ ipu->dc_use_count--;
++ ipu->dp_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case MEM_FG_SYNC:
++ _ipu_dp_uninit(ipu, channel);
++ ipu->dc_use_count--;
++ ipu->dp_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case DIRECT_ASYNC0:
++ dc_chan = 8;
++ _ipu_dc_uninit(ipu, 8);
++ ipu->di_use_count[ipu->dc_di_assignment[8]]--;
++ ipu->dc_use_count--;
++ break;
++ case DIRECT_ASYNC1:
++ dc_chan = 9;
++ _ipu_dc_uninit(ipu, 9);
++ ipu->di_use_count[ipu->dc_di_assignment[9]]--;
++ ipu->dc_use_count--;
++ break;
++ default:
++ break;
++ }
++
++ if (ipu->ic_use_count == 0)
++ ipu_conf &= ~IPU_CONF_IC_EN;
++ if (ipu->vdi_use_count == 0) {
++ ipu_conf &= ~IPU_CONF_ISP_EN;
++ ipu_conf &= ~IPU_CONF_VDI_EN;
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ }
++ if (ipu->rot_use_count == 0)
++ ipu_conf &= ~IPU_CONF_ROT_EN;
++ if (ipu->dc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DC_EN;
++ if (ipu->dp_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DP_EN;
++ if (ipu->dmfc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DMFC_EN;
++ if (ipu->di_use_count[0] == 0) {
++ ipu_conf &= ~IPU_CONF_DI0_EN;
++ }
++ if (ipu->di_use_count[1] == 0) {
++ ipu_conf &= ~IPU_CONF_DI1_EN;
++ }
++ if (ipu->smfc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_SMFC_EN;
++
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++ ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
++
++ /*
++ * Disable pixel clk and its parent clock(if the parent clock
++ * usecount is 1) after clearing DC/DP/DI bits in IPU_CONF
++ * register to prevent LVDS display channel starvation.
++ */
++ if (_ipu_is_primary_disp_chan(in_dma))
++ clk_disable_unprepare(ipu->pixel_clk[ipu->dc_di_assignment[dc_chan]]);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ ret = pm_runtime_put_sync_suspend(ipu->dev);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ch = %d, pm_runtime_put failed:%d!\n",
++ IPU_CHAN_ID(channel), ret);
++ dump_stack();
++ }
++
++ WARN_ON(ipu->ic_use_count < 0);
++ WARN_ON(ipu->vdi_use_count < 0);
++ WARN_ON(ipu->rot_use_count < 0);
++ WARN_ON(ipu->dc_use_count < 0);
++ WARN_ON(ipu->dp_use_count < 0);
++ WARN_ON(ipu->dmfc_use_count < 0);
++ WARN_ON(ipu->smfc_use_count < 0);
++}
++EXPORT_SYMBOL(ipu_uninit_channel);
++
++/*!
++ * This function is called to initialize buffer(s) for logical IPU channel.
++ *
++ * @param ipu ipu handler
++ *
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width Input parameter for width of buffer in pixels.
++ *
++ * @param height Input parameter for height of buffer in pixels.
++ *
++ * @param stride Input parameter for stride length of buffer
++ *				in bytes.
++ *
++ * @param rot_mode Input parameter for rotation setting of buffer.
++ * A rotation setting other than
++ * IPU_ROTATE_VERT_FLIP
++ * should only be used for input buffers of
++ * rotation channels.
++ *
++ * @param phyaddr_0 Input parameter buffer 0 physical address.
++ *
++ * @param phyaddr_1 Input parameter buffer 1 physical address.
++ * Setting this to a value other than NULL enables
++ * double buffering mode.
++ *
++ * @param phyaddr_2 Input parameter buffer 2 physical address.
++ * Setting this to a value other than NULL enables
++ * triple buffering mode, phyaddr_1 should not be
++ * NULL then.
++ *
++ * @param u private u offset for additional cropping,
++ * zero if not used.
++ *
++ * @param v private v offset for additional cropping,
++ * zero if not used.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ ipu_rotate_mode_t rot_mode,
++ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
++ dma_addr_t phyaddr_2,
++ uint32_t u, uint32_t v)
++{
++ uint32_t reg;
++ uint32_t dma_chan;
++ uint32_t burst_size;
++
++ dma_chan = channel_2_dma(channel, type);
++ if (!idma_is_valid(dma_chan))
++ return -EINVAL;
++
++ if (stride < width * bytes_per_pixel(pixel_fmt))
++ stride = width * bytes_per_pixel(pixel_fmt);
++
++ if (stride % 4) {
++ dev_err(ipu->dev,
++ "Stride not 32-bit aligned, stride = %d\n", stride);
++ return -EINVAL;
++ }
++	/* IC & IRT channels' width must be a multiple of 8 pixels */
++ if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
++ && (width % 8)) {
++ dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
++ return -EINVAL;
++ }
++
++ if (_ipu_is_vdi_out_chan(dma_chan) &&
++ ((width < 16) || (height < 16) || (width % 2) || (height % 4))) {
++ dev_err(ipu->dev, "vdi width/height limited err\n");
++ return -EINVAL;
++ }
++
++ /* IPUv3EX and IPUv3M support triple buffer */
++ if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
++ dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
++ "mode\n", dma_chan);
++ return -EINVAL;
++ }
++ if (!phyaddr_1 && phyaddr_2) {
++ dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
++ "triple buffer mode\n", dma_chan);
++ return -EINVAL;
++ }
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* Build parameter memory data for DMA channel */
++ _ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
++ phyaddr_0, phyaddr_1, phyaddr_2);
++
++ /* Set correlative channel parameter of local alpha channel */
++ if ((_ipu_is_ic_graphic_chan(dma_chan) ||
++ _ipu_is_dp_graphic_chan(dma_chan)) &&
++ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
++ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
++ _ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
++ _ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
++ /* fix alpha width as 8 and burst size as 16*/
++ _ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
++ } else if (_ipu_is_ic_graphic_chan(dma_chan) &&
++ ipu_pixel_format_has_alpha(pixel_fmt))
++ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
++
++ if (rot_mode)
++ _ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
++
++ /* IC and ROT channels have restriction of 8 or 16 pix burst length */
++ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_vdi_out_chan(dma_chan)) {
++ if ((width % 16) == 0)
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
++ else
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
++ } else if (_ipu_is_irt_chan(dma_chan)) {
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
++ _ipu_ch_param_set_block_mode(ipu, dma_chan);
++ } else if (_ipu_is_dmfc_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ _ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
++ _ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
++ }
++
++ if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
++ ipu->chan_is_interlaced[dma_chan])
++ _ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
++
++ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan) ||
++ _ipu_is_vdi_out_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ _ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
++ rot_mode);
++ } else if (_ipu_is_smfc_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ /*
++		 * This is different from the IPUv3 spec, but it is confirmed
++ * in IPUforum that SMFC burst size should be NPB[6:3]
++ * when IDMAC works in 16-bit generic data mode.
++ */
++ if (pixel_fmt == IPU_PIX_FMT_GENERIC)
++ /* 8 bits per pixel */
++ burst_size = burst_size >> 4;
++ else if (pixel_fmt == IPU_PIX_FMT_GENERIC_16)
++ /* 16 bits per pixel */
++ burst_size = burst_size >> 3;
++ else
++ burst_size = burst_size >> 2;
++ _ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
++ }
++
++ /* AXI-id */
++ if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan)) {
++ unsigned reg = IDMAC_CH_LOCK_EN_1;
++ uint32_t value = 0;
++ if (ipu->pdata->devtype == IPU_V3H) {
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 0);
++ switch (dma_chan) {
++ case 5:
++ value = 0x3;
++ break;
++ case 11:
++ value = 0x3 << 2;
++ break;
++ case 12:
++ value = 0x3 << 4;
++ break;
++ case 14:
++ value = 0x3 << 6;
++ break;
++ case 15:
++ value = 0x3 << 8;
++ break;
++ case 20:
++ value = 0x3 << 10;
++ break;
++ case 21:
++ value = 0x3 << 12;
++ break;
++ case 22:
++ value = 0x3 << 14;
++ break;
++ case 23:
++ value = 0x3 << 16;
++ break;
++ case 27:
++ value = 0x3 << 18;
++ break;
++ case 28:
++ value = 0x3 << 20;
++ break;
++ case 45:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 0;
++ break;
++ case 46:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 2;
++ break;
++ case 47:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 4;
++ break;
++ case 48:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 6;
++ break;
++ case 49:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 8;
++ break;
++ case 50:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 10;
++ break;
++ default:
++ break;
++ }
++ value |= ipu_idmac_read(ipu, reg);
++ ipu_idmac_write(ipu, value, reg);
++ } else
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
++ } else {
++ if (ipu->pdata->devtype == IPU_V3H)
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
++ }
++
++ _ipu_ch_param_dump(ipu, dma_chan);
++
++ if (phyaddr_2 && g_ipu_hw_rev >= IPU_V3DEX) {
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
++
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ reg |= idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
++
++ /* Set IDMAC third buffer's cpmem number */
++ /* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
++ ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
++ ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
++ ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);
++
++ /* Reset to buffer 0 */
++ ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
++ IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
++ } else {
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
++
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
++ if (phyaddr_1)
++ reg |= idma_mask(dma_chan);
++ else
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
++
++ /* Reset to buffer 0 */
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_CUR_BUF(dma_chan));
++
++ }
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_channel_buffer);
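++/*
++ * Usage sketch (illustrative only): a double-buffered input for the
++ * post-processing channel could be set up roughly as follows, where ipu,
++ * buf0 and buf1 are placeholders for the IPU handle and two DMA buffer
++ * addresses, and IPU_PIX_FMT_GENERIC (8 bpp) is just an arbitrary format:
++ *
++ *	ret = ipu_init_channel_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER,
++ *				      IPU_PIX_FMT_GENERIC, 320, 240, 320,
++ *				      0, buf0, buf1, 0, 0, 0);
++ *
++ * A rot_mode of 0 means no rotation, phyaddr_1 = 0 selects single
++ * buffering, and a non-zero phyaddr_2 enables triple buffering on
++ * channels that support it.
++ */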
++
++/*!
++ * This function is called to update the physical address of a buffer for
++ * a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param	type		Input parameter which buffer to update.
++ *
++ * @param bufNum Input parameter for buffer number to update.
++ *				0, 1 and 2 are valid values; 2 applies
++ *				only to triple buffering mode.
++ *
++ * @param phyaddr Input parameter buffer physical address.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail. This function will fail if the buffer is set to ready.
++ */
++int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
++{
++ uint32_t reg;
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if (bufNum == 0)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
++
++ if ((reg & idma_mask(dma_chan)) == 0)
++ _ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
++ else
++ ret = -EACCES;
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_update_channel_buffer);
++
++/*!
++ * This function is called to update the band mode setting for
++ * a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ *
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param	type		Input parameter which buffer to set the band mode for.
++ *
++ * @param	band_height	Input parameter for the band height in lines:
++ *				should be log2(4/8/16/32/64/128/256), i.e. a
++ *				value from 2 to 8.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t band_height)
++{
++ uint32_t reg;
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++
++ if ((2 > band_height) || (8 < band_height))
++ return -EINVAL;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ reg = ipu_idmac_read(ipu, IDMAC_BAND_EN(dma_chan));
++ reg |= 1 << (dma_chan % 32);
++ ipu_idmac_write(ipu, reg, IDMAC_BAND_EN(dma_chan));
++
++ _ipu_ch_param_set_bandmode(ipu, dma_chan, band_height);
++ dev_dbg(ipu->dev, "dma_chan:%d, band_height:%d.\n\n",
++ dma_chan, 1 << band_height);
++ mutex_unlock(&ipu->mutex_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_set_channel_bandmode);
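++/*
++ * Usage sketch (illustrative only): band_height is the log2 of the band
++ * size in lines, so, for example,
++ *
++ *	ret = ipu_set_channel_bandmode(ipu, MEM_PP_MEM, IPU_OUTPUT_BUFFER, 6);
++ *
++ * requests 64-line bands; values outside the 2..8 range are rejected
++ * with -EINVAL.
++ */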
++
++/*!
++ * This function is called to update the vertical and horizontal offsets of
++ * a buffer for a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param	type		Input parameter which buffer to update.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width Input parameter for width of buffer in pixels.
++ *
++ * @param height Input parameter for height of buffer in pixels.
++ *
++ * @param stride Input parameter for stride length of buffer
++ *				in bytes.
++ *
++ * @param u predefined private u offset for additional cropping,
++ * zero if not used.
++ *
++ * @param v predefined private v offset for additional cropping,
++ * zero if not used.
++ *
++ * @param vertical_offset vertical offset for Y coordinate
++ *				in the existing frame
++ *
++ *
++ * @param horizontal_offset horizontal offset for X coordinate
++ *				in the existing frame
++ *
++ *
++ * @return Returns 0 on success or negative error code on fail
++ * This function will fail if any buffer is set to ready.
++ */
++
++int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
++ ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t vertical_offset, uint32_t horizontal_offset)
++{
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
++ (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
++ ((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
++ (ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
++ _ipu_is_trb_chan(dma_chan)))
++ ret = -EACCES;
++ else
++ _ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
++ u, v, 0, vertical_offset, horizontal_offset);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_update_channel_offset);
++
++
++/*!
++ * This function is called to set a channel's buffer as ready.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param	type		Input parameter which buffer to mark as ready.
++ *
++ * @param	bufNum		Input parameter for which buffer number to
++ *				set to the ready state.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum)
++{
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Mark buffer to be ready. */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF2_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_select_buffer);
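++/*
++ * Usage sketch (illustrative only): with double buffering, a new frame is
++ * typically queued by updating the idle buffer and then marking it ready,
++ * where buf_num and new_phys are placeholders:
++ *
++ *	ret = ipu_update_channel_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER,
++ *					buf_num, new_phys);
++ *	if (ret == 0)
++ *		ret = ipu_select_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER,
++ *					buf_num);
++ *
++ * ipu_update_channel_buffer() returns -EACCES while the buffer is still
++ * marked ready, so callers alternate buf_num between 0 and 1 (and 2 in
++ * triple buffering mode) as frames complete.
++ */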
++
++/*!
++ * This function is called to mark the buffers of the VDI previous/current/
++ * next input channels as ready.
++ *
++ * @param ipu ipu handler
++ * @param	bufNum		Input parameter for which buffer number to
++ *				set to the ready state.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
++{
++
++ uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
++ uint32_t mask_bit =
++ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
++ idma_mask(dma_chan)|
++ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Mark buffers to be ready. */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
++ else
++ ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
++
++#define NA -1
++static int proc_dest_sel[] = {
++ 0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
++ 0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
++static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
++ NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
++static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
++ NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
++
++
++/*!
++ * This function links 2 channels together for automatic frame
++ * synchronization. The output of the source channel is linked to the input of
++ * the destination channel.
++ *
++ * @param ipu ipu handler
++ * @param src_ch Input parameter for the logical channel ID of
++ * the source channel.
++ *
++ * @param dest_ch Input parameter for the logical channel ID of
++ * the destination channel.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
++{
++ int retval = 0;
++ uint32_t fs_proc_flow1;
++ uint32_t fs_proc_flow2;
++ uint32_t fs_proc_flow3;
++ uint32_t fs_disp_flow1;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
++ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
++ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
++
++ switch (src_ch) {
++ case CSI_MEM0:
++ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC0_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM1:
++ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC1_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM2:
++ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC2_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM3:
++ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC3_DEST_SEL_OFFSET;
++ break;
++ case CSI_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_DEST_SEL_OFFSET;
++ break;
++ case CSI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PP_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PP_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++		ipu_cm_write(ipu, reg, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_VDOA_MEM:
++ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
++ if (MEM_VDI_MEM == dest_ch)
++ fs_proc_flow3 |= FS_VDOA_DEST_SEL_VDI;
++ else if (MEM_PP_MEM == dest_ch)
++ fs_proc_flow3 |= FS_VDOA_DEST_SEL_IC;
++ else {
++ retval = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ switch (dest_ch) {
++ case MEM_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
++ if (MEM_VDOA_MEM == src_ch)
++ fs_proc_flow1 |= FS_PP_SRC_SEL_VDOA;
++ else
++ fs_proc_flow1 |= proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PP_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PRPENC_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PRPVF_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_DC_SYNC:
++ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_BG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_SYNC0_SRC_SEL_OFFSET;
++ break;
++ case MEM_FG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_SYNC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_DC_ASYNC:
++ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
++ break;
++ case MEM_BG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_ASYNC0_SRC_SEL_OFFSET;
++ break;
++ case MEM_FG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_ASYNC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_VDI_MEM:
++ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
++ if (MEM_VDOA_MEM == src_ch)
++ fs_proc_flow1 |= FS_VDI_SRC_SEL_VDOA;
++ else {
++ retval = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
++ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
++ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return retval;
++}
++EXPORT_SYMBOL(ipu_link_channels);
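++/*
++ * Usage sketch (illustrative only): linking lets one channel's output
++ * trigger another channel's input without CPU intervention, for example
++ * feeding the post-processor output into its rotation channel:
++ *
++ *	ret = ipu_link_channels(ipu, MEM_PP_MEM, MEM_ROT_PP_MEM);
++ *	...
++ *	ipu_unlink_channels(ipu, MEM_PP_MEM, MEM_ROT_PP_MEM);
++ *
++ * Source/destination pairs not handled by the switch statements above are
++ * rejected with -EINVAL.
++ */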
++
++/*!
++ * This function unlinks 2 channels and disables automatic frame
++ * synchronization.
++ *
++ * @param ipu ipu handler
++ * @param src_ch Input parameter for the logical channel ID of
++ * the source channel.
++ *
++ * @param dest_ch Input parameter for the logical channel ID of
++ * the destination channel.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
++{
++ int retval = 0;
++ uint32_t fs_proc_flow1;
++ uint32_t fs_proc_flow2;
++ uint32_t fs_proc_flow3;
++ uint32_t fs_disp_flow1;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
++ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
++ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
++
++ switch (src_ch) {
++ case CSI_MEM0:
++ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
++ break;
++ case CSI_MEM1:
++ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
++ break;
++ case CSI_MEM2:
++ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
++ break;
++ case CSI_MEM3:
++ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
++ break;
++ case CSI_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ break;
++ case CSI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_VDOA_MEM:
++ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ switch (dest_ch) {
++ case MEM_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_DC_SYNC:
++ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
++ break;
++ case MEM_BG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
++ break;
++ case MEM_FG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
++ break;
++ case MEM_DC_ASYNC:
++ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
++ break;
++ case MEM_BG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
++ break;
++ case MEM_FG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
++ break;
++ case MEM_VDI_MEM:
++ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
++ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
++ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return retval;
++}
++EXPORT_SYMBOL(ipu_unlink_channels);
++
++/*!
++ * This function checks whether a logical channel is enabled.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return	This function returns 1 if the requested channel is enabled
++ *		or 0 if it is not.
++ */
++int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t in_dma;
++ uint32_t out_dma;
++
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ if (reg & idma_mask(in_dma))
++ return 1;
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ if (reg & idma_mask(out_dma))
++ return 1;
++ return 0;
++}
++EXPORT_SYMBOL(ipu_is_channel_busy);
++
++/*!
++ * This function enables a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t ipu_conf;
++ uint32_t in_dma;
++ uint32_t out_dma;
++ uint32_t sec_dma;
++ uint32_t thrd_dma;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
++ dev_err(ipu->dev, "Warning: channel already enabled %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return -EACCES;
++ }
++
++ /* Get input and output dma channels */
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++ if (ipu->di_use_count[0] > 0) {
++ ipu_conf |= IPU_CONF_DI0_EN;
++ }
++ if (ipu->di_use_count[1] > 0) {
++ ipu_conf |= IPU_CONF_DI1_EN;
++ }
++ if (ipu->dp_use_count > 0)
++ ipu_conf |= IPU_CONF_DP_EN;
++ if (ipu->dc_use_count > 0)
++ ipu_conf |= IPU_CONF_DC_EN;
++ if (ipu->dmfc_use_count > 0)
++ ipu_conf |= IPU_CONF_DMFC_EN;
++ if (ipu->ic_use_count > 0)
++ ipu_conf |= IPU_CONF_IC_EN;
++ if (ipu->vdi_use_count > 0) {
++ ipu_conf |= IPU_CONF_ISP_EN;
++ ipu_conf |= IPU_CONF_VDI_EN;
++ ipu_conf |= IPU_CONF_IC_INPUT;
++ }
++ if (ipu->rot_use_count > 0)
++ ipu_conf |= IPU_CONF_ROT_EN;
++ if (ipu->smfc_use_count > 0)
++ ipu_conf |= IPU_CONF_SMFC_EN;
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++ if (idma_is_valid(in_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
++ }
++ if (idma_is_valid(out_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
++ }
++
++ if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
++ (channel == MEM_VDI_PRP_VF_MEM))) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
++ }
++ if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
++ } else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
++ }
++
++ if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
++ (channel == MEM_FG_SYNC)) {
++ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
++
++ _ipu_dp_dc_enable(ipu, channel);
++ }
++
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
++ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
++ _ipu_is_vdi_out_chan(out_dma))
++ _ipu_ic_enable_task(ipu, channel);
++
++ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_enable_channel);
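++/*
++ * Usage sketch (illustrative only): the usual bring-up order for a
++ * memory-to-memory channel is channel init, buffer init, enable and then
++ * marking buffers ready; params stands for a channel parameter structure
++ * filled in by the caller and the "..." arguments are as described for
++ * ipu_init_channel_buffer():
++ *
++ *	ret = ipu_init_channel(ipu, MEM_PP_MEM, &params);
++ *	ret = ipu_init_channel_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER, ...);
++ *	ret = ipu_init_channel_buffer(ipu, MEM_PP_MEM, IPU_OUTPUT_BUFFER, ...);
++ *	ret = ipu_enable_channel(ipu, MEM_PP_MEM);
++ *	ret = ipu_select_buffer(ipu, MEM_PP_MEM, IPU_OUTPUT_BUFFER, 0);
++ *	ret = ipu_select_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER, 0);
++ *
++ * Teardown is ipu_disable_channel() followed by ipu_uninit_channel();
++ * error handling is omitted for brevity.
++ */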
++
++/*!
++ * This function checks whether a buffer is ready for a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param	type		Input parameter which buffer to check.
++ *
++ * @param	bufNum		Input parameter for which buffer number to
++ *				check the ready state of.
++ *
++ * @return	Returns 1 if the buffer is ready, 0 if it is not, or
++ *		negative error code on fail.
++ */
++int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if (bufNum == 0)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ if (reg & idma_mask(dma_chan))
++ return 1;
++ else
++ return 0;
++}
++EXPORT_SYMBOL(ipu_check_buffer_ready);
++
++/*!
++ * This function clears the buffer ready state for a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to clear.
++ *
++ * @param	bufNum		Input parameter for which buffer number to
++ *				clear the ready state of.
++ *
++ */
++void _ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ uint32_t dma_ch = channel_2_dma(channel, type);
++
++ if (!idma_is_valid(dma_ch))
++ return;
++
++ ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF0_RDY(dma_ch));
++ else if (bufNum == 1)
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF1_RDY(dma_ch));
++ else
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF2_RDY(dma_ch));
++ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
++}
++
++void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ _ipu_clear_buffer_ready(ipu, channel, type, bufNum);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++}
++EXPORT_SYMBOL(ipu_clear_buffer_ready);
++
++/*!
++ * This function disables a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param wait_for_stop Flag to set whether to wait for channel end
++ * of frame or return immediately.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
++{
++ uint32_t reg;
++ uint32_t in_dma;
++ uint32_t out_dma;
++ uint32_t sec_dma = NO_DMA;
++ uint32_t thrd_dma = NO_DMA;
++ uint16_t fg_pos_x, fg_pos_y;
++ unsigned long lock_flags;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
++ dev_dbg(ipu->dev, "Channel already disabled %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return -EACCES;
++ }
++
++ /* Get input and output dma channels */
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ if ((idma_is_valid(in_dma) &&
++ !idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
++ && (idma_is_valid(out_dma) &&
++ !idma_is_set(ipu, IDMAC_CHA_EN, out_dma))) {
++ mutex_unlock(&ipu->mutex_lock);
++ return -EINVAL;
++ }
++
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ }
++
++ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
++ (channel == MEM_DC_SYNC)) {
++ if (channel == MEM_FG_SYNC) {
++ _ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
++ _ipu_disp_set_window_pos(ipu, channel, 0, 0);
++ }
++
++ _ipu_dp_dc_disable(ipu, channel, false);
++
++ /*
++		 * wait for the BG channel EOF before disabling the FG IDMAC;
++		 * this avoids an FG NFB4EOF error.
++ */
++ if ((channel == MEM_FG_SYNC) && (ipu_is_channel_busy(ipu, MEM_BG_SYNC))) {
++ int timeout = 50;
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
++ IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
++ while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
++ IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
++ msleep(10);
++ timeout -= 10;
++ if (timeout <= 0) {
++ dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
++ break;
++ }
++ }
++ }
++ } else if (wait_for_stop && !_ipu_is_smfc_chan(out_dma) &&
++ channel != CSI_PRP_VF_MEM && channel != CSI_PRP_ENC_MEM) {
++ while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
++ idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
++ (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
++ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
++ uint32_t irq = 0xffffffff;
++ int timeout = 50000;
++
++ if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
++ irq = out_dma;
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
++ irq = sec_dma;
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
++ irq = thrd_dma;
++ if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
++ irq = in_dma;
++
++ if (irq == 0xffffffff) {
++ dev_dbg(ipu->dev, "warning: no channel busy, break\n");
++ break;
++ }
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
++ IPUIRQ_2_STATREG(irq));
++
++ dev_dbg(ipu->dev, "warning: channel %d busy, need wait\n", irq);
++
++ while (((ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq))
++ & IPUIRQ_2_MASK(irq)) == 0) &&
++ (idma_is_set(ipu, IDMAC_CHA_BUSY, irq))) {
++ udelay(10);
++ timeout -= 10;
++ if (timeout <= 0) {
++ ipu_dump_registers(ipu);
++ dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
++ break;
++ }
++ }
++ dev_dbg(ipu->dev, "wait_time:%d\n", 50000 - timeout);
++
++ }
++ }
++
++ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
++ (channel == MEM_DC_SYNC)) {
++ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
++ }
++
++ /* Disable IC task */
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
++ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
++ _ipu_is_vdi_out_chan(out_dma))
++ _ipu_ic_disable_task(ipu, channel);
++
++ /* Disable DMA channel(s) */
++ if (idma_is_valid(in_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
++ ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(in_dma));
++ }
++ if (idma_is_valid(out_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
++ ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(out_dma));
++ }
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
++ ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
++ }
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
++ } else {
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
++ }
++ ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
++ }
++
++ if (channel == MEM_FG_SYNC)
++ _ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Set channel buffers NOT to be ready */
++ if (idma_is_valid(in_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
++ }
++ if (idma_is_valid(out_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
++ }
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
++ }
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
++ }
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disable_channel);
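++/*
++ * Usage note (illustrative only): wait_for_stop selects between a
++ * synchronous and an immediate shutdown, e.g.
++ *
++ *	ret = ipu_disable_channel(ipu, MEM_PP_MEM, true);
++ *	ipu_uninit_channel(ipu, MEM_PP_MEM);
++ *
++ * With wait_for_stop set, the call polls the busy IDMAC channels (subject
++ * to the timeouts above) before clearing their enable bits and buffer
++ * ready flags.
++ */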
++
++/*!
++ * This function enables CSI.
++ *
++ * @param ipu ipu handler
++ * @param csi csi num 0 or 1
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t reg;
++
++ if (csi > 1) {
++ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
++ return -EINVAL;
++ }
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ipu->csi_use_count[csi]++;
++
++ if (ipu->csi_use_count[csi] == 1) {
++ reg = ipu_cm_read(ipu, IPU_CONF);
++ if (csi == 0)
++ ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
++ else
++ ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
++ }
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_enable_csi);
++
++/*!
++ * This function disables CSI.
++ *
++ * @param ipu ipu handler
++ * @param csi csi num 0 or 1
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t reg;
++
++ if (csi > 1) {
++ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
++ return -EINVAL;
++ }
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ipu->csi_use_count[csi]--;
++ if (ipu->csi_use_count[csi] == 0) {
++ _ipu_csi_wait4eof(ipu, ipu->csi_channel[csi]);
++ reg = ipu_cm_read(ipu, IPU_CONF);
++ if (csi == 0)
++ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
++ else
++ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
++ }
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disable_csi);
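++/*
++ * Usage note (illustrative only): CSI enables are reference counted per
++ * CSI port, so capture paths simply bracket streaming with
++ *
++ *	ret = ipu_enable_csi(ipu, csi);
++ *	...
++ *	ret = ipu_disable_csi(ipu, csi);
++ *
++ * where csi is 0 or 1; the CSI enable bit in IPU_CONF is only touched on
++ * the first enable and the last disable.
++ */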
++
++static irqreturn_t ipu_sync_irq_handler(int irq, void *desc)
++{
++ struct ipu_soc *ipu = desc;
++ int i;
++ uint32_t line, bit, int_stat, int_ctrl;
++ irqreturn_t result = IRQ_NONE;
++ const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
++
++ spin_lock(&ipu->int_reg_spin_lock);
++
++ for (i = 0; int_reg[i] != 0; i++) {
++ int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
++ int_ctrl = ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
++ int_stat &= int_ctrl;
++ ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
++ while ((line = ffs(int_stat)) != 0) {
++ bit = --line;
++ int_stat &= ~(1UL << line);
++ line += (int_reg[i] - 1) * 32;
++ result |=
++ ipu->irq_list[line].handler(line,
++ ipu->irq_list[line].
++ dev_id);
++ if (ipu->irq_list[line].flags & IPU_IRQF_ONESHOT) {
++ int_ctrl &= ~(1UL << bit);
++ ipu_cm_write(ipu, int_ctrl,
++ IPU_INT_CTRL(int_reg[i]));
++ }
++ }
++ }
++
++ spin_unlock(&ipu->int_reg_spin_lock);
++
++ return result;
++}
++
++static irqreturn_t ipu_err_irq_handler(int irq, void *desc)
++{
++ struct ipu_soc *ipu = desc;
++ int i;
++ uint32_t int_stat;
++ const int err_reg[] = { 5, 6, 9, 10, 0 };
++
++ spin_lock(&ipu->int_reg_spin_lock);
++
++ for (i = 0; err_reg[i] != 0; i++) {
++ int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
++ int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
++ if (int_stat) {
++ ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
++ dev_warn(ipu->dev,
++ "IPU Warning - IPU_INT_STAT_%d = 0x%08X\n",
++ err_reg[i], int_stat);
++ /* Disable interrupts so we only get error once */
++ int_stat = ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) &
++ ~int_stat;
++ ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
++ }
++ }
++
++ spin_unlock(&ipu->int_reg_spin_lock);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * This function enables the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to enable interrupt for.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ /*
++	 * Check the sync interrupt handler only, since we do nothing for
++	 * error interrupts other than printing out register values in the
++	 * error interrupt source handler.
++ */
++ if (_ipu_is_sync_irq(irq) && (ipu->irq_list[irq].handler == NULL)) {
++ dev_err(ipu->dev, "handler hasn't been registered on sync "
++ "irq %d\n", irq);
++ ret = -EACCES;
++ goto out;
++ }
++
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg |= IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++out:
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_enable_irq);
++
++/*!
++ * This function disables the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to disable interrupt for.
++ *
++ */
++void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg &= ~IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_disable_irq);
++
++/*!
++ * This function clears the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to clear interrupt for.
++ *
++ */
++void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_clear_irq);
++
++/*!
++ * This function returns the current interrupt status for the specified
++ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @return Returns true if the interrupt is pending/asserted or false if
++ * the interrupt is not pending.
++ */
++bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++ reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ if (reg & IPUIRQ_2_MASK(irq))
++ return true;
++ else
++ return false;
++}
++EXPORT_SYMBOL(ipu_get_irq_status);
++
++/*!
++ * This function registers an interrupt handler function for the specified
++ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @param handler Input parameter for address of the handler
++ * function.
++ *
++ * @param	irq_flags	Flags for interrupt mode, e.g. IPU_IRQF_ONESHOT.
++ *
++ * @param devname Input parameter for string name of driver
++ * registering the handler.
++ *
++ * @param dev_id Input parameter for pointer of data to be
++ * passed to the handler.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
++ irqreturn_t(*handler) (int, void *),
++ uint32_t irq_flags, const char *devname, void *dev_id)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ BUG_ON(irq >= IPU_IRQ_COUNT);
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ if (ipu->irq_list[irq].handler != NULL) {
++ dev_err(ipu->dev,
++ "handler already installed on irq %d\n", irq);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /*
++	 * Check the sync interrupt handler only, since we do nothing for
++	 * error interrupts other than printing out register values in the
++	 * error interrupt source handler.
++ */
++ if (_ipu_is_sync_irq(irq) && (handler == NULL)) {
++ dev_err(ipu->dev, "handler is NULL for sync irq %d\n", irq);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ipu->irq_list[irq].handler = handler;
++ ipu->irq_list[irq].flags = irq_flags;
++ ipu->irq_list[irq].dev_id = dev_id;
++ ipu->irq_list[irq].name = devname;
++
++ /* clear irq stat for previous use */
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
++ /* enable the interrupt */
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg |= IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++out:
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_request_irq);
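++
++/*
++ * Minimal usage sketch (hypothetical caller; my_eof_handler, my_irq_line and
++ * my_completion are illustrative names only). A client registers a handler,
++ * optionally as one-shot so the line is masked again after the first hit,
++ * and releases it with ipu_free_irq() using the same dev_id:
++ *
++ *	static irqreturn_t my_eof_handler(int irq, void *dev_id)
++ *	{
++ *		complete((struct completion *)dev_id);
++ *		return IRQ_HANDLED;
++ *	}
++ *
++ *	ret = ipu_request_irq(ipu, my_irq_line, my_eof_handler,
++ *			      IPU_IRQF_ONESHOT, "my-driver", &my_completion);
++ *	...
++ *	ipu_free_irq(ipu, my_irq_line, &my_completion);
++ */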
++
++/*!
++ * This function unregisters an interrupt handler for the specified interrupt
++ * line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @param dev_id Input parameter for pointer of data to be passed
++ * to the handler. This must match value passed to
++ * ipu_request_irq().
++ *
++ */
++void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ /* disable the interrupt */
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg &= ~IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++ if (ipu->irq_list[irq].dev_id == dev_id)
++ memset(&ipu->irq_list[irq], 0, sizeof(ipu->irq_list[irq]));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_free_irq);
++
++uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
++{
++ uint32_t reg, dma_chan;
++
++ dma_chan = channel_2_dma(channel, type);
++ if (!idma_is_valid(dma_chan))
++ return -EINVAL;
++
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
++ reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
++ return (reg & tri_cur_buf_mask(dma_chan)) >>
++ tri_cur_buf_shift(dma_chan);
++ } else {
++ reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
++ if (reg & idma_mask(dma_chan))
++ return 1;
++ else
++ return 0;
++ }
++}
++EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
++
++uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t stat = 0;
++ uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
++
++ switch (channel) {
++ case MEM_PRP_VF_MEM:
++ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ stat =
++ (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ stat =
++ (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
++ TSTAT_ENC_ROT_OFFSET;
++ break;
++ case MEM_PP_MEM:
++ stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ stat =
++ (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
++ break;
++
++ default:
++ stat = TASK_STAT_IDLE;
++ break;
++ }
++ return stat;
++}
++
++/*!
++ * This function checks the status of a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 0 on idle and 1 on busy.
++ *
++ */
++uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t dma_status;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ dma_status = ipu_is_channel_busy(ipu, channel);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++
++ dev_dbg(ipu->dev, "%s, dma_status:%d.\n", __func__, dma_status);
++
++ return dma_status;
++}
++EXPORT_SYMBOL(ipu_channel_status);
++
++int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
++ int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* enable target channel */
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
++
++ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
++
++ /* switch dp dc */
++ _ipu_dp_dc_disable(ipu, from_ch, true);
++
++ /* disable source channel */
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
++ ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(from_dma));
++
++ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_swap_channel);
++
++uint32_t bytes_per_pixel(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_GENERIC: /*generic data */
++ case IPU_PIX_FMT_RGB332:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV444P:
++ return 1;
++ break;
++ case IPU_PIX_FMT_GENERIC_16: /* generic data */
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ return 2;
++ break;
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ return 3;
++ break;
++ case IPU_PIX_FMT_GENERIC_32: /*generic data */
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ return 4;
++ break;
++ default:
++ return 1;
++ break;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(bytes_per_pixel);
++
++ipu_color_space_t format_to_colorspace(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGB666:
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_GBR24:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ case IPU_PIX_FMT_LVDS666:
++ case IPU_PIX_FMT_LVDS888:
++ return RGB;
++ break;
++
++ default:
++ return YCbCr;
++ break;
++ }
++ return RGB;
++}
++
++bool ipu_pixel_format_has_alpha(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_ABGR32:
++ return true;
++ break;
++ default:
++ return false;
++ break;
++ }
++ return false;
++}
++
++bool ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
++{
++ return _ipu_ch_param_bad_alpha_pos(pixel_fmt);
++}
++EXPORT_SYMBOL(ipu_ch_param_bad_alpha_pos);
++
++#ifdef CONFIG_PM
++static int ipu_suspend(struct device *dev)
++{
++ struct ipu_soc *ipu = dev_get_drvdata(dev);
++
++ /* All IDMAC channel and IPU clock should be disabled.*/
++ if (ipu->pdata->pg)
++ ipu->pdata->pg(1);
++
++ dev_dbg(dev, "ipu suspend.\n");
++ return 0;
++}
++
++static int ipu_resume(struct device *dev)
++{
++ struct ipu_soc *ipu = dev_get_drvdata(dev);
++
++ if (ipu->pdata->pg) {
++ ipu->pdata->pg(0);
++
++ _ipu_get(ipu);
++ _ipu_dmfc_init(ipu, dmfc_type_setup, 1);
++ /* Set sync refresh channels as high priority */
++ ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
++ _ipu_put(ipu);
++ }
++ dev_dbg(dev, "ipu resume.\n");
++ return 0;
++}
++
++int ipu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ dev_dbg(dev, "ipu busfreq high release.\n");
++
++ return 0;
++}
++
++int ipu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++	dev_dbg(dev, "ipu busfreq high request.\n");
++
++ return 0;
++}
++
++static const struct dev_pm_ops ipu_pm_ops = {
++ SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(ipu_suspend, ipu_resume)
++};
++#endif
++
++/*!
++ * This structure contains the platform driver callbacks, including the
++ * power management operations.
++ */
++static struct platform_driver mxcipu_driver = {
++ .driver = {
++ .name = "imx-ipuv3",
++ .of_match_table = imx_ipuv3_dt_ids,
++ #ifdef CONFIG_PM
++ .pm = &ipu_pm_ops,
++ #endif
++ },
++ .probe = ipu_probe,
++ .id_table = imx_ipu_type,
++ .remove = ipu_remove,
++};
++
++int32_t __init ipu_gen_init(void)
++{
++ int32_t ret;
++
++ ret = platform_driver_register(&mxcipu_driver);
++	return ret;
++}
++
++subsys_initcall(ipu_gen_init);
++
++static void __exit ipu_gen_uninit(void)
++{
++ platform_driver_unregister(&mxcipu_driver);
++}
++
++module_exit(ipu_gen_uninit);
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_device.c linux-3.14.40/drivers/mxc/ipu3/ipu_device.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_device.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_device.c 2015-05-01 14:57:59.611427001 -0500
+@@ -0,0 +1,3717 @@
++/*
++ * Copyright 2005-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_device.c
++ *
++ * @brief This file contains the IPUv3 driver device interface and fops functions.
++ *
++ * @ingroup IPU
++ */
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/kernel.h>
++#include <linux/kthread.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++
++#include <asm/cacheflush.h>
++#include <asm/outercache.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++#include "vdoa.h"
++
++#define CHECK_RETCODE(cont, str, err, label, ret) \
++do { \
++ if (cont) { \
++ dev_err(t->dev, "ERR:[0x%p]-no:0x%x "#str" ret:%d," \
++ "line:%d\n", t, t->task_no, ret, __LINE__);\
++ if (ret != -EACCES) { \
++ t->state = err; \
++ goto label; \
++ } \
++ } \
++} while (0)
++
++#define CHECK_RETCODE_CONT(cont, str, err, ret) \
++do { \
++ if (cont) { \
++ dev_err(t->dev, "ERR:[0x%p]-no:0x%x"#str" ret:%d," \
++ "line:%d\n", t, t->task_no, ret, __LINE__);\
++ if (ret != -EACCES) { \
++ if (t->state == STATE_OK) \
++ t->state = err; \
++ } \
++ } \
++} while (0)
++
++#undef DBG_IPU_PERF
++#ifdef DBG_IPU_PERF
++#define CHECK_PERF(ts) \
++do { \
++ getnstimeofday(ts); \
++} while (0)
++
++#define DECLARE_PERF_VAR \
++ struct timespec ts_queue; \
++ struct timespec ts_dotask; \
++ struct timespec ts_waitirq; \
++ struct timespec ts_sche; \
++ struct timespec ts_rel; \
++ struct timespec ts_frame
++
++#define PRINT_TASK_STATISTICS \
++do { \
++ ts_queue = timespec_sub(tsk->ts_dotask, tsk->ts_queue); \
++ ts_dotask = timespec_sub(tsk->ts_waitirq, tsk->ts_dotask); \
++ ts_waitirq = timespec_sub(tsk->ts_inirq, tsk->ts_waitirq); \
++ ts_sche = timespec_sub(tsk->ts_wakeup, tsk->ts_inirq); \
++ ts_rel = timespec_sub(tsk->ts_rel, tsk->ts_wakeup); \
++ ts_frame = timespec_sub(tsk->ts_rel, tsk->ts_queue); \
++ dev_dbg(tsk->dev, "[0x%p] no-0x%x, ts_q:%ldus, ts_do:%ldus," \
++ "ts_waitirq:%ldus,ts_sche:%ldus, ts_rel:%ldus," \
++ "ts_frame: %ldus\n", tsk, tsk->task_no, \
++ ts_queue.tv_nsec / NSEC_PER_USEC + ts_queue.tv_sec * USEC_PER_SEC,\
++ ts_dotask.tv_nsec / NSEC_PER_USEC + ts_dotask.tv_sec * USEC_PER_SEC,\
++ ts_waitirq.tv_nsec / NSEC_PER_USEC + ts_waitirq.tv_sec * USEC_PER_SEC,\
++ ts_sche.tv_nsec / NSEC_PER_USEC + ts_sche.tv_sec * USEC_PER_SEC,\
++ ts_rel.tv_nsec / NSEC_PER_USEC + ts_rel.tv_sec * USEC_PER_SEC,\
++ ts_frame.tv_nsec / NSEC_PER_USEC + ts_frame.tv_sec * USEC_PER_SEC); \
++ if ((ts_frame.tv_nsec/NSEC_PER_USEC + ts_frame.tv_sec*USEC_PER_SEC) > \
++ 80000) \
++ dev_dbg(tsk->dev, "ts_frame larger than 80ms [0x%p] no-0x%x.\n"\
++ , tsk, tsk->task_no); \
++} while (0)
++#else
++#define CHECK_PERF(ts)
++#define DECLARE_PERF_VAR
++#define PRINT_TASK_STATISTICS
++#endif
++
++#define IPU_PP_CH_VF (IPU_TASK_ID_VF - 1)
++#define IPU_PP_CH_PP (IPU_TASK_ID_PP - 1)
++#define MAX_PP_CH (IPU_TASK_ID_MAX - 1)
++#define VDOA_DEF_TIMEOUT_MS (HZ/2)
++
++/* Structures and variables for exporting the MXC IPU as a device */
++typedef enum {
++ STATE_OK = 0,
++ STATE_QUEUE,
++ STATE_IN_PROGRESS,
++ STATE_ERR,
++ STATE_TIMEOUT,
++ STATE_RES_TIMEOUT,
++ STATE_NO_IPU,
++ STATE_NO_IRQ,
++ STATE_IPU_BUSY,
++ STATE_IRQ_FAIL,
++ STATE_IRQ_TIMEOUT,
++ STATE_ENABLE_CHAN_FAIL,
++ STATE_DISABLE_CHAN_FAIL,
++ STATE_SEL_BUF_FAIL,
++ STATE_INIT_CHAN_FAIL,
++ STATE_LINK_CHAN_FAIL,
++ STATE_UNLINK_CHAN_FAIL,
++ STATE_INIT_CHAN_BUF_FAIL,
++ STATE_INIT_CHAN_BAND_FAIL,
++ STATE_SYS_NO_MEM,
++ STATE_VDOA_IRQ_TIMEOUT,
++ STATE_VDOA_IRQ_FAIL,
++ STATE_VDOA_TASK_FAIL,
++} ipu_state_t;
++
++enum {
++ INPUT_CHAN_VDI_P = 1,
++ INPUT_CHAN,
++ INPUT_CHAN_VDI_N,
++};
++
++struct ipu_state_msg {
++ int state;
++ char *msg;
++} state_msg[] = {
++ {STATE_OK, "ok"},
++ {STATE_QUEUE, "split queue"},
++ {STATE_IN_PROGRESS, "split in progress"},
++ {STATE_ERR, "error"},
++ {STATE_TIMEOUT, "split task timeout"},
++ {STATE_RES_TIMEOUT, "wait resource timeout"},
++ {STATE_NO_IPU, "no ipu found"},
++ {STATE_NO_IRQ, "no irq found for task"},
++ {STATE_IPU_BUSY, "ipu busy"},
++ {STATE_IRQ_FAIL, "request irq failed"},
++ {STATE_IRQ_TIMEOUT, "wait for irq timeout"},
++ {STATE_ENABLE_CHAN_FAIL, "ipu enable channel fail"},
++ {STATE_DISABLE_CHAN_FAIL, "ipu disable channel fail"},
++ {STATE_SEL_BUF_FAIL, "ipu select buf fail"},
++ {STATE_INIT_CHAN_FAIL, "ipu init channel fail"},
++ {STATE_LINK_CHAN_FAIL, "ipu link channel fail"},
++ {STATE_UNLINK_CHAN_FAIL, "ipu unlink channel fail"},
++ {STATE_INIT_CHAN_BUF_FAIL, "ipu init channel buffer fail"},
++ {STATE_INIT_CHAN_BAND_FAIL, "ipu init channel band mode fail"},
++ {STATE_SYS_NO_MEM, "sys no mem: -ENOMEM"},
++ {STATE_VDOA_IRQ_TIMEOUT, "wait for vdoa irq timeout"},
++ {STATE_VDOA_IRQ_FAIL, "vdoa irq fail"},
++ {STATE_VDOA_TASK_FAIL, "vdoa task fail"},
++};
++
++struct stripe_setting {
++ u32 iw;
++ u32 ih;
++ u32 ow;
++ u32 oh;
++ u32 outh_resize_ratio;
++ u32 outv_resize_ratio;
++ u32 i_left_pos;
++ u32 i_right_pos;
++ u32 i_top_pos;
++ u32 i_bottom_pos;
++ u32 o_left_pos;
++ u32 o_right_pos;
++ u32 o_top_pos;
++ u32 o_bottom_pos;
++ u32 rl_split_line;
++ u32 ud_split_line;
++};
++
++struct task_set {
++#define NULL_MODE 0x0
++#define IC_MODE 0x1
++#define ROT_MODE 0x2
++#define VDI_MODE 0x4
++#define IPU_PREPROCESS_MODE_MASK (IC_MODE | ROT_MODE | VDI_MODE)
++/* VDOA_MODE means this task uses the VDOA, and the VDOA has two modes:
++ * BAND mode and non-BAND mode. Non-band mode transfers the data to
++ * memory. BAND mode needs hardware sync with the IPU and is used by
++ * default when connected to the VDIC.
++ */
++#define VDOA_MODE 0x8
++#define VDOA_BAND_MODE 0x10
++ u8 mode;
++#define IC_VF 0x1
++#define IC_PP 0x2
++#define ROT_VF 0x4
++#define ROT_PP 0x8
++#define VDI_VF 0x10
++#define VDOA_ONLY 0x20
++ u8 task;
++#define NO_SPLIT 0x0
++#define RL_SPLIT 0x1
++#define UD_SPLIT 0x2
++#define LEFT_STRIPE 0x1
++#define RIGHT_STRIPE 0x2
++#define UP_STRIPE 0x4
++#define DOWN_STRIPE 0x8
++#define SPLIT_MASK 0xF
++ u8 split_mode;
++ u8 band_lines;
++ ipu_channel_t ic_chan;
++ ipu_channel_t rot_chan;
++ ipu_channel_t vdi_ic_p_chan;
++ ipu_channel_t vdi_ic_n_chan;
++
++ u32 i_off;
++ u32 i_uoff;
++ u32 i_voff;
++ u32 istride;
++
++ u32 ov_off;
++ u32 ov_uoff;
++ u32 ov_voff;
++ u32 ovstride;
++
++ u32 ov_alpha_off;
++ u32 ov_alpha_stride;
++
++ u32 o_off;
++ u32 o_uoff;
++ u32 o_voff;
++ u32 ostride;
++
++ u32 r_fmt;
++ u32 r_width;
++ u32 r_height;
++ u32 r_stride;
++ dma_addr_t r_paddr;
++
++ struct stripe_setting sp_setting;
++};
++
++struct ipu_split_task {
++ struct ipu_task task;
++ struct ipu_task_entry *parent_task;
++ struct ipu_task_entry *child_task;
++ u32 task_no;
++};
++
++struct ipu_task_entry {
++ struct ipu_input input;
++ struct ipu_output output;
++
++ bool overlay_en;
++ struct ipu_overlay overlay;
++#define DEF_TIMEOUT_MS 1000
++#define DEF_DELAY_MS 20
++ int timeout;
++ int irq;
++
++ u8 task_id;
++ u8 ipu_id;
++ u8 task_in_list;
++ u8 split_done;
++ struct mutex split_lock;
++ struct mutex vdic_lock;
++ wait_queue_head_t split_waitq;
++
++ struct list_head node;
++ struct list_head split_list;
++ struct ipu_soc *ipu;
++ struct device *dev;
++ struct task_set set;
++ wait_queue_head_t task_waitq;
++ struct completion irq_comp;
++ struct kref refcount;
++ ipu_state_t state;
++ u32 task_no;
++ atomic_t done;
++ atomic_t res_free;
++ atomic_t res_get;
++
++ struct ipu_task_entry *parent;
++ char *vditmpbuf[2];
++ u32 old_save_lines;
++ u32 old_size;
++ bool buf1filled;
++ bool buf0filled;
++
++ vdoa_handle_t vdoa_handle;
++ struct vdoa_output_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ int size;
++ } vdoa_dma;
++
++#ifdef DBG_IPU_PERF
++ struct timespec ts_queue;
++ struct timespec ts_dotask;
++ struct timespec ts_waitirq;
++ struct timespec ts_inirq;
++ struct timespec ts_wakeup;
++ struct timespec ts_rel;
++#endif
++};
++
++struct ipu_channel_tabel {
++ struct mutex lock;
++ u8 used[MXC_IPU_MAX_NUM][MAX_PP_CH];
++ u8 vdoa_used;
++};
++
++struct ipu_thread_data {
++ struct ipu_soc *ipu;
++ u32 id;
++ u32 is_vdoa;
++};
++
++struct ipu_alloc_list {
++ struct list_head list;
++ dma_addr_t phy_addr;
++ void *cpu_addr;
++ u32 size;
++ void *file_index;
++};
++
++static LIST_HEAD(ipu_alloc_list);
++static DEFINE_MUTEX(ipu_alloc_lock);
++static struct ipu_channel_tabel ipu_ch_tbl;
++static LIST_HEAD(ipu_task_list);
++static DEFINE_SPINLOCK(ipu_task_list_lock);
++static DECLARE_WAIT_QUEUE_HEAD(thread_waitq);
++static DECLARE_WAIT_QUEUE_HEAD(res_waitq);
++static atomic_t req_cnt;
++static atomic_t file_index = ATOMIC_INIT(1);
++static int major;
++static int max_ipu_no;
++static int thread_id;
++static atomic_t frame_no;
++static struct class *ipu_class;
++static struct device *ipu_dev;
++static int debug;
++module_param(debug, int, 0600);
++#ifdef DBG_IPU_PERF
++static struct timespec ts_frame_max;
++static u32 ts_frame_avg;
++static atomic_t frame_cnt;
++#endif
++
++static bool deinterlace_3_field(struct ipu_task_entry *t)
++{
++ return ((t->set.mode & VDI_MODE) &&
++ (t->input.deinterlace.motion != HIGH_MOTION));
++}
++
++static u32 tiled_filed_size(struct ipu_task_entry *t)
++{
++ u32 field_size;
++
++	/* note: page alignment is required by the VPU hw output buffer */
++ field_size = TILED_NV12_FRAME_SIZE(t->input.width, t->input.height/2);
++ return field_size;
++}
++
++static bool only_ic(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return ((mode == IC_MODE) || (mode == VDI_MODE));
++}
++
++static bool only_rot(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return (mode == ROT_MODE);
++}
++
++static bool ic_and_rot(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return ((mode == (IC_MODE | ROT_MODE)) ||
++ (mode == (VDI_MODE | ROT_MODE)));
++}
++
++static bool need_split(struct ipu_task_entry *t)
++{
++ return ((t->set.split_mode != NO_SPLIT) || (t->task_no & SPLIT_MASK));
++}
++
++unsigned int fmt_to_bpp(unsigned int pixelformat)
++{
++ u32 bpp;
++
++ switch (pixelformat) {
++ case IPU_PIX_FMT_RGB565:
++ /*interleaved 422*/
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ /*non-interleaved 422*/
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ bpp = 16;
++ break;
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_YUV444P:
++ bpp = 24;
++ break;
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ bpp = 32;
++ break;
++ /*non-interleaved 420*/
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_NV12:
++ bpp = 12;
++ break;
++ default:
++ bpp = 8;
++ break;
++ }
++ return bpp;
++}
++EXPORT_SYMBOL_GPL(fmt_to_bpp);
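++
++/*
++ * Worked example (illustrative numbers only): fmt_to_bpp() returns the
++ * average bits per pixel across all planes, so a full frame needs
++ * width * height * fmt_to_bpp(fmt) / 8 bytes, e.g.
++ *	1024 x 768 IPU_PIX_FMT_RGB565: 1024 * 768 * 16 / 8 = 1572864 bytes
++ *	1024 x 768 IPU_PIX_FMT_NV12:   1024 * 768 * 12 / 8 = 1179648 bytes
++ */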
++
++cs_t colorspaceofpixel(int fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_ABGR32:
++ return RGB_CS;
++ break;
++ case IPU_PIX_FMT_UYVY:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_YUV444P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_TILED_NV12:
++ case IPU_PIX_FMT_TILED_NV12F:
++ return YUV_CS;
++ break;
++ default:
++ return NULL_CS;
++ }
++}
++EXPORT_SYMBOL_GPL(colorspaceofpixel);
++
++int need_csc(int ifmt, int ofmt)
++{
++ cs_t ics, ocs;
++
++ ics = colorspaceofpixel(ifmt);
++ ocs = colorspaceofpixel(ofmt);
++
++ if ((ics == NULL_CS) || (ocs == NULL_CS))
++ return -1;
++ else if (ics != ocs)
++ return 1;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(need_csc);
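++
++/*
++ * Example (values chosen for illustration): need_csc() compares the colour
++ * spaces reported by colorspaceofpixel(), so
++ *	need_csc(IPU_PIX_FMT_RGB565, IPU_PIX_FMT_YUYV) returns 1 (RGB -> YUV),
++ *	need_csc(IPU_PIX_FMT_YUYV, IPU_PIX_FMT_NV12) returns 0 (both YUV),
++ * and any format unknown to colorspaceofpixel() yields -1.
++ */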
++
++static int soc_max_in_width(u32 is_vdoa)
++{
++ return is_vdoa ? 8192 : 4096;
++}
++
++static int soc_max_vdi_in_width(void)
++{
++ return IPU_MAX_VDI_IN_WIDTH;
++}
++static int soc_max_in_height(void)
++{
++ return 4096;
++}
++
++static int soc_max_out_width(void)
++{
++ /* mx51/mx53/mx6q is 1024*/
++ return 1024;
++}
++
++static int soc_max_out_height(void)
++{
++ /* mx51/mx53/mx6q is 1024*/
++ return 1024;
++}
++
++static void dump_task_info(struct ipu_task_entry *t)
++{
++ if (!debug)
++ return;
++ dev_dbg(t->dev, "[0x%p]input:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->input.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->input.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->input.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->input.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->input.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->input.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->input.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]input buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->input.paddr);
++ dev_dbg(t->dev, "[0x%p]\ti_off = 0x%x\n", (void *)t, t->set.i_off);
++ dev_dbg(t->dev, "[0x%p]\ti_uoff = 0x%x\n", (void *)t, t->set.i_uoff);
++ dev_dbg(t->dev, "[0x%p]\ti_voff = 0x%x\n", (void *)t, t->set.i_voff);
++ dev_dbg(t->dev, "[0x%p]\tistride = %d\n", (void *)t, t->set.istride);
++ if (t->input.deinterlace.enable) {
++ dev_dbg(t->dev, "[0x%p]deinterlace enabled with:\n", (void *)t);
++ if (t->input.deinterlace.motion != HIGH_MOTION) {
++ dev_dbg(t->dev, "[0x%p]\tlow/medium motion\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr_n = 0x%x\n",
++ (void *)t, t->input.paddr_n);
++ } else
++ dev_dbg(t->dev, "[0x%p]\thigh motion\n", (void *)t);
++ }
++
++ dev_dbg(t->dev, "[0x%p]output:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->output.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->output.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->output.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->output.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->output.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->output.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->output.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]\trotate = %d\n", (void *)t, t->output.rotate);
++ dev_dbg(t->dev, "[0x%p]output buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->output.paddr);
++ dev_dbg(t->dev, "[0x%p]\to_off = 0x%x\n", (void *)t, t->set.o_off);
++ dev_dbg(t->dev, "[0x%p]\to_uoff = 0x%x\n", (void *)t, t->set.o_uoff);
++ dev_dbg(t->dev, "[0x%p]\to_voff = 0x%x\n", (void *)t, t->set.o_voff);
++ dev_dbg(t->dev, "[0x%p]\tostride = %d\n", (void *)t, t->set.ostride);
++
++ if (t->overlay_en) {
++ dev_dbg(t->dev, "[0x%p]overlay:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n",
++ (void *)t, t->overlay.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n",
++ (void *)t, t->overlay.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n",
++ (void *)t, t->overlay.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n",
++ (void *)t, t->overlay.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n",
++ (void *)t, t->overlay.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->overlay.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->overlay.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]overlay buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
++ (void *)t, t->overlay.paddr);
++ dev_dbg(t->dev, "[0x%p]\tov_off = 0x%x\n",
++ (void *)t, t->set.ov_off);
++ dev_dbg(t->dev, "[0x%p]\tov_uoff = 0x%x\n",
++ (void *)t, t->set.ov_uoff);
++ dev_dbg(t->dev, "[0x%p]\tov_voff = 0x%x\n",
++ (void *)t, t->set.ov_voff);
++ dev_dbg(t->dev, "[0x%p]\tovstride = %d\n",
++ (void *)t, t->set.ovstride);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ dev_dbg(t->dev, "[0x%p]local alpha enabled with:\n",
++ (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
++ (void *)t, t->overlay.alpha.loc_alp_paddr);
++ dev_dbg(t->dev, "[0x%p]\tov_alpha_off = 0x%x\n",
++ (void *)t, t->set.ov_alpha_off);
++ dev_dbg(t->dev, "[0x%p]\tov_alpha_stride = %d\n",
++ (void *)t, t->set.ov_alpha_stride);
++ } else
++			dev_dbg(t->dev, "[0x%p]global alpha enabled with value 0x%x\n",
++ (void *)t, t->overlay.alpha.gvalue);
++ if (t->overlay.colorkey.enable)
++ dev_dbg(t->dev, "[0x%p]colorkey enabled with value 0x%x\n",
++ (void *)t, t->overlay.colorkey.value);
++ }
++
++ dev_dbg(t->dev, "[0x%p]want task_id = %d\n", (void *)t, t->task_id);
++ dev_dbg(t->dev, "[0x%p]want task mode is 0x%x\n",
++ (void *)t, t->set.mode);
++ dev_dbg(t->dev, "[0x%p]\tIC_MODE = 0x%x\n", (void *)t, IC_MODE);
++ dev_dbg(t->dev, "[0x%p]\tROT_MODE = 0x%x\n", (void *)t, ROT_MODE);
++ dev_dbg(t->dev, "[0x%p]\tVDI_MODE = 0x%x\n", (void *)t, VDI_MODE);
++ dev_dbg(t->dev, "[0x%p]\tTask_no = 0x%x\n\n\n", (void *)t, t->task_no);
++}
++
++static void dump_check_err(struct device *dev, int err)
++{
++ switch (err) {
++ case IPU_CHECK_ERR_INPUT_CROP:
++ dev_err(dev, "input crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_OUTPUT_CROP:
++ dev_err(dev, "output crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_OVERLAY_CROP:
++ dev_err(dev, "overlay crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_INPUT_OVER_LIMIT:
++ dev_err(dev, "input over limitation\n");
++ break;
++ case IPU_CHECK_ERR_OVERLAY_WITH_VDI:
++ dev_err(dev, "do not support overlay with deinterlace\n");
++ break;
++ case IPU_CHECK_ERR_OV_OUT_NO_FIT:
++ dev_err(dev,
++ "width/height of overlay and ic output should be same\n");
++ break;
++ case IPU_CHECK_ERR_PROC_NO_NEED:
++ dev_err(dev, "no ipu processing need\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_INPUTW_OVER:
++ dev_err(dev, "split mode input width overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_INPUTH_OVER:
++ dev_err(dev, "split mode input height overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER:
++ dev_err(dev, "split mode output width overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER:
++ dev_err(dev, "split mode output height overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_WITH_ROT:
++ dev_err(dev, "not support split mode with rotation\n");
++ break;
++ case IPU_CHECK_ERR_W_DOWNSIZE_OVER:
++ dev_err(dev, "horizontal downsizing ratio overflow\n");
++ break;
++ case IPU_CHECK_ERR_H_DOWNSIZE_OVER:
++ dev_err(dev, "vertical downsizing ratio overflow\n");
++ break;
++ default:
++ break;
++ }
++}
++
++static void dump_check_warn(struct device *dev, int warn)
++{
++ if (warn & IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN)
++ dev_warn(dev, "input u/v offset not 8 align\n");
++ if (warn & IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN)
++ dev_warn(dev, "output u/v offset not 8 align\n");
++ if (warn & IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN)
++ dev_warn(dev, "overlay u/v offset not 8 align\n");
++}
++
++static int set_crop(struct ipu_crop *crop, int width, int height, int fmt)
++{
++ if ((width == 0) || (height == 0)) {
++ pr_err("Invalid param: width=%d, height=%d\n", width, height);
++ return -EINVAL;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == fmt) ||
++ (IPU_PIX_FMT_TILED_NV12F == fmt)) {
++ if (crop->w || crop->h) {
++ if (((crop->w + crop->pos.x) > width)
++ || ((crop->h + crop->pos.y) > height)
++ || (0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->pos.x % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->pos.y % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ ) {
++ pr_err("set_crop error MB align.\n");
++ return -EINVAL;
++ }
++ } else {
++ crop->pos.x = 0;
++ crop->pos.y = 0;
++ crop->w = width;
++ crop->h = height;
++ if ((0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))) {
++ pr_err("set_crop error w/h MB align.\n");
++ return -EINVAL;
++ }
++ }
++ } else {
++ if (crop->w || crop->h) {
++ if (((crop->w + crop->pos.x) > (width + 16))
++ || ((crop->h + crop->pos.y) > height + 16)) {
++ pr_err("set_crop error exceeds width/height.\n");
++ return -EINVAL;
++ }
++ } else {
++ crop->pos.x = 0;
++ crop->pos.y = 0;
++ crop->w = width;
++ crop->h = height;
++ }
++ crop->w -= crop->w%8;
++ crop->h -= crop->h%8;
++ }
++
++ if ((crop->w == 0) || (crop->h == 0)) {
++ pr_err("Invalid crop param: crop.w=%d, crop.h=%d\n",
++ crop->w, crop->h);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void update_offset(unsigned int fmt,
++ unsigned int width, unsigned int height,
++ unsigned int pos_x, unsigned int pos_y,
++ int *off, int *uoff, int *voff, int *stride)
++{
++ /* NOTE: u v offset should based on start point of off*/
++ switch (fmt) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + (width/2) * (pos_y/2) + pos_x/2;
++ /* In case height is odd, round up to even */
++ *voff = *uoff + (width/2) * ((height+1)/2);
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ *off = pos_y * width + pos_x;
++ *voff = (width * (height - pos_y) - pos_x)
++ + (width/2) * (pos_y/2) + pos_x/2;
++ /* In case height is odd, round up to even */
++ *uoff = *voff + (width/2) * ((height+1)/2);
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ *off = pos_y * width + pos_x;
++ *voff = (width * (height - pos_y) - pos_x)
++ + (width/2) * pos_y + pos_x/2;
++ *uoff = *voff + (width/2) * height;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + (width/2) * pos_y + pos_x/2;
++ *voff = *uoff + (width/2) * height;
++ break;
++ case IPU_PIX_FMT_YUV444P:
++ *off = pos_y * width + pos_x;
++ *uoff = width * height;
++ *voff = width * height * 2;
++ break;
++ case IPU_PIX_FMT_NV12:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + width * (pos_y/2) + pos_x;
++ break;
++ case IPU_PIX_FMT_TILED_NV12:
++ /*
++ * tiled format, progressive:
++ * assuming that line is aligned with MB height (aligned to 16)
++ * offset = line * stride + (pixel / MB_width) * pixels_in_MB
++ * = line * stride + (pixel / 16) * 256
++ * = line * stride + pixel * 16
++ */
++ *off = pos_y * width + (pos_x << 4);
++ *uoff = ALIGN(width * height, SZ_4K) + (*off >> 1) - *off;
++ break;
++ case IPU_PIX_FMT_TILED_NV12F:
++ /*
++ * tiled format, interlaced:
++ * same as above, only number of pixels in MB is 128,
++ * instead of 256
++ */
++ *off = (pos_y >> 1) * width + (pos_x << 3);
++ *uoff = ALIGN(width * height/2, SZ_4K) + (*off >> 1) - *off;
++ break;
++ default:
++ *off = (pos_y * width + pos_x) * fmt_to_bpp(fmt)/8;
++ break;
++ }
++ *stride = width * bytes_per_pixel(fmt);
++}
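++
++/*
++ * Worked example for update_offset() (illustrative values only):
++ * IPU_PIX_FMT_NV12, width = 640, height = 480, pos_x = 4, pos_y = 2:
++ *	off  = 2 * 640 + 4                         = 1284
++ *	uoff = (640 * (480 - 2) - 4) + 640 * 1 + 4 = 306560
++ * so off + uoff = 307844 = 640 * 480 (the Y plane) + 1 * 640 + 4, i.e. the
++ * interleaved UV line pos_y/2 at column pos_x, as expected.
++ */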
++
++static int update_split_setting(struct ipu_task_entry *t, bool vdi_split)
++{
++ struct stripe_param left_stripe;
++ struct stripe_param right_stripe;
++ struct stripe_param up_stripe;
++ struct stripe_param down_stripe;
++ u32 iw, ih, ow, oh;
++ u32 max_width;
++ int ret;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
++ return IPU_CHECK_ERR_SPLIT_WITH_ROT;
++
++ iw = t->input.crop.w;
++ ih = t->input.crop.h;
++
++ ow = t->output.crop.w;
++ oh = t->output.crop.h;
++
++ memset(&left_stripe, 0, sizeof(left_stripe));
++ memset(&right_stripe, 0, sizeof(right_stripe));
++ memset(&up_stripe, 0, sizeof(up_stripe));
++ memset(&down_stripe, 0, sizeof(down_stripe));
++
++ if (t->set.split_mode & RL_SPLIT) {
++ /*
++		 * We want equal stripes: initialize the stripes in case
++		 * calc_stripes returns before actually doing the calculation.
++ */
++ left_stripe.input_width = iw / 2;
++ left_stripe.output_width = ow / 2;
++ right_stripe.input_column = iw / 2;
++ right_stripe.output_column = ow / 2;
++
++ if (vdi_split)
++ max_width = soc_max_vdi_in_width();
++ else
++ max_width = soc_max_out_width();
++ ret = ipu_calc_stripes_sizes(iw,
++ ow,
++ max_width,
++ (((unsigned long long)1) << 32), /* 32bit for fractional*/
++ 1, /* equal stripes */
++ t->input.format,
++ t->output.format,
++ &left_stripe,
++ &right_stripe);
++ if (ret < 0)
++ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++ else if (ret)
++ dev_dbg(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
++ t->task_no, ret);
++ t->set.sp_setting.iw = left_stripe.input_width;
++ t->set.sp_setting.ow = left_stripe.output_width;
++ t->set.sp_setting.outh_resize_ratio = left_stripe.irr;
++ t->set.sp_setting.i_left_pos = left_stripe.input_column;
++ t->set.sp_setting.o_left_pos = left_stripe.output_column;
++ t->set.sp_setting.i_right_pos = right_stripe.input_column;
++ t->set.sp_setting.o_right_pos = right_stripe.output_column;
++ } else {
++ t->set.sp_setting.iw = iw;
++ t->set.sp_setting.ow = ow;
++ t->set.sp_setting.outh_resize_ratio = 0;
++ t->set.sp_setting.i_left_pos = 0;
++ t->set.sp_setting.o_left_pos = 0;
++ t->set.sp_setting.i_right_pos = 0;
++ t->set.sp_setting.o_right_pos = 0;
++ }
++ if ((t->set.sp_setting.iw + t->set.sp_setting.i_right_pos) > (iw+16))
++ return IPU_CHECK_ERR_SPLIT_INPUTW_OVER;
++ if (((t->set.sp_setting.ow + t->set.sp_setting.o_right_pos) > ow)
++ || (t->set.sp_setting.ow > soc_max_out_width()))
++ return IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER;
++ if (rounddown(t->set.sp_setting.ow, 8) * 8 <=
++ rounddown(t->set.sp_setting.iw, 8))
++ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++
++ if (t->set.split_mode & UD_SPLIT) {
++ /*
++		 * We want equal stripes: initialize the stripes in case
++		 * calc_stripes returns before actually doing the calculation.
++ */
++ up_stripe.input_width = ih / 2;
++ up_stripe.output_width = oh / 2;
++ down_stripe.input_column = ih / 2;
++ down_stripe.output_column = oh / 2;
++ ret = ipu_calc_stripes_sizes(ih,
++ oh,
++ soc_max_out_height(),
++ (((unsigned long long)1) << 32), /* 32bit for fractional*/
++ 0x1 | 0x2, /* equal stripes and vertical */
++ t->input.format,
++ t->output.format,
++ &up_stripe,
++ &down_stripe);
++ if (ret < 0)
++ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++ else if (ret)
++ dev_err(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
++ t->task_no, ret);
++ t->set.sp_setting.ih = up_stripe.input_width;
++ t->set.sp_setting.oh = up_stripe.output_width;
++ t->set.sp_setting.outv_resize_ratio = up_stripe.irr;
++ t->set.sp_setting.i_top_pos = up_stripe.input_column;
++ t->set.sp_setting.o_top_pos = up_stripe.output_column;
++ t->set.sp_setting.i_bottom_pos = down_stripe.input_column;
++ t->set.sp_setting.o_bottom_pos = down_stripe.output_column;
++ } else {
++ t->set.sp_setting.ih = ih;
++ t->set.sp_setting.oh = oh;
++ t->set.sp_setting.outv_resize_ratio = 0;
++ t->set.sp_setting.i_top_pos = 0;
++ t->set.sp_setting.o_top_pos = 0;
++ t->set.sp_setting.i_bottom_pos = 0;
++ t->set.sp_setting.o_bottom_pos = 0;
++ }
++
++ /* downscale case: enforce limits */
++ if (((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos) > (ih))
++ && (t->set.sp_setting.ih >= t->set.sp_setting.oh))
++ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
++	/* upscale case: relax limits because ipu_calc_stripes_sizes() may
++	   create an input stripe that falls just outside of the input window */
++ else if ((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos)
++ > (ih+16))
++ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
++ if (((t->set.sp_setting.oh + t->set.sp_setting.o_bottom_pos) > oh)
++ || (t->set.sp_setting.oh > soc_max_out_height()))
++ return IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER;
++ if (rounddown(t->set.sp_setting.oh, 8) * 8 <=
++ rounddown(t->set.sp_setting.ih, 8))
++ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++
++ return IPU_CHECK_OK;
++}
++
++static int check_task(struct ipu_task_entry *t)
++{
++ int tmp;
++ int ret = IPU_CHECK_OK;
++ int timeout;
++ bool vdi_split = false;
++ int ocw, och;
++
++ if ((IPU_PIX_FMT_TILED_NV12 == t->overlay.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->overlay.format) ||
++ (IPU_PIX_FMT_TILED_NV12 == t->output.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->output.format) ||
++ ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
++ !t->input.deinterlace.enable)) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++
++ /* check input */
++ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
++ t->input.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_INPUT_CROP;
++ goto done;
++ } else
++ update_offset(t->input.format, t->input.width, t->input.height,
++ t->input.crop.pos.x, t->input.crop.pos.y,
++ &t->set.i_off, &t->set.i_uoff,
++ &t->set.i_voff, &t->set.istride);
++
++ /* check output */
++ ret = set_crop(&t->output.crop, t->output.width, t->output.height,
++ t->output.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_OUTPUT_CROP;
++ goto done;
++ } else
++ update_offset(t->output.format,
++ t->output.width, t->output.height,
++ t->output.crop.pos.x, t->output.crop.pos.y,
++ &t->set.o_off, &t->set.o_uoff,
++ &t->set.o_voff, &t->set.ostride);
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*
++ * Cache output width and height and
++ * swap them so that we may check
++ * downsize overflow correctly.
++ */
++ ocw = t->output.crop.h;
++ och = t->output.crop.w;
++ } else {
++ ocw = t->output.crop.w;
++ och = t->output.crop.h;
++ }
++
++ if (ocw * 8 <= t->input.crop.w) {
++ ret = IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++ goto done;
++ }
++
++ if (och * 8 <= t->input.crop.h) {
++ ret = IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++ goto done;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ if ((t->input.crop.w > soc_max_in_width(1)) ||
++ (t->input.crop.h > soc_max_in_height())) {
++ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
++ goto done;
++ }
++ /* output fmt: NV12 and YUYV, now don't support resize */
++ if (((IPU_PIX_FMT_NV12 != t->output.format) &&
++ (IPU_PIX_FMT_YUYV != t->output.format)) ||
++ (t->input.crop.w != t->output.crop.w) ||
++ (t->input.crop.h != t->output.crop.h)) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++ }
++
++ /* check overlay if there is */
++ if (t->overlay_en) {
++ if (t->input.deinterlace.enable) {
++ ret = IPU_CHECK_ERR_OVERLAY_WITH_VDI;
++ goto done;
++ }
++
++ ret = set_crop(&t->overlay.crop, t->overlay.width,
++ t->overlay.height, t->overlay.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_OVERLAY_CROP;
++ goto done;
++ } else {
++ ocw = t->output.crop.w;
++ och = t->output.crop.h;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ ocw = t->output.crop.h;
++ och = t->output.crop.w;
++ }
++ if ((t->overlay.crop.w != ocw) ||
++ (t->overlay.crop.h != och)) {
++ ret = IPU_CHECK_ERR_OV_OUT_NO_FIT;
++ goto done;
++ }
++
++ update_offset(t->overlay.format,
++ t->overlay.width, t->overlay.height,
++ t->overlay.crop.pos.x, t->overlay.crop.pos.y,
++ &t->set.ov_off, &t->set.ov_uoff,
++ &t->set.ov_voff, &t->set.ovstride);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ t->set.ov_alpha_stride = t->overlay.width;
++ t->set.ov_alpha_off = t->overlay.crop.pos.y *
++ t->overlay.width + t->overlay.crop.pos.x;
++ }
++ }
++ }
++
++ /* input overflow? */
++ if (!((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format))) {
++ if ((t->input.crop.w > soc_max_in_width(0)) ||
++ (t->input.crop.h > soc_max_in_height())) {
++ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
++ goto done;
++ }
++ }
++
++ /* check task mode */
++ t->set.mode = NULL_MODE;
++ t->set.split_mode = NO_SPLIT;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*output swap*/
++ tmp = t->output.crop.w;
++ t->output.crop.w = t->output.crop.h;
++ t->output.crop.h = tmp;
++ }
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
++ t->set.mode |= ROT_MODE;
++
++ /*need resize or CSC?*/
++ if ((t->input.crop.w != t->output.crop.w) ||
++ (t->input.crop.h != t->output.crop.h) ||
++ need_csc(t->input.format, t->output.format))
++ t->set.mode |= IC_MODE;
++
++ /*need flip?*/
++ if ((t->set.mode == NULL_MODE) && (t->output.rotate > IPU_ROTATE_NONE))
++ t->set.mode |= IC_MODE;
++
++ /*need IDMAC do format(same color space)?*/
++ if ((t->set.mode == NULL_MODE) && (t->input.format != t->output.format))
++ t->set.mode |= IC_MODE;
++
++ /*overlay support*/
++ if (t->overlay_en)
++ t->set.mode |= IC_MODE;
++
++ /*deinterlace*/
++ if (t->input.deinterlace.enable) {
++ t->set.mode &= ~IC_MODE;
++ t->set.mode |= VDI_MODE;
++ }
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ if (t->set.mode & ROT_MODE) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++ t->set.mode |= VDOA_MODE;
++ if (IPU_PIX_FMT_TILED_NV12F == t->input.format)
++ t->set.mode |= VDOA_BAND_MODE;
++ t->set.mode &= ~IC_MODE;
++ }
++
++ if ((t->set.mode & (IC_MODE | VDI_MODE)) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ if (t->output.crop.w > soc_max_out_width())
++ t->set.split_mode |= RL_SPLIT;
++ if (t->output.crop.h > soc_max_out_height())
++ t->set.split_mode |= UD_SPLIT;
++ if (!t->set.split_mode && (t->set.mode & VDI_MODE) &&
++ (t->input.crop.w > soc_max_vdi_in_width())) {
++ t->set.split_mode |= RL_SPLIT;
++ vdi_split = true;
++ }
++ if (t->set.split_mode) {
++ if ((t->set.split_mode == RL_SPLIT) ||
++ (t->set.split_mode == UD_SPLIT))
++ timeout = DEF_TIMEOUT_MS * 2 + DEF_DELAY_MS;
++ else
++ timeout = DEF_TIMEOUT_MS * 4 + DEF_DELAY_MS;
++ if (t->timeout < timeout)
++ t->timeout = timeout;
++
++ ret = update_split_setting(t, vdi_split);
++ if (ret > IPU_CHECK_ERR_MIN)
++ goto done;
++ }
++ }
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*output swap*/
++ tmp = t->output.crop.w;
++ t->output.crop.w = t->output.crop.h;
++ t->output.crop.h = tmp;
++ }
++
++ if (t->set.mode == NULL_MODE) {
++ ret = IPU_CHECK_ERR_PROC_NO_NEED;
++ goto done;
++ }
++
++ if ((t->set.i_uoff % 8) || (t->set.i_voff % 8))
++ ret |= IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN;
++ if ((t->set.o_uoff % 8) || (t->set.o_voff % 8))
++ ret |= IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN;
++ if (t->overlay_en && ((t->set.ov_uoff % 8) || (t->set.ov_voff % 8)))
++ ret |= IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN;
++
++done:
++ /* dump msg */
++ if (debug) {
++ if (ret > IPU_CHECK_ERR_MIN)
++ dump_check_err(t->dev, ret);
++ else if (ret != IPU_CHECK_OK)
++ dump_check_warn(t->dev, ret);
++ }
++
++ return ret;
++}
++
++static int prepare_task(struct ipu_task_entry *t)
++{
++ int ret = 0;
++
++ ret = check_task(t);
++ if (ret > IPU_CHECK_ERR_MIN)
++ return -EINVAL;
++
++ if (t->set.mode & VDI_MODE) {
++ t->task_id = IPU_TASK_ID_VF;
++ t->set.task = VDI_VF;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_VF;
++ }
++
++ if (VDOA_MODE == t->set.mode) {
++ if (t->set.task != 0) {
++ dev_err(t->dev, "ERR: vdoa only task:0x%x, [0x%p].\n",
++ t->set.task, t);
++ return -EINVAL;
++ }
++ t->set.task |= VDOA_ONLY;
++ }
++
++ if (VDOA_BAND_MODE & t->set.mode) {
++ /* to save band size: 1<<3 = 8 lines */
++ t->set.band_lines = 3;
++ }
++
++ dump_task_info(t);
++
++ return ret;
++}
++
++static uint32_t ic_vf_pp_is_busy(struct ipu_soc *ipu, bool is_vf)
++{
++ uint32_t status;
++ uint32_t status_vf;
++ uint32_t status_rot;
++
++ if (is_vf) {
++ status = ipu_channel_status(ipu, MEM_VDI_PRP_VF_MEM);
++ status_vf = ipu_channel_status(ipu, MEM_PRP_VF_MEM);
++ status_rot = ipu_channel_status(ipu, MEM_ROT_VF_MEM);
++ return status || status_vf || status_rot;
++ } else {
++ status = ipu_channel_status(ipu, MEM_PP_MEM);
++ status_rot = ipu_channel_status(ipu, MEM_ROT_PP_MEM);
++ return status || status_rot;
++ }
++}
++
++static int _get_vdoa_ipu_res(struct ipu_task_entry *t)
++{
++ int i;
++ struct ipu_soc *ipu;
++ u8 *used;
++ uint32_t found_ipu = 0;
++ uint32_t found_vdoa = 0;
++ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
++
++ mutex_lock(&tbl->lock);
++ if (t->set.mode & VDOA_MODE) {
++ if (NULL != t->vdoa_handle)
++ found_vdoa = 1;
++ else {
++ found_vdoa = tbl->vdoa_used ? 0 : 1;
++ if (found_vdoa) {
++ tbl->vdoa_used = 1;
++ vdoa_get_handle(&t->vdoa_handle);
++ } else
++ /* first get vdoa->ipu resource sequence */
++ goto out;
++ if (t->set.task & VDOA_ONLY)
++ goto out;
++ }
++ }
++
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu = ipu_get_soc(i);
++ if (IS_ERR(ipu))
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
++ t->task_no, found_vdoa, i);
++
++ used = &tbl->used[i][IPU_PP_CH_VF];
++ if (t->set.mode & VDI_MODE) {
++ if (0 == *used) {
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ } else if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
++ if (0 == *used) {
++ t->task_id = IPU_TASK_ID_VF;
++ if (t->set.mode & IC_MODE)
++ t->set.task |= IC_VF;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_VF;
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ } else
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, mode:0x%x\n",
++ t->task_no, found_vdoa, t->set.mode);
++ }
++ if (found_ipu)
++ goto next;
++
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu = ipu_get_soc(i);
++ if (IS_ERR(ipu))
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
++ t->task_no, found_vdoa, i);
++
++ if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
++ used = &tbl->used[i][IPU_PP_CH_PP];
++ if (0 == *used) {
++ t->task_id = IPU_TASK_ID_PP;
++ if (t->set.mode & IC_MODE)
++ t->set.task |= IC_PP;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_PP;
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ }
++ }
++
++next:
++ if (found_ipu) {
++ t->ipu = ipu;
++ t->ipu_id = i;
++ t->dev = ipu->dev;
++ if (atomic_inc_return(&t->res_get) == 2)
++ dev_err(t->dev,
++ "ERR no:0x%x,found_vdoa:%d,get ipu twice\n",
++ t->task_no, found_vdoa);
++ }
++out:
++ dev_dbg(t->dev,
++ "%s:no:0x%x,found_vdoa:%d, found_ipu:%d\n",
++ __func__, t->task_no, found_vdoa, found_ipu);
++ mutex_unlock(&tbl->lock);
++ if (t->set.task & VDOA_ONLY)
++ return found_vdoa;
++ else if (t->set.mode & VDOA_MODE)
++ return found_vdoa && found_ipu;
++ else
++ return found_ipu;
++}
++
++static void put_vdoa_ipu_res(struct ipu_task_entry *tsk, int vdoa_only)
++{
++ int ret;
++ int rel_vdoa = 0, rel_ipu = 0;
++ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
++
++ mutex_lock(&tbl->lock);
++ if (tsk->set.mode & VDOA_MODE) {
++ if (!tbl->vdoa_used && tsk->vdoa_handle)
++ dev_err(tsk->dev,
++ "ERR no:0x%x,vdoa not used,mode:0x%x\n",
++ tsk->task_no, tsk->set.mode);
++ if (tbl->vdoa_used && tsk->vdoa_handle) {
++ tbl->vdoa_used = 0;
++ vdoa_put_handle(&tsk->vdoa_handle);
++ if (tsk->ipu)
++ tsk->ipu->vdoa_en = 0;
++ rel_vdoa = 1;
++ if (vdoa_only || (tsk->set.task & VDOA_ONLY))
++ goto out;
++ }
++ }
++
++ tbl->used[tsk->ipu_id][tsk->task_id - 1] = 0;
++ rel_ipu = 1;
++ ret = atomic_inc_return(&tsk->res_free);
++ if (ret == 2)
++ dev_err(tsk->dev,
++ "ERR no:0x%x,rel_vdoa:%d,put ipu twice\n",
++ tsk->task_no, rel_vdoa);
++out:
++ dev_dbg(tsk->dev,
++ "%s:no:0x%x,rel_vdoa:%d, rel_ipu:%d\n",
++ __func__, tsk->task_no, rel_vdoa, rel_ipu);
++ mutex_unlock(&tbl->lock);
++}
++
++static int get_vdoa_ipu_res(struct ipu_task_entry *t)
++{
++ int ret;
++ uint32_t found = 0;
++
++ found = _get_vdoa_ipu_res(t);
++ if (!found) {
++ t->ipu_id = -1;
++ t->ipu = NULL;
++ /* blocking to get resource */
++ ret = atomic_inc_return(&req_cnt);
++ dev_dbg(t->dev,
++ "wait_res:no:0x%x,req_cnt:%d\n", t->task_no, ret);
++ ret = wait_event_timeout(res_waitq, _get_vdoa_ipu_res(t),
++ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
++ if (ret == 0) {
++ dev_err(t->dev, "ERR[0x%p,no-0x%x] wait_res timeout:%dms!\n",
++ t, t->task_no, t->timeout - DEF_DELAY_MS);
++ ret = -ETIMEDOUT;
++ t->state = STATE_RES_TIMEOUT;
++ goto out;
++ } else {
++ if (!(t->set.task & VDOA_ONLY) && (!t->ipu))
++ dev_err(t->dev,
++ "ERR[no-0x%x] can not get ipu!\n",
++ t->task_no);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0)
++ ret = atomic_dec_return(&req_cnt);
++ else
++ dev_err(t->dev,
++ "ERR[no-0x%x] req_cnt:%d mismatch!\n",
++ t->task_no, ret);
++ dev_dbg(t->dev, "no-0x%x,[0x%p],req_cnt:%d, got_res!\n",
++ t->task_no, t, ret);
++ found = 1;
++ }
++ }
++
++out:
++ return found;
++}
++
++static struct ipu_task_entry *create_task_entry(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++
++ tsk = kzalloc(sizeof(struct ipu_task_entry), GFP_KERNEL);
++ if (!tsk)
++ return ERR_PTR(-ENOMEM);
++ kref_init(&tsk->refcount);
++ tsk->state = -EINVAL;
++ tsk->ipu_id = -1;
++ tsk->dev = ipu_dev;
++ tsk->input = task->input;
++ tsk->output = task->output;
++ tsk->overlay_en = task->overlay_en;
++ if (tsk->overlay_en)
++ tsk->overlay = task->overlay;
++ if (task->timeout > DEF_TIMEOUT_MS)
++ tsk->timeout = task->timeout;
++ else
++ tsk->timeout = DEF_TIMEOUT_MS;
++
++ return tsk;
++}
++
++static void task_mem_free(struct kref *ref)
++{
++ struct ipu_task_entry *tsk =
++ container_of(ref, struct ipu_task_entry, refcount);
++ kfree(tsk);
++}
++
++int create_split_child_task(struct ipu_split_task *sp_task)
++{
++ int ret = 0;
++ struct ipu_task_entry *tsk;
++
++ tsk = create_task_entry(&sp_task->task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ sp_task->child_task = tsk;
++ tsk->task_no = sp_task->task_no;
++
++ ret = prepare_task(tsk);
++ if (ret < 0)
++ goto err;
++
++ tsk->parent = sp_task->parent_task;
++ tsk->set.sp_setting = sp_task->parent_task->set.sp_setting;
++
++ list_add(&tsk->node, &tsk->parent->split_list);
++ dev_dbg(tsk->dev, "[0x%p] sp_tsk Q list,no-0x%x\n", tsk, tsk->task_no);
++ tsk->state = STATE_QUEUE;
++ CHECK_PERF(&tsk->ts_queue);
++err:
++ return ret;
++}
++
++static inline int sp_task_check_done(struct ipu_split_task *sp_task,
++ struct ipu_task_entry *parent, int num, int *idx)
++{
++ int i;
++ int ret = 0;
++ struct ipu_task_entry *tsk;
++ struct mutex *lock = &parent->split_lock;
++
++ *idx = -EINVAL;
++ mutex_lock(lock);
++ for (i = 0; i < num; i++) {
++ tsk = sp_task[i].child_task;
++ if (tsk && tsk->split_done) {
++ *idx = i;
++ ret = 1;
++ goto out;
++ }
++ }
++
++out:
++ mutex_unlock(lock);
++ return ret;
++}
++
++static int create_split_task(
++ int stripe,
++ struct ipu_split_task *sp_task)
++{
++ struct ipu_task *task = &(sp_task->task);
++ struct ipu_task_entry *t = sp_task->parent_task;
++ int ret;
++
++ sp_task->task_no |= stripe;
++
++ task->input = t->input;
++ task->output = t->output;
++ task->overlay_en = t->overlay_en;
++ if (task->overlay_en)
++ task->overlay = t->overlay;
++ task->task_id = t->task_id;
++ if ((t->set.split_mode == RL_SPLIT) ||
++ (t->set.split_mode == UD_SPLIT))
++ task->timeout = t->timeout / 2;
++ else
++ task->timeout = t->timeout / 4;
++
++ task->input.crop.w = t->set.sp_setting.iw;
++ task->input.crop.h = t->set.sp_setting.ih;
++ if (task->overlay_en) {
++ task->overlay.crop.w = t->set.sp_setting.ow;
++ task->overlay.crop.h = t->set.sp_setting.oh;
++ }
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ task->output.crop.w = t->set.sp_setting.oh;
++ task->output.crop.h = t->set.sp_setting.ow;
++ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_bottom_pos;
++ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_right_pos;
++
++ } else {
++ task->output.crop.w = t->set.sp_setting.ow;
++ task->output.crop.h = t->set.sp_setting.oh;
++ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_right_pos;
++ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_bottom_pos;
++ }
++
++ if (stripe & LEFT_STRIPE)
++ task->input.crop.pos.x += t->set.sp_setting.i_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->input.crop.pos.x += t->set.sp_setting.i_right_pos;
++ if (stripe & UP_STRIPE)
++ task->input.crop.pos.y += t->set.sp_setting.i_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->input.crop.pos.y += t->set.sp_setting.i_bottom_pos;
++
++ if (task->overlay_en) {
++ if (stripe & LEFT_STRIPE)
++ task->overlay.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->overlay.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->overlay.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->overlay.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ }
++
++ switch (t->output.rotate) {
++ case IPU_ROTATE_NONE:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ break;
++ case IPU_ROTATE_VERT_FLIP:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ break;
++ case IPU_ROTATE_HORIZ_FLIP:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ break;
++ case IPU_ROTATE_180:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ break;
++ case IPU_ROTATE_90_RIGHT:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
++ break;
++ case IPU_ROTATE_90_RIGHT_HFLIP:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
++ break;
++ case IPU_ROTATE_90_RIGHT_VFLIP:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ break;
++ case IPU_ROTATE_90_LEFT:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ break;
++ default:
++ dev_err(t->dev, "ERR:should not be here\n");
++ break;
++ }
++
++ ret = create_split_child_task(sp_task);
++ if (ret < 0)
++ dev_err(t->dev, "ERR:create_split_child_task() ret:%d\n", ret);
++ return ret;
++}
++
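++/* Create two or four stripe child tasks (depending on the split mode) and put
++ * them on the parent's split list; on failure the children are freed and the
++ * parent is marked STATE_ERR. */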
++static int queue_split_task(struct ipu_task_entry *t,
++ struct ipu_split_task *sp_task, uint32_t size)
++{
++ int err[4];
++ int ret = 0;
++ int i, j;
++ struct ipu_task_entry *tsk = NULL;
++ struct mutex *lock = &t->split_lock;
++ struct mutex *vdic_lock = &t->vdic_lock;
++
++ dev_dbg(t->dev, "Split task 0x%p, no-0x%x, size:%d\n",
++ t, t->task_no, size);
++ mutex_init(lock);
++ mutex_init(vdic_lock);
++ init_waitqueue_head(&t->split_waitq);
++ INIT_LIST_HEAD(&t->split_list);
++ for (j = 0; j < size; j++) {
++ memset(&sp_task[j], 0, sizeof(*sp_task));
++ sp_task[j].parent_task = t;
++ sp_task[j].task_no = t->task_no;
++ }
++
++ if (t->set.split_mode == RL_SPLIT) {
++ i = 0;
++ err[i] = create_split_task(RIGHT_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(LEFT_STRIPE, &sp_task[i]);
++ } else if (t->set.split_mode == UD_SPLIT) {
++ i = 0;
++ err[i] = create_split_task(DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(UP_STRIPE, &sp_task[i]);
++ } else {
++ i = 0;
++ err[i] = create_split_task(RIGHT_STRIPE | DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(LEFT_STRIPE | DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 2;
++ err[i] = create_split_task(RIGHT_STRIPE | UP_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 3;
++ err[i] = create_split_task(LEFT_STRIPE | UP_STRIPE, &sp_task[i]);
++ }
++
++err_start:
++ for (j = 0; j < (i + 1); j++) {
++ if (err[j] < 0) {
++ if (sp_task[j].child_task)
++ dev_err(t->dev,
++ "sp_task[%d],no-0x%x fail state:%d, queue err:%d.\n",
++ j, sp_task[j].child_task->task_no,
++ sp_task[j].child_task->state, err[j]);
++ goto err_exit;
++ }
++ dev_dbg(t->dev, "[0x%p] sp_task[%d], no-0x%x state:%s, queue ret:%d.\n",
++ sp_task[j].child_task, j, sp_task[j].child_task->task_no,
++ state_msg[sp_task[j].child_task->state].msg, err[j]);
++ }
++
++ return ret;
++
++err_exit:
++ for (j = 0; j < (i + 1); j++) {
++ if (err[j] < 0 && !ret)
++ ret = err[j];
++ tsk = sp_task[j].child_task;
++ if (!tsk)
++ continue;
++ kfree(tsk);
++ }
++ t->state = STATE_ERR;
++ return ret;
++
++}
++
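++/* Program the VDOA for a tiled (NV12/NV12F) input and initialize the given
++ * IPU channel buffer with the VDOA output. */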
++static int init_tiled_buf(struct ipu_soc *ipu, struct ipu_task_entry *t,
++ ipu_channel_t channel, uint32_t ch_type)
++{
++ int ret = 0;
++ int i;
++ uint32_t ipu_fmt;
++ dma_addr_t inbuf_base = 0;
++ u32 field_size;
++ struct vdoa_params param;
++ struct vdoa_ipu_buf buf;
++ struct ipu_soc *ipu_idx;
++ u32 ipu_stride, obuf_size;
++ u32 height, width;
++ ipu_buffer_t type;
++
++ if ((IPU_PIX_FMT_YUYV != t->output.format) &&
++ (IPU_PIX_FMT_NV12 != t->output.format)) {
++		dev_err(t->dev, "ERR:[0x%x] output format\n", t->task_no);
++ return -EINVAL;
++ }
++
++ memset(&param, 0, sizeof(param));
++ /* init channel tiled bufs */
++ if (deinterlace_3_field(t) &&
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ field_size = tiled_filed_size(t);
++ if (INPUT_CHAN_VDI_P == ch_type) {
++ inbuf_base = t->input.paddr + field_size;
++ param.vfield_buf.prev_veba = inbuf_base + t->set.i_off;
++ } else if (INPUT_CHAN == ch_type) {
++ inbuf_base = t->input.paddr_n;
++ param.vfield_buf.cur_veba = inbuf_base + t->set.i_off;
++ } else if (INPUT_CHAN_VDI_N == ch_type) {
++ inbuf_base = t->input.paddr_n + field_size;
++ param.vfield_buf.next_veba = inbuf_base + t->set.i_off;
++ } else
++ return -EINVAL;
++ height = t->input.crop.h >> 1; /* field format for vdoa */
++ width = t->input.crop.w;
++ param.vfield_buf.vubo = t->set.i_uoff;
++ param.interlaced = 1;
++ param.scan_order = 1;
++ type = IPU_INPUT_BUFFER;
++ } else if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) &&
++ (INPUT_CHAN == ch_type)) {
++ height = t->input.crop.h;
++ width = t->input.crop.w;
++ param.vframe_buf.veba = t->input.paddr + t->set.i_off;
++ param.vframe_buf.vubo = t->set.i_uoff;
++ type = IPU_INPUT_BUFFER;
++ } else
++ return -EINVAL;
++
++ param.band_mode = (t->set.mode & VDOA_BAND_MODE) ? 1 : 0;
++ if (param.band_mode && (t->set.band_lines != 3) &&
++ (t->set.band_lines != 4) && (t->set.band_lines != 5))
++ return -EINVAL;
++ else if (param.band_mode)
++ param.band_lines = (1 << t->set.band_lines);
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu_idx = ipu_get_soc(i);
++ if (!IS_ERR(ipu_idx) && ipu_idx == ipu)
++ break;
++ }
++ if (t->set.task & VDOA_ONLY)
++		/* dummy; no IPU resource is needed */
++ i = 0;
++ if (max_ipu_no == i) {
++ dev_err(t->dev, "ERR:[0x%p] get ipu num\n", t);
++ return -EINVAL;
++ }
++
++ param.ipu_num = i;
++ param.vpu_stride = t->input.width;
++ param.height = height;
++ param.width = width;
++ if (IPU_PIX_FMT_NV12 == t->output.format)
++ param.pfs = VDOA_PFS_NV12;
++ else
++ param.pfs = VDOA_PFS_YUYV;
++ ipu_fmt = (param.pfs == VDOA_PFS_YUYV) ? IPU_PIX_FMT_YUYV :
++ IPU_PIX_FMT_NV12;
++ ipu_stride = param.width * bytes_per_pixel(ipu_fmt);
++ obuf_size = PAGE_ALIGN(param.width * param.height *
++ fmt_to_bpp(ipu_fmt)/8);
++ dev_dbg(t->dev, "band_mode:%d, band_lines:%d\n",
++ param.band_mode, param.band_lines);
++ if (!param.band_mode) {
++		/* note: if this is only a tiled -> raster conversion with no
++		 * other post-processing, no intermediate buffer needs to be
++		 * allocated and the output buffer is used directly.
++		 */
++ if (t->set.task & VDOA_ONLY)
++ param.ieba0 = t->output.paddr;
++ else {
++			dev_err(t->dev, "ERR:[0x%x] vdoa task\n", t->task_no);
++ return -EINVAL;
++ }
++ } else {
++ if (IPU_PIX_FMT_TILED_NV12F != t->input.format) {
++			dev_err(t->dev, "ERR [0x%x] vdoa task\n", t->task_no);
++ return -EINVAL;
++ }
++ }
++ ret = vdoa_setup(t->vdoa_handle, &param);
++ if (ret)
++ goto done;
++ vdoa_get_output_buf(t->vdoa_handle, &buf);
++ if (t->set.task & VDOA_ONLY)
++ goto done;
++
++ ret = ipu_init_channel_buffer(ipu,
++ channel,
++ type,
++ ipu_fmt,
++ width,
++ height,
++ ipu_stride,
++ IPU_ROTATE_NONE,
++ buf.ieba0,
++ buf.ieba1,
++ 0,
++ buf.iubo,
++ 0);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ if (param.band_mode) {
++ ret = ipu_set_channel_bandmode(ipu, channel,
++ type, t->set.band_lines);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BAND_FAIL;
++ goto done;
++ }
++ }
++done:
++ return ret;
++}
++
++static int init_tiled_ch_bufs(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++
++ if (IPU_PIX_FMT_TILED_NV12 == t->input.format) {
++ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
++ CHECK_RETCODE(ret < 0, "init tiled_ch", t->state, done, ret);
++ } else if (IPU_PIX_FMT_TILED_NV12F == t->input.format) {
++ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-c", t->state, done, ret);
++ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_p_chan,
++ INPUT_CHAN_VDI_P);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-p", t->state, done, ret);
++ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_n_chan,
++ INPUT_CHAN_VDI_N);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-n", t->state, done, ret);
++ } else {
++ ret = -EINVAL;
++ dev_err(t->dev, "ERR[no-0x%x] invalid fmt:0x%x!\n",
++ t->task_no, t->input.format);
++ }
++
++done:
++ return ret;
++}
++
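++/* Set up the IC/VDI channel: resize, combine and deinterlace parameters, then
++ * the input, overlay, alpha and output buffers, linking the VDOA channel in
++ * band mode. */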
++static int init_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++ ipu_channel_params_t params;
++ dma_addr_t inbuf = 0, ovbuf = 0, ov_alp_buf = 0;
++ dma_addr_t inbuf_p = 0, inbuf_n = 0;
++ dma_addr_t outbuf = 0;
++ int out_uoff = 0, out_voff = 0, out_rot;
++ int out_w = 0, out_h = 0, out_stride;
++ int out_fmt;
++ u32 vdi_frame_idx = 0;
++
++ memset(&params, 0, sizeof(params));
++
++	/* does it need to link to a rotation channel? */
++ if (ic_and_rot(t->set.mode)) {
++ outbuf = t->set.r_paddr;
++ out_w = t->set.r_width;
++ out_h = t->set.r_height;
++ out_stride = t->set.r_stride;
++ out_fmt = t->set.r_fmt;
++ out_uoff = 0;
++ out_voff = 0;
++ out_rot = IPU_ROTATE_NONE;
++ } else {
++ outbuf = t->output.paddr + t->set.o_off;
++ out_w = t->output.crop.w;
++ out_h = t->output.crop.h;
++ out_stride = t->set.ostride;
++ out_fmt = t->output.format;
++ out_uoff = t->set.o_uoff;
++ out_voff = t->set.o_voff;
++ out_rot = t->output.rotate;
++ }
++
++ /* settings */
++ params.mem_prp_vf_mem.in_width = t->input.crop.w;
++ params.mem_prp_vf_mem.out_width = out_w;
++ params.mem_prp_vf_mem.in_height = t->input.crop.h;
++ params.mem_prp_vf_mem.out_height = out_h;
++ params.mem_prp_vf_mem.in_pixel_fmt = t->input.format;
++ params.mem_prp_vf_mem.out_pixel_fmt = out_fmt;
++ params.mem_prp_vf_mem.motion_sel = t->input.deinterlace.motion;
++
++ params.mem_prp_vf_mem.outh_resize_ratio =
++ t->set.sp_setting.outh_resize_ratio;
++ params.mem_prp_vf_mem.outv_resize_ratio =
++ t->set.sp_setting.outv_resize_ratio;
++
++ if (t->overlay_en) {
++ params.mem_prp_vf_mem.in_g_pixel_fmt = t->overlay.format;
++ params.mem_prp_vf_mem.graphics_combine_en = 1;
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_GLOBAL)
++ params.mem_prp_vf_mem.global_alpha_en = 1;
++ else if (t->overlay.alpha.loc_alp_paddr)
++ params.mem_prp_vf_mem.alpha_chan_en = 1;
++		/* otherwise, per-pixel alpha blending is used. */
++ params.mem_prp_vf_mem.alpha = t->overlay.alpha.gvalue;
++ if (t->overlay.colorkey.enable) {
++ params.mem_prp_vf_mem.key_color_en = 1;
++ params.mem_prp_vf_mem.key_color = t->overlay.colorkey.value;
++ }
++ }
++
++ if (t->input.deinterlace.enable) {
++ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_FIELD_MASK)
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ else
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++
++ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN)
++ vdi_frame_idx = t->input.deinterlace.field_fmt &
++ IPU_DEINTERLACE_RATE_FRAME1;
++ }
++
++ if (t->set.mode & VDOA_MODE)
++ ipu->vdoa_en = 1;
++
++ /* init channels */
++ if (!(t->set.task & VDOA_ONLY)) {
++ ret = ipu_init_channel(ipu, t->set.ic_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ }
++
++ if (deinterlace_3_field(t)) {
++ ret = ipu_init_channel(ipu, t->set.vdi_ic_p_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ ret = ipu_init_channel(ipu, t->set.vdi_ic_n_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ }
++
++ /* init channel bufs */
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ ret = init_tiled_ch_bufs(ipu, t);
++ if (ret < 0)
++ goto done;
++ } else {
++ if ((deinterlace_3_field(t)) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ if (params.mem_prp_vf_mem.field_fmt ==
++ IPU_DEINTERLACE_FIELD_TOP) {
++ if (vdi_frame_idx) {
++ inbuf_p = t->input.paddr + t->set.istride +
++ t->set.i_off;
++ inbuf = t->input.paddr_n + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.istride +
++ t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ } else {
++ inbuf_p = t->input.paddr + t->set.i_off;
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.i_off;
++ }
++ } else {
++ if (vdi_frame_idx) {
++ inbuf_p = t->input.paddr + t->set.i_off;
++ inbuf = t->input.paddr_n + t->set.istride + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++ } else {
++ inbuf_p = t->input.paddr + t->set.istride +
++ t->set.i_off;
++ inbuf = t->input.paddr + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.istride +
++ t->set.i_off;
++ }
++ }
++ } else {
++ if (t->input.deinterlace.enable) {
++ if (params.mem_prp_vf_mem.field_fmt ==
++ IPU_DEINTERLACE_FIELD_TOP) {
++ if (vdi_frame_idx) {
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ } else
++ inbuf = t->input.paddr + t->set.i_off;
++ } else {
++ if (vdi_frame_idx) {
++ inbuf = t->input.paddr + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++ } else
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ }
++ } else
++ inbuf = t->input.paddr + t->set.i_off;
++ }
++
++ if (t->overlay_en)
++ ovbuf = t->overlay.paddr + t->set.ov_off;
++ }
++ if (t->overlay_en && (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL))
++ ov_alp_buf = t->overlay.alpha.loc_alp_paddr
++ + t->set.ov_alpha_off;
++
++ if ((IPU_PIX_FMT_TILED_NV12 != t->input.format) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++ if (deinterlace_3_field(t) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.vdi_ic_p_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf_p,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.vdi_ic_n_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf_n,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (t->overlay_en) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER,
++ t->overlay.format,
++ t->overlay.crop.w,
++ t->overlay.crop.h,
++ t->set.ovstride,
++ IPU_ROTATE_NONE,
++ ovbuf,
++ 0,
++ 0,
++ t->set.ov_uoff,
++ t->set.ov_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER,
++ IPU_PIX_FMT_GENERIC,
++ t->overlay.crop.w,
++ t->overlay.crop.h,
++ t->set.ov_alpha_stride,
++ IPU_ROTATE_NONE,
++ ov_alp_buf,
++ 0,
++ 0,
++ 0, 0);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (!(t->set.task & VDOA_ONLY)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_OUTPUT_BUFFER,
++ out_fmt,
++ out_w,
++ out_h,
++ out_stride,
++ out_rot,
++ outbuf,
++ 0,
++ 0,
++ out_uoff,
++ out_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
++ ret = ipu_link_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_link_ch vdoa_ic",
++ STATE_LINK_CHAN_FAIL, done, ret);
++ }
++
++done:
++ return ret;
++}
++
++static void uninit_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret;
++
++ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
++ ret = ipu_unlink_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch vdoa_ic",
++ STATE_UNLINK_CHAN_FAIL, ret);
++ }
++ ipu_uninit_channel(ipu, t->set.ic_chan);
++ if (deinterlace_3_field(t)) {
++ ipu_uninit_channel(ipu, t->set.vdi_ic_p_chan);
++ ipu_uninit_channel(ipu, t->set.vdi_ic_n_chan);
++ }
++}
++
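++/* Set up the rotation channel and its buffers; when chained after the IC the
++ * input comes from the intermediate rotation buffer. */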
++static int init_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++ dma_addr_t inbuf = 0, outbuf = 0;
++ int in_uoff = 0, in_voff = 0;
++ int in_fmt, in_width, in_height, in_stride;
++
++ /* init channel */
++ ret = ipu_init_channel(ipu, t->set.rot_chan, NULL);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++
++ /* init channel buf */
++	/* does it need to be linked to an IC channel? */
++ if (ic_and_rot(t->set.mode)) {
++ in_fmt = t->set.r_fmt;
++ in_width = t->set.r_width;
++ in_height = t->set.r_height;
++ in_stride = t->set.r_stride;
++ inbuf = t->set.r_paddr;
++ in_uoff = 0;
++ in_voff = 0;
++ } else {
++ in_fmt = t->input.format;
++ in_width = t->input.crop.w;
++ in_height = t->input.crop.h;
++ in_stride = t->set.istride;
++ inbuf = t->input.paddr + t->set.i_off;
++ in_uoff = t->set.i_uoff;
++ in_voff = t->set.i_voff;
++ }
++ outbuf = t->output.paddr + t->set.o_off;
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.rot_chan,
++ IPU_INPUT_BUFFER,
++ in_fmt,
++ in_width,
++ in_height,
++ in_stride,
++ t->output.rotate,
++ inbuf,
++ 0,
++ 0,
++ in_uoff,
++ in_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.rot_chan,
++ IPU_OUTPUT_BUFFER,
++ t->output.format,
++ t->output.crop.w,
++ t->output.crop.h,
++ t->set.ostride,
++ IPU_ROTATE_NONE,
++ outbuf,
++ 0,
++ 0,
++ t->set.o_uoff,
++ t->set.o_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++done:
++ return ret;
++}
++
++static void uninit_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ ipu_uninit_channel(ipu, t->set.rot_chan);
++}
++
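++/* Return the end-of-frame IRQ of the last channel in the task's processing chain. */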
++static int get_irq(struct ipu_task_entry *t)
++{
++ int irq;
++ ipu_channel_t chan;
++
++ if (only_ic(t->set.mode))
++ chan = t->set.ic_chan;
++ else
++ chan = t->set.rot_chan;
++
++ switch (chan) {
++ case MEM_ROT_VF_MEM:
++ irq = IPU_IRQ_PRP_VF_ROT_OUT_EOF;
++ break;
++ case MEM_ROT_PP_MEM:
++ irq = IPU_IRQ_PP_ROT_OUT_EOF;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ irq = IPU_IRQ_PRP_VF_OUT_EOF;
++ break;
++ case MEM_PP_MEM:
++ irq = IPU_IRQ_PP_OUT_EOF;
++ break;
++ case MEM_VDI_MEM:
++ irq = IPU_IRQ_VDIC_OUT_EOF;
++ break;
++ default:
++ irq = -EINVAL;
++ }
++
++ return irq;
++}
++
++static irqreturn_t task_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_task_entry *prev_tsk = dev_id;
++
++ CHECK_PERF(&prev_tsk->ts_inirq);
++ complete(&prev_tsk->irq_comp);
++ dev_dbg(prev_tsk->dev, "[0x%p] no-0x%x in-irq!",
++ prev_tsk, prev_tsk->task_no);
++
++ return IRQ_HANDLED;
++}
++
++/* Fix up the middle line for the deinterlace up&down split mode */
++static void vdi_split_process(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ u32 vdi_size;
++ u32 vdi_save_lines;
++ u32 stripe_mode;
++ u32 task_no;
++ u32 i, offset_addr;
++ u32 line_size;
++ unsigned char *base_off;
++	struct ipu_task_entry *parent = t->parent;
++	struct mutex *lock;
++
++	if (!parent) {
++		dev_err(t->dev, "ERR[0x%x]invalid parent\n", t->task_no);
++		return;
++	}
++	lock = &parent->vdic_lock;
++	mutex_lock(lock);
++ stripe_mode = t->task_no & 0xf;
++ task_no = t->task_no >> 4;
++
++	/* Save both the luma and chroma parts for interleaved YUV (e.g. YUYV).
++	 * Save only the luma part for non-interleaved and partially
++	 * interleaved YUV formats (e.g. NV12 and YV12). */
++ if (t->output.format == IPU_PIX_FMT_YUYV ||
++ t->output.format == IPU_PIX_FMT_UYVY)
++ line_size = t->output.crop.w * fmt_to_bpp(t->output.format)/8;
++ else
++ line_size = t->output.crop.w;
++
++ vdi_save_lines = (t->output.crop.h - t->set.sp_setting.ud_split_line)/2;
++ vdi_size = vdi_save_lines * line_size;
++ if (vdi_save_lines <= 0) {
++		dev_err(t->dev, "[0x%p] vdi_save_lines error\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++
++	/* check whether vditmpbuf has been allocated or its size has changed */
++ if ((vdi_save_lines != parent->old_save_lines) ||
++ (vdi_size != parent->old_size)) {
++ if (parent->vditmpbuf[0] != NULL)
++ kfree(parent->vditmpbuf[0]);
++ if (parent->vditmpbuf[1] != NULL)
++ kfree(parent->vditmpbuf[1]);
++
++ parent->vditmpbuf[0] = kmalloc(vdi_size, GFP_KERNEL);
++ if (parent->vditmpbuf[0] == NULL) {
++ dev_err(t->dev,
++				"[0x%p] Failed to alloc vditmpbuf[0]\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++ memset(parent->vditmpbuf[0], 0, vdi_size);
++
++ parent->vditmpbuf[1] = kmalloc(vdi_size, GFP_KERNEL);
++ if (parent->vditmpbuf[1] == NULL) {
++ dev_err(t->dev,
++				"[0x%p] Failed to alloc vditmpbuf[1]\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++ memset(parent->vditmpbuf[1], 0, vdi_size);
++
++ parent->old_save_lines = vdi_save_lines;
++ parent->old_size = vdi_size;
++ }
++
++ if (pfn_valid(t->output.paddr >> PAGE_SHIFT)) {
++ base_off = page_address(pfn_to_page(t->output.paddr >> PAGE_SHIFT));
++ base_off += t->output.paddr & ((1 << PAGE_SHIFT) - 1);
++ } else {
++ base_off = (char *)ioremap_nocache(t->output.paddr,
++ t->output.width * t->output.height *
++ fmt_to_bpp(t->output.format)/8);
++ }
++ if (base_off == NULL) {
++ dev_err(t->dev, "ERR[0x%p]Failed get virtual address\n", t);
++ mutex_unlock(lock);
++ return;
++ }
++
++ /* UP stripe or UP&LEFT stripe */
++ if ((stripe_mode == UP_STRIPE) ||
++ (stripe_mode == (UP_STRIPE | LEFT_STRIPE))) {
++ if (!parent->buf0filled) {
++ offset_addr = t->set.o_off +
++ t->set.sp_setting.ud_split_line*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[0] + i*line_size,
++ base_off + offset_addr +
++ i*t->set.ostride, line_size);
++ parent->buf0filled = true;
++ } else {
++ offset_addr = t->set.o_off + (t->output.crop.h -
++ vdi_save_lines) * t->set.ostride;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[0] + i*line_size, line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf0filled = false;
++ }
++ }
++	/* Down stripe or Down&Left stripe */
++ else if ((stripe_mode == DOWN_STRIPE) ||
++ (stripe_mode == (DOWN_STRIPE | LEFT_STRIPE))) {
++ if (!parent->buf0filled) {
++ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[0] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf0filled = true;
++ } else {
++ offset_addr = t->set.o_off;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[0] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf0filled = false;
++ }
++ }
++	/* Up&Right stripe */
++ else if (stripe_mode == (UP_STRIPE | RIGHT_STRIPE)) {
++ if (!parent->buf1filled) {
++ offset_addr = t->set.o_off +
++ t->set.sp_setting.ud_split_line*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[1] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf1filled = true;
++ } else {
++ offset_addr = t->set.o_off +
++ (t->output.crop.h - vdi_save_lines)*t->set.ostride;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[1] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf1filled = false;
++ }
++ }
++	/* Down&Right stripe */
++ else if (stripe_mode == (DOWN_STRIPE | RIGHT_STRIPE)) {
++ if (!parent->buf1filled) {
++ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_save_lines*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[1] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf1filled = true;
++ } else {
++ offset_addr = t->set.o_off;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[1] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_save_lines*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
++ parent->buf1filled = false;
++ }
++ }
++ if (!pfn_valid(t->output.paddr >> PAGE_SHIFT))
++ iounmap(base_off);
++ mutex_unlock(lock);
++}
++
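++/* Tear down a task: fix up the VDI split seam if needed, free the IRQ and
++ * VDOA DMA buffer, then disable, unlink and uninitialize the channels. */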
++static void do_task_release(struct ipu_task_entry *t, int fail)
++{
++ int ret;
++ struct ipu_soc *ipu = t->ipu;
++
++ if (t->input.deinterlace.enable && !fail &&
++ (t->task_no & (UP_STRIPE | DOWN_STRIPE)))
++ vdi_split_process(ipu, t);
++
++ ipu_free_irq(ipu, t->irq, t);
++
++ if (t->vdoa_dma.vaddr)
++ dma_free_coherent(t->dev,
++ t->vdoa_dma.size,
++ t->vdoa_dma.vaddr,
++ t->vdoa_dma.paddr);
++
++ if (only_ic(t->set.mode)) {
++ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_p",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_n",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ }
++ } else if (only_rot(t->set.mode)) {
++ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_rot",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ ret = ipu_unlink_channels(ipu, t->set.ic_chan, t->set.rot_chan);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch",
++ STATE_UNLINK_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-rot",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-ic",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-p",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-n",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ }
++ }
++
++ if (only_ic(t->set.mode))
++ uninit_ic(ipu, t);
++ else if (only_rot(t->set.mode))
++ uninit_rot(ipu, t);
++ else if (ic_and_rot(t->set.mode)) {
++ uninit_ic(ipu, t);
++ uninit_rot(ipu, t);
++ }
++
++ t->state = STATE_OK;
++ CHECK_PERF(&t->ts_rel);
++ return;
++}
++
++static void do_task_vdoa_only(struct ipu_task_entry *t)
++{
++ int ret;
++
++ ret = init_tiled_ch_bufs(NULL, t);
++ CHECK_RETCODE(ret < 0, "do_vdoa_only", STATE_ERR, out, ret);
++ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
++ vdoa_stop(t->vdoa_handle);
++ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_only",
++ STATE_VDOA_IRQ_TIMEOUT, out, ret);
++
++ t->state = STATE_OK;
++out:
++ return;
++}
++
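++/* Run one (sub-)task on a single IPU: choose channels, set them up, enable
++ * them, start the VDOA in band mode, and wait for the end-of-frame interrupt. */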
++static void do_task(struct ipu_task_entry *t)
++{
++ int r_size;
++ int irq;
++ int ret;
++ uint32_t busy;
++ struct ipu_soc *ipu = t->ipu;
++
++ CHECK_PERF(&t->ts_dotask);
++
++ if (!ipu) {
++ t->state = STATE_NO_IPU;
++ return;
++ }
++
++ init_completion(&t->irq_comp);
++ dev_dbg(ipu->dev, "[0x%p]Do task no:0x%x: id %d\n", (void *)t,
++ t->task_no, t->task_id);
++ dump_task_info(t);
++
++ if (t->set.task & IC_PP) {
++ t->set.ic_chan = MEM_PP_MEM;
++ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PP_MEM\n", (void *)t);
++ } else if (t->set.task & IC_VF) {
++ t->set.ic_chan = MEM_PRP_VF_MEM;
++ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PRP_VF_MEM\n", (void *)t);
++ } else if (t->set.task & VDI_VF) {
++ if (t->set.mode & VDOA_BAND_MODE) {
++ t->set.ic_chan = MEM_VDI_MEM;
++ if (deinterlace_3_field(t)) {
++ t->set.vdi_ic_p_chan = MEM_VDI_MEM_P;
++ t->set.vdi_ic_n_chan = MEM_VDI_MEM_N;
++ }
++ dev_dbg(ipu->dev, "[0x%p]ic ch MEM_VDI_MEM\n",
++ (void *)t);
++ } else {
++ t->set.ic_chan = MEM_VDI_PRP_VF_MEM;
++ if (deinterlace_3_field(t)) {
++ t->set.vdi_ic_p_chan = MEM_VDI_PRP_VF_MEM_P;
++ t->set.vdi_ic_n_chan = MEM_VDI_PRP_VF_MEM_N;
++ }
++ dev_dbg(ipu->dev,
++ "[0x%p]ic ch MEM_VDI_PRP_VF_MEM\n", t);
++ }
++ }
++
++ if (t->set.task & ROT_PP) {
++ t->set.rot_chan = MEM_ROT_PP_MEM;
++ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_PP_MEM\n", (void *)t);
++ } else if (t->set.task & ROT_VF) {
++ t->set.rot_chan = MEM_ROT_VF_MEM;
++ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_VF_MEM\n", (void *)t);
++ }
++
++ if (t->task_id == IPU_TASK_ID_VF)
++ busy = ic_vf_pp_is_busy(ipu, true);
++ else if (t->task_id == IPU_TASK_ID_PP)
++ busy = ic_vf_pp_is_busy(ipu, false);
++ else {
++ dev_err(ipu->dev, "ERR[no:0x%x]ipu task_id:%d invalid!\n",
++ t->task_no, t->task_id);
++ return;
++ }
++ if (busy) {
++ dev_err(ipu->dev, "ERR[0x%p-no:0x%x]ipu task_id:%d busy!\n",
++ (void *)t, t->task_no, t->task_id);
++ t->state = STATE_IPU_BUSY;
++ return;
++ }
++
++ irq = get_irq(t);
++ if (irq < 0) {
++ t->state = STATE_NO_IRQ;
++ return;
++ }
++ t->irq = irq;
++
++ /* channel setup */
++ if (only_ic(t->set.mode)) {
++ dev_dbg(t->dev, "[0x%p]only ic mode\n", (void *)t);
++ ret = init_ic(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_ic only_ic",
++ t->state, chan_setup, ret);
++ } else if (only_rot(t->set.mode)) {
++ dev_dbg(t->dev, "[0x%p]only rot mode\n", (void *)t);
++ ret = init_rot(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_rot only_rot",
++ t->state, chan_setup, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ int rot_idx = (t->task_id == IPU_TASK_ID_VF) ? 0 : 1;
++
++ dev_dbg(t->dev, "[0x%p]ic + rot mode\n", (void *)t);
++ t->set.r_fmt = t->output.format;
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ t->set.r_width = t->output.crop.h;
++ t->set.r_height = t->output.crop.w;
++ } else {
++ t->set.r_width = t->output.crop.w;
++ t->set.r_height = t->output.crop.h;
++ }
++ t->set.r_stride = t->set.r_width *
++ bytes_per_pixel(t->set.r_fmt);
++ r_size = PAGE_ALIGN(t->set.r_width * t->set.r_height
++ * fmt_to_bpp(t->set.r_fmt)/8);
++
++ if (r_size > ipu->rot_dma[rot_idx].size) {
++ dev_dbg(t->dev, "[0x%p]realloc rot buffer\n", (void *)t);
++
++ if (ipu->rot_dma[rot_idx].vaddr)
++ dma_free_coherent(t->dev,
++ ipu->rot_dma[rot_idx].size,
++ ipu->rot_dma[rot_idx].vaddr,
++ ipu->rot_dma[rot_idx].paddr);
++
++ ipu->rot_dma[rot_idx].size = r_size;
++ ipu->rot_dma[rot_idx].vaddr = dma_alloc_coherent(t->dev,
++ r_size,
++ &ipu->rot_dma[rot_idx].paddr,
++ GFP_DMA | GFP_KERNEL);
++ CHECK_RETCODE(ipu->rot_dma[rot_idx].vaddr == NULL,
++ "ic_and_rot", STATE_SYS_NO_MEM,
++ chan_setup, -ENOMEM);
++ }
++ t->set.r_paddr = ipu->rot_dma[rot_idx].paddr;
++
++ dev_dbg(t->dev, "[0x%p]rotation:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->set.r_fmt);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->set.r_width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->set.r_height);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->set.r_paddr);
++ dev_dbg(t->dev, "[0x%p]\trstride = %d\n", (void *)t, t->set.r_stride);
++
++ ret = init_ic(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_ic ic_and_rot",
++ t->state, chan_setup, ret);
++ ret = init_rot(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_rot ic_and_rot",
++ t->state, chan_setup, ret);
++ ret = ipu_link_channels(ipu, t->set.ic_chan,
++ t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_link_ch ic_and_rot",
++ STATE_LINK_CHAN_FAIL, chan_setup, ret);
++ } else {
++ dev_err(t->dev, "ERR [0x%p]do task: should not be here\n", t);
++ t->state = STATE_ERR;
++ return;
++ }
++
++ ret = ipu_request_irq(ipu, irq, task_irq_handler, 0, NULL, t);
++ CHECK_RETCODE(ret < 0, "ipu_req_irq",
++ STATE_IRQ_FAIL, chan_setup, ret);
++
++ /* enable/start channel */
++ if (only_ic(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_p",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_n",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ }
++
++ ret = ipu_select_buffer(ipu, t->set.ic_chan, IPU_OUTPUT_BUFFER,
++ 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay_en) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_g",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_a",
++ STATE_SEL_BUF_FAIL, chan_buf,
++ ret);
++ }
++ }
++ if (!(t->set.mode & VDOA_BAND_MODE)) {
++ if (deinterlace_3_field(t))
++ ipu_select_multi_vdi_buffer(ipu, 0);
++ else {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ }
++ }
++ } else if (only_rot(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_rot",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-rot",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-ic",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-p",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-n",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ }
++
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-rot-o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay_en) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-g",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf icrot-ic-a",
++ STATE_SEL_BUF_FAIL,
++ chan_buf, ret);
++ }
++ }
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (deinterlace_3_field(t))
++ ipu_select_multi_vdi_buffer(ipu, 0);
++ else {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ }
++ }
++
++ if (need_split(t))
++ t->state = STATE_IN_PROGRESS;
++
++ if (t->set.mode & VDOA_BAND_MODE) {
++ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
++ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_band",
++ STATE_VDOA_IRQ_TIMEOUT, chan_rel, ret);
++ }
++
++ CHECK_PERF(&t->ts_waitirq);
++ ret = wait_for_completion_timeout(&t->irq_comp,
++ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
++ CHECK_PERF(&t->ts_wakeup);
++ CHECK_RETCODE(ret == 0, "wait_for_comp_timeout",
++ STATE_IRQ_TIMEOUT, chan_rel, ret);
++ dev_dbg(t->dev, "[0x%p] no-0x%x ipu irq done!", t, t->task_no);
++
++chan_rel:
++chan_buf:
++chan_en:
++chan_setup:
++ if (t->set.mode & VDOA_BAND_MODE)
++ vdoa_stop(t->vdoa_handle);
++ do_task_release(t, t->state >= STATE_ERR);
++ return;
++}
++
++static void do_task_vdoa_vdi(struct ipu_task_entry *t)
++{
++ int i;
++ int ret;
++ u32 stripe_width;
++
++	/* FIXME: crop mode is not supported yet */
++ stripe_width = t->input.width >> 1;
++ t->input.crop.pos.x = 0;
++ t->input.crop.pos.y = 0;
++ t->input.crop.w = stripe_width;
++ t->input.crop.h = t->input.height;
++ t->output.crop.w = stripe_width;
++ t->output.crop.h = t->input.height;
++
++ for (i = 0; i < 2; i++) {
++ t->input.crop.pos.x = t->input.crop.pos.x + i * stripe_width;
++ t->output.crop.pos.x = t->output.crop.pos.x + i * stripe_width;
++ /* check input */
++ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
++ t->input.format);
++ if (ret < 0) {
++ ret = STATE_ERR;
++ goto done;
++ } else
++ update_offset(t->input.format,
++ t->input.width, t->input.height,
++ t->input.crop.pos.x,
++ t->input.crop.pos.y,
++ &t->set.i_off, &t->set.i_uoff,
++ &t->set.i_voff, &t->set.istride);
++ dev_dbg(t->dev, "i_off:0x%x, i_uoff:0x%x, istride:%d.\n",
++ t->set.i_off, t->set.i_uoff, t->set.istride);
++ /* check output */
++ ret = set_crop(&t->output.crop, t->input.width,
++ t->output.height, t->output.format);
++ if (ret < 0) {
++ ret = STATE_ERR;
++ goto done;
++ } else
++ update_offset(t->output.format,
++ t->output.width, t->output.height,
++ t->output.crop.pos.x,
++ t->output.crop.pos.y,
++ &t->set.o_off, &t->set.o_uoff,
++ &t->set.o_voff, &t->set.ostride);
++
++ dev_dbg(t->dev, "o_off:0x%x, o_uoff:0x%x, ostride:%d.\n",
++ t->set.o_off, t->set.o_uoff, t->set.ostride);
++
++ do_task(t);
++ }
++
++ return;
++done:
++ dev_err(t->dev, "ERR %s set_crop.\n", __func__);
++ t->state = ret;
++ return;
++}
++
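++/* Get VDOA/IPU resources, run the task, release the resources, and wake the
++ * waiting split parent if this was a child stripe. */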
++static void get_res_do_task(struct ipu_task_entry *t)
++{
++ uint32_t found;
++ uint32_t split_child;
++ struct mutex *lock;
++
++ found = get_vdoa_ipu_res(t);
++ if (!found) {
++ dev_err(t->dev, "ERR:[0x%p] no-0x%x can not get res\n",
++ t, t->task_no);
++ return;
++ } else {
++ if (t->set.task & VDOA_ONLY)
++ do_task_vdoa_only(t);
++ else if ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
++ (t->set.mode & VDOA_BAND_MODE) &&
++ (t->input.crop.w > soc_max_vdi_in_width()))
++ do_task_vdoa_vdi(t);
++ else
++ do_task(t);
++ put_vdoa_ipu_res(t, 0);
++ }
++ if (t->state != STATE_OK) {
++ dev_err(t->dev, "ERR:[0x%p] no-0x%x state: %s\n",
++ t, t->task_no, state_msg[t->state].msg);
++ }
++
++ split_child = need_split(t) && t->parent;
++ if (split_child) {
++ lock = &t->parent->split_lock;
++ mutex_lock(lock);
++ t->split_done = 1;
++ mutex_unlock(lock);
++ wake_up(&t->parent->split_waitq);
++ }
++
++ return;
++}
++
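++/* Wait for all stripe children of a split task to finish or time out, dump
++ * diagnostics on timeout, then drop the children and set the parent state. */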
++static void wait_split_task_complete(struct ipu_task_entry *parent,
++ struct ipu_split_task *sp_task, uint32_t size)
++{
++ struct ipu_task_entry *tsk = NULL;
++ int ret = 0, rc;
++ int j, idx = -1;
++ unsigned long flags;
++ struct mutex *lock = &parent->split_lock;
++ int k, busy_vf, busy_pp;
++ struct ipu_soc *ipu;
++ DECLARE_PERF_VAR;
++
++ for (j = 0; j < size; j++) {
++ rc = wait_event_timeout(
++ parent->split_waitq,
++ sp_task_check_done(sp_task, parent, size, &idx),
++ msecs_to_jiffies(parent->timeout - DEF_DELAY_MS));
++ if (!rc) {
++ dev_err(parent->dev,
++ "ERR:[0x%p] no-0x%x, split_task timeout,j:%d,"
++ "size:%d.\n",
++ parent, parent->task_no, j, size);
++ ret = -ETIMEDOUT;
++ goto out;
++ } else {
++ if (idx < 0) {
++ dev_err(parent->dev,
++ "ERR:[0x%p] no-0x%x, invalid task idx:%d\n",
++ parent, parent->task_no, idx);
++ continue;
++ }
++ tsk = sp_task[idx].child_task;
++ mutex_lock(lock);
++ if (!tsk->split_done || !tsk->ipu)
++ dev_err(tsk->dev,
++ "ERR:no-0x%x,split not done:%d/null ipu:0x%p\n",
++ tsk->task_no, tsk->split_done, tsk->ipu);
++ tsk->split_done = 0;
++ mutex_unlock(lock);
++
++ dev_dbg(tsk->dev,
++ "[0x%p] no-0x%x sp_tsk[%d] done,state:%d.\n",
++ tsk, tsk->task_no, idx, tsk->state);
++ #ifdef DBG_IPU_PERF
++ CHECK_PERF(&tsk->ts_rel);
++ PRINT_TASK_STATISTICS;
++ #endif
++ }
++ }
++
++out:
++ if (ret == -ETIMEDOUT) {
++ /* debug */
++ for (k = 0; k < max_ipu_no; k++) {
++ ipu = ipu_get_soc(k);
++ if (IS_ERR(ipu)) {
++ dev_err(parent->dev, "no:0x%x, null ipu:%d\n",
++ parent->task_no, k);
++ } else {
++ busy_vf = ic_vf_pp_is_busy(ipu, true);
++ busy_pp = ic_vf_pp_is_busy(ipu, false);
++ dev_err(parent->dev,
++ "ERR:ipu[%d] busy_vf:%d, busy_pp:%d.\n",
++ k, busy_vf, busy_pp);
++ }
++ }
++ for (k = 0; k < size; k++) {
++ tsk = sp_task[k].child_task;
++ if (!tsk)
++ continue;
++ dev_err(parent->dev,
++ "ERR: sp_task[%d][0x%p] no-0x%x done:%d,"
++ "state:%s,on_list:%d, ipu:0x%p,timeout!\n",
++ k, tsk, tsk->task_no, tsk->split_done,
++ state_msg[tsk->state].msg, tsk->task_in_list,
++ tsk->ipu);
++ }
++ }
++
++ for (j = 0; j < size; j++) {
++ tsk = sp_task[j].child_task;
++ if (!tsk)
++ continue;
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ dev_dbg(tsk->dev,
++ "[0x%p] no-0x%x,id:%d sp_tsk timeout list_del.\n",
++ tsk, tsk->task_no, tsk->task_id);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++ if (!tsk->ipu)
++ continue;
++ if (tsk->state != STATE_OK) {
++ dev_err(tsk->dev,
++ "ERR:[0x%p] no-0x%x,id:%d, sp_tsk state: %s\n",
++ tsk, tsk->task_no, tsk->task_id,
++ state_msg[tsk->state].msg);
++ }
++ kref_put(&tsk->refcount, task_mem_free);
++ }
++
++ kfree(parent->vditmpbuf[0]);
++ kfree(parent->vditmpbuf[1]);
++
++ if (ret < 0)
++ parent->state = STATE_TIMEOUT;
++ else
++ parent->state = STATE_OK;
++ return;
++}
++
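++/* Pop the first entry from the global task list for a worker thread, taking a
++ * reference on it. */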
++static inline int find_task(struct ipu_task_entry **t, int thread_id)
++{
++ int found;
++ unsigned long flags;
++ struct ipu_task_entry *tsk;
++ struct list_head *task_list = &ipu_task_list;
++
++ *t = NULL;
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ found = !list_empty(task_list);
++ if (found) {
++ tsk = list_first_entry(task_list, struct ipu_task_entry, node);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ *t = tsk;
++ kref_get(&tsk->refcount);
++ dev_dbg(tsk->dev,
++ "thread_id:%d,[0x%p] task_no:0x%x,mode:0x%x list_del\n",
++ thread_id, tsk, tsk->task_no, tsk->set.mode);
++ } else
++ dev_err(tsk->dev,
++ "thread_id:%d,task_no:0x%x,mode:0x%x not on list_del\n",
++ thread_id, tsk->task_no, tsk->set.mode);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++
++ return found;
++}
++
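++/* Worker thread: pull tasks from the global list, split large frames into
++ * stripe sub-tasks when needed, run them, and signal completion to the
++ * submitter. */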
++static int ipu_task_thread(void *argv)
++{
++ struct ipu_task_entry *tsk;
++ struct ipu_task_entry *sp_tsk0;
++ struct ipu_split_task sp_task[4];
++ /* priority lower than irq_thread */
++ const struct sched_param param = {
++ .sched_priority = MAX_USER_RT_PRIO/2 - 1,
++ };
++ int ret;
++ int curr_thread_id;
++ uint32_t size;
++ unsigned long flags;
++ unsigned int cpu;
++ struct cpumask cpu_mask;
++ struct ipu_thread_data *data = (struct ipu_thread_data *)argv;
++
++ thread_id++;
++ curr_thread_id = thread_id;
++ sched_setscheduler(current, SCHED_FIFO, &param);
++
++ if (!data->is_vdoa) {
++ cpu = cpumask_first(cpu_online_mask);
++ cpumask_set_cpu(cpu, &cpu_mask);
++ ret = sched_setaffinity(data->ipu->thread[data->id]->pid,
++ &cpu_mask);
++ if (ret < 0) {
++ pr_err("%s: sched_setaffinity fail:%d.\n", __func__, ret);
++ }
++ pr_debug("%s: sched_setaffinity cpu:%d.\n", __func__, cpu);
++ }
++
++ while (!kthread_should_stop()) {
++ int split_fail = 0;
++ int split_parent;
++ int split_child;
++
++ wait_event_interruptible(thread_waitq, find_task(&tsk, curr_thread_id));
++
++ if (!tsk) {
++ pr_err("thread:%d can not find task.\n",
++ curr_thread_id);
++ continue;
++ }
++
++		/* note: other threads run the split child tasks */
++ split_parent = need_split(tsk) && !tsk->parent;
++ split_child = need_split(tsk) && tsk->parent;
++ if (split_parent) {
++ if ((tsk->set.split_mode == RL_SPLIT) ||
++ (tsk->set.split_mode == UD_SPLIT))
++ size = 2;
++ else
++ size = 4;
++ ret = queue_split_task(tsk, sp_task, size);
++ if (ret < 0) {
++ split_fail = 1;
++ } else {
++ struct list_head *pos;
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++
++ sp_tsk0 = list_first_entry(&tsk->split_list,
++ struct ipu_task_entry, node);
++ list_del(&sp_tsk0->node);
++
++ list_for_each(pos, &tsk->split_list) {
++ struct ipu_task_entry *tmp;
++
++ tmp = list_entry(pos,
++ struct ipu_task_entry, node);
++ tmp->task_in_list = 1;
++ dev_dbg(tmp->dev,
++ "[0x%p] no-0x%x,id:%d sp_tsk "
++ "add_to_list.\n", tmp,
++ tmp->task_no, tmp->task_id);
++ }
++ /* add to global list */
++ list_splice(&tsk->split_list, &ipu_task_list);
++
++ spin_unlock_irqrestore(&ipu_task_list_lock,
++ flags);
++				/* let the parent thread do the first sp_task */
++				/* FIXME: ensure the correct sequence for the
++				   4-stripe split: 5/6->9/a */
++ if (!sp_tsk0)
++ dev_err(tsk->dev,
++ "ERR: no-0x%x,can not get split_tsk0\n",
++ tsk->task_no);
++ wake_up_interruptible(&thread_waitq);
++ get_res_do_task(sp_tsk0);
++ dev_dbg(sp_tsk0->dev,
++ "thread:%d complete tsk no:0x%x.\n",
++ curr_thread_id, sp_tsk0->task_no);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0) {
++ wake_up(&res_waitq);
++ dev_dbg(sp_tsk0->dev,
++ "sp_tsk0 sche thread:%d no:0x%x,"
++ "req_cnt:%d\n", curr_thread_id,
++ sp_tsk0->task_no, ret);
++ /* For other threads to get_res */
++ schedule();
++ }
++ }
++ } else
++ get_res_do_task(tsk);
++
++		/* wait here for all split sub-tasks to finish (or time out)
++		   and then release all resources */
++ if (split_parent && !split_fail)
++ wait_split_task_complete(tsk, sp_task, size);
++
++ if (!split_child) {
++ atomic_inc(&tsk->done);
++ wake_up(&tsk->task_waitq);
++ }
++
++ dev_dbg(tsk->dev, "thread:%d complete tsk no:0x%x-[0x%p].\n",
++ curr_thread_id, tsk->task_no, tsk);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0) {
++ wake_up(&res_waitq);
++ dev_dbg(tsk->dev, "sche thread:%d no:0x%x,req_cnt:%d\n",
++ curr_thread_id, tsk->task_no, ret);
++ /* note: give cpu to other threads to get_res */
++ schedule();
++ }
++
++ kref_put(&tsk->refcount, task_mem_free);
++ }
++
++ pr_info("ERR %s exit.\n", __func__);
++ return 0;
++}
++
++int ipu_check_task(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++ int ret = 0;
++
++ tsk = create_task_entry(task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ ret = check_task(tsk);
++
++ task->input = tsk->input;
++ task->output = tsk->output;
++ task->overlay = tsk->overlay;
++ dump_task_info(tsk);
++
++ kref_put(&tsk->refcount, task_mem_free);
++ if (ret != 0)
++ pr_debug("%s ret:%d.\n", __func__, ret);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_check_task);
++
++int ipu_queue_task(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++ unsigned long flags;
++ int ret;
++ u32 tmp_task_no;
++ DECLARE_PERF_VAR;
++
++ tsk = create_task_entry(task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ CHECK_PERF(&tsk->ts_queue);
++ ret = prepare_task(tsk);
++ if (ret < 0)
++ goto done;
++
++ if (need_split(tsk)) {
++ CHECK_PERF(&tsk->ts_dotask);
++ CHECK_PERF(&tsk->ts_waitirq);
++ CHECK_PERF(&tsk->ts_inirq);
++ CHECK_PERF(&tsk->ts_wakeup);
++ }
++
++	/* the last four bits of task_no encode the split (stripe) type */
++ tmp_task_no = atomic_inc_return(&frame_no);
++ tsk->task_no = tmp_task_no << 4;
++ init_waitqueue_head(&tsk->task_waitq);
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ list_add_tail(&tsk->node, &ipu_task_list);
++ tsk->task_in_list = 1;
++ dev_dbg(tsk->dev, "[0x%p,no-0x%x] list_add_tail\n", tsk, tsk->task_no);
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++ wake_up_interruptible(&thread_waitq);
++
++ ret = wait_event_timeout(tsk->task_waitq, atomic_read(&tsk->done),
++ msecs_to_jiffies(tsk->timeout));
++ if (0 == ret) {
++		/* note: the timeout should be larger than the internal timeout! */
++ ret = -ETIMEDOUT;
++ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x, timeout:%dms!\n",
++ tsk, tsk->task_no, tsk->timeout);
++ } else {
++ if (STATE_OK != tsk->state) {
++ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x,state %d: %s\n",
++ tsk, tsk->task_no, tsk->state,
++ state_msg[tsk->state].msg);
++ ret = -ECANCELED;
++ } else
++ ret = 0;
++ }
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ dev_dbg(tsk->dev, "[0x%p] no:0x%x list_del\n",
++ tsk, tsk->task_no);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++
++#ifdef DBG_IPU_PERF
++ CHECK_PERF(&tsk->ts_rel);
++ PRINT_TASK_STATISTICS;
++ if (ts_frame_avg == 0)
++ ts_frame_avg = ts_frame.tv_nsec / NSEC_PER_USEC +
++ ts_frame.tv_sec * USEC_PER_SEC;
++ else
++ ts_frame_avg = (ts_frame_avg + ts_frame.tv_nsec / NSEC_PER_USEC
++ + ts_frame.tv_sec * USEC_PER_SEC)/2;
++ if (timespec_compare(&ts_frame, &ts_frame_max) > 0)
++ ts_frame_max = ts_frame;
++
++ atomic_inc(&frame_cnt);
++
++ if ((atomic_read(&frame_cnt) % 1000) == 0)
++ pr_debug("ipu_dev: max frame time:%ldus, avg frame time:%dus,"
++ "frame_cnt:%d\n", ts_frame_max.tv_nsec / NSEC_PER_USEC
++ + ts_frame_max.tv_sec * USEC_PER_SEC,
++ ts_frame_avg, atomic_read(&frame_cnt));
++#endif
++done:
++ if (ret < 0)
++ dev_err(tsk->dev, "ERR: no-0x%x,ipu_queue_task err:%d\n",
++ tsk->task_no, ret);
++
++ kref_put(&tsk->refcount, task_mem_free);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_queue_task);
++
++static int mxc_ipu_open(struct inode *inode, struct file *file)
++{
++ file->private_data = (void *)atomic_inc_return(&file_index);
++ return 0;
++}
++
++static long mxc_ipu_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ int ret = 0;
++
++ switch (cmd) {
++ case IPU_CHECK_TASK:
++ {
++ struct ipu_task task;
++
++ if (copy_from_user
++ (&task, (struct ipu_task *) arg,
++ sizeof(struct ipu_task)))
++ return -EFAULT;
++ ret = ipu_check_task(&task);
++ if (copy_to_user((struct ipu_task *) arg,
++ &task, sizeof(struct ipu_task)))
++ return -EFAULT;
++ break;
++ }
++ case IPU_QUEUE_TASK:
++ {
++ struct ipu_task task;
++
++ if (copy_from_user
++ (&task, (struct ipu_task *) arg,
++ sizeof(struct ipu_task)))
++ return -EFAULT;
++ ret = ipu_queue_task(&task);
++ break;
++ }
++ case IPU_ALLOC:
++ {
++ int size;
++ struct ipu_alloc_list *mem;
++
++ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
++ if (mem == NULL)
++ return -ENOMEM;
++
++		if (get_user(size, argp)) {
++			kfree(mem);
++			return -EFAULT;
++		}
++
++ mem->size = PAGE_ALIGN(size);
++
++		mem->cpu_addr = dma_alloc_coherent(ipu_dev, mem->size,
++ &mem->phy_addr,
++ GFP_DMA | GFP_KERNEL);
++ if (mem->cpu_addr == NULL) {
++ kfree(mem);
++ return -ENOMEM;
++ }
++ mem->file_index = file->private_data;
++ mutex_lock(&ipu_alloc_lock);
++ list_add(&mem->list, &ipu_alloc_list);
++ mutex_unlock(&ipu_alloc_lock);
++
++ dev_dbg(ipu_dev, "allocated %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ if (put_user(mem->phy_addr, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case IPU_FREE:
++ {
++ unsigned long offset;
++ struct ipu_alloc_list *mem;
++
++ if (get_user(offset, argp))
++ return -EFAULT;
++
++ ret = -EINVAL;
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry(mem, &ipu_alloc_list, list) {
++			if (mem->phy_addr == offset) {
++				list_del(&mem->list);
++				dma_free_coherent(ipu_dev,
++						  mem->size,
++						  mem->cpu_addr,
++						  mem->phy_addr);
++				dev_dbg(ipu_dev, "free %d bytes @ 0x%08X\n",
++					mem->size, mem->phy_addr);
++				kfree(mem);
++				ret = 0;
++				break;
++			}
++		}
++		mutex_unlock(&ipu_alloc_lock);
++
++ break;
++ }
++ default:
++ break;
++ }
++ return ret;
++}
++
++static int mxc_ipu_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ bool found = false;
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ struct ipu_alloc_list *mem;
++
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry(mem, &ipu_alloc_list, list) {
++ if (offset == mem->phy_addr) {
++ found = true;
++ len = mem->size;
++ break;
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ if (!found)
++ return -EINVAL;
++
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot)) {
++ printk(KERN_ERR
++ "mmap failed!\n");
++ return -ENOBUFS;
++ }
++ return 0;
++}
++
++static int mxc_ipu_release(struct inode *inode, struct file *file)
++{
++ struct ipu_alloc_list *mem;
++ struct ipu_alloc_list *n;
++
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry_safe(mem, n, &ipu_alloc_list, list) {
++ if ((mem->cpu_addr != 0) &&
++ (file->private_data == mem->file_index)) {
++ list_del(&mem->list);
++ dma_free_coherent(ipu_dev,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ dev_dbg(ipu_dev, "rel-free %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++ kfree(mem);
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ atomic_dec(&file_index);
++
++ return 0;
++}
++
++static struct file_operations mxc_ipu_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_ipu_open,
++ .mmap = mxc_ipu_mmap,
++ .release = mxc_ipu_release,
++ .unlocked_ioctl = mxc_ipu_ioctl,
++};
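++
++/*
++ * Illustrative sketch (editorial, not part of the original driver) of how a
++ * user-space client is expected to drive the /dev/mxc_ipu node implemented
++ * by the fops above, based only on the ioctl and mmap handlers in this
++ * file.  The IPU_* ioctl macros and struct ipu_task come from the exported
++ * IPU headers; the exact struct ipu_task field layout is an assumption here.
++ *
++ *	int fd = open("/dev/mxc_ipu", O_RDWR);
++ *	int size = 1024 * 768 * 2;		// size in, physical address out
++ *	ioctl(fd, IPU_ALLOC, &size);		// size now holds the phys addr
++ *	void *buf = mmap(0, 1024 * 768 * 2, PROT_READ | PROT_WRITE,
++ *			 MAP_SHARED, fd, size);	// offset must equal phys addr
++ *	struct ipu_task task = { 0 };
++ *	// ... fill task.input / task.output with sizes, formats, phys addrs
++ *	ioctl(fd, IPU_QUEUE_TASK, &task);	// blocks until done or timeout
++ *	ioctl(fd, IPU_FREE, &size);		// free by physical address
++ *	close(fd);				// release() frees anything left
++ */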
++
++int register_ipu_device(struct ipu_soc *ipu, int id)
++{
++ int ret = 0;
++ static int idx;
++ static struct ipu_thread_data thread_data[5];
++
++ if (!major) {
++ major = register_chrdev(0, "mxc_ipu", &mxc_ipu_fops);
++ if (major < 0) {
++ printk(KERN_ERR "Unable to register mxc_ipu as a char device\n");
++ ret = major;
++ goto register_cdev_fail;
++ }
++
++ ipu_class = class_create(THIS_MODULE, "mxc_ipu");
++ if (IS_ERR(ipu_class)) {
++ ret = PTR_ERR(ipu_class);
++ goto ipu_class_fail;
++ }
++
++ ipu_dev = device_create(ipu_class, NULL, MKDEV(major, 0),
++ NULL, "mxc_ipu");
++ if (IS_ERR(ipu_dev)) {
++ ret = PTR_ERR(ipu_dev);
++ goto dev_create_fail;
++ }
++ ipu_dev->dma_mask = kmalloc(sizeof(*ipu_dev->dma_mask), GFP_KERNEL);
++ *ipu_dev->dma_mask = DMA_BIT_MASK(32);
++ ipu_dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ mutex_init(&ipu_ch_tbl.lock);
++ }
++ max_ipu_no = ++id;
++ ipu->rot_dma[0].size = 0;
++ ipu->rot_dma[1].size = 0;
++
++ thread_data[idx].ipu = ipu;
++ thread_data[idx].id = 0;
++ thread_data[idx].is_vdoa = 0;
++ ipu->thread[0] = kthread_run(ipu_task_thread, &thread_data[idx++],
++ "ipu%d_task", id);
++ if (IS_ERR(ipu->thread[0])) {
++ ret = PTR_ERR(ipu->thread[0]);
++ goto kthread0_fail;
++ }
++
++ thread_data[idx].ipu = ipu;
++ thread_data[idx].id = 1;
++ thread_data[idx].is_vdoa = 0;
++ ipu->thread[1] = kthread_run(ipu_task_thread, &thread_data[idx++],
++ "ipu%d_task", id);
++ if (IS_ERR(ipu->thread[1])) {
++ ret = PTR_ERR(ipu->thread[1]);
++ goto kthread1_fail;
++ }
++
++
++ return ret;
++
++kthread1_fail:
++ kthread_stop(ipu->thread[0]);
++kthread0_fail:
++ if (id == 0)
++ device_destroy(ipu_class, MKDEV(major, 0));
++dev_create_fail:
++ if (id == 0) {
++ class_destroy(ipu_class);
++ }
++ipu_class_fail:
++ if (id == 0)
++ unregister_chrdev(major, "mxc_ipu");
++register_cdev_fail:
++ return ret;
++}
++
++void unregister_ipu_device(struct ipu_soc *ipu, int id)
++{
++ int i;
++
++ kthread_stop(ipu->thread[0]);
++ kthread_stop(ipu->thread[1]);
++ for (i = 0; i < 2; i++) {
++ if (ipu->rot_dma[i].vaddr)
++ dma_free_coherent(ipu_dev,
++ ipu->rot_dma[i].size,
++ ipu->rot_dma[i].vaddr,
++ ipu->rot_dma[i].paddr);
++ }
++
++ if (major) {
++ device_destroy(ipu_class, MKDEV(major, 0));
++ class_destroy(ipu_class);
++ unregister_chrdev(major, "mxc_ipu");
++ major = 0;
++ }
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_disp.c linux-3.14.40/drivers/mxc/ipu3/ipu_disp.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_disp.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_disp.c 2015-05-01 14:57:59.611427001 -0500
+@@ -0,0 +1,1962 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_disp.c
++ *
++ * @brief IPU display submodule API functions
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include <asm/atomic.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++struct dp_csc_param_t {
++ int mode;
++ void *coeff;
++};
++
++#define SYNC_WAVE 0
++#define NULL_WAVE (-1)
++#define ASYNC_SER_WAVE 6
++
++/* DC display ID assignments */
++#define DC_DISP_ID_SYNC(di) (di)
++#define DC_DISP_ID_SERIAL 2
++#define DC_DISP_ID_ASYNC 3
++
++int dmfc_type_setup;
++
++void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first)
++{
++ u32 dmfc_wr_chan, dmfc_dp_chan;
++
++ if (first) {
++ if (dmfc_type_setup > dmfc_type)
++ dmfc_type = dmfc_type_setup;
++ else
++ dmfc_type_setup = dmfc_type;
++
++ /* disable DMFC-IC channel*/
++ ipu_dmfc_write(ipu, 0x2, DMFC_IC_CTRL);
++ } else if (dmfc_type_setup >= DMFC_HIGH_RESOLUTION_DC) {
++		dev_dbg(ipu->dev, "DMFC high resolution has been set, will not change\n");
++ return;
++ } else
++ dmfc_type_setup = dmfc_type;
++
++ if (dmfc_type == DMFC_HIGH_RESOLUTION_DC) {
++ /* 1 - segment 0~3;
++		 * 5B - segment 4, 5;
++		 * 5F - segment 6, 7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC DC HIGH RESOLUTION: 1(0~3), 5B(4,5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000088;
++ dmfc_dp_chan = 0x00009694;
++ ipu->dmfc_size_28 = 256*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 128*4;
++ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_DP) {
++ /* 1 - segment 0, 1;
++		 * 5B - segment 2~5;
++		 * 5F - segment 6,7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC DP HIGH RESOLUTION: 1(0,1), 5B(2~5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000090;
++ dmfc_dp_chan = 0x0000968a;
++ ipu->dmfc_size_28 = 128*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 256*4;
++ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_ONLY_DP) {
++		/* 5B - segment 0~3;
++		 * 5F - segment 4~7;
++ * 1, 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC ONLY-DP HIGH RESOLUTION: 5B(0~3), 5F(4~7)\n");
++ dmfc_wr_chan = 0x00000000;
++ dmfc_dp_chan = 0x00008c88;
++ ipu->dmfc_size_28 = 0;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 256*4;
++ ipu->dmfc_size_23 = 256*4;
++ } else {
++ /* 1 - segment 0, 1;
++		 * 5B - segment 4, 5;
++		 * 5F - segment 6, 7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC NORMAL mode: 1(0~1), 5B(4,5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000090;
++ dmfc_dp_chan = 0x00009694;
++ ipu->dmfc_size_28 = 128*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 128*4;
++ }
++ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
++ ipu_dmfc_write(ipu, 0x202020F6, DMFC_WR_CHAN_DEF);
++ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
++ /* Enable chan 5 watermark set at 5 bursts and clear at 7 bursts */
++ ipu_dmfc_write(ipu, 0x2020F6F6, DMFC_DP_CHAN_DEF);
++}
++
++static int __init dmfc_setup(char *options)
++{
++ get_option(&options, &dmfc_type_setup);
++ if (dmfc_type_setup > DMFC_HIGH_RESOLUTION_ONLY_DP)
++ dmfc_type_setup = DMFC_HIGH_RESOLUTION_ONLY_DP;
++ return 1;
++}
++__setup("dmfc=", dmfc_setup);
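++
++/*
++ * Editorial note (not part of the original source): dmfc_setup() makes the
++ * DMFC partitioning selectable from the kernel command line, e.g.
++ * "dmfc=<n>", where <n> is one of the DMFC_* type values handled by
++ * _ipu_dmfc_init() above and is clamped to DMFC_HIGH_RESOLUTION_ONLY_DP.
++ * The numeric values of the DMFC_* constants live in the IPU headers and
++ * are not shown in this patch hunk.
++ */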
++
++void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width)
++{
++ u32 dmfc_gen1 = ipu_dmfc_read(ipu, DMFC_GENERAL1);
++
++ if (width >= HIGH_RESOLUTION_WIDTH) {
++ if (dma_chan == 23)
++ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DP, 0);
++ else if (dma_chan == 28)
++ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DC, 0);
++ }
++
++ if (dma_chan == 23) { /*5B*/
++ if (ipu->dmfc_size_23/width > 3)
++ dmfc_gen1 |= 1UL << 20;
++ else
++ dmfc_gen1 &= ~(1UL << 20);
++ } else if (dma_chan == 24) { /*6B*/
++ if (ipu->dmfc_size_24/width > 1)
++ dmfc_gen1 |= 1UL << 22;
++ else
++ dmfc_gen1 &= ~(1UL << 22);
++ } else if (dma_chan == 27) { /*5F*/
++ if (ipu->dmfc_size_27/width > 2)
++ dmfc_gen1 |= 1UL << 21;
++ else
++ dmfc_gen1 &= ~(1UL << 21);
++ } else if (dma_chan == 28) { /*1*/
++ if (ipu->dmfc_size_28/width > 2)
++ dmfc_gen1 |= 1UL << 16;
++ else
++ dmfc_gen1 &= ~(1UL << 16);
++ } else if (dma_chan == 29) { /*6F*/
++ if (ipu->dmfc_size_29/width > 1)
++ dmfc_gen1 |= 1UL << 23;
++ else
++ dmfc_gen1 &= ~(1UL << 23);
++ }
++
++ ipu_dmfc_write(ipu, dmfc_gen1, DMFC_GENERAL1);
++}
++
++void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size)
++{
++ u32 dmfc_wr_chan = ipu_dmfc_read(ipu, DMFC_WR_CHAN);
++ u32 dmfc_dp_chan = ipu_dmfc_read(ipu, DMFC_DP_CHAN);
++ int dmfc_bs = 0;
++
++ switch (burst_size) {
++ case 64:
++ dmfc_bs = 0x40;
++ break;
++ case 32:
++ case 20:
++ dmfc_bs = 0x80;
++ break;
++ case 16:
++ dmfc_bs = 0xc0;
++ break;
++ default:
++ dev_err(ipu->dev, "Unsupported burst size %d\n",
++ burst_size);
++ return;
++ }
++
++ if (dma_chan == 23) { /*5B*/
++ dmfc_dp_chan &= ~(0xc0);
++ dmfc_dp_chan |= dmfc_bs;
++ } else if (dma_chan == 27) { /*5F*/
++ dmfc_dp_chan &= ~(0xc000);
++ dmfc_dp_chan |= (dmfc_bs << 8);
++ } else if (dma_chan == 28) { /*1*/
++ dmfc_wr_chan &= ~(0xc0);
++ dmfc_wr_chan |= dmfc_bs;
++ }
++
++ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
++ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
++}
++
++static void _ipu_di_data_wave_config(struct ipu_soc *ipu,
++ int di, int wave_gen,
++ int access_size, int component_size)
++{
++ u32 reg;
++ reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
++ (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
++ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
++}
++
++static void _ipu_di_data_pin_config(struct ipu_soc *ipu,
++ int di, int wave_gen, int di_pin, int set,
++ int up, int down)
++{
++ u32 reg;
++
++ reg = ipu_di_read(ipu, di, DI_DW_GEN(wave_gen));
++ reg &= ~(0x3 << (di_pin * 2));
++ reg |= set << (di_pin * 2);
++ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
++
++ ipu_di_write(ipu, di, (down << 16) | up, DI_DW_SET(wave_gen, set));
++}
++
++static void _ipu_di_sync_config(struct ipu_soc *ipu,
++ int di, int wave_gen,
++ int run_count, int run_src,
++ int offset_count, int offset_src,
++ int repeat_count, int cnt_clr_src,
++ int cnt_polarity_gen_en,
++ int cnt_polarity_clr_src,
++ int cnt_polarity_trigger_src,
++ int cnt_up, int cnt_down)
++{
++ u32 reg;
++
++ if ((run_count >= 0x1000) || (offset_count >= 0x1000) || (repeat_count >= 0x1000) ||
++ (cnt_up >= 0x400) || (cnt_down >= 0x400)) {
++ dev_err(ipu->dev, "DI%d counters out of range.\n", di);
++ return;
++ }
++
++ reg = (run_count << 19) | (++run_src << 16) |
++ (offset_count << 3) | ++offset_src;
++ ipu_di_write(ipu, di, reg, DI_SW_GEN0(wave_gen));
++ reg = (cnt_polarity_gen_en << 29) | (++cnt_clr_src << 25) |
++ (++cnt_polarity_trigger_src << 12) | (++cnt_polarity_clr_src << 9);
++ reg |= (cnt_down << 16) | cnt_up;
++ if (repeat_count == 0) {
++ /* Enable auto reload */
++ reg |= 0x10000000;
++ }
++ ipu_di_write(ipu, di, reg, DI_SW_GEN1(wave_gen));
++ reg = ipu_di_read(ipu, di, DI_STP_REP(wave_gen));
++ reg &= ~(0xFFFF << (16 * ((wave_gen - 1) & 0x1)));
++ reg |= repeat_count << (16 * ((wave_gen - 1) & 0x1));
++ ipu_di_write(ipu, di, reg, DI_STP_REP(wave_gen));
++}
++
++static void _ipu_dc_map_link(struct ipu_soc *ipu,
++ int current_map,
++ int base_map_0, int buf_num_0,
++ int base_map_1, int buf_num_1,
++ int base_map_2, int buf_num_2)
++{
++ int ptr_0 = base_map_0 * 3 + buf_num_0;
++ int ptr_1 = base_map_1 * 3 + buf_num_1;
++ int ptr_2 = base_map_2 * 3 + buf_num_2;
++ int ptr;
++ u32 reg;
++ ptr = (ptr_2 << 10) + (ptr_1 << 5) + ptr_0;
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(current_map));
++ reg &= ~(0x1F << ((16 * (current_map & 0x1))));
++ reg |= ptr << ((16 * (current_map & 0x1)));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(current_map));
++}
++
++static void _ipu_dc_map_config(struct ipu_soc *ipu,
++ int map, int byte_num, int offset, int mask)
++{
++ int ptr = map * 3 + byte_num;
++ u32 reg;
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_VAL(ptr));
++ reg &= ~(0xFFFF << (16 * (ptr & 0x1)));
++ reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_VAL(ptr));
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
++ reg &= ~(0x1F << ((16 * (map & 0x1)) + (5 * byte_num)));
++ reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(map));
++}
++
++static void _ipu_dc_map_clear(struct ipu_soc *ipu, int map)
++{
++ u32 reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
++ ipu_dc_write(ipu, reg & ~(0xFFFF << (16 * (map & 0x1))),
++ DC_MAP_CONF_PTR(map));
++}
++
++static void _ipu_dc_write_tmpl(struct ipu_soc *ipu,
++ int word, u32 opcode, u32 operand, int map,
++ int wave, int glue, int sync, int stop)
++{
++ u32 reg;
++
++ if (opcode == WRG) {
++ reg = sync;
++ reg |= (glue << 4);
++ reg |= (++wave << 11);
++ reg |= ((operand & 0x1FFFF) << 15);
++ ipu_dc_tmpl_write(ipu, reg, word * 8);
++
++ reg = (operand >> 17);
++ reg |= opcode << 7;
++ reg |= (stop << 9);
++ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
++ } else {
++ reg = sync;
++ reg |= (glue << 4);
++ reg |= (++wave << 11);
++ reg |= (++map << 15);
++ reg |= (operand << 20) & 0xFFF00000;
++ ipu_dc_tmpl_write(ipu, reg, word * 8);
++
++ reg = (operand >> 12);
++ reg |= opcode << 4;
++ reg |= (stop << 9);
++ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
++ }
++}
++
++static void _ipu_dc_link_event(struct ipu_soc *ipu,
++ int chan, int event, int addr, int priority)
++{
++ u32 reg;
++ u32 address_shift;
++ if (event < DC_EVEN_UGDE0) {
++ reg = ipu_dc_read(ipu, DC_RL_CH(chan, event));
++ reg &= ~(0xFFFF << (16 * (event & 0x1)));
++ reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
++ ipu_dc_write(ipu, reg, DC_RL_CH(chan, event));
++ } else {
++ reg = ipu_dc_read(ipu, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
++ if ((event - DC_EVEN_UGDE0) & 0x1) {
++ reg &= ~(0x2FF << 16);
++ reg |= (addr << 16);
++ reg |= priority ? (2 << 24) : 0x0;
++ } else {
++ reg &= ~0xFC00FFFF;
++ if (priority)
++ chan = (chan >> 1) +
++ ((((chan & 0x1) + ((chan & 0x2) >> 1))) | (chan >> 3));
++ else
++ chan = 0x7;
++ address_shift = ((event - DC_EVEN_UGDE0) >> 1) ? 7 : 8;
++ reg |= (addr << address_shift) | (priority << 3) | chan;
++ }
++ ipu_dc_write(ipu, reg, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
++ }
++}
++
++/* Y = R * 1.200 + G * 2.343 + B * .453 + 0.250;
++ U = R * -.672 + G * -1.328 + B * 2.000 + 512.250.;
++ V = R * 2.000 + G * -1.672 + B * -.328 + 512.250.;*/
++static const int rgb2ycbcr_coeff[5][3] = {
++ {0x4D, 0x96, 0x1D},
++ {-0x2B, -0x55, 0x80},
++ {0x80, -0x6B, -0x15},
++ {0x0000, 0x0200, 0x0200}, /* B0, B1, B2 */
++ {0x2, 0x2, 0x2}, /* S0, S1, S2 */
++};
++
++/* R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
++ G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
++   B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128)); */
++static const int ycbcr2rgb_coeff[5][3] = {
++ {0x095, 0x000, 0x0CC},
++ {0x095, 0x3CE, 0x398},
++ {0x095, 0x0FF, 0x000},
++ {0x3E42, 0x010A, 0x3DD6}, /*B0,B1,B2 */
++ {0x1, 0x1, 0x1}, /*S0,S1,S2 */
++};
++
++#define mask_a(a) ((u32)(a) & 0x3FF)
++#define mask_b(b) ((u32)(b) & 0x3FFF)
++
++/* Please keep S0, S1 and S2 as 0x2 when using this conversion */
++static int _rgb_to_yuv(int n, int red, int green, int blue)
++{
++ int c;
++ c = red * rgb2ycbcr_coeff[n][0];
++ c += green * rgb2ycbcr_coeff[n][1];
++ c += blue * rgb2ycbcr_coeff[n][2];
++ c /= 16;
++ c += rgb2ycbcr_coeff[3][n] * 4;
++ c += 8;
++ c /= 16;
++ if (c < 0)
++ c = 0;
++ if (c > 255)
++ c = 255;
++ return c;
++}
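++
++/*
++ * Worked example (editorial, derived only from the table and code above):
++ * for pure white, red = green = blue = 255,
++ *   Y: c = 255*0x4D + 255*0x96 + 255*0x1D = 65280; 65280/16 = 4080;
++ *      4080 + 0x0000*4 + 8 = 4088; 4088/16 = 255
++ *   U: c = 255*(-0x2B) + 255*(-0x55) + 255*0x80 = 0; 0/16 = 0;
++ *      0 + 0x0200*4 + 8 = 2056; 2056/16 = 128
++ * so white maps to (Y, U, V) = (255, 128, 128), as expected for this
++ * BT.601-style conversion.
++ */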
++
++/*
++ * Row is for BG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
++ * Column is for FG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
++ */
++static struct dp_csc_param_t dp_csc_array[CSC_NUM][CSC_NUM] = {
++{{DP_COM_CONF_CSC_DEF_BOTH, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff} },
++{{0, 0}, {DP_COM_CONF_CSC_DEF_BOTH, &ycbcr2rgb_coeff}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff} },
++{{0, 0}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} },
++{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
++{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} }
++};
++
++void __ipu_dp_csc_setup(struct ipu_soc *ipu,
++ int dp, struct dp_csc_param_t dp_csc_param,
++ bool srm_mode_update)
++{
++ u32 reg;
++ const int (*coeff)[5][3];
++
++ if (dp_csc_param.mode >= 0) {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
++ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++ reg |= dp_csc_param.mode;
++ ipu_dp_write(ipu, reg, DP_COM_CONF(dp));
++ }
++
++ coeff = dp_csc_param.coeff;
++
++ if (coeff) {
++ ipu_dp_write(ipu, mask_a((*coeff)[0][0]) |
++ (mask_a((*coeff)[0][1]) << 16), DP_CSC_A_0(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[0][2]) |
++ (mask_a((*coeff)[1][0]) << 16), DP_CSC_A_1(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[1][1]) |
++ (mask_a((*coeff)[1][2]) << 16), DP_CSC_A_2(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[2][0]) |
++ (mask_a((*coeff)[2][1]) << 16), DP_CSC_A_3(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[2][2]) |
++ (mask_b((*coeff)[3][0]) << 16) |
++ ((*coeff)[4][0] << 30), DP_CSC_0(dp));
++ ipu_dp_write(ipu, mask_b((*coeff)[3][1]) | ((*coeff)[4][1] << 14) |
++ (mask_b((*coeff)[3][2]) << 16) |
++ ((*coeff)[4][2] << 30), DP_CSC_1(dp));
++ }
++
++ if (srm_mode_update) {
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++}
++
++int _ipu_dp_init(struct ipu_soc *ipu,
++ ipu_channel_t channel, uint32_t in_pixel_fmt,
++ uint32_t out_pixel_fmt)
++{
++ int in_fmt, out_fmt;
++ int dp;
++ int partial = false;
++ uint32_t reg;
++
++ if (channel == MEM_FG_SYNC) {
++ dp = DP_SYNC;
++ partial = true;
++ } else if (channel == MEM_BG_SYNC) {
++ dp = DP_SYNC;
++ partial = false;
++ } else if (channel == MEM_BG_ASYNC0) {
++ dp = DP_ASYNC0;
++ partial = false;
++ } else {
++ return -EINVAL;
++ }
++
++ in_fmt = format_to_colorspace(in_pixel_fmt);
++ out_fmt = format_to_colorspace(out_pixel_fmt);
++
++ if (partial) {
++ if (in_fmt == RGB) {
++ if (out_fmt == RGB)
++ ipu->fg_csc_type = RGB2RGB;
++ else
++ ipu->fg_csc_type = RGB2YUV;
++ } else {
++ if (out_fmt == RGB)
++ ipu->fg_csc_type = YUV2RGB;
++ else
++ ipu->fg_csc_type = YUV2YUV;
++ }
++ } else {
++ if (in_fmt == RGB) {
++ if (out_fmt == RGB)
++ ipu->bg_csc_type = RGB2RGB;
++ else
++ ipu->bg_csc_type = RGB2YUV;
++ } else {
++ if (out_fmt == RGB)
++ ipu->bg_csc_type = YUV2RGB;
++ else
++ ipu->bg_csc_type = YUV2YUV;
++ }
++ }
++
++ /* Transform color key from rgb to yuv if CSC is enabled */
++ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
++ if (ipu->color_key_4rgb && (reg & DP_COM_CONF_GWCKE) &&
++ (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB)))) {
++ int red, green, blue;
++ int y, u, v;
++ uint32_t color_key = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFFFFFFL;
++
++ dev_dbg(ipu->dev, "_ipu_dp_init color key 0x%x need change to yuv fmt!\n", color_key);
++
++ red = (color_key >> 16) & 0xFF;
++ green = (color_key >> 8) & 0xFF;
++ blue = color_key & 0xFF;
++
++ y = _rgb_to_yuv(0, red, green, blue);
++ u = _rgb_to_yuv(1, red, green, blue);
++ v = _rgb_to_yuv(2, red, green, blue);
++ color_key = (y << 16) | (u << 8) | v;
++
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFF000000L;
++ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(dp));
++ ipu->color_key_4rgb = false;
++
++ dev_dbg(ipu->dev, "_ipu_dp_init color key change to yuv fmt 0x%x!\n", color_key);
++ }
++
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], true);
++
++ return 0;
++}
++
++void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int dp;
++ int partial = false;
++
++ if (channel == MEM_FG_SYNC) {
++ dp = DP_SYNC;
++ partial = true;
++ } else if (channel == MEM_BG_SYNC) {
++ dp = DP_SYNC;
++ partial = false;
++ } else if (channel == MEM_BG_ASYNC0) {
++ dp = DP_ASYNC0;
++ partial = false;
++ } else {
++ return;
++ }
++
++ if (partial)
++ ipu->fg_csc_type = CSC_NONE;
++ else
++ ipu->bg_csc_type = CSC_NONE;
++
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], false);
++}
++
++void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt)
++{
++ u32 reg = 0;
++
++ if ((dc_chan == 1) || (dc_chan == 5)) {
++ if (interlaced) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 1);
++ } else {
++ if (di) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 2, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 3, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 1, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 9, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 8, 5);
++ }
++ } else {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 5, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 6, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 12, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 10, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 11, 5);
++ }
++ }
++ }
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
++
++ reg = 0x2;
++ reg |= DC_DISP_ID_SYNC(di) << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
++ reg |= di << 2;
++ if (interlaced)
++ reg |= DC_WR_CH_CONF_FIELD_MODE;
++ } else if ((dc_chan == 8) || (dc_chan == 9)) {
++ /* async channels */
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0x64, 1);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0x64, 1);
++
++ reg = 0x3;
++ reg |= DC_DISP_ID_SERIAL << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
++ }
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ ipu_dc_write(ipu, 0x00000000, DC_WR_CH_ADDR(dc_chan));
++
++ ipu_dc_write(ipu, 0x00000084, DC_GEN);
++}
++
++void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan)
++{
++ if ((dc_chan == 1) || (dc_chan == 5)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 0, 0);
++ } else if ((dc_chan == 8) || (dc_chan == 9)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_1, 0, 0);
++ }
++}
++
++int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ if (channel == MEM_DC_SYNC)
++ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_1) &
++ DC_WR_CH_CONF_FIELD_MODE);
++ else if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))
++ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_5) &
++ DC_WR_CH_CONF_FIELD_MODE);
++ return 0;
++}
++
++void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int di;
++ uint32_t reg;
++ uint32_t dc_chan;
++ int irq = 0;
++
++ if (channel == MEM_FG_SYNC)
++ irq = IPU_IRQ_DP_SF_END;
++ else if (channel == MEM_DC_SYNC)
++ dc_chan = 1;
++ else if (channel == MEM_BG_SYNC)
++ dc_chan = 5;
++ else
++ return;
++
++ if (channel == MEM_FG_SYNC) {
++ /* Enable FG channel */
++ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_FG_EN, DP_COM_CONF(DP_SYNC));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ return;
++ } else if (channel == MEM_BG_SYNC) {
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++
++ di = ipu->dc_di_assignment[dc_chan];
++
++ /* Make sure other DC sync channel is not assigned same DI */
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(6 - dc_chan));
++ if ((di << 2) == (reg & DC_WR_CH_CONF_PROG_DI_ID)) {
++ reg &= ~DC_WR_CH_CONF_PROG_DI_ID;
++ reg |= di ? 0 : DC_WR_CH_CONF_PROG_DI_ID;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
++ }
++
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ reg |= 4 << DC_WR_CH_CONF_PROG_TYPE_OFFSET;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ clk_prepare_enable(ipu->pixel_clk[di]);
++}
++
++static irqreturn_t dc_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_soc *ipu = dev_id;
++ struct completion *comp = &ipu->dc_comp;
++ uint32_t reg;
++ uint32_t dc_chan;
++
++ if (irq == IPU_IRQ_DC_FC_1)
++ dc_chan = 1;
++ else
++ dc_chan = 5;
++
++ if (!ipu->dc_swap) {
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ reg = ipu_cm_read(ipu, IPU_DISP_GEN);
++ if (ipu->dc_di_assignment[dc_chan])
++ reg &= ~DI1_COUNTER_RELEASE;
++ else
++ reg &= ~DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, reg, IPU_DISP_GEN);
++ }
++
++ complete(comp);
++ return IRQ_HANDLED;
++}
++
++void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap)
++{
++ int ret;
++ uint32_t reg;
++ uint32_t csc;
++ uint32_t dc_chan;
++ int irq = 0;
++ int timeout = 50;
++
++ ipu->dc_swap = swap;
++
++ if (channel == MEM_DC_SYNC) {
++ dc_chan = 1;
++ irq = IPU_IRQ_DC_FC_1;
++ } else if (channel == MEM_BG_SYNC) {
++ dc_chan = 5;
++ irq = IPU_IRQ_DP_SF_END;
++ } else if (channel == MEM_FG_SYNC) {
++ /* Disable FG channel */
++ dc_chan = 5;
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
++ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
++ if (csc == DP_COM_CONF_CSC_DEF_FG)
++ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++
++ reg &= ~DP_COM_CONF_FG_EN;
++ ipu_dp_write(ipu, reg, DP_COM_CONF(DP_SYNC));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ if (ipu_is_channel_busy(ipu, MEM_BG_SYNC)) {
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END),
++ IPUIRQ_2_STATREG(IPU_IRQ_DP_SF_END));
++ while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_DP_SF_END)) &
++ IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END)) == 0) {
++ msleep(2);
++ timeout -= 2;
++ if (timeout <= 0)
++ break;
++ }
++ }
++ return;
++ } else {
++ return;
++ }
++
++ init_completion(&ipu->dc_comp);
++ ret = ipu_request_irq(ipu, irq, dc_irq_handler, 0, NULL, ipu);
++ if (ret < 0) {
++ dev_err(ipu->dev, "DC irq %d in use\n", irq);
++ return;
++ }
++ ret = wait_for_completion_timeout(&ipu->dc_comp, msecs_to_jiffies(50));
++ ipu_free_irq(ipu, irq, ipu);
++ dev_dbg(ipu->dev, "DC stop timeout - %d * 10ms\n", 5 - ret);
++
++ if (ipu->dc_swap) {
++ /* Swap DC channel 1 and 5 settings, and disable old dc chan */
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
++ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++ reg ^= DC_WR_CH_CONF_PROG_DI_ID;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++ }
++}
++
++void _ipu_init_dc_mappings(struct ipu_soc *ipu)
++{
++ /* IPU_PIX_FMT_RGB24 */
++ _ipu_dc_map_clear(ipu, 0);
++ _ipu_dc_map_config(ipu, 0, 0, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 0, 1, 15, 0xFF);
++ _ipu_dc_map_config(ipu, 0, 2, 23, 0xFF);
++
++ /* IPU_PIX_FMT_RGB666 */
++ _ipu_dc_map_clear(ipu, 1);
++ _ipu_dc_map_config(ipu, 1, 0, 5, 0xFC);
++ _ipu_dc_map_config(ipu, 1, 1, 11, 0xFC);
++ _ipu_dc_map_config(ipu, 1, 2, 17, 0xFC);
++
++ /* IPU_PIX_FMT_YUV444 */
++ _ipu_dc_map_clear(ipu, 2);
++ _ipu_dc_map_config(ipu, 2, 0, 15, 0xFF);
++ _ipu_dc_map_config(ipu, 2, 1, 23, 0xFF);
++ _ipu_dc_map_config(ipu, 2, 2, 7, 0xFF);
++
++ /* IPU_PIX_FMT_RGB565 */
++ _ipu_dc_map_clear(ipu, 3);
++ _ipu_dc_map_config(ipu, 3, 0, 4, 0xF8);
++ _ipu_dc_map_config(ipu, 3, 1, 10, 0xFC);
++ _ipu_dc_map_config(ipu, 3, 2, 15, 0xF8);
++
++ /* IPU_PIX_FMT_LVDS666 */
++ _ipu_dc_map_clear(ipu, 4);
++ _ipu_dc_map_config(ipu, 4, 0, 5, 0xFC);
++ _ipu_dc_map_config(ipu, 4, 1, 13, 0xFC);
++ _ipu_dc_map_config(ipu, 4, 2, 21, 0xFC);
++
++ /* IPU_PIX_FMT_VYUY 16bit width */
++ _ipu_dc_map_clear(ipu, 5);
++ _ipu_dc_map_config(ipu, 5, 0, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 5, 1, 0, 0x0);
++ _ipu_dc_map_config(ipu, 5, 2, 15, 0xFF);
++ _ipu_dc_map_clear(ipu, 6);
++ _ipu_dc_map_config(ipu, 6, 0, 0, 0x0);
++ _ipu_dc_map_config(ipu, 6, 1, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 6, 2, 15, 0xFF);
++
++ /* IPU_PIX_FMT_UYUV 16bit width */
++ _ipu_dc_map_clear(ipu, 7);
++ _ipu_dc_map_link(ipu, 7, 6, 0, 6, 1, 6, 2);
++ _ipu_dc_map_clear(ipu, 8);
++ _ipu_dc_map_link(ipu, 8, 5, 0, 5, 1, 5, 2);
++
++ /* IPU_PIX_FMT_YUYV 16bit width */
++ _ipu_dc_map_clear(ipu, 9);
++ _ipu_dc_map_link(ipu, 9, 5, 2, 5, 1, 5, 0);
++ _ipu_dc_map_clear(ipu, 10);
++ _ipu_dc_map_link(ipu, 10, 5, 1, 5, 2, 5, 0);
++
++ /* IPU_PIX_FMT_YVYU 16bit width */
++ _ipu_dc_map_clear(ipu, 11);
++ _ipu_dc_map_link(ipu, 11, 5, 1, 5, 2, 5, 0);
++ _ipu_dc_map_clear(ipu, 12);
++ _ipu_dc_map_link(ipu, 12, 5, 2, 5, 1, 5, 0);
++
++ /* IPU_PIX_FMT_GBR24 */
++ /* IPU_PIX_FMT_VYU444 */
++ _ipu_dc_map_clear(ipu, 13);
++ _ipu_dc_map_link(ipu, 13, 0, 2, 0, 0, 0, 1);
++
++ /* IPU_PIX_FMT_BGR24 */
++ _ipu_dc_map_clear(ipu, 14);
++ _ipu_dc_map_link(ipu, 14, 0, 2, 0, 1, 0, 0);
++}
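++
++/*
++ * Editorial note (not part of the original source), derived from the maps
++ * above: _ipu_dc_map_config(ipu, map, byte_num, offset, mask) places one
++ * colour component on the display data bus, where "offset" is the most
++ * significant bus bit used by the component and "mask" selects the bits
++ * taken from the 8-bit source component.  For map 3 (RGB565) this puts
++ * component 0 in bus bits 0..4, component 1 in bits 5..10 and component 2
++ * in bits 11..15.
++ */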
++
++int _ipu_pixfmt_to_map(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_RGB24:
++ return 0;
++ case IPU_PIX_FMT_RGB666:
++ return 1;
++ case IPU_PIX_FMT_YUV444:
++ return 2;
++ case IPU_PIX_FMT_RGB565:
++ return 3;
++ case IPU_PIX_FMT_LVDS666:
++ return 4;
++ case IPU_PIX_FMT_VYUY:
++ return 6;
++ case IPU_PIX_FMT_UYVY:
++ return 8;
++ case IPU_PIX_FMT_YUYV:
++ return 10;
++ case IPU_PIX_FMT_YVYU:
++ return 12;
++ case IPU_PIX_FMT_GBR24:
++ case IPU_PIX_FMT_VYU444:
++ return 13;
++ case IPU_PIX_FMT_BGR24:
++ return 14;
++ }
++
++ return -1;
++}
++
++/*!
++ * This function sets the color space conversion mode and coefficients
++ * for the DP.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param param If it's not NULL, update the csc table
++ * with this parameter.
++ *
++ * @return N/A
++ */
++void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
++{
++ int dp;
++ struct dp_csc_param_t dp_csc_param;
++
++ if (channel == MEM_FG_SYNC)
++ dp = DP_SYNC;
++ else if (channel == MEM_BG_SYNC)
++ dp = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0)
++ dp = DP_ASYNC0;
++ else
++ return;
++
++ dp_csc_param.mode = -1;
++ dp_csc_param.coeff = param;
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_param, true);
++}
++
++void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
++{
++ _ipu_dp_set_csc_coefficients(ipu, channel, param);
++}
++EXPORT_SYMBOL(ipu_set_csc_coefficients);
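++
++/*
++ * Minimal sketch (editorial, not part of the original source): the param
++ * table passed to ipu_set_csc_coefficients() uses the same 5x3 layout as
++ * rgb2ycbcr_coeff / ycbcr2rgb_coeff above - rows 0..2 are the 3x3
++ * coefficient matrix, row 3 the offsets B0..B2 and row 4 the scales
++ * S0..S2.  For example, to program the RGB to YCbCr matrix on the
++ * background flow:
++ *
++ *	int32_t csc[5][3] = {
++ *		{0x4D, 0x96, 0x1D},
++ *		{-0x2B, -0x55, 0x80},
++ *		{0x80, -0x6B, -0x15},
++ *		{0x0000, 0x0200, 0x0200},	// offsets B0..B2
++ *		{0x2, 0x2, 0x2},		// scales S0..S2
++ *	};
++ *	ipu_set_csc_coefficients(ipu, MEM_BG_SYNC, csc);
++ */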
++
++/*!
++ * This function is called to adapt a synchronous LCD panel's vertical timing to IPU restrictions.
++ *
++ */
++void adapt_panel_to_ipu_restricitions(struct ipu_soc *ipu, uint16_t *v_start_width,
++ uint16_t *v_sync_width,
++ uint16_t *v_end_width)
++{
++ if (*v_end_width < 2) {
++ uint16_t diff = 2 - *v_end_width;
++ if (*v_start_width >= diff) {
++ *v_end_width = 2;
++ *v_start_width = *v_start_width - diff;
++ } else if (*v_sync_width > diff) {
++ *v_end_width = 2;
++ *v_sync_width = *v_sync_width - diff;
++ } else
++			dev_err(ipu->dev, "WARNING: tried to adapt timing, but failed\n");
++		dev_err(ipu->dev, "WARNING: adapted panel end blank lines\n");
++ }
++}
++
++/*!
++ * This function is called to initialize a synchronous LCD panel.
++ *
++ * @param ipu ipu handler
++ * @param disp The DI the panel is attached to.
++ *
++ * @param pixel_clk Desired pixel clock frequency in Hz.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width The width of panel in pixels.
++ *
++ * @param height The height of panel in pixels.
++ *
++ * @param hStartWidth The number of pixel clocks between the HSYNC
++ * signal pulse and the start of valid data.
++ *
++ * @param hSyncWidth The width of the HSYNC signal in units of pixel
++ * clocks.
++ *
++ * @param hEndWidth The number of pixel clocks between the end of
++ * valid data and the HSYNC signal for next line.
++ *
++ * @param vStartWidth The number of lines between the VSYNC
++ * signal pulse and the start of valid data.
++ *
++ * @param vSyncWidth The width of the VSYNC signal in units of lines
++ *
++ * @param vEndWidth The number of lines between the end of valid
++ * data and the VSYNC signal for next frame.
++ *
++ * @param sig Bitfield of signal polarities for LCD interface.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp, uint32_t pixel_clk,
++ uint16_t width, uint16_t height,
++ uint32_t pixel_fmt,
++ uint16_t h_start_width, uint16_t h_sync_width,
++ uint16_t h_end_width, uint16_t v_start_width,
++ uint16_t v_sync_width, uint16_t v_end_width,
++ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig)
++{
++ uint32_t field0_offset = 0;
++ uint32_t field1_offset;
++ uint32_t reg;
++ uint32_t di_gen, vsync_cnt;
++ uint32_t div, rounded_pixel_clk;
++ uint32_t h_total, v_total;
++ int map;
++ int ret;
++ struct clk *ldb_di0_clk, *ldb_di1_clk;
++ struct clk *di_parent;
++
++ dev_dbg(ipu->dev, "panel size = %d x %d\n", width, height);
++
++ if ((v_sync_width == 0) || (h_sync_width == 0))
++ return -EINVAL;
++
++ adapt_panel_to_ipu_restricitions(ipu, &v_start_width, &v_sync_width, &v_end_width);
++ h_total = width + h_sync_width + h_start_width + h_end_width;
++ v_total = height + v_sync_width + v_start_width + v_end_width;
++
++ /* Init clocking */
++ dev_dbg(ipu->dev, "pixel clk = %d\n", pixel_clk);
++
++ di_parent = clk_get_parent(ipu->di_clk_sel[disp]);
++ if (!di_parent) {
++ dev_err(ipu->dev, "get di clk parent fail\n");
++ return -EINVAL;
++ }
++ ldb_di0_clk = clk_get(ipu->dev, "ldb_di0");
++ if (IS_ERR(ldb_di0_clk)) {
++ dev_err(ipu->dev, "clk_get di0 failed");
++ return PTR_ERR(ldb_di0_clk);
++ }
++ ldb_di1_clk = clk_get(ipu->dev, "ldb_di1");
++ if (IS_ERR(ldb_di1_clk)) {
++ dev_err(ipu->dev, "clk_get di1 failed");
++ return PTR_ERR(ldb_di1_clk);
++ }
++
++ if (ldb_di0_clk == di_parent || ldb_di1_clk == di_parent) {
++ /* if di clk parent is tve/ldb, then keep it;*/
++ dev_dbg(ipu->dev, "use special clk parent\n");
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->di_clk[disp]);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
++ return ret;
++ }
++ clk_put(ldb_di0_clk);
++ clk_put(ldb_di1_clk);
++ } else {
++ /* try ipu clk first*/
++ dev_dbg(ipu->dev, "try ipu internal clk\n");
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
++ return ret;
++ }
++ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
++ dev_dbg(ipu->dev, "rounded pix clk:%d\n", rounded_pixel_clk);
++ /*
++		 * only a 1/2 fraction divider is used for the ipu clk,
++		 * so if the rounded clk rate does not fit, try the ext clk.
++ */
++ if (!sig.int_clk &&
++ ((rounded_pixel_clk >= pixel_clk + pixel_clk/200) ||
++ (rounded_pixel_clk <= pixel_clk - pixel_clk/200))) {
++ dev_dbg(ipu->dev, "try ipu ext di clk\n");
++
++ rounded_pixel_clk =
++ clk_round_rate(ipu->di_clk[disp], pixel_clk);
++ ret = clk_set_rate(ipu->di_clk[disp],
++ rounded_pixel_clk);
++ if (ret) {
++ dev_err(ipu->dev,
++ "set di clk rate error:%d\n", ret);
++ return ret;
++ }
++ dev_dbg(ipu->dev, "di clk:%d\n", rounded_pixel_clk);
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp],
++ ipu->di_clk[disp]);
++ if (ret) {
++ dev_err(ipu->dev,
++ "set pixel clk parent error:%d\n", ret);
++ return ret;
++ }
++ }
++ }
++ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
++ dev_dbg(ipu->dev, "round pixel clk:%d\n", rounded_pixel_clk);
++ ret = clk_set_rate(ipu->pixel_clk[disp], rounded_pixel_clk);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk rate error:%d\n", ret);
++ return ret;
++ }
++ msleep(5);
++ /* Get integer portion of divider */
++ div = clk_get_rate(clk_get_parent(ipu->pixel_clk_sel[disp])) / rounded_pixel_clk;
++ dev_dbg(ipu->dev, "div:%d\n", div);
++ if (!div) {
++ dev_err(ipu->dev, "invalid pixel clk div = 0\n");
++ return -EINVAL;
++ }
++
++
++ mutex_lock(&ipu->mutex_lock);
++
++ _ipu_di_data_wave_config(ipu, disp, SYNC_WAVE, div - 1, div - 1);
++ _ipu_di_data_pin_config(ipu, disp, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
++
++ map = _ipu_pixfmt_to_map(pixel_fmt);
++ if (map < 0) {
++ dev_dbg(ipu->dev, "IPU_DISP: No MAP\n");
++ mutex_unlock(&ipu->mutex_lock);
++ return -EINVAL;
++ }
++
++ /*clear DI*/
++ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
++ di_gen &= (0x3 << 20);
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ if (sig.interlaced) {
++ if (g_ipu_hw_rev >= IPU_V3DEX) {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 1, /* counter */
++ h_total/2 - 1, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Field 1 VSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 2, /* counter */
++ h_total - 1, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*div /* COUNT DOWN */
++ );
++
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 3, /* counter */
++ v_total*2 - 1, /* run count */
++ DI_SYNC_INT_HSYNC, /* run_resolution */
++ 1, /* offset */
++ DI_SYNC_INT_HSYNC, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*div /* COUNT DOWN */
++ );
++
++ /* Active Field ? */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 4, /* counter */
++ v_total/2 - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ v_start_width, /* offset */
++ DI_SYNC_HSYNC, /* offset resolution */
++ 2, /* repeat count */
++ DI_SYNC_VSYNC, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Active Line */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 5, /* counter */
++ 0, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ height/2, /* repeat count */
++ 4, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Field 0 VSYNC waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 6, /* counter */
++ v_total - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* DC VSYNC waveform */
++ vsync_cnt = 7;
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 7, /* counter */
++ v_total/2 - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ 9, /* offset */
++ DI_SYNC_HSYNC, /* offset resolution */
++ 2, /* repeat count */
++ DI_SYNC_VSYNC, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* active pixel waveform */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 8, /* counter */
++ 0, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ h_start_width, /* offset */
++ DI_SYNC_CLK, /* offset resolution */
++ width, /* repeat count */
++ 5, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Second VSYNC */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ 9, /* counter */
++ v_total - 1, /* run count */
++ DI_SYNC_INT_HSYNC, /* run_resolution */
++ v_total/2, /* offset */
++ DI_SYNC_INT_HSYNC, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_HSYNC, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*div /* COUNT DOWN */
++ );
++
++ /* set gentime select and tag sel */
++ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
++ reg &= 0x1FFFFFFF;
++ reg |= (3-1)<<29 | 0x00008000;
++ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
++
++ ipu_di_write(ipu, disp, v_total / 2 - 1, DI_SCR_CONF);
++
++ /* set y_sel = 1 */
++ di_gen |= 0x10000000;
++ di_gen |= DI_GEN_POLARITY_5;
++ di_gen |= DI_GEN_POLARITY_8;
++ } else {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 1, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ field1_offset = v_sync_width + v_start_width + height / 2 +
++ v_end_width;
++ if (sig.odd_field_first) {
++ field0_offset = field1_offset - 1;
++ field1_offset = 0;
++ }
++ v_total += v_start_width + v_end_width;
++
++ /* Field 1 VSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 2, v_total - 1, 1,
++ field0_offset,
++ field0_offset ? 1 : DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
++
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 3, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
++
++ /* Active Field ? */
++ _ipu_di_sync_config(ipu, disp, 4,
++ field0_offset ?
++ field0_offset : field1_offset - 2,
++ 1, v_start_width + v_sync_width, 1, 2, 2,
++ 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
++
++ /* Active Line */
++ _ipu_di_sync_config(ipu, disp, 5, 0, 1,
++ 0, DI_SYNC_NONE,
++ height / 2, 4, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* Field 0 VSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 6, v_total - 1, 1,
++ 0, DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* DC VSYNC waveform */
++ vsync_cnt = 7;
++ _ipu_di_sync_config(ipu, disp, 7, 0, 1,
++ field1_offset,
++ field1_offset ? 1 : DI_SYNC_NONE,
++ 1, 2, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
++
++ /* active pixel waveform */
++ _ipu_di_sync_config(ipu, disp, 8, 0, DI_SYNC_CLK,
++ h_sync_width + h_start_width, DI_SYNC_CLK,
++ width, 5, 0, DI_SYNC_NONE, DI_SYNC_NONE,
++ 0, 0);
++
++ /* ??? */
++ _ipu_di_sync_config(ipu, disp, 9, v_total - 1, 2,
++ 0, DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 6, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
++ reg |= 0x8000;
++ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
++
++ ipu_di_write(ipu, disp, v_sync_width + v_start_width +
++ v_end_width + height / 2 - 1, DI_SCR_CONF);
++ }
++
++ /* Init template microcode */
++ _ipu_dc_write_tmpl(ipu, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1);
++
++ if (sig.Hsync_pol)
++ di_gen |= DI_GEN_POLARITY_3;
++ if (sig.Vsync_pol)
++ di_gen |= DI_GEN_POLARITY_2;
++ } else {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, 1, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* Setup external (delayed) HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_HSYNC, h_total - 1,
++ DI_SYNC_CLK, div * v_to_h_sync, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 1, DI_SYNC_NONE,
++ DI_SYNC_CLK, 0, h_sync_width * 2);
++ /* Setup VSYNC waveform */
++ vsync_cnt = DI_SYNC_VSYNC;
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_VSYNC, v_total - 1,
++ DI_SYNC_INT_HSYNC, 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, 1, DI_SYNC_NONE,
++ DI_SYNC_INT_HSYNC, 0, v_sync_width * 2);
++ ipu_di_write(ipu, disp, v_total - 1, DI_SCR_CONF);
++
++ /* Setup active data waveform to sync with DC */
++ _ipu_di_sync_config(ipu, disp, 4, 0, DI_SYNC_HSYNC,
++ v_sync_width + v_start_width, DI_SYNC_HSYNC, height,
++ DI_SYNC_VSYNC, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++ _ipu_di_sync_config(ipu, disp, 5, 0, DI_SYNC_CLK,
++ h_sync_width + h_start_width, DI_SYNC_CLK,
++ width, 4, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0,
++ 0);
++
++		/* set VGA delayed hsync/vsync regardless of whether VGA is enabled */
++ if (disp) {
++			/* counter 7 for VGA delayed HSYNC */
++ _ipu_di_sync_config(ipu, disp, 7,
++ h_total - 1, DI_SYNC_CLK,
++ 18, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE,
++ 1, DI_SYNC_NONE, DI_SYNC_CLK,
++ 0, h_sync_width * 2);
++
++			/* counter 8 for VGA delayed VSYNC */
++ _ipu_di_sync_config(ipu, disp, 8,
++ v_total - 1, DI_SYNC_INT_HSYNC,
++ 1, DI_SYNC_INT_HSYNC,
++ 0, DI_SYNC_NONE,
++ 1, DI_SYNC_NONE, DI_SYNC_INT_HSYNC,
++ 0, v_sync_width * 2);
++ }
++
++ /* reset all unused counters */
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(6));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(6));
++ if (!disp) {
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(7));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(7));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(7));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(8));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(8));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(8));
++ }
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(9));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(9));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(9));
++
++ reg = ipu_di_read(ipu, disp, DI_STP_REP(6));
++ reg &= 0x0000FFFF;
++ ipu_di_write(ipu, disp, reg, DI_STP_REP(6));
++
++ /* Init template microcode */
++ if (disp) {
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 8, WROD(0), 0, (map - 1), SYNC_WAVE, 0, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 9, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, (width - 1), DC_UGDE_3(disp));
++ }
++ _ipu_dc_write_tmpl(ipu, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
++ _ipu_dc_write_tmpl(ipu, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
++ _ipu_dc_write_tmpl(ipu, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++
++ } else {
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 10, WROD(0), 0, (map - 1), SYNC_WAVE, 0, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 11, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, width - 1, DC_UGDE_3(disp));
++ }
++ _ipu_dc_write_tmpl(ipu, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
++ _ipu_dc_write_tmpl(ipu, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
++ _ipu_dc_write_tmpl(ipu, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
++ _ipu_dc_write_tmpl(ipu, 12, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
++ }
++
++ if (sig.Hsync_pol) {
++ di_gen |= DI_GEN_POLARITY_2;
++ if (disp)
++ di_gen |= DI_GEN_POLARITY_7;
++ }
++ if (sig.Vsync_pol) {
++ di_gen |= DI_GEN_POLARITY_3;
++ if (disp)
++ di_gen |= DI_GEN_POLARITY_8;
++ }
++ }
++	/* changing DISP_CLK polarity: it can be wrong for some applications */
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY))
++ di_gen |= 0x00020000;
++
++ if (!sig.clk_pol)
++ di_gen |= DI_GEN_POLARITY_DISP_CLK;
++
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ ipu_di_write(ipu, disp, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) |
++ 0x00000002, DI_SYNC_AS_GEN);
++ reg = ipu_di_read(ipu, disp, DI_POL);
++ reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
++ if (sig.enable_pol)
++ reg |= DI_POL_DRDY_POLARITY_15;
++ if (sig.data_pol)
++ reg |= DI_POL_DRDY_DATA_POLARITY;
++ ipu_di_write(ipu, disp, reg, DI_POL);
++
++ ipu_dc_write(ipu, width, DC_DISP_CONF2(DC_DISP_ID_SYNC(disp)));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_sync_panel);
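++
++/*
++ * Usage sketch (editorial, not part of the original source): a typical call
++ * for a progressive XGA panel on DI0 using standard VESA 1024x768@60
++ * timings.  The ipu handle and the ipu_di_signal_cfg_t contents are
++ * assumptions; real callers (e.g. the framebuffer driver) derive them from
++ * the fb_videomode of the attached panel.
++ *
++ *	ipu_di_signal_cfg_t sig = { 0 };
++ *	ipu_init_sync_panel(ipu, 0,		// DI0
++ *			    65000000,		// 65 MHz pixel clock
++ *			    1024, 768,		// active width and height
++ *			    IPU_PIX_FMT_RGB24,
++ *			    160, 136, 24,	// h back porch, hsync, h front porch
++ *			    29, 6, 3,		// v back porch, vsync, v front porch
++ *			    0, sig);		// v_to_h_sync, signal config
++ */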
++
++void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp)
++{
++ uint32_t reg;
++ uint32_t di_gen;
++
++	if ((disp != 0) && (disp != 1))
++ return;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
++ di_gen |= 0x3ff | DI_GEN_POLARITY_DISP_CLK;
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ reg = ipu_di_read(ipu, disp, DI_POL);
++ reg |= 0x3ffffff;
++ ipu_di_write(ipu, disp, reg, DI_POL);
++
++ mutex_unlock(&ipu->mutex_lock);
++}
++EXPORT_SYMBOL(ipu_uninit_sync_panel);
++
++int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
++ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig)
++{
++ int map;
++ u32 ser_conf = 0;
++ u32 div;
++ u32 di_clk = clk_get_rate(ipu->ipu_clk);
++
++	/* round up cycle_time, then calculate the divider using scaled math */
++ cycle_time += (1000000000UL / di_clk) - 1;
++ div = (cycle_time * (di_clk / 256UL)) / (1000000000UL / 256UL);
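++	/*
++	 * Editorial worked example (not part of the original source): with
++	 * di_clk = 264 MHz and cycle_time = 100 ns, the rounding step adds
++	 * 1000000000/264000000 - 1 = 2, and the scaled division gives
++	 * div = (102 * 1031250) / 3906250 = 26, i.e. roughly
++	 * cycle_time * di_clk / 1e9 without overflowing 32-bit arithmetic.
++	 */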
++
++ map = _ipu_pixfmt_to_map(pixel_fmt);
++ if (map < 0)
++ return -EINVAL;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (type == IPU_PANEL_SERIAL) {
++ ipu_di_write(ipu, disp, (div << 24) | ((sig.ifc_width - 1) << 4),
++ DI_DW_GEN(ASYNC_SER_WAVE));
++
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_CS,
++ 0, 0, (div * 2) + 1);
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_CLK,
++ 1, div, div * 2);
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_RS,
++ 2, 0, 0);
++
++ _ipu_dc_write_tmpl(ipu, 0x64, WROD(0), 0, map, ASYNC_SER_WAVE, 0, 0, 1);
++
++ /* Configure DC for serial panel */
++ ipu_dc_write(ipu, 0x14, DC_DISP_CONF1(DC_DISP_ID_SERIAL));
++
++ if (sig.clk_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_CLK_POL;
++ if (sig.data_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_DATA_POL;
++ if (sig.rs_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_RS_POL;
++ if (sig.cs_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_CS_POL;
++ ipu_di_write(ipu, disp, ser_conf, DI_SER_CONF);
++ }
++
++ mutex_unlock(&ipu->mutex_lock);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_async_panel);
++
++/*!
++ * This function sets the foreground and background plane global alpha blending
++ * modes. This function also sets the DP graphic plane according to the
++ * parameter of IPUv3 DP channel.
++ *
++ * @param ipu ipu handler
++ * @param channel IPUv3 DP channel
++ *
++ * @param enable Boolean to enable or disable global alpha
++ * blending. If disabled, local blending is used.
++ *
++ * @param alpha Global alpha value.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel,
++ bool enable, uint8_t alpha)
++{
++ uint32_t reg;
++ uint32_t flow;
++ bool bg_chan;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_BG_ASYNC0 ||
++ channel == MEM_BG_ASYNC1)
++ bg_chan = true;
++ else
++ bg_chan = false;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (bg_chan) {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
++ }
++
++ if (enable) {
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0x00FFFFFFL;
++ ipu_dp_write(ipu, reg | ((uint32_t) alpha << 24),
++ DP_GRAPH_WIND_CTRL(flow));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWAM, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWAM, DP_COM_CONF(flow));
++ }
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_global_alpha);
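++
++/*
++ * Minimal sketch (editorial, not part of the original source): blend the
++ * foreground plane over the background at roughly 50% opacity on the
++ * synchronous flow; the ipu handle is assumed to come from the caller's
++ * usual IPU lookup.
++ *
++ *	ipu_disp_set_global_alpha(ipu, MEM_FG_SYNC, true, 128);
++ */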
++
++/*!
++ * This function sets the transparent color key for SDC graphic plane.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param enable Boolean to enable or disable color key
++ *
++ * @param colorKey 24-bit RGB color for transparent color key.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel,
++ bool enable, uint32_t color_key)
++{
++ uint32_t reg, flow;
++ int y, u, v;
++ int red, green, blue;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu->color_key_4rgb = true;
++ /* Transform color key from rgb to yuv if CSC is enabled */
++ if (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB))) {
++
++ dev_dbg(ipu->dev, "color key 0x%x need change to yuv fmt\n", color_key);
++
++ red = (color_key >> 16) & 0xFF;
++ green = (color_key >> 8) & 0xFF;
++ blue = color_key & 0xFF;
++
++ y = _rgb_to_yuv(0, red, green, blue);
++ u = _rgb_to_yuv(1, red, green, blue);
++ v = _rgb_to_yuv(2, red, green, blue);
++ color_key = (y << 16) | (u << 8) | v;
++
++ ipu->color_key_4rgb = false;
++
++		dev_dbg(ipu->dev, "color key converted to yuv fmt 0x%x\n", color_key);
++ }
++
++ if (enable) {
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0xFF000000L;
++ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(flow));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
++ }
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_color_key);
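++
++/*
++ * A minimal usage sketch (channel and key value are illustrative only):
++ * keying out pure blue on the synchronous flow could look like
++ *
++ *	ipu_disp_set_color_key(ipu, MEM_BG_SYNC, true, 0x0000FF);
++ *
++ * The key is passed as 24-bit RGB and, as handled above, is converted to
++ * YUV when the active DP color space conversions require it.
++ */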
++
++/*!
++ * This function sets the gamma correction for DP output.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param enable Boolean to enable or disable gamma correction.
++ *
++ * @param constk Gamma piecewise linear approximation constk coeff.
++ *
++ * @param slopek Gamma piecewise linear approximation slopek coeff.
++ *
++ * @return	Returns 0 on success or a negative error code on failure
++ */
++int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable, int constk[], int slopek[])
++{
++ uint32_t reg, flow, i;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ for (i = 0; i < 8; i++)
++ ipu_dp_write(ipu, (constk[2*i] & 0x1ff) | ((constk[2*i+1] & 0x1ff) << 16), DP_GAMMA_C(flow, i));
++ for (i = 0; i < 4; i++)
++ ipu_dp_write(ipu, (slopek[4*i] & 0xff) | ((slopek[4*i+1] & 0xff) << 8) |
++ ((slopek[4*i+2] & 0xff) << 16) | ((slopek[4*i+3] & 0xff) << 24), DP_GAMMA_S(flow, i));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ if (enable) {
++ if ((ipu->bg_csc_type == RGB2YUV) || (ipu->bg_csc_type == YUV2YUV))
++ reg |= DP_COM_CONF_GAMMA_YUV_EN;
++ else
++ reg &= ~DP_COM_CONF_GAMMA_YUV_EN;
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
++ } else
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_gamma_correction);
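++
++/*
++ * Note on the table sizes: the write loops above consume constk[0..15] and
++ * slopek[0..15], so both arrays are expected to provide 16 entries for the
++ * piecewise linear curve (constk entries are masked to 9 bits and slopek
++ * entries to 8 bits before being packed into the DP gamma registers).
++ */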
++
++/*!
++ * This function sets the window position of the foreground or background
++ * plane.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param x_pos The X coordinate position to place window at.
++ * The position is relative to the top left corner.
++ *
++ * @param y_pos The Y coordinate position to place window at.
++ * The position is relative to the top left corner.
++ *
++ * @return	Returns 0 on success or a negative error code on failure
++ */
++int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos)
++{
++ u32 reg;
++ uint32_t flow = 0;
++ uint32_t dp_srm_shift;
++
++ if ((channel == MEM_FG_SYNC) || (channel == MEM_BG_SYNC)) {
++ flow = DP_SYNC;
++ dp_srm_shift = 3;
++ } else if (channel == MEM_FG_ASYNC0) {
++ flow = DP_ASYNC0;
++ dp_srm_shift = 5;
++ } else if (channel == MEM_FG_ASYNC1) {
++ flow = DP_ASYNC1;
++ dp_srm_shift = 7;
++ } else
++ return -EINVAL;
++
++ ipu_dp_write(ipu, (x_pos << 16) | y_pos, DP_FG_POS(flow));
++
++ if (ipu_is_channel_busy(ipu, channel)) {
++		/* controlled by FSU if channel enabled */
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
++ reg |= (0x1 << dp_srm_shift);
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ } else {
++		/* disable auto swap, controlled by MCU if channel disabled */
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++
++ return 0;
++}
++
++int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos)
++{
++ int ret;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ret = _ipu_disp_set_window_pos(ipu, channel, x_pos, y_pos);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_disp_set_window_pos);
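++
++/*
++ * Usage sketch (coordinates are illustrative): placing the foreground
++ * window 100 pixels right of and 50 pixels below the display origin:
++ *
++ *	ipu_disp_set_window_pos(ipu, MEM_FG_SYNC, 100, 50);
++ */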
++
++int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos)
++{
++ u32 reg;
++ uint32_t flow = 0;
++
++ if (channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ reg = ipu_dp_read(ipu, DP_FG_POS(flow));
++
++ *x_pos = (reg >> 16) & 0x7FF;
++ *y_pos = reg & 0x7FF;
++
++ return 0;
++}
++int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos)
++{
++ int ret;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ret = _ipu_disp_get_window_pos(ipu, channel, x_pos, y_pos);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_disp_get_window_pos);
++
++void ipu_disp_direct_write(struct ipu_soc *ipu, ipu_channel_t channel, u32 value, u32 offset)
++{
++ if (channel == DIRECT_ASYNC0)
++ writel(value, ipu->disp_base[0] + offset);
++ else if (channel == DIRECT_ASYNC1)
++ writel(value, ipu->disp_base[1] + offset);
++}
++EXPORT_SYMBOL(ipu_disp_direct_write);
++
++void ipu_reset_disp_panel(struct ipu_soc *ipu)
++{
++ uint32_t tmp;
++
++ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
++ ipu_di_write(ipu, 1, tmp | 0x08, DI_GENERAL);
++ msleep(10); /* tRES >= 100us */
++ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
++ ipu_di_write(ipu, 1, tmp & ~0x08, DI_GENERAL);
++ msleep(60);
++
++ return;
++}
++EXPORT_SYMBOL(ipu_reset_disp_panel);
++
++void ipu_disp_init(struct ipu_soc *ipu)
++{
++ ipu->fg_csc_type = ipu->bg_csc_type = CSC_NONE;
++ ipu->color_key_4rgb = true;
++ _ipu_init_dc_mappings(ipu);
++ _ipu_dmfc_init(ipu, DMFC_NORMAL, 1);
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_ic.c linux-3.14.40/drivers/mxc/ipu3/ipu_ic.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_ic.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_ic.c 2015-05-01 14:57:59.611427001 -0500
+@@ -0,0 +1,924 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_ic.c
++ *
++ * @brief IPU IC functions
++ *
++ * @ingroup IPU
++ */
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++enum {
++ IC_TASK_VIEWFINDER,
++ IC_TASK_ENCODER,
++ IC_TASK_POST_PROCESSOR
++};
++
++static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
++ ipu_color_space_t out_format, int csc_index);
++
++static int _calc_resize_coeffs(struct ipu_soc *ipu,
++ uint32_t inSize, uint32_t outSize,
++ uint32_t *resizeCoeff,
++ uint32_t *downsizeCoeff);
++
++void _ipu_vdi_set_top_field_man(struct ipu_soc *ipu, bool top_field_0)
++{
++ uint32_t reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ if (top_field_0)
++ reg &= ~VDI_C_TOP_FIELD_MAN_1;
++ else
++ reg |= VDI_C_TOP_FIELD_MAN_1;
++ ipu_vdi_write(ipu, reg, VDI_C);
++}
++
++void _ipu_vdi_set_motion(struct ipu_soc *ipu, ipu_motion_sel motion_sel)
++{
++ uint32_t reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ reg &= ~(VDI_C_MOT_SEL_FULL | VDI_C_MOT_SEL_MED | VDI_C_MOT_SEL_LOW);
++ if (motion_sel == HIGH_MOTION)
++ reg |= VDI_C_MOT_SEL_FULL;
++ else if (motion_sel == MED_MOTION)
++ reg |= VDI_C_MOT_SEL_MED;
++ else
++ reg |= VDI_C_MOT_SEL_LOW;
++
++ ipu_vdi_write(ipu, reg, VDI_C);
++ dev_dbg(ipu->dev, "VDI_C = \t0x%08X\n", reg);
++}
++
++void ic_dump_register(struct ipu_soc *ipu)
++{
++ printk(KERN_DEBUG "IC_CONF = \t0x%08X\n", ipu_ic_read(ipu, IC_CONF));
++ printk(KERN_DEBUG "IC_PRP_ENC_RSC = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_PRP_ENC_RSC));
++ printk(KERN_DEBUG "IC_PRP_VF_RSC = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_PRP_VF_RSC));
++ printk(KERN_DEBUG "IC_PP_RSC = \t0x%08X\n", ipu_ic_read(ipu, IC_PP_RSC));
++ printk(KERN_DEBUG "IC_IDMAC_1 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_1));
++ printk(KERN_DEBUG "IC_IDMAC_2 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_2));
++ printk(KERN_DEBUG "IC_IDMAC_3 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_3));
++}
++
++void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t ic_conf;
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++ switch (channel) {
++ case CSI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN | IC_CONF_RWS_EN ;
++ break;
++ case MEM_ROT_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_ROT_EN;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case MEM_PRP_ENC_MEM:
++ ic_conf |= IC_CONF_PRPENC_EN;
++ break;
++ case MEM_ROT_ENC_MEM:
++ ic_conf |= IC_CONF_PRPENC_ROT_EN;
++ break;
++ case MEM_PP_MEM:
++ ic_conf |= IC_CONF_PP_EN;
++ break;
++ case MEM_ROT_PP_MEM:
++ ic_conf |= IC_CONF_PP_ROT_EN;
++ break;
++ default:
++ break;
++ }
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++}
++
++void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t ic_conf;
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++ switch (channel) {
++ case CSI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_MEM:
++ ic_conf &= ~(IC_CONF_PRPVF_EN | IC_CONF_RWS_EN);
++ break;
++ case MEM_ROT_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_ROT_EN;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case MEM_PRP_ENC_MEM:
++ ic_conf &= ~IC_CONF_PRPENC_EN;
++ break;
++ case MEM_ROT_ENC_MEM:
++ ic_conf &= ~IC_CONF_PRPENC_ROT_EN;
++ break;
++ case MEM_PP_MEM:
++ ic_conf &= ~IC_CONF_PP_EN;
++ break;
++ case MEM_ROT_PP_MEM:
++ ic_conf &= ~IC_CONF_PP_ROT_EN;
++ break;
++ default:
++ break;
++ }
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++}
++
++void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
++{
++ uint32_t reg;
++ uint32_t pixel_fmt;
++ uint32_t pix_per_burst;
++
++ reg = ((params->mem_prp_vf_mem.in_height-1) << 16) |
++ (params->mem_prp_vf_mem.in_width-1);
++ ipu_vdi_write(ipu, reg, VDI_FSIZE);
++
++	/* For full motion, only the vertical filter is used.
++	   Burst size is 4 accesses. */
++ if (params->mem_prp_vf_mem.in_pixel_fmt ==
++ IPU_PIX_FMT_UYVY ||
++ params->mem_prp_vf_mem.in_pixel_fmt ==
++ IPU_PIX_FMT_YUYV) {
++ pixel_fmt = VDI_C_CH_422;
++ pix_per_burst = 32;
++ } else {
++ pixel_fmt = VDI_C_CH_420;
++ pix_per_burst = 64;
++ }
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ reg |= pixel_fmt;
++ switch (channel) {
++ case MEM_VDI_PRP_VF_MEM:
++ reg |= VDI_C_BURST_SIZE2_4;
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_SET_1 | VDI_C_VWM1_CLR_2;
++ break;
++ case MEM_VDI_PRP_VF_MEM_N:
++ reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_SET_1 | VDI_C_VWM3_CLR_2;
++ break;
++
++ case MEM_VDI_MEM:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE2_OFFSET;
++ break;
++ case MEM_VDI_MEM_P:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE1_OFFSET;
++ reg |= VDI_C_VWM1_SET_2 | VDI_C_VWM1_CLR_2;
++ break;
++ case MEM_VDI_MEM_N:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE3_OFFSET;
++ reg |= VDI_C_VWM3_SET_2 | VDI_C_VWM3_CLR_2;
++ break;
++ default:
++ break;
++ }
++ ipu_vdi_write(ipu, reg, VDI_C);
++
++ if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_TOP)
++ _ipu_vdi_set_top_field_man(ipu, true);
++ else if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_BOTTOM)
++ _ipu_vdi_set_top_field_man(ipu, false);
++
++ _ipu_vdi_set_motion(ipu, params->mem_prp_vf_mem.motion_sel);
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_RWS_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_vdi_uninit(struct ipu_soc *ipu)
++{
++ ipu_vdi_write(ipu, 0, VDI_FSIZE);
++ ipu_vdi_write(ipu, 0, VDI_C);
++}
++
++int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_prp_vf_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_height,
++ params->mem_prp_vf_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpvf height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else
++ reg = (params->mem_prp_vf_mem.outv_resize_ratio) << 16;
++
++ /* Setup horizontal resizing */
++ if (!params->mem_prp_vf_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_width,
++ params->mem_prp_vf_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpvf width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else
++ reg |= params->mem_prp_vf_mem.outh_resize_ratio;
++
++ ipu_ic_write(ipu, reg, IC_PRP_VF_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_prp_vf_mem.graphics_combine_en) {
++ ic_conf |= IC_CONF_PRPVF_CMB;
++
++ if (!(ic_conf & IC_CONF_PRPVF_CSC1)) {
++ /* need transparent CSC1 conversion */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, RGB, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1; /* Enable RGB->RGB CSC */
++ }
++ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_g_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC2 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 2);
++ ic_conf |= IC_CONF_PRPVF_CSC2;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC2 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 2);
++ ic_conf |= IC_CONF_PRPVF_CSC2;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_prp_vf_mem.global_alpha_en) {
++ ic_conf |= IC_CONF_IC_GLB_LOC_A;
++ reg = ipu_ic_read(ipu, IC_CMBP_1);
++ reg &= ~(0xff);
++ reg |= params->mem_prp_vf_mem.alpha;
++ ipu_ic_write(ipu, reg, IC_CMBP_1);
++ } else
++ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++ if (params->mem_prp_vf_mem.key_color_en) {
++ ic_conf |= IC_CONF_KEY_COLOR_EN;
++ ipu_ic_write(ipu, params->mem_prp_vf_mem.key_color,
++ IC_CMBP_2);
++ } else
++ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++ } else {
++ ic_conf &= ~IC_CONF_PRPVF_CMB;
++ }
++
++ if (src_is_csi)
++ ic_conf &= ~IC_CONF_RWS_EN;
++ else
++ ic_conf |= IC_CONF_RWS_EN;
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPVF_EN | IC_CONF_PRPVF_CMB |
++ IC_CONF_PRPVF_CSC2 | IC_CONF_PRPVF_CSC1);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_PRPVF_ROT_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_prp_enc_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu,
++ params->mem_prp_enc_mem.in_height,
++ params->mem_prp_enc_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpenc height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else
++ reg = (params->mem_prp_enc_mem.outv_resize_ratio) << 16;
++
++ /* Setup horizontal resizing */
++ if (!params->mem_prp_enc_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_enc_mem.in_width,
++ params->mem_prp_enc_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpenc width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else
++ reg |= params->mem_prp_enc_mem.outh_resize_ratio;
++
++ ipu_ic_write(ipu, reg, IC_PRP_ENC_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_prp_enc_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_enc_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_ENCODER, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PRPENC_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_ENCODER, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PRPENC_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (src_is_csi)
++ ic_conf &= ~IC_CONF_RWS_EN;
++ else
++ ic_conf |= IC_CONF_RWS_EN;
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPENC_ROT_EN);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_pp_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_height,
++ params->mem_pp_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate pp height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else {
++ reg = (params->mem_pp_mem.outv_resize_ratio) << 16;
++ }
++
++ /* Setup horizontal resizing */
++ if (!params->mem_pp_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_width,
++ params->mem_pp_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate pp width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else {
++ reg |= params->mem_pp_mem.outh_resize_ratio;
++ }
++
++ ipu_ic_write(ipu, reg, IC_PP_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_pp_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PP_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PP_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_pp_mem.graphics_combine_en) {
++ ic_conf |= IC_CONF_PP_CMB;
++
++ if (!(ic_conf & IC_CONF_PP_CSC1)) {
++ /* need transparent CSC1 conversion */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, RGB, 1);
++ ic_conf |= IC_CONF_PP_CSC1; /* Enable RGB->RGB CSC */
++ }
++
++ in_fmt = format_to_colorspace(params->mem_pp_mem.in_g_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC2 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 2);
++ ic_conf |= IC_CONF_PP_CSC2;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC2 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 2);
++ ic_conf |= IC_CONF_PP_CSC2;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_pp_mem.global_alpha_en) {
++ ic_conf |= IC_CONF_IC_GLB_LOC_A;
++ reg = ipu_ic_read(ipu, IC_CMBP_1);
++ reg &= ~(0xff00);
++ reg |= (params->mem_pp_mem.alpha << 8);
++ ipu_ic_write(ipu, reg, IC_CMBP_1);
++ } else
++ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++ if (params->mem_pp_mem.key_color_en) {
++ ic_conf |= IC_CONF_KEY_COLOR_EN;
++ ipu_ic_write(ipu, params->mem_pp_mem.key_color,
++ IC_CMBP_2);
++ } else
++ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++ } else {
++ ic_conf &= ~IC_CONF_PP_CMB;
++ }
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_pp(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PP_EN | IC_CONF_PP_CSC1 | IC_CONF_PP_CSC2 |
++ IC_CONF_PP_CMB);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_PP_ROT_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan,
++ uint16_t width, uint16_t height,
++ int burst_size, ipu_rotate_mode_t rot)
++{
++ u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
++ u32 temp_rot = bitrev8(rot) >> 5;
++ bool need_hor_flip = false;
++
++ if ((burst_size != 8) && (burst_size != 16)) {
++ dev_dbg(ipu->dev, "Illegal burst length for IC\n");
++ return -EINVAL;
++ }
++
++ width--;
++ height--;
++
++ if (temp_rot & 0x2) /* Need horizontal flip */
++ need_hor_flip = true;
++
++ ic_idmac_1 = ipu_ic_read(ipu, IC_IDMAC_1);
++ ic_idmac_2 = ipu_ic_read(ipu, IC_IDMAC_2);
++ ic_idmac_3 = ipu_ic_read(ipu, IC_IDMAC_3);
++ if (dma_chan == 22) { /* PP output - CB2 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
++ } else if (dma_chan == 11) { /* PP Input - CB5 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
++ } else if (dma_chan == 47) { /* PP Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
++ }
++
++ if (dma_chan == 12) { /* PRP Input - CB6 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
++ }
++
++ if (dma_chan == 20) { /* PRP ENC output - CB0 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
++
++ } else if (dma_chan == 45) { /* PRP ENC Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
++ }
++
++ if (dma_chan == 21) { /* PRP VF output - CB1 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
++
++ } else if (dma_chan == 46) { /* PRP VF Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
++ }
++
++ if (dma_chan == 14) { /* PRP VF graphics combining input - CB3 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
++ } else if (dma_chan == 15) { /* PP graphics combining input - CB4 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
++ } else if (dma_chan == 5) { /* VDIC OUTPUT - CB7 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
++ }
++
++ ipu_ic_write(ipu, ic_idmac_1, IC_IDMAC_1);
++ ipu_ic_write(ipu, ic_idmac_2, IC_IDMAC_2);
++ ipu_ic_write(ipu, ic_idmac_3, IC_IDMAC_3);
++ return 0;
++}
++
++static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
++ ipu_color_space_t out_format, int csc_index)
++{
++ /*
++ * Y = 0.257 * R + 0.504 * G + 0.098 * B + 16;
++ * U = -0.148 * R - 0.291 * G + 0.439 * B + 128;
++ * V = 0.439 * R - 0.368 * G - 0.071 * B + 128;
++ */
++ static const uint32_t rgb2ycbcr_coeff[4][3] = {
++ {0x0042, 0x0081, 0x0019},
++ {0x01DA, 0x01B6, 0x0070},
++ {0x0070, 0x01A2, 0x01EE},
++ {0x0040, 0x0200, 0x0200}, /* A0, A1, A2 */
++ };
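++
++	/*
++	 * The entries above appear to be the matrix coefficients in signed
++	 * 9-bit fixed point with 8 fractional bits: 0x0042 = 66 and
++	 * 66 / 256 ~= 0.257 is the luma weight for R, while 0x01DA is the
++	 * two's-complement encoding of -38, i.e. -38 / 256 ~= -0.148.
++	 * The last row holds the A0/A1/A2 offsets.
++	 */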
++
++ /* transparent RGB->RGB matrix for combining
++ */
++ static const uint32_t rgb2rgb_coeff[4][3] = {
++ {0x0080, 0x0000, 0x0000},
++ {0x0000, 0x0080, 0x0000},
++ {0x0000, 0x0000, 0x0080},
++ {0x0000, 0x0000, 0x0000}, /* A0, A1, A2 */
++ };
++
++/* R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
++ G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
++   B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128)); */
++ static const uint32_t ycbcr2rgb_coeff[4][3] = {
++ {149, 0, 204},
++ {149, 462, 408},
++ {149, 255, 0},
++ {8192 - 446, 266, 8192 - 554}, /* A0, A1, A2 */
++ };
++
++ uint32_t param;
++ uint32_t *base = NULL;
++
++ if (ic_task == IC_TASK_ENCODER) {
++ base = (uint32_t *)ipu->tpmem_base + 0x2008 / 4;
++ } else if (ic_task == IC_TASK_VIEWFINDER) {
++ if (csc_index == 1)
++ base = (uint32_t *)ipu->tpmem_base + 0x4028 / 4;
++ else
++ base = (uint32_t *)ipu->tpmem_base + 0x4040 / 4;
++ } else if (ic_task == IC_TASK_POST_PROCESSOR) {
++ if (csc_index == 1)
++ base = (uint32_t *)ipu->tpmem_base + 0x6060 / 4;
++ else
++ base = (uint32_t *)ipu->tpmem_base + 0x6078 / 4;
++ } else {
++ BUG();
++ }
++
++ if ((in_format == YCbCr) && (out_format == RGB)) {
++ /* Init CSC (YCbCr->RGB) */
++ param = (ycbcr2rgb_coeff[3][0] << 27) |
++ (ycbcr2rgb_coeff[0][0] << 18) |
++ (ycbcr2rgb_coeff[1][1] << 9) | ycbcr2rgb_coeff[2][2];
++ writel(param, base++);
++ /* scale = 2, sat = 0 */
++ param = (ycbcr2rgb_coeff[3][0] >> 5) | (2L << (40 - 32));
++ writel(param, base++);
++
++ param = (ycbcr2rgb_coeff[3][1] << 27) |
++ (ycbcr2rgb_coeff[0][1] << 18) |
++ (ycbcr2rgb_coeff[1][0] << 9) | ycbcr2rgb_coeff[2][0];
++ writel(param, base++);
++ param = (ycbcr2rgb_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param = (ycbcr2rgb_coeff[3][2] << 27) |
++ (ycbcr2rgb_coeff[0][2] << 18) |
++ (ycbcr2rgb_coeff[1][2] << 9) | ycbcr2rgb_coeff[2][1];
++ writel(param, base++);
++ param = (ycbcr2rgb_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else if ((in_format == RGB) && (out_format == YCbCr)) {
++ /* Init CSC (RGB->YCbCr) */
++ param = (rgb2ycbcr_coeff[3][0] << 27) |
++ (rgb2ycbcr_coeff[0][0] << 18) |
++ (rgb2ycbcr_coeff[1][1] << 9) | rgb2ycbcr_coeff[2][2];
++ writel(param, base++);
++ /* scale = 1, sat = 0 */
++ param = (rgb2ycbcr_coeff[3][0] >> 5) | (1UL << 8);
++ writel(param, base++);
++
++ param = (rgb2ycbcr_coeff[3][1] << 27) |
++ (rgb2ycbcr_coeff[0][1] << 18) |
++ (rgb2ycbcr_coeff[1][0] << 9) | rgb2ycbcr_coeff[2][0];
++ writel(param, base++);
++ param = (rgb2ycbcr_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param = (rgb2ycbcr_coeff[3][2] << 27) |
++ (rgb2ycbcr_coeff[0][2] << 18) |
++ (rgb2ycbcr_coeff[1][2] << 9) | rgb2ycbcr_coeff[2][1];
++ writel(param, base++);
++ param = (rgb2ycbcr_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else if ((in_format == RGB) && (out_format == RGB)) {
++ /* Init CSC */
++ param =
++ (rgb2rgb_coeff[3][0] << 27) | (rgb2rgb_coeff[0][0] << 18) |
++ (rgb2rgb_coeff[1][1] << 9) | rgb2rgb_coeff[2][2];
++ writel(param, base++);
++ /* scale = 2, sat = 0 */
++ param = (rgb2rgb_coeff[3][0] >> 5) | (2UL << 8);
++ writel(param, base++);
++
++ param =
++ (rgb2rgb_coeff[3][1] << 27) | (rgb2rgb_coeff[0][1] << 18) |
++ (rgb2rgb_coeff[1][0] << 9) | rgb2rgb_coeff[2][0];
++ writel(param, base++);
++ param = (rgb2rgb_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param =
++ (rgb2rgb_coeff[3][2] << 27) | (rgb2rgb_coeff[0][2] << 18) |
++ (rgb2rgb_coeff[1][2] << 9) | rgb2rgb_coeff[2][1];
++ writel(param, base++);
++ param = (rgb2rgb_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else {
++ dev_err(ipu->dev, "Unsupported color space conversion\n");
++ }
++}
++
++static int _calc_resize_coeffs(struct ipu_soc *ipu,
++ uint32_t inSize, uint32_t outSize,
++ uint32_t *resizeCoeff,
++ uint32_t *downsizeCoeff)
++{
++ uint32_t tempSize;
++ uint32_t tempDownsize;
++
++ if (inSize > 4096) {
++ dev_err(ipu->dev, "IC input size(%d) cannot exceed 4096\n",
++ inSize);
++ return -EINVAL;
++ }
++
++ if (outSize > 1024) {
++ dev_err(ipu->dev, "IC output size(%d) cannot exceed 1024\n",
++ outSize);
++ return -EINVAL;
++ }
++
++ if ((outSize << 3) < inSize) {
++ dev_err(ipu->dev, "IC cannot downsize more than 8:1\n");
++ return -EINVAL;
++ }
++
++ /* Compute downsizing coefficient */
++ /* Output of downsizing unit cannot be more than 1024 */
++ tempDownsize = 0;
++ tempSize = inSize;
++ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
++ (tempDownsize < 2)) {
++ tempSize >>= 1;
++ tempDownsize++;
++ }
++ *downsizeCoeff = tempDownsize;
++
++ /* compute resizing coefficient using the following equation:
++ resizeCoeff = M*(SI -1)/(SO - 1)
++ where M = 2^13, SI - input size, SO - output size */
++ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
++ if (*resizeCoeff >= 16384L) {
++ dev_err(ipu->dev, "Overflow on IC resize coefficient.\n");
++ return -EINVAL;
++ }
++
++ dev_dbg(ipu->dev, "resizing from %u -> %u pixels, "
++ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
++ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
++ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
++
++ return 0;
++}
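++
++/*
++ * Worked example of the calculation above: scaling a 1920-pixel line down to
++ * 960 pixels runs the downsizing loop once (1920 > 1024 and 1920 >= 2 * 960),
++ * giving downsizeCoeff = 1 (divide by two) and tempSize = 960; the resize
++ * stage then gets resizeCoeff = 8192 * (960 - 1) / (960 - 1) = 8192, i.e. a
++ * 1:1 ratio.
++ */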
++
++void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ uint32_t mask_reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ mask_reg = reg & VDI_C_TOP_FIELD_MAN_1;
++ if (mask_reg == VDI_C_TOP_FIELD_MAN_1)
++ reg &= ~VDI_C_TOP_FIELD_MAN_1;
++ else
++ reg |= VDI_C_TOP_FIELD_MAN_1;
++
++ ipu_vdi_write(ipu, reg, VDI_C);
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_param_mem.h linux-3.14.40/drivers/mxc/ipu3/ipu_param_mem.h
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_param_mem.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_param_mem.h 2015-05-01 14:57:59.611427001 -0500
+@@ -0,0 +1,921 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __INCLUDE_IPU_PARAM_MEM_H__
++#define __INCLUDE_IPU_PARAM_MEM_H__
++
++#include <linux/bitrev.h>
++#include <linux/types.h>
++
++#include "ipu_prv.h"
++
++extern u32 *ipu_cpmem_base;
++
++struct ipu_ch_param_word {
++ uint32_t data[5];
++ uint32_t res[3];
++};
++
++struct ipu_ch_param {
++ struct ipu_ch_param_word word[2];
++};
++
++#define ipu_ch_param_addr(ipu, ch) (((struct ipu_ch_param *)ipu->cpmem_base) + (ch))
++
++#define _param_word(base, w) \
++ (((struct ipu_ch_param *)(base))->word[(w)].data)
++
++#define ipu_ch_param_set_field(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ _param_word(base, w)[i] |= (v) << off; \
++ if (((bit)+(size)-1)/32 > i) { \
++ _param_word(base, w)[i + 1] |= (v) >> (off ? (32 - off) : 0); \
++ } \
++}
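++
++/*
++ * Example of the cross-word case handled above: the frame width field,
++ * written further down as (word 0, bit 125, size 13), starts at
++ * data[3] bit 29 (125 / 32 = 3, 125 % 32 = 29); its low three bits land in
++ * data[3][31:29] and the remaining ten bits spill into data[4][9:0] through
++ * the (v) >> (32 - off) term.
++ */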
++
++#define ipu_ch_param_set_field_io(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ unsigned reg_offset; \
++ u32 temp; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp |= (v) << off; \
++ writel(temp, (u32 *)base + reg_offset); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp |= (v) >> (off ? (32 - off) : 0); \
++ writel(temp, (u32 *)base + reg_offset); \
++ } \
++}
++
++#define ipu_ch_param_mod_field(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ u32 temp = _param_word(base, w)[i]; \
++ temp &= ~(mask << off); \
++ _param_word(base, w)[i] = temp | (v) << off; \
++ if (((bit)+(size)-1)/32 > i) { \
++ temp = _param_word(base, w)[i + 1]; \
++ temp &= ~(mask >> (32 - off)); \
++ _param_word(base, w)[i + 1] = \
++ temp | ((v) >> (off ? (32 - off) : 0)); \
++ } \
++}
++
++#define ipu_ch_param_mod_field_io(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ unsigned reg_offset; \
++ u32 temp; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp &= ~(mask << off); \
++ temp |= (v) << off; \
++ writel(temp, (u32 *)base + reg_offset); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp &= ~(mask >> (32 - off)); \
++ temp |= ((v) >> (off ? (32 - off) : 0)); \
++ writel(temp, (u32 *)base + reg_offset); \
++ } \
++}
++
++#define ipu_ch_param_read_field(base, w, bit, size) ({ \
++ u32 temp2; \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ u32 temp1 = _param_word(base, w)[i]; \
++ temp1 = mask & (temp1 >> off); \
++ if (((bit)+(size)-1)/32 > i) { \
++ temp2 = _param_word(base, w)[i + 1]; \
++ temp2 &= mask >> (off ? (32 - off) : 0); \
++ temp1 |= temp2 << (off ? (32 - off) : 0); \
++ } \
++ temp1; \
++})
++
++#define ipu_ch_param_read_field_io(base, w, bit, size) ({ \
++ u32 temp1, temp2; \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ unsigned reg_offset; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp1 = readl((u32 *)base + reg_offset); \
++ temp1 = mask & (temp1 >> off); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp2 = readl((u32 *)base + reg_offset); \
++ temp2 &= mask >> (off ? (32 - off) : 0); \
++ temp1 |= temp2 << (off ? (32 - off) : 0); \
++ } \
++ temp1; \
++})
++
++static inline int __ipu_ch_get_third_buf_cpmem_num(int ch)
++{
++ switch (ch) {
++ case 8:
++ return 64;
++ case 9:
++ return 65;
++ case 10:
++ return 66;
++ case 13:
++ return 67;
++ case 21:
++ return 68;
++ case 23:
++ return 69;
++ case 27:
++ return 70;
++ case 28:
++ return 71;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static inline void _ipu_ch_params_set_packing(struct ipu_ch_param *p,
++ int red_width, int red_offset,
++ int green_width, int green_offset,
++ int blue_width, int blue_offset,
++ int alpha_width, int alpha_offset)
++{
++ /* Setup red width and offset */
++ ipu_ch_param_set_field(p, 1, 116, 3, red_width - 1);
++ ipu_ch_param_set_field(p, 1, 128, 5, red_offset);
++ /* Setup green width and offset */
++ ipu_ch_param_set_field(p, 1, 119, 3, green_width - 1);
++ ipu_ch_param_set_field(p, 1, 133, 5, green_offset);
++ /* Setup blue width and offset */
++ ipu_ch_param_set_field(p, 1, 122, 3, blue_width - 1);
++ ipu_ch_param_set_field(p, 1, 138, 5, blue_offset);
++ /* Setup alpha width and offset */
++ ipu_ch_param_set_field(p, 1, 125, 3, alpha_width - 1);
++ ipu_ch_param_set_field(p, 1, 143, 5, alpha_offset);
++}
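++
++/*
++ * For instance, the RGB565 case further down calls this helper with
++ * (5, 0, 6, 5, 5, 11, 8, 16): a 5-bit red component at bit 0, 6-bit green at
++ * bit 5, 5-bit blue at bit 11, and an 8-bit alpha slot recorded at offset 16.
++ */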
++
++static inline void _ipu_ch_param_dump(struct ipu_soc *ipu, int ch)
++{
++ struct ipu_ch_param *p = ipu_ch_param_addr(ipu, ch);
++ dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", ch,
++ p->word[0].data[0], p->word[0].data[1], p->word[0].data[2],
++ p->word[0].data[3], p->word[0].data[4]);
++ dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", ch,
++ p->word[1].data[0], p->word[1].data[1], p->word[1].data[2],
++ p->word[1].data[3], p->word[1].data[4]);
++ dev_dbg(ipu->dev, "PFS 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 85, 4));
++ dev_dbg(ipu->dev, "BPP 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3));
++ dev_dbg(ipu->dev, "NPB 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7));
++
++ dev_dbg(ipu->dev, "FW %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 125, 13));
++ dev_dbg(ipu->dev, "FH %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 138, 12));
++ dev_dbg(ipu->dev, "EBA0 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 0, 29) << 3);
++ dev_dbg(ipu->dev, "EBA1 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 29, 29) << 3);
++ dev_dbg(ipu->dev, "Stride %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14));
++ dev_dbg(ipu->dev, "scan_order %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1));
++ dev_dbg(ipu->dev, "uv_stride %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 14));
++ dev_dbg(ipu->dev, "u_offset 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22) << 3);
++ dev_dbg(ipu->dev, "v_offset 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22) << 3);
++
++ dev_dbg(ipu->dev, "Width0 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 116, 3));
++ dev_dbg(ipu->dev, "Width1 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 119, 3));
++ dev_dbg(ipu->dev, "Width2 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 122, 3));
++ dev_dbg(ipu->dev, "Width3 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3));
++ dev_dbg(ipu->dev, "Offset0 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 5));
++ dev_dbg(ipu->dev, "Offset1 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 133, 5));
++ dev_dbg(ipu->dev, "Offset2 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 138, 5));
++ dev_dbg(ipu->dev, "Offset3 %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 143, 5));
++}
++
++static inline void fill_cpmem(struct ipu_soc *ipu, int ch, struct ipu_ch_param *params)
++{
++ int i, w;
++ void *addr = ipu_ch_param_addr(ipu, ch);
++
++ /* 2 words, 5 valid data */
++ for (w = 0; w < 2; w++) {
++ for (i = 0; i < 5; i++) {
++ writel(params->word[w].data[i], addr);
++ addr += 4;
++ }
++ addr += 12;
++ }
++}
++
++static inline void _ipu_ch_param_init(struct ipu_soc *ipu, int ch,
++ uint32_t pixel_fmt, uint32_t width,
++ uint32_t height, uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t uv_stride, dma_addr_t addr0,
++ dma_addr_t addr1, dma_addr_t addr2)
++{
++ uint32_t u_offset = 0;
++ uint32_t v_offset = 0;
++ int32_t sub_ch = 0;
++ struct ipu_ch_param params;
++
++ memset(&params, 0, sizeof(params));
++
++ ipu_ch_param_set_field(&params, 0, 125, 13, width - 1);
++
++ if (((ch == 8) || (ch == 9) || (ch == 10)) && !ipu->vdoa_en) {
++ ipu_ch_param_set_field(&params, 0, 138, 12, (height / 2) - 1);
++ ipu_ch_param_set_field(&params, 1, 102, 14, (stride * 2) - 1);
++ } else {
++		/* note: for vdoa+vdi ch8/9/10, always use band mode */
++ ipu_ch_param_set_field(&params, 0, 138, 12, height - 1);
++ ipu_ch_param_set_field(&params, 1, 102, 14, stride - 1);
++ }
++
++ /* EBA is 8-byte aligned */
++ ipu_ch_param_set_field(&params, 1, 0, 29, addr0 >> 3);
++ ipu_ch_param_set_field(&params, 1, 29, 29, addr1 >> 3);
++ if (addr0%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's EBA0 is not 8-byte aligned\n", ch);
++ if (addr1%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's EBA1 is not 8-byte aligned\n", ch);
++
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ /*Represents 8-bit Generic data */
++ ipu_ch_param_set_field(&params, 0, 107, 3, 5); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63); /* burst size */
++
++ break;
++ case IPU_PIX_FMT_GENERIC_16:
++ /* Represents 16-bit generic data */
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ break;
++ case IPU_PIX_FMT_GENERIC_32:
++ /*Represents 32-bit Generic data */
++ break;
++ case IPU_PIX_FMT_RGB565:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 5, 0, 6, 5, 5, 11, 8, 16);
++ break;
++ case IPU_PIX_FMT_BGR24:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 16, 8, 8, 8, 0, 8, 24);
++ break;
++ case IPU_PIX_FMT_VYU444:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 8, 8, 0, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 8, 8, 16, 8, 24, 8, 0);
++ break;
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 24, 8, 16, 8, 8, 8, 0);
++ break;
++ case IPU_PIX_FMT_ABGR32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_UYVY:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0xA); /* pix format */
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YUYV:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0x8); /* pix format */
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ if (ipu->vdoa_en) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31);
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
++ }
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * height;
++ v_offset = u_offset + (uv_stride * height / 2);
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ uv_stride = uv_stride*2;
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * height;
++ u_offset = v_offset + (uv_stride * height / 2);
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ uv_stride = uv_stride*2;
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = (v == 0) ? stride * height : v;
++ u_offset = (u == 0) ? v_offset + v_offset / 2 : u;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = (u == 0) ? stride * height : u;
++ v_offset = (v == 0) ? u_offset + u_offset / 2 : v;
++ break;
++ case IPU_PIX_FMT_YUV444P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ uv_stride = stride;
++ u_offset = (u == 0) ? stride * height : u;
++ v_offset = (v == 0) ? u_offset * 2 : v;
++ break;
++ case IPU_PIX_FMT_NV12:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 4); /* pix format */
++ uv_stride = stride;
++ u_offset = (u == 0) ? stride * height : u;
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ if (ipu->vdoa_en) {
++ /* one field buffer, memory width 64bits */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63);
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
++ /* top/bottom field in one buffer*/
++ uv_stride = uv_stride*2;
++ }
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ default:
++ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
++ break;
++ }
++
++ if (uv_stride)
++ ipu_ch_param_set_field(&params, 1, 128, 14, uv_stride - 1);
++
++ /* Get the uv offset from user when need cropping */
++ if (u || v) {
++ u_offset = u;
++ v_offset = v;
++ }
++
++ /* UBO and VBO are 22-bit and 8-byte aligned */
++ if (u_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
++ if (v_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
++ if (u_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
++ if (v_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
++
++ ipu_ch_param_set_field(&params, 0, 46, 22, u_offset / 8);
++ ipu_ch_param_set_field(&params, 0, 68, 22, v_offset / 8);
++
++ dev_dbg(ipu->dev, "initializing idma ch %d @ %p\n", ch, ipu_ch_param_addr(ipu, ch));
++ fill_cpmem(ipu, ch, &params);
++ if (addr2) {
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++
++ ipu_ch_param_set_field(&params, 1, 0, 29, addr2 >> 3);
++ ipu_ch_param_set_field(&params, 1, 29, 29, 0);
++ if (addr2%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's sub-CPMEM entry%d EBA0 is not "
++ "8-byte aligned\n", ch, sub_ch);
++
++ dev_dbg(ipu->dev, "initializing idma ch %d @ %p sub cpmem\n", ch,
++ ipu_ch_param_addr(ipu, sub_ch));
++ fill_cpmem(ipu, sub_ch, &params);
++ }
++};
++
++static inline void _ipu_ch_param_set_burst_size(struct ipu_soc *ipu,
++ uint32_t ch,
++ uint16_t burst_pixels)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7,
++ burst_pixels - 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 78, 7,
++ burst_pixels - 1);
++};
++
++static inline int _ipu_ch_param_get_burst_size(struct ipu_soc *ipu, uint32_t ch)
++{
++ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7) + 1;
++};
++
++static inline int _ipu_ch_param_get_bpp(struct ipu_soc *ipu, uint32_t ch)
++{
++ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3);
++};
++
++static inline void _ipu_ch_param_set_buffer(struct ipu_soc *ipu, uint32_t ch,
++ int bufNum, dma_addr_t phyaddr)
++{
++ if (bufNum == 2) {
++ ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (ch <= 0)
++ return;
++ bufNum = 0;
++ }
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 29 * bufNum, 29,
++ phyaddr / 8);
++};
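++
++/*
++ * For example, pointing buffer 1 of a channel at a newly allocated frame
++ * (new_paddr being an illustrative dma_addr_t) is simply
++ *
++ *	_ipu_ch_param_set_buffer(ipu, ch, 1, new_paddr);
++ *
++ * while bufNum 2 is redirected above to the channel's sub-CPMEM entry used
++ * for triple buffering.
++ */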
++
++static inline void _ipu_ch_param_set_rotation(struct ipu_soc *ipu, uint32_t ch,
++ ipu_rotate_mode_t rot)
++{
++ u32 temp_rot = bitrev8(rot) >> 5;
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 119, 3, temp_rot);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 119, 3, temp_rot);
++};
++
++static inline void _ipu_ch_param_set_block_mode(struct ipu_soc *ipu, uint32_t ch)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 117, 2, 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 117, 2, 1);
++};
++
++static inline void _ipu_ch_param_set_alpha_use_separate_channel(struct ipu_soc *ipu,
++ uint32_t ch,
++ bool option)
++{
++ int32_t sub_ch = 0;
++
++ if (option) {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 1);
++ } else {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 0);
++ }
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++
++ if (option) {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 1);
++ } else {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 0);
++ }
++};
++
++static inline void _ipu_ch_param_set_alpha_condition_read(struct ipu_soc *ipu, uint32_t ch)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 149, 1, 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 149, 1, 1);
++};
++
++static inline void _ipu_ch_param_set_alpha_buffer_memory(struct ipu_soc *ipu, uint32_t ch)
++{
++ int alp_mem_idx;
++ int32_t sub_ch = 0;
++
++ switch (ch) {
++ case 14: /* PRP graphic */
++ alp_mem_idx = 0;
++ break;
++ case 15: /* PP graphic */
++ alp_mem_idx = 1;
++ break;
++ case 23: /* DP BG SYNC graphic */
++ alp_mem_idx = 4;
++ break;
++ case 27: /* DP FG SYNC graphic */
++ alp_mem_idx = 2;
++ break;
++ default:
++		dev_err(ipu->dev, "unsupported channel for local alpha blending\n");
++ return;
++ }
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 90, 3, alp_mem_idx);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 90, 3, alp_mem_idx);
++};
++
++static inline void _ipu_ch_param_set_interlaced_scan(struct ipu_soc *ipu, uint32_t ch)
++{
++ u32 stride;
++ int32_t sub_ch = 0;
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1, 1);
++ if (sub_ch > 0)
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 113, 1, 1);
++ stride = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14) + 1;
++ /* ILO is 20-bit and 8-byte aligned */
++ if (stride/8 > 0xfffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's ILO exceeds IPU limitation\n", ch);
++ if (stride%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's ILO is not 8-byte aligned\n", ch);
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 58, 20, stride / 8);
++ if (sub_ch > 0)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 58, 20,
++ stride / 8);
++ stride *= 2;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14, stride - 1);
++ if (sub_ch > 0)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 102, 14,
++ stride - 1);
++};
++
++static inline void _ipu_ch_param_set_axi_id(struct ipu_soc *ipu, uint32_t ch, uint32_t id)
++{
++ int32_t sub_ch = 0;
++
++ id %= 4;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 93, 2, id);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 93, 2, id);
++};
++
++/*
++ * IDMAC U/V offset changing support.
++ * The U and V inputs themselves are not modified; the offsets are
++ * recalculated from vertical_offset and horizontal_offset.
++ */
++static inline void _ipu_ch_offset_update(struct ipu_soc *ipu,
++ int ch,
++ uint32_t pixel_fmt,
++ uint32_t width,
++ uint32_t height,
++ uint32_t stride,
++ uint32_t u,
++ uint32_t v,
++ uint32_t uv_stride,
++ uint32_t vertical_offset,
++ uint32_t horizontal_offset)
++{
++ uint32_t u_offset = 0;
++ uint32_t v_offset = 0;
++ uint32_t old_offset = 0;
++ uint32_t u_fix = 0;
++ uint32_t v_fix = 0;
++ int32_t sub_ch = 0;
++
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_GENERIC_16:
++ case IPU_PIX_FMT_GENERIC_32:
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_ABGR32:
++ case IPU_PIX_FMT_UYVY:
++ case IPU_PIX_FMT_YUYV:
++ break;
++
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset / 2;
++ v_offset = u_offset + (uv_stride * height / 2);
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset / 2;
++ u_offset = v_offset + (uv_stride * height / 2);
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset / 2;
++ u_offset = v_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset / 2;
++ v_offset = u_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++ break;
++
++ case IPU_PIX_FMT_YUV444P:
++ uv_stride = stride;
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset;
++ v_offset = u_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset -
++ (stride * vertical_offset) -
++ (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset -
++ (stride * vertical_offset) -
++ (horizontal_offset)) :
++ v_offset;
++ break;
++ case IPU_PIX_FMT_NV12:
++ uv_stride = stride;
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset;
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ horizontal_offset -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++
++ break;
++ default:
++ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
++ break;
++ }
++
++ if (u_fix > u_offset)
++ u_offset = u_fix;
++
++ if (v_fix > v_offset)
++ v_offset = v_fix;
++
++ /* UBO and VBO are 22-bit and 8-byte aligned */
++ if (u_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
++ if (v_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
++ if (u_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
++ if (v_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
++
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22);
++ if (old_offset != u_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22, u_offset / 8);
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22);
++ if (old_offset != v_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22, v_offset / 8);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22);
++ if (old_offset != u_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22, u_offset / 8);
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22);
++ if (old_offset != v_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22, v_offset / 8);
++}
++
++static inline void _ipu_ch_params_set_alpha_width(struct ipu_soc *ipu, uint32_t ch, int alpha_width)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3, alpha_width - 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 125, 3, alpha_width - 1);
++}
++
++static inline void _ipu_ch_param_set_bandmode(struct ipu_soc *ipu,
++ uint32_t ch, uint32_t band_height)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch),
++ 0, 114, 3, band_height - 1);
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch),
++ 0, 114, 3, band_height - 1);
++
++ dev_dbg(ipu->dev, "BNDM 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 114, 3));
++}
++
++/*
++ * The IPUv3 IDMAC has a bug when reading 32bpp pixels from a graphics
++ * plane whose alpha component sits in the most significant 8 bits. The
++ * bug only affects cases in which the relevant separate alpha channel is
++ * enabled.
++ *
++ * Return true if the alpha component position is affected, otherwise false.
++ */
++static inline bool _ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
++{
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ return true;
++ }
++
++ return false;
++}
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_pixel_clk.c linux-3.14.40/drivers/mxc/ipu3/ipu_pixel_clk.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_pixel_clk.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_pixel_clk.c 2015-05-01 14:57:59.611427001 -0500
+@@ -0,0 +1,317 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_pixel_clk.c
++ *
++ * @brief IPU pixel clock implementation
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++
++#include "ipu_prv.h"
++#include "ipu_regs.h"
++
++/*
++ * Muxed clock implementation
++ */
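++/*
++ * Note: set_parent below does not reparent anything in the CCM; it only
++ * flips DI_GEN_DI_CLK_EXT so the DI uses an externally supplied pixel
++ * clock (index 1) instead of the internal IPU clock (index 0).
++ */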
++struct clk_di_mux {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++ u8 index;
++};
++#define to_clk_di_mux(_hw) container_of(_hw, struct clk_di_mux, hw)
++
++static int _ipu_pixel_clk_set_parent(struct clk_hw *hw, u8 index)
++{
++ struct clk_di_mux *mux = to_clk_di_mux(hw);
++ struct ipu_soc *ipu = ipu_get_soc(mux->ipu_id);
++ u32 di_gen;
++
++ di_gen = ipu_di_read(ipu, mux->di_id, DI_GENERAL);
++ if (index == 0)
++ /* ipu1_clk or ipu2_clk internal clk */
++ di_gen &= ~DI_GEN_DI_CLK_EXT;
++ else
++ di_gen |= DI_GEN_DI_CLK_EXT;
++
++ ipu_di_write(ipu, mux->di_id, di_gen, DI_GENERAL);
++ mux->index = index;
++ pr_debug("ipu_pixel_clk: di_clk_ext:0x%x, di_gen reg:0x%x.\n",
++ !(di_gen & DI_GEN_DI_CLK_EXT), di_gen);
++ return 0;
++}
++
++static u8 _ipu_pixel_clk_get_parent(struct clk_hw *hw)
++{
++ struct clk_di_mux *mux = to_clk_di_mux(hw);
++
++ return mux->index;
++}
++
++const struct clk_ops clk_mux_di_ops = {
++ .get_parent = _ipu_pixel_clk_get_parent,
++ .set_parent = _ipu_pixel_clk_set_parent,
++};
++
++struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
++ const char **parent_names, u8 num_parents, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_mux_flags)
++{
++ struct clk_di_mux *mux;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ mux = kzalloc(sizeof(struct clk_di_mux), GFP_KERNEL);
++ if (!mux)
++ return ERR_PTR(-ENOMEM);
++
++ init.name = name;
++ init.ops = &clk_mux_di_ops;
++ init.flags = flags;
++ init.parent_names = parent_names;
++ init.num_parents = num_parents;
++
++ mux->ipu_id = ipu_id;
++ mux->di_id = di_id;
++ mux->flags = clk_mux_flags | CLK_SET_RATE_PARENT;
++ mux->hw.init = &init;
++
++ clk = clk_register(dev, &mux->hw);
++ if (IS_ERR(clk))
++ kfree(mux);
++
++ return clk;
++}
++
++/*
++ * Divider clock implementation
++ */
++struct clk_di_div {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++};
++#define to_clk_di_div(_hw) container_of(_hw, struct clk_di_div, hw)
++
++static unsigned long _ipu_pixel_clk_div_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ struct clk_di_div *di_div = to_clk_di_div(hw);
++ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
++ u32 div;
++ u64 final_rate = (unsigned long long)parent_rate * 16;
++
++ _ipu_get(ipu);
++ div = ipu_di_read(ipu, di_div->di_id, DI_BS_CLKGEN0);
++ _ipu_put(ipu);
++ pr_debug("ipu_di%d read BS_CLKGEN0 div:%d, final_rate:%lld, prate:%ld\n",
++ di_div->di_id, div, final_rate, parent_rate);
++
++ if (div == 0)
++ return 0;
++ do_div(final_rate, div);
++
++ return (unsigned long)final_rate;
++}
++
++static long _ipu_pixel_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long *parent_clk_rate)
++{
++ u64 div, final_rate;
++ u32 remainder;
++ u64 parent_rate = (unsigned long long)(*parent_clk_rate) * 16;
++
++ /*
++ * Calculate divider
++ * Fractional part is 4 bits,
++ * so simply multiply by 2^4 to get fractional part.
++ */
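++	/*
++	 * For example, a programmed value of 0x30 (48/16 = 3.0) divides the
++	 * parent clock by 3; the low 4 bits hold the fractional part.
++	 */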
++ div = parent_rate;
++ remainder = do_div(div, rate);
++ /* Round the divider value */
++ if (remainder > (rate/2))
++ div++;
++ if (div < 0x10) /* Min DI disp clock divider is 1 */
++ div = 0x10;
++ if (div & ~0xFEF)
++ div &= 0xFF8;
++ else {
++ /* Round up divider if it gets us closer to desired pix clk */
++ if ((div & 0xC) == 0xC) {
++ div += 0x10;
++ div &= ~0xF;
++ }
++ }
++ final_rate = parent_rate;
++ do_div(final_rate, div);
++
++ return final_rate;
++}
++
++static int _ipu_pixel_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_clk_rate)
++{
++ struct clk_di_div *di_div = to_clk_di_div(hw);
++ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
++ u64 div, parent_rate;
++ u32 remainder;
++
++ parent_rate = (unsigned long long)parent_clk_rate * 16;
++ div = parent_rate;
++ remainder = do_div(div, rate);
++ /* Round the divider value */
++ if (remainder > (rate/2))
++ div++;
++
++ /* Round up divider if it gets us closer to desired pix clk */
++ if ((div & 0xC) == 0xC) {
++ div += 0x10;
++ div &= ~0xF;
++ }
++ if (div > 0x1000)
++ pr_err("Overflow, di:%d, DI_BS_CLKGEN0 div:0x%x\n",
++ di_div->di_id, (u32)div);
++ _ipu_get(ipu);
++ ipu_di_write(ipu, di_div->di_id, (u32)div, DI_BS_CLKGEN0);
++
++ /* Setup pixel clock timing */
++ /* FIXME: needs to be more flexible */
++ /* Down time is half of period */
++ ipu_di_write(ipu, di_div->di_id, ((u32)div / 16) << 16, DI_BS_CLKGEN1);
++ _ipu_put(ipu);
++
++ return 0;
++}
++
++static struct clk_ops clk_div_ops = {
++ .recalc_rate = _ipu_pixel_clk_div_recalc_rate,
++ .round_rate = _ipu_pixel_clk_div_round_rate,
++ .set_rate = _ipu_pixel_clk_div_set_rate,
++};
++
++struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_div_flags)
++{
++ struct clk_di_div *di_div;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ di_div = kzalloc(sizeof(struct clk_di_div), GFP_KERNEL);
++ if (!di_div)
++ return ERR_PTR(-ENOMEM);
++
++ /* struct clk_di_div assignments */
++ di_div->ipu_id = ipu_id;
++ di_div->di_id = di_id;
++ di_div->flags = clk_div_flags;
++
++ init.name = name;
++ init.ops = &clk_div_ops;
++ init.flags = flags | CLK_SET_RATE_PARENT;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ di_div->hw.init = &init;
++
++ clk = clk_register(dev, &di_div->hw);
++ if (IS_ERR(clk))
++		kfree(di_div);
++
++ return clk;
++}
++
++/*
++ * Gated clock implementation
++ */
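++/*
++ * Note that enable/disable below do not gate a CCM clock; they set and
++ * clear the DI0/DI1_COUNTER_RELEASE bits in IPU_DISP_GEN, which releases
++ * or holds the display interface counters.
++ */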
++struct clk_di_gate {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++};
++#define to_clk_di_gate(_hw) container_of(_hw, struct clk_di_gate, hw)
++
++static int _ipu_pixel_clk_enable(struct clk_hw *hw)
++{
++ struct clk_di_gate *gate = to_clk_di_gate(hw);
++ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
++ u32 disp_gen;
++
++ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
++ disp_gen |= gate->di_id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
++
++ return 0;
++}
++
++static void _ipu_pixel_clk_disable(struct clk_hw *hw)
++{
++ struct clk_di_gate *gate = to_clk_di_gate(hw);
++ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
++ u32 disp_gen;
++
++ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
++ disp_gen &= gate->di_id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
++}
++
++static struct clk_ops clk_gate_di_ops = {
++ .enable = _ipu_pixel_clk_enable,
++ .disable = _ipu_pixel_clk_disable,
++};
++
++struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_gate_flags)
++{
++ struct clk_di_gate *gate;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ gate = kzalloc(sizeof(struct clk_di_gate), GFP_KERNEL);
++ if (!gate)
++ return ERR_PTR(-ENOMEM);
++
++ gate->ipu_id = ipu_id;
++ gate->di_id = di_id;
++ gate->flags = clk_gate_flags;
++
++ init.name = name;
++ init.ops = &clk_gate_di_ops;
++ init.flags = flags | CLK_SET_RATE_PARENT;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ gate->hw.init = &init;
++
++ clk = clk_register(dev, &gate->hw);
++ if (IS_ERR(clk))
++		kfree(gate);
++
++ return clk;
++}
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_prv.h linux-3.14.40/drivers/mxc/ipu3/ipu_prv.h
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_prv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_prv.h 2015-05-01 14:57:59.611427001 -0500
+@@ -0,0 +1,356 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __INCLUDE_IPU_PRV_H__
++#define __INCLUDE_IPU_PRV_H__
++
++#include <linux/clkdev.h>
++#include <linux/device.h>
++#include <linux/fsl_devices.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++
++#define MXC_IPU_MAX_NUM 2
++#define MXC_DI_NUM_PER_IPU 2
++
++/* Globals */
++extern int dmfc_type_setup;
++
++#define IDMA_CHAN_INVALID 0xFF
++#define HIGH_RESOLUTION_WIDTH 1024
++
++struct ipu_irq_node {
++ irqreturn_t(*handler) (int, void *); /*!< the ISR */
++ const char *name; /*!< device associated with the interrupt */
++ void *dev_id; /*!< some unique information for the ISR */
++ __u32 flags; /*!< not used */
++};
++
++enum csc_type_t {
++ RGB2YUV = 0,
++ YUV2RGB,
++ RGB2RGB,
++ YUV2YUV,
++ CSC_NONE,
++ CSC_NUM
++};
++
++enum imx_ipu_type {
++ IMX6Q_IPU,
++};
++
++struct ipu_pltfm_data {
++ u32 id;
++ u32 devtype;
++ int (*init) (int);
++ void (*pg) (int);
++
++ /*
++	 * Bypass reset to avoid the display channel being
++	 * stopped by probe, since it may already have been
++	 * started by the bootloader.
++ */
++ bool bypass_reset;
++};
++
++struct ipu_soc {
++ bool online;
++ struct ipu_pltfm_data *pdata;
++
++ /*clk*/
++ struct clk *ipu_clk;
++ struct clk *di_clk[2];
++ struct clk *di_clk_sel[2];
++ struct clk *pixel_clk[2];
++ struct clk *pixel_clk_sel[2];
++ struct clk *csi_clk[2];
++
++ /*irq*/
++ int irq_sync;
++ int irq_err;
++ struct ipu_irq_node irq_list[IPU_IRQ_COUNT];
++
++ /*reg*/
++ void __iomem *cm_reg;
++ void __iomem *idmac_reg;
++ void __iomem *dp_reg;
++ void __iomem *ic_reg;
++ void __iomem *dc_reg;
++ void __iomem *dc_tmpl_reg;
++ void __iomem *dmfc_reg;
++ void __iomem *di_reg[2];
++ void __iomem *smfc_reg;
++ void __iomem *csi_reg[2];
++ void __iomem *cpmem_base;
++ void __iomem *tpmem_base;
++ void __iomem *disp_base[2];
++ void __iomem *vdi_reg;
++
++ struct device *dev;
++
++ ipu_channel_t csi_channel[2];
++ ipu_channel_t using_ic_dirct_ch;
++ unsigned char dc_di_assignment[10];
++ bool sec_chan_en[24];
++ bool thrd_chan_en[24];
++ bool chan_is_interlaced[52];
++ uint32_t channel_init_mask;
++ uint32_t channel_enable_mask;
++
++ /*use count*/
++ int dc_use_count;
++ int dp_use_count;
++ int dmfc_use_count;
++ int smfc_use_count;
++ int ic_use_count;
++ int rot_use_count;
++ int vdi_use_count;
++ int di_use_count[2];
++ int csi_use_count[2];
++
++ struct mutex mutex_lock;
++ spinlock_t int_reg_spin_lock;
++ spinlock_t rdy_reg_spin_lock;
++
++ int dmfc_size_28;
++ int dmfc_size_29;
++ int dmfc_size_24;
++ int dmfc_size_27;
++ int dmfc_size_23;
++
++ enum csc_type_t fg_csc_type;
++ enum csc_type_t bg_csc_type;
++ bool color_key_4rgb;
++ bool dc_swap;
++ struct completion dc_comp;
++ struct completion csi_comp;
++
++ struct rot_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ int size;
++ } rot_dma[2];
++
++ int vdoa_en;
++ struct task_struct *thread[2];
++
++};
++
++struct ipu_channel {
++ u8 video_in_dma;
++ u8 alpha_in_dma;
++ u8 graph_in_dma;
++ u8 out_dma;
++};
++
++enum ipu_dmfc_type {
++ DMFC_NORMAL = 0,
++ DMFC_HIGH_RESOLUTION_DC,
++ DMFC_HIGH_RESOLUTION_DP,
++ DMFC_HIGH_RESOLUTION_ONLY_DP,
++};
++
++static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->cm_reg + offset);
++}
++
++static inline void ipu_cm_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->cm_reg + offset);
++}
++
++static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->idmac_reg + offset);
++}
++
++static inline void ipu_idmac_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->idmac_reg + offset);
++}
++
++static inline u32 ipu_dc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dc_reg + offset);
++}
++
++static inline void ipu_dc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dc_reg + offset);
++}
++
++static inline u32 ipu_dc_tmpl_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dc_tmpl_reg + offset);
++}
++
++static inline void ipu_dc_tmpl_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dc_tmpl_reg + offset);
++}
++
++static inline u32 ipu_dmfc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dmfc_reg + offset);
++}
++
++static inline void ipu_dmfc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dmfc_reg + offset);
++}
++
++static inline u32 ipu_dp_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dp_reg + offset);
++}
++
++static inline void ipu_dp_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dp_reg + offset);
++}
++
++static inline u32 ipu_di_read(struct ipu_soc *ipu, int di, unsigned offset)
++{
++ return readl(ipu->di_reg[di] + offset);
++}
++
++static inline void ipu_di_write(struct ipu_soc *ipu, int di,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->di_reg[di] + offset);
++}
++
++static inline u32 ipu_csi_read(struct ipu_soc *ipu, int csi, unsigned offset)
++{
++ return readl(ipu->csi_reg[csi] + offset);
++}
++
++static inline void ipu_csi_write(struct ipu_soc *ipu, int csi,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->csi_reg[csi] + offset);
++}
++
++static inline u32 ipu_smfc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->smfc_reg + offset);
++}
++
++static inline void ipu_smfc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->smfc_reg + offset);
++}
++
++static inline u32 ipu_vdi_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->vdi_reg + offset);
++}
++
++static inline void ipu_vdi_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->vdi_reg + offset);
++}
++
++static inline u32 ipu_ic_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->ic_reg + offset);
++}
++
++static inline void ipu_ic_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->ic_reg + offset);
++}
++
++int register_ipu_device(struct ipu_soc *ipu, int id);
++void unregister_ipu_device(struct ipu_soc *ipu, int id);
++ipu_color_space_t format_to_colorspace(uint32_t fmt);
++bool ipu_pixel_format_has_alpha(uint32_t fmt);
++
++void ipu_dump_registers(struct ipu_soc *ipu);
++
++uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
++
++void ipu_disp_init(struct ipu_soc *ipu);
++void _ipu_init_dc_mappings(struct ipu_soc *ipu);
++int _ipu_dp_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t in_pixel_fmt,
++ uint32_t out_pixel_fmt);
++void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt);
++void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan);
++void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap);
++void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first);
++void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width);
++void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size);
++int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel);
++
++void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel);
++int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi);
++void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
++void _ipu_vdi_uninit(struct ipu_soc *ipu);
++void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu);
++void _ipu_ic_init_csi(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_csi(struct ipu_soc *ipu);
++int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi);
++void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu);
++int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_pp(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu);
++int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan, uint16_t width, uint16_t height,
++ int burst_size, ipu_rotate_mode_t rot);
++void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu);
++int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi);
++int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi);
++void ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
++ uint32_t g_value, uint32_t b_value,
++ uint32_t pix_clk, uint32_t csi);
++void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi);
++void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi);
++void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi);
++void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs);
++void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
++int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos);
++int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos);
++void _ipu_get(struct ipu_soc *ipu);
++void _ipu_put(struct ipu_soc *ipu);
++
++struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
++ const char **parent_names, u8 num_parents, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_mux_flags);
++struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_div_flags);
++struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_gate_flags);
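++
++/*
++ * The three helpers declared above are implemented in ipu_pixel_clk.c:
++ * the mux selects between the internal IPU clock and an external source
++ * via DI_GEN_DI_CLK_EXT, the divider programs DI_BS_CLKGEN0/1, and the
++ * gate toggles the DI0/DI1_COUNTER_RELEASE bits in IPU_DISP_GEN. They are
++ * typically registered together per DI to form the pixel clock chain.
++ */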
++#endif /* __INCLUDE_IPU_PRV_H__ */
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/ipu_regs.h linux-3.14.40/drivers/mxc/ipu3/ipu_regs.h
+--- linux-3.14.40.orig/drivers/mxc/ipu3/ipu_regs.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/ipu_regs.h 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,743 @@
++/*
++ * Copyright (C) 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_regs.h
++ *
++ * @brief IPU Register definitions
++ *
++ * @ingroup IPU
++ */
++#ifndef __IPU_REGS_INCLUDED__
++#define __IPU_REGS_INCLUDED__
++
++enum imx_ipu_rev {
++ IPU_V3DEX = 2,
++ IPU_V3M,
++ IPU_V3H,
++};
++
++/*
++ * hw_rev 2: IPUV3DEX
++ * hw_rev 3: IPUV3M
++ * hw_rev 4: IPUV3H
++ */
++extern int g_ipu_hw_rev;
++
++#define IPU_MAX_VDI_IN_WIDTH ({g_ipu_hw_rev >= 3 ? \
++ (968) : \
++ (720); })
++#define IPU_DISP0_BASE 0x00000000
++#define IPU_MCU_T_DEFAULT 8
++#define IPU_DISP1_BASE ({g_ipu_hw_rev < 4 ? \
++ (IPU_MCU_T_DEFAULT << 25) : \
++ (0x00000000); })
++#define IPUV3DEX_REG_BASE 0x1E000000
++#define IPUV3M_REG_BASE 0x06000000
++#define IPUV3H_REG_BASE 0x00200000
++
++#define IPU_CM_REG_BASE 0x00000000
++#define IPU_IDMAC_REG_BASE 0x00008000
++#define IPU_ISP_REG_BASE 0x00010000
++#define IPU_DP_REG_BASE 0x00018000
++#define IPU_IC_REG_BASE 0x00020000
++#define IPU_IRT_REG_BASE 0x00028000
++#define IPU_CSI0_REG_BASE 0x00030000
++#define IPU_CSI1_REG_BASE 0x00038000
++#define IPU_DI0_REG_BASE 0x00040000
++#define IPU_DI1_REG_BASE 0x00048000
++#define IPU_SMFC_REG_BASE 0x00050000
++#define IPU_DC_REG_BASE 0x00058000
++#define IPU_DMFC_REG_BASE 0x00060000
++#define IPU_VDI_REG_BASE 0x00068000
++#define IPU_CPMEM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00100000) : \
++ (0x01000000); })
++#define IPU_LUT_REG_BASE 0x01020000
++#define IPU_SRM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00140000) : \
++ (0x01040000); })
++#define IPU_TPM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00160000) : \
++ (0x01060000); })
++#define IPU_DC_TMPL_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00180000) : \
++ (0x01080000); })
++#define IPU_ISP_TBPR_REG_BASE 0x010C0000
++
++/* Register addresses */
++/* IPU Common registers */
++#define IPU_CM_REG(offset) (offset)
++
++#define IPU_CONF IPU_CM_REG(0)
++#define IPU_SRM_PRI1 IPU_CM_REG(0x00A0)
++#define IPU_SRM_PRI2 IPU_CM_REG(0x00A4)
++#define IPU_FS_PROC_FLOW1 IPU_CM_REG(0x00A8)
++#define IPU_FS_PROC_FLOW2 IPU_CM_REG(0x00AC)
++#define IPU_FS_PROC_FLOW3 IPU_CM_REG(0x00B0)
++#define IPU_FS_DISP_FLOW1 IPU_CM_REG(0x00B4)
++#define IPU_FS_DISP_FLOW2 IPU_CM_REG(0x00B8)
++#define IPU_SKIP IPU_CM_REG(0x00BC)
++#define IPU_DISP_ALT_CONF IPU_CM_REG(0x00C0)
++#define IPU_DISP_GEN IPU_CM_REG(0x00C4)
++#define IPU_DISP_ALT1 IPU_CM_REG(0x00C8)
++#define IPU_DISP_ALT2 IPU_CM_REG(0x00CC)
++#define IPU_DISP_ALT3 IPU_CM_REG(0x00D0)
++#define IPU_DISP_ALT4 IPU_CM_REG(0x00D4)
++#define IPU_SNOOP IPU_CM_REG(0x00D8)
++#define IPU_MEM_RST IPU_CM_REG(0x00DC)
++#define IPU_PM IPU_CM_REG(0x00E0)
++#define IPU_GPR IPU_CM_REG(0x00E4)
++#define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
++#define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
++/*
++ * IPUv3D doesn't support triple buffering, so point
++ * IPU_CHA_TRB_MODE_SEL, IPU_CHA_TRIPLE_CUR_BUF and
++ * IPU_CHA_BUF2_RDY to readonly
++ * IPU_ALT_CUR_BUF0 for IPUv3D.
++ */
++#define IPU_CHA_TRB_MODE_SEL(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0178 + 4 * ((ch) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_TRIPLE_CUR_BUF(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0258 + \
++ 4 * (((ch) * 2) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0288 + 4 * ((ch) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_CUR_BUF(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x023C + 4 * ((ch) / 32)) : \
++ (0x0124 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CUR_BUF0 IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0244) : \
++ (0x012C); })
++#define IPU_ALT_CUR_BUF1 IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0248) : \
++ (0x0130); })
++#define IPU_SRM_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x024C) : \
++ (0x0134); })
++#define IPU_PROC_TASK_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0250) : \
++ (0x0138); })
++#define IPU_DISP_TASK_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0254) : \
++ (0x013C); })
++#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0268 + 4 * ((ch) / 32)) : \
++ (0x0140 + 4 * ((ch) / 32)); })
++#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0270 + 4 * ((ch) / 32)) : \
++ (0x0148 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0278 + 4 * ((ch) / 32)) : \
++ (0x0158 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0280 + 4 * ((ch) / 32)) : \
++ (0x0160 + 4 * ((ch) / 32)); })
++
++#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * ((n) - 1))
++#define IPU_INT_STAT(n) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0200 + 4 * ((n) - 1)) : \
++ (0x00E8 + 4 * ((n) - 1)); })
++
++#define IPUIRQ_2_STATREG(irq) IPU_CM_REG(IPU_INT_STAT(1) + 4 * ((irq) / 32))
++#define IPUIRQ_2_CTRLREG(irq) IPU_CM_REG(IPU_INT_CTRL(1) + 4 * ((irq) / 32))
++#define IPUIRQ_2_MASK(irq) (1UL << ((irq) & 0x1F))
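++/* e.g. IRQ 33 maps to IPU_INT_STAT(2) / IPU_INT_CTRL(2), bit 1. */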
++
++/* IPU VDI registers */
++#define IPU_VDI_REG(offset) (offset)
++
++#define VDI_FSIZE IPU_VDI_REG(0)
++#define VDI_C IPU_VDI_REG(0x0004)
++
++/* IPU CSI Registers */
++#define IPU_CSI_REG(offset) (offset)
++
++#define CSI_SENS_CONF IPU_CSI_REG(0)
++#define CSI_SENS_FRM_SIZE IPU_CSI_REG(0x0004)
++#define CSI_ACT_FRM_SIZE IPU_CSI_REG(0x0008)
++#define CSI_OUT_FRM_CTRL IPU_CSI_REG(0x000C)
++#define CSI_TST_CTRL IPU_CSI_REG(0x0010)
++#define CSI_CCIR_CODE_1 IPU_CSI_REG(0x0014)
++#define CSI_CCIR_CODE_2 IPU_CSI_REG(0x0018)
++#define CSI_CCIR_CODE_3 IPU_CSI_REG(0x001C)
++#define CSI_MIPI_DI IPU_CSI_REG(0x0020)
++#define CSI_SKIP IPU_CSI_REG(0x0024)
++#define CSI_CPD_CTRL IPU_CSI_REG(0x0028)
++#define CSI_CPD_RC(n) IPU_CSI_REG(0x002C + 4 * (n))
++#define CSI_CPD_RS(n) IPU_CSI_REG(0x004C + 4 * (n))
++#define CSI_CPD_GRC(n) IPU_CSI_REG(0x005C + 4 * (n))
++#define CSI_CPD_GRS(n) IPU_CSI_REG(0x007C + 4 * (n))
++#define CSI_CPD_GBC(n) IPU_CSI_REG(0x008C + 4 * (n))
++#define CSI_CPD_GBS(n) IPU_CSI_REG(0x00AC + 4 * (n))
++#define CSI_CPD_BC(n) IPU_CSI_REG(0x00BC + 4 * (n))
++#define CSI_CPD_BS(n) IPU_CSI_REG(0x00DC + 4 * (n))
++#define CSI_CPD_OFFSET1 IPU_CSI_REG(0x00EC)
++#define CSI_CPD_OFFSET2 IPU_CSI_REG(0x00F0)
++
++/* IPU SMFC Registers */
++#define IPU_SMFC_REG(offset) (offset)
++
++#define SMFC_MAP IPU_SMFC_REG(0)
++#define SMFC_WMC IPU_SMFC_REG(0x0004)
++#define SMFC_BS IPU_SMFC_REG(0x0008)
++
++/* IPU IC Registers */
++#define IPU_IC_REG(offset) (offset)
++
++#define IC_CONF IPU_IC_REG(0)
++#define IC_PRP_ENC_RSC IPU_IC_REG(0x0004)
++#define IC_PRP_VF_RSC IPU_IC_REG(0x0008)
++#define IC_PP_RSC IPU_IC_REG(0x000C)
++#define IC_CMBP_1 IPU_IC_REG(0x0010)
++#define IC_CMBP_2 IPU_IC_REG(0x0014)
++#define IC_IDMAC_1 IPU_IC_REG(0x0018)
++#define IC_IDMAC_2 IPU_IC_REG(0x001C)
++#define IC_IDMAC_3 IPU_IC_REG(0x0020)
++#define IC_IDMAC_4 IPU_IC_REG(0x0024)
++
++/* IPU IDMAC Registers */
++#define IPU_IDMAC_REG(offset) (offset)
++
++#define IDMAC_CONF IPU_IDMAC_REG(0x0000)
++#define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
++#define IDMAC_SEP_ALPHA IPU_IDMAC_REG(0x000C)
++#define IDMAC_ALT_SEP_ALPHA IPU_IDMAC_REG(0x0010)
++#define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
++#define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001C + 4 * ((ch) / 32))
++#define IDMAC_CH_LOCK_EN_1 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0024) : 0; })
++#define IDMAC_CH_LOCK_EN_2 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0028) : \
++ (0x0024); })
++#define IDMAC_SUB_ADDR_0 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x002C) : \
++ (0x0028); })
++#define IDMAC_SUB_ADDR_1 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0030) : \
++ (0x002C); })
++#define IDMAC_SUB_ADDR_2 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0034) : \
++ (0x0030); })
++/*
++ * IPUv3D doesn't support IDMAC_SUB_ADDR_3 and IDMAC_SUB_ADDR_4,
++ * so point them to readonly IDMAC_CHA_BUSY1 for IPUv3D.
++ */
++#define IDMAC_SUB_ADDR_3 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0038) : \
++ (0x0040); })
++#define IDMAC_SUB_ADDR_4 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x003C) : \
++ (0x0040); })
++#define IDMAC_BAND_EN(ch) IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0040 + 4 * ((ch) / 32)) : \
++ (0x0034 + 4 * ((ch) / 32)); })
++#define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0100 + 4 * ((ch) / 32)) : \
++ (0x0040 + 4 * ((ch) / 32)); })
++
++/* IPU DI Registers */
++#define IPU_DI_REG(offset) (offset)
++
++#define DI_GENERAL IPU_DI_REG(0)
++#define DI_BS_CLKGEN0 IPU_DI_REG(0x0004)
++#define DI_BS_CLKGEN1 IPU_DI_REG(0x0008)
++#define DI_SW_GEN0(gen) IPU_DI_REG(0x000C + 4 * ((gen) - 1))
++#define DI_SW_GEN1(gen) IPU_DI_REG(0x0030 + 4 * ((gen) - 1))
++#define DI_STP_REP(gen) IPU_DI_REG(0x0148 + 4 * (((gen) - 1) / 2))
++#define DI_SYNC_AS_GEN IPU_DI_REG(0x0054)
++#define DI_DW_GEN(gen) IPU_DI_REG(0x0058 + 4 * (gen))
++#define DI_DW_SET(gen, set) IPU_DI_REG(0x0088 + 4 * ((gen) + 0xC * (set)))
++#define DI_SER_CONF IPU_DI_REG(0x015C)
++#define DI_SSC IPU_DI_REG(0x0160)
++#define DI_POL IPU_DI_REG(0x0164)
++#define DI_AW0 IPU_DI_REG(0x0168)
++#define DI_AW1 IPU_DI_REG(0x016C)
++#define DI_SCR_CONF IPU_DI_REG(0x0170)
++#define DI_STAT IPU_DI_REG(0x0174)
++
++/* IPU DMFC Registers */
++#define IPU_DMFC_REG(offset) (offset)
++
++#define DMFC_RD_CHAN IPU_DMFC_REG(0)
++#define DMFC_WR_CHAN IPU_DMFC_REG(0x0004)
++#define DMFC_WR_CHAN_DEF IPU_DMFC_REG(0x0008)
++#define DMFC_DP_CHAN IPU_DMFC_REG(0x000C)
++#define DMFC_DP_CHAN_DEF IPU_DMFC_REG(0x0010)
++#define DMFC_GENERAL1 IPU_DMFC_REG(0x0014)
++#define DMFC_GENERAL2 IPU_DMFC_REG(0x0018)
++#define DMFC_IC_CTRL IPU_DMFC_REG(0x001C)
++#define DMFC_STAT IPU_DMFC_REG(0x0020)
++
++/* IPU DC Registers */
++#define IPU_DC_REG(offset) (offset)
++
++#define DC_MAP_CONF_PTR(n) IPU_DC_REG(0x0108 + ((n) & ~0x1) * 2)
++#define DC_MAP_CONF_VAL(n) IPU_DC_REG(0x0144 + ((n) & ~0x1) * 2)
++
++#define _RL_CH_2_OFFSET(ch) (((ch) == 0) ? 8 : ( \
++ ((ch) == 1) ? 0x24 : ( \
++ ((ch) == 2) ? 0x40 : ( \
++ ((ch) == 5) ? 0x64 : ( \
++ ((ch) == 6) ? 0x80 : ( \
++ ((ch) == 8) ? 0x9C : ( \
++ ((ch) == 9) ? 0xBC : (-1))))))))
++#define DC_RL_CH(ch, evt) IPU_DC_REG(_RL_CH_2_OFFSET(ch) + \
++ ((evt) & ~0x1) * 2)
++
++#define DC_EVT_NF 0
++#define DC_EVT_NL 1
++#define DC_EVT_EOF 2
++#define DC_EVT_NFIELD 3
++#define DC_EVT_EOL 4
++#define DC_EVT_EOFIELD 5
++#define DC_EVT_NEW_ADDR 6
++#define DC_EVT_NEW_CHAN 7
++#define DC_EVT_NEW_DATA 8
++
++#define DC_EVT_NEW_ADDR_W_0 0
++#define DC_EVT_NEW_ADDR_W_1 1
++#define DC_EVT_NEW_CHAN_W_0 2
++#define DC_EVT_NEW_CHAN_W_1 3
++#define DC_EVT_NEW_DATA_W_0 4
++#define DC_EVT_NEW_DATA_W_1 5
++#define DC_EVT_NEW_ADDR_R_0 6
++#define DC_EVT_NEW_ADDR_R_1 7
++#define DC_EVT_NEW_CHAN_R_0 8
++#define DC_EVT_NEW_CHAN_R_1 9
++#define DC_EVT_NEW_DATA_R_0 10
++#define DC_EVT_NEW_DATA_R_1 11
++#define DC_EVEN_UGDE0 12
++#define DC_ODD_UGDE0 13
++#define DC_EVEN_UGDE1 14
++#define DC_ODD_UGDE1 15
++#define DC_EVEN_UGDE2 16
++#define DC_ODD_UGDE2 17
++#define DC_EVEN_UGDE3 18
++#define DC_ODD_UGDE3 19
++
++#define dc_ch_offset(ch) \
++({ \
++ const u8 _offset[] = { \
++ 0, 0x1C, 0x38, 0x54, 0x58, 0x5C, 0x78, 0, 0x94, 0xB4}; \
++ _offset[ch]; \
++})
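++/* e.g. dc_ch_offset(5) == 0x5C, so DC_WR_CH_CONF(5) equals DC_WR_CH_CONF_5. */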
++#define DC_WR_CH_CONF(ch) IPU_DC_REG(dc_ch_offset(ch))
++#define DC_WR_CH_ADDR(ch) IPU_DC_REG(dc_ch_offset(ch) + 4)
++
++#define DC_WR_CH_CONF_1 IPU_DC_REG(0x001C)
++#define DC_WR_CH_ADDR_1 IPU_DC_REG(0x0020)
++#define DC_WR_CH_CONF_5 IPU_DC_REG(0x005C)
++#define DC_WR_CH_ADDR_5 IPU_DC_REG(0x0060)
++#define DC_GEN IPU_DC_REG(0x00D4)
++#define DC_DISP_CONF1(disp) IPU_DC_REG(0x00D8 + 4 * (disp))
++#define DC_DISP_CONF2(disp) IPU_DC_REG(0x00E8 + 4 * (disp))
++#define DC_STAT IPU_DC_REG(0x01C8)
++#define DC_UGDE_0(evt) IPU_DC_REG(0x0174 + 16 * (evt))
++#define DC_UGDE_1(evt) IPU_DC_REG(0x0178 + 16 * (evt))
++#define DC_UGDE_2(evt) IPU_DC_REG(0x017C + 16 * (evt))
++#define DC_UGDE_3(evt) IPU_DC_REG(0x0180 + 16 * (evt))
++
++/* IPU DP Registers */
++#define IPU_DP_REG(offset) (offset)
++
++#define DP_SYNC 0
++#define DP_ASYNC0 0x60
++#define DP_ASYNC1 0xBC
++#define DP_COM_CONF(flow) IPU_DP_REG(flow)
++#define DP_GRAPH_WIND_CTRL(flow) IPU_DP_REG(0x0004 + (flow))
++#define DP_FG_POS(flow) IPU_DP_REG(0x0008 + (flow))
++#define DP_GAMMA_C(flow, i) IPU_DP_REG(0x0014 + (flow) + 4 * (i))
++#define DP_GAMMA_S(flow, i) IPU_DP_REG(0x0034 + (flow) + 4 * (i))
++#define DP_CSC_A_0(flow) IPU_DP_REG(0x0044 + (flow))
++#define DP_CSC_A_1(flow) IPU_DP_REG(0x0048 + (flow))
++#define DP_CSC_A_2(flow) IPU_DP_REG(0x004C + (flow))
++#define DP_CSC_A_3(flow) IPU_DP_REG(0x0050 + (flow))
++#define DP_CSC_0(flow) IPU_DP_REG(0x0054 + (flow))
++#define DP_CSC_1(flow) IPU_DP_REG(0x0058 + (flow))
++
++enum {
++ IPU_CONF_CSI0_EN = 0x00000001,
++ IPU_CONF_CSI1_EN = 0x00000002,
++ IPU_CONF_IC_EN = 0x00000004,
++ IPU_CONF_ROT_EN = 0x00000008,
++ IPU_CONF_ISP_EN = 0x00000010,
++ IPU_CONF_DP_EN = 0x00000020,
++ IPU_CONF_DI0_EN = 0x00000040,
++ IPU_CONF_DI1_EN = 0x00000080,
++ IPU_CONF_DMFC_EN = 0x00000400,
++ IPU_CONF_SMFC_EN = 0x00000100,
++ IPU_CONF_DC_EN = 0x00000200,
++ IPU_CONF_VDI_EN = 0x00001000,
++ IPU_CONF_IDMAC_DIS = 0x00400000,
++ IPU_CONF_IC_DMFC_SEL = 0x02000000,
++ IPU_CONF_IC_DMFC_SYNC = 0x04000000,
++ IPU_CONF_VDI_DMFC_SYNC = 0x08000000,
++ IPU_CONF_CSI0_DATA_SOURCE = 0x10000000,
++ IPU_CONF_CSI0_DATA_SOURCE_OFFSET = 28,
++ IPU_CONF_CSI1_DATA_SOURCE = 0x20000000,
++ IPU_CONF_IC_INPUT = 0x40000000,
++ IPU_CONF_CSI_SEL = 0x80000000,
++
++ DI0_COUNTER_RELEASE = 0x01000000,
++ DI1_COUNTER_RELEASE = 0x02000000,
++
++ FS_PRPVF_ROT_SRC_SEL_MASK = 0x00000F00,
++ FS_PRPVF_ROT_SRC_SEL_OFFSET = 8,
++ FS_PRPENC_ROT_SRC_SEL_MASK = 0x0000000F,
++ FS_PRPENC_ROT_SRC_SEL_OFFSET = 0,
++ FS_PP_ROT_SRC_SEL_MASK = 0x000F0000,
++ FS_PP_ROT_SRC_SEL_OFFSET = 16,
++ FS_PP_SRC_SEL_MASK = 0x0000F000,
++ FS_PP_SRC_SEL_VDOA = 0x00008000,
++ FS_PP_SRC_SEL_OFFSET = 12,
++ FS_PRP_SRC_SEL_MASK = 0x0F000000,
++ FS_PRP_SRC_SEL_OFFSET = 24,
++ FS_VF_IN_VALID = 0x80000000,
++ FS_ENC_IN_VALID = 0x40000000,
++ FS_VDI_SRC_SEL_MASK = 0x30000000,
++ FS_VDI_SRC_SEL_VDOA = 0x20000000,
++ FS_VDOA_DEST_SEL_MASK = 0x00030000,
++ FS_VDOA_DEST_SEL_VDI = 0x00020000,
++ FS_VDOA_DEST_SEL_IC = 0x00010000,
++ FS_VDI_SRC_SEL_OFFSET = 28,
++
++
++ FS_PRPENC_DEST_SEL_MASK = 0x0000000F,
++ FS_PRPENC_DEST_SEL_OFFSET = 0,
++ FS_PRPVF_DEST_SEL_MASK = 0x000000F0,
++ FS_PRPVF_DEST_SEL_OFFSET = 4,
++ FS_PRPVF_ROT_DEST_SEL_MASK = 0x00000F00,
++ FS_PRPVF_ROT_DEST_SEL_OFFSET = 8,
++ FS_PP_DEST_SEL_MASK = 0x0000F000,
++ FS_PP_DEST_SEL_OFFSET = 12,
++ FS_PP_ROT_DEST_SEL_MASK = 0x000F0000,
++ FS_PP_ROT_DEST_SEL_OFFSET = 16,
++ FS_PRPENC_ROT_DEST_SEL_MASK = 0x00F00000,
++ FS_PRPENC_ROT_DEST_SEL_OFFSET = 20,
++
++ FS_SMFC0_DEST_SEL_MASK = 0x0000000F,
++ FS_SMFC0_DEST_SEL_OFFSET = 0,
++ FS_SMFC1_DEST_SEL_MASK = 0x00000070,
++ FS_SMFC1_DEST_SEL_OFFSET = 4,
++ FS_SMFC2_DEST_SEL_MASK = 0x00000780,
++ FS_SMFC2_DEST_SEL_OFFSET = 7,
++ FS_SMFC3_DEST_SEL_MASK = 0x00003800,
++ FS_SMFC3_DEST_SEL_OFFSET = 11,
++
++ FS_DC1_SRC_SEL_MASK = 0x00F00000,
++ FS_DC1_SRC_SEL_OFFSET = 20,
++ FS_DC2_SRC_SEL_MASK = 0x000F0000,
++ FS_DC2_SRC_SEL_OFFSET = 16,
++ FS_DP_SYNC0_SRC_SEL_MASK = 0x0000000F,
++ FS_DP_SYNC0_SRC_SEL_OFFSET = 0,
++ FS_DP_SYNC1_SRC_SEL_MASK = 0x000000F0,
++ FS_DP_SYNC1_SRC_SEL_OFFSET = 4,
++ FS_DP_ASYNC0_SRC_SEL_MASK = 0x00000F00,
++ FS_DP_ASYNC0_SRC_SEL_OFFSET = 8,
++ FS_DP_ASYNC1_SRC_SEL_MASK = 0x0000F000,
++ FS_DP_ASYNC1_SRC_SEL_OFFSET = 12,
++
++ FS_AUTO_REF_PER_MASK = 0,
++ FS_AUTO_REF_PER_OFFSET = 16,
++
++ TSTAT_VF_MASK = 0x0000000C,
++ TSTAT_VF_OFFSET = 2,
++ TSTAT_VF_ROT_MASK = 0x00000300,
++ TSTAT_VF_ROT_OFFSET = 8,
++ TSTAT_ENC_MASK = 0x00000003,
++ TSTAT_ENC_OFFSET = 0,
++ TSTAT_ENC_ROT_MASK = 0x000000C0,
++ TSTAT_ENC_ROT_OFFSET = 6,
++ TSTAT_PP_MASK = 0x00000030,
++ TSTAT_PP_OFFSET = 4,
++ TSTAT_PP_ROT_MASK = 0x00000C00,
++ TSTAT_PP_ROT_OFFSET = 10,
++
++ TASK_STAT_IDLE = 0,
++ TASK_STAT_ACTIVE = 1,
++ TASK_STAT_WAIT4READY = 2,
++
++ /* Image Converter Register bits */
++ IC_CONF_PRPENC_EN = 0x00000001,
++ IC_CONF_PRPENC_CSC1 = 0x00000002,
++ IC_CONF_PRPENC_ROT_EN = 0x00000004,
++ IC_CONF_PRPVF_EN = 0x00000100,
++ IC_CONF_PRPVF_CSC1 = 0x00000200,
++ IC_CONF_PRPVF_CSC2 = 0x00000400,
++ IC_CONF_PRPVF_CMB = 0x00000800,
++ IC_CONF_PRPVF_ROT_EN = 0x00001000,
++ IC_CONF_PP_EN = 0x00010000,
++ IC_CONF_PP_CSC1 = 0x00020000,
++ IC_CONF_PP_CSC2 = 0x00040000,
++ IC_CONF_PP_CMB = 0x00080000,
++ IC_CONF_PP_ROT_EN = 0x00100000,
++ IC_CONF_IC_GLB_LOC_A = 0x10000000,
++ IC_CONF_KEY_COLOR_EN = 0x20000000,
++ IC_CONF_RWS_EN = 0x40000000,
++ IC_CONF_CSI_MEM_WR_EN = 0x80000000,
++
++ IC_RSZ_MAX_RESIZE_RATIO = 0x00004000,
++
++ IC_IDMAC_1_CB0_BURST_16 = 0x00000001,
++ IC_IDMAC_1_CB1_BURST_16 = 0x00000002,
++ IC_IDMAC_1_CB2_BURST_16 = 0x00000004,
++ IC_IDMAC_1_CB3_BURST_16 = 0x00000008,
++ IC_IDMAC_1_CB4_BURST_16 = 0x00000010,
++ IC_IDMAC_1_CB5_BURST_16 = 0x00000020,
++ IC_IDMAC_1_CB6_BURST_16 = 0x00000040,
++ IC_IDMAC_1_CB7_BURST_16 = 0x00000080,
++ IC_IDMAC_1_PRPENC_ROT_MASK = 0x00003800,
++ IC_IDMAC_1_PRPENC_ROT_OFFSET = 11,
++ IC_IDMAC_1_PRPVF_ROT_MASK = 0x0001C000,
++ IC_IDMAC_1_PRPVF_ROT_OFFSET = 14,
++ IC_IDMAC_1_PP_ROT_MASK = 0x000E0000,
++ IC_IDMAC_1_PP_ROT_OFFSET = 17,
++ IC_IDMAC_1_PP_FLIP_RS = 0x00400000,
++ IC_IDMAC_1_PRPVF_FLIP_RS = 0x00200000,
++ IC_IDMAC_1_PRPENC_FLIP_RS = 0x00100000,
++
++ IC_IDMAC_2_PRPENC_HEIGHT_MASK = 0x000003FF,
++ IC_IDMAC_2_PRPENC_HEIGHT_OFFSET = 0,
++ IC_IDMAC_2_PRPVF_HEIGHT_MASK = 0x000FFC00,
++ IC_IDMAC_2_PRPVF_HEIGHT_OFFSET = 10,
++ IC_IDMAC_2_PP_HEIGHT_MASK = 0x3FF00000,
++ IC_IDMAC_2_PP_HEIGHT_OFFSET = 20,
++
++ IC_IDMAC_3_PRPENC_WIDTH_MASK = 0x000003FF,
++ IC_IDMAC_3_PRPENC_WIDTH_OFFSET = 0,
++ IC_IDMAC_3_PRPVF_WIDTH_MASK = 0x000FFC00,
++ IC_IDMAC_3_PRPVF_WIDTH_OFFSET = 10,
++ IC_IDMAC_3_PP_WIDTH_MASK = 0x3FF00000,
++ IC_IDMAC_3_PP_WIDTH_OFFSET = 20,
++
++ CSI_SENS_CONF_DATA_FMT_SHIFT = 8,
++ CSI_SENS_CONF_DATA_FMT_MASK = 0x00000700,
++ CSI_SENS_CONF_DATA_FMT_RGB_YUV444 = 0L,
++ CSI_SENS_CONF_DATA_FMT_YUV422_YUYV = 1L,
++ CSI_SENS_CONF_DATA_FMT_YUV422_UYVY = 2L,
++ CSI_SENS_CONF_DATA_FMT_BAYER = 3L,
++ CSI_SENS_CONF_DATA_FMT_RGB565 = 4L,
++ CSI_SENS_CONF_DATA_FMT_RGB555 = 5L,
++ CSI_SENS_CONF_DATA_FMT_RGB444 = 6L,
++ CSI_SENS_CONF_DATA_FMT_JPEG = 7L,
++
++ CSI_SENS_CONF_VSYNC_POL_SHIFT = 0,
++ CSI_SENS_CONF_HSYNC_POL_SHIFT = 1,
++ CSI_SENS_CONF_DATA_POL_SHIFT = 2,
++ CSI_SENS_CONF_PIX_CLK_POL_SHIFT = 3,
++ CSI_SENS_CONF_SENS_PRTCL_MASK = 0x00000070L,
++ CSI_SENS_CONF_SENS_PRTCL_SHIFT = 4,
++ CSI_SENS_CONF_PACK_TIGHT_SHIFT = 7,
++ CSI_SENS_CONF_DATA_WIDTH_SHIFT = 11,
++ CSI_SENS_CONF_EXT_VSYNC_SHIFT = 15,
++ CSI_SENS_CONF_DIVRATIO_SHIFT = 16,
++
++ CSI_SENS_CONF_DIVRATIO_MASK = 0x00FF0000L,
++ CSI_SENS_CONF_DATA_DEST_SHIFT = 24,
++ CSI_SENS_CONF_DATA_DEST_MASK = 0x07000000L,
++ CSI_SENS_CONF_JPEG8_EN_SHIFT = 27,
++ CSI_SENS_CONF_JPEG_EN_SHIFT = 28,
++ CSI_SENS_CONF_FORCE_EOF_SHIFT = 29,
++ CSI_SENS_CONF_DATA_EN_POL_SHIFT = 31,
++
++ CSI_DATA_DEST_ISP = 1L,
++ CSI_DATA_DEST_IC = 2L,
++ CSI_DATA_DEST_IDMAC = 4L,
++
++ CSI_CCIR_ERR_DET_EN = 0x01000000L,
++ CSI_HORI_DOWNSIZE_EN = 0x80000000L,
++ CSI_VERT_DOWNSIZE_EN = 0x40000000L,
++ CSI_TEST_GEN_MODE_EN = 0x01000000L,
++
++ CSI_HSC_MASK = 0x1FFF0000,
++ CSI_HSC_SHIFT = 16,
++ CSI_VSC_MASK = 0x00000FFF,
++ CSI_VSC_SHIFT = 0,
++
++ CSI_TEST_GEN_R_MASK = 0x000000FFL,
++ CSI_TEST_GEN_R_SHIFT = 0,
++ CSI_TEST_GEN_G_MASK = 0x0000FF00L,
++ CSI_TEST_GEN_G_SHIFT = 8,
++ CSI_TEST_GEN_B_MASK = 0x00FF0000L,
++ CSI_TEST_GEN_B_SHIFT = 16,
++
++ CSI_MIPI_DI0_MASK = 0x000000FFL,
++ CSI_MIPI_DI0_SHIFT = 0,
++ CSI_MIPI_DI1_MASK = 0x0000FF00L,
++ CSI_MIPI_DI1_SHIFT = 8,
++ CSI_MIPI_DI2_MASK = 0x00FF0000L,
++ CSI_MIPI_DI2_SHIFT = 16,
++ CSI_MIPI_DI3_MASK = 0xFF000000L,
++ CSI_MIPI_DI3_SHIFT = 24,
++
++ CSI_MAX_RATIO_SKIP_ISP_MASK = 0x00070000L,
++ CSI_MAX_RATIO_SKIP_ISP_SHIFT = 16,
++ CSI_SKIP_ISP_MASK = 0x00F80000L,
++ CSI_SKIP_ISP_SHIFT = 19,
++ CSI_MAX_RATIO_SKIP_SMFC_MASK = 0x00000007L,
++ CSI_MAX_RATIO_SKIP_SMFC_SHIFT = 0,
++ CSI_SKIP_SMFC_MASK = 0x000000F8L,
++ CSI_SKIP_SMFC_SHIFT = 3,
++ CSI_ID_2_SKIP_MASK = 0x00000300L,
++ CSI_ID_2_SKIP_SHIFT = 8,
++
++ CSI_COLOR_FIRST_ROW_MASK = 0x00000002L,
++ CSI_COLOR_FIRST_COMP_MASK = 0x00000001L,
++
++ SMFC_MAP_CH0_MASK = 0x00000007L,
++ SMFC_MAP_CH0_SHIFT = 0,
++ SMFC_MAP_CH1_MASK = 0x00000038L,
++ SMFC_MAP_CH1_SHIFT = 3,
++ SMFC_MAP_CH2_MASK = 0x000001C0L,
++ SMFC_MAP_CH2_SHIFT = 6,
++ SMFC_MAP_CH3_MASK = 0x00000E00L,
++ SMFC_MAP_CH3_SHIFT = 9,
++
++ SMFC_WM0_SET_MASK = 0x00000007L,
++ SMFC_WM0_SET_SHIFT = 0,
++ SMFC_WM1_SET_MASK = 0x000001C0L,
++ SMFC_WM1_SET_SHIFT = 6,
++ SMFC_WM2_SET_MASK = 0x00070000L,
++ SMFC_WM2_SET_SHIFT = 16,
++ SMFC_WM3_SET_MASK = 0x01C00000L,
++ SMFC_WM3_SET_SHIFT = 22,
++
++ SMFC_WM0_CLR_MASK = 0x00000038L,
++ SMFC_WM0_CLR_SHIFT = 3,
++ SMFC_WM1_CLR_MASK = 0x00000E00L,
++ SMFC_WM1_CLR_SHIFT = 9,
++ SMFC_WM2_CLR_MASK = 0x00380000L,
++ SMFC_WM2_CLR_SHIFT = 19,
++ SMFC_WM3_CLR_MASK = 0x0E000000L,
++ SMFC_WM3_CLR_SHIFT = 25,
++
++ SMFC_BS0_MASK = 0x0000000FL,
++ SMFC_BS0_SHIFT = 0,
++ SMFC_BS1_MASK = 0x000000F0L,
++ SMFC_BS1_SHIFT = 4,
++ SMFC_BS2_MASK = 0x00000F00L,
++ SMFC_BS2_SHIFT = 8,
++ SMFC_BS3_MASK = 0x0000F000L,
++ SMFC_BS3_SHIFT = 12,
++
++ PF_CONF_TYPE_MASK = 0x00000007,
++ PF_CONF_TYPE_SHIFT = 0,
++ PF_CONF_PAUSE_EN = 0x00000010,
++ PF_CONF_RESET = 0x00008000,
++ PF_CONF_PAUSE_ROW_MASK = 0x00FF0000,
++ PF_CONF_PAUSE_ROW_SHIFT = 16,
++
++ DI_DW_GEN_ACCESS_SIZE_OFFSET = 24,
++ DI_DW_GEN_COMPONENT_SIZE_OFFSET = 16,
++
++ DI_GEN_DI_CLK_EXT = 0x100000,
++ DI_GEN_POLARITY_DISP_CLK = 0x00020000,
++ DI_GEN_POLARITY_1 = 0x00000001,
++ DI_GEN_POLARITY_2 = 0x00000002,
++ DI_GEN_POLARITY_3 = 0x00000004,
++ DI_GEN_POLARITY_4 = 0x00000008,
++ DI_GEN_POLARITY_5 = 0x00000010,
++ DI_GEN_POLARITY_6 = 0x00000020,
++ DI_GEN_POLARITY_7 = 0x00000040,
++ DI_GEN_POLARITY_8 = 0x00000080,
++
++ DI_POL_DRDY_DATA_POLARITY = 0x00000080,
++ DI_POL_DRDY_POLARITY_15 = 0x00000010,
++
++ DI_VSYNC_SEL_OFFSET = 13,
++
++ DC_WR_CH_CONF_FIELD_MODE = 0x00000200,
++ DC_WR_CH_CONF_PROG_TYPE_OFFSET = 5,
++ DC_WR_CH_CONF_PROG_TYPE_MASK = 0x000000E0,
++ DC_WR_CH_CONF_PROG_DI_ID = 0x00000004,
++ DC_WR_CH_CONF_PROG_DISP_ID_OFFSET = 3,
++ DC_WR_CH_CONF_PROG_DISP_ID_MASK = 0x00000018,
++
++ DC_UGDE_0_ODD_EN = 0x02000000,
++ DC_UGDE_0_ID_CODED_MASK = 0x00000007,
++ DC_UGDE_0_ID_CODED_OFFSET = 0,
++ DC_UGDE_0_EV_PRIORITY_MASK = 0x00000078,
++ DC_UGDE_0_EV_PRIORITY_OFFSET = 3,
++
++ DP_COM_CONF_FG_EN = 0x00000001,
++ DP_COM_CONF_GWSEL = 0x00000002,
++ DP_COM_CONF_GWAM = 0x00000004,
++ DP_COM_CONF_GWCKE = 0x00000008,
++ DP_COM_CONF_CSC_DEF_MASK = 0x00000300,
++ DP_COM_CONF_CSC_DEF_OFFSET = 8,
++ DP_COM_CONF_CSC_DEF_FG = 0x00000300,
++ DP_COM_CONF_CSC_DEF_BG = 0x00000200,
++ DP_COM_CONF_CSC_DEF_BOTH = 0x00000100,
++ DP_COM_CONF_GAMMA_EN = 0x00001000,
++ DP_COM_CONF_GAMMA_YUV_EN = 0x00002000,
++
++ DI_SER_CONF_LLA_SER_ACCESS = 0x00000020,
++ DI_SER_CONF_SERIAL_CLK_POL = 0x00000010,
++ DI_SER_CONF_SERIAL_DATA_POL = 0x00000008,
++ DI_SER_CONF_SERIAL_RS_POL = 0x00000004,
++ DI_SER_CONF_SERIAL_CS_POL = 0x00000002,
++ DI_SER_CONF_WAIT4SERIAL = 0x00000001,
++
++ VDI_C_CH_420 = 0x00000000,
++ VDI_C_CH_422 = 0x00000002,
++ VDI_C_MOT_SEL_FULL = 0x00000008,
++ VDI_C_MOT_SEL_LOW = 0x00000004,
++ VDI_C_MOT_SEL_MED = 0x00000000,
++ VDI_C_BURST_SIZE1_4 = 0x00000030,
++ VDI_C_BURST_SIZE2_4 = 0x00000300,
++ VDI_C_BURST_SIZE3_4 = 0x00003000,
++ VDI_C_BURST_SIZE_MASK = 0xF,
++ VDI_C_BURST_SIZE1_OFFSET = 4,
++ VDI_C_BURST_SIZE2_OFFSET = 8,
++ VDI_C_BURST_SIZE3_OFFSET = 12,
++ VDI_C_VWM1_SET_1 = 0x00000000,
++ VDI_C_VWM1_SET_2 = 0x00010000,
++ VDI_C_VWM1_CLR_2 = 0x00080000,
++ VDI_C_VWM3_SET_1 = 0x00000000,
++ VDI_C_VWM3_SET_2 = 0x00400000,
++ VDI_C_VWM3_CLR_2 = 0x02000000,
++ VDI_C_TOP_FIELD_MAN_1 = 0x40000000,
++ VDI_C_TOP_FIELD_AUTO_1 = 0x80000000,
++};
++
++enum di_pins {
++ DI_PIN11 = 0,
++ DI_PIN12 = 1,
++ DI_PIN13 = 2,
++ DI_PIN14 = 3,
++ DI_PIN15 = 4,
++ DI_PIN16 = 5,
++ DI_PIN17 = 6,
++ DI_PIN_CS = 7,
++
++ DI_PIN_SER_CLK = 0,
++ DI_PIN_SER_RS = 1,
++};
++
++enum di_sync_wave {
++ DI_SYNC_NONE = -1,
++ DI_SYNC_CLK = 0,
++ DI_SYNC_INT_HSYNC = 1,
++ DI_SYNC_HSYNC = 2,
++ DI_SYNC_VSYNC = 3,
++ DI_SYNC_DE = 5,
++};
++
++/* DC template opcodes */
++#define WROD(lf) (0x18 | (lf << 1))
++#define WRG (0x01)
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/Kconfig linux-3.14.40/drivers/mxc/ipu3/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/ipu3/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/Kconfig 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,2 @@
++config MXC_IPU_V3
++ bool
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/Makefile linux-3.14.40/drivers/mxc/ipu3/Makefile
+--- linux-3.14.40.orig/drivers/mxc/ipu3/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/Makefile 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_MXC_IPU_V3) = mxc_ipu.o
++
++mxc_ipu-objs := ipu_common.o ipu_ic.o ipu_disp.o ipu_capture.o ipu_device.o \
++ ipu_calc_stripes_sizes.o vdoa.o ipu_pixel_clk.o
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/vdoa.c linux-3.14.40/drivers/mxc/ipu3/vdoa.c
+--- linux-3.14.40.orig/drivers/mxc/ipu3/vdoa.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/vdoa.c 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,543 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ipu.h>
++#include <linux/genalloc.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include "vdoa.h"
++/*
++ * 6 bands (3 fields x double buffering) * (max width * 2 bytes) * 8 band
++ * lines = 6 * 1024 * 2 * 8 = 96 KiB, or 72 KiB at 1.5 bytes per pixel.
++ */
++#define MAX_VDOA_IRAM_SIZE (1024*96)
++#define VDOA_IRAM_SIZE (1024*72)
++
++#define VDOAC_BAND_HEIGHT_32LINES (32)
++#define VDOAC_BAND_HEIGHT_16LINES (16)
++#define VDOAC_BAND_HEIGHT_8LINES (8)
++#define VDOAC_THREE_FRAMES (0x1 << 2)
++#define VDOAC_SYNC_BAND_MODE (0x1 << 3)
++#define VDOAC_SCAN_ORDER_INTERLACED (0x1 << 4)
++#define VDOAC_PFS_YUYV (0x1 << 5)
++#define VDOAC_IPU_SEL_1 (0x1 << 6)
++#define VDOAFP_FH_MASK (0x1FFF)
++#define VDOAFP_FH_SHIFT (16)
++#define VDOAFP_FW_MASK (0x3FFF)
++#define VDOAFP_FW_SHIFT (0)
++#define VDOASL_VSLY_MASK (0x3FFF)
++#define VDOASL_VSLY_SHIFT (16)
++#define VDOASL_ISLY_MASK (0x7FFF)
++#define VDOASL_ISLY_SHIFT (0)
++#define VDOASRR_START_XFER (0x2)
++#define VDOASRR_SWRST (0x1)
++#define VDOAIEIST_TRANSFER_ERR (0x2)
++#define VDOAIEIST_TRANSFER_END (0x1)
++
++#define VDOAC (0x0) /* Control Register */
++#define VDOASRR (0x4) /* Start and Reset Register */
++#define VDOAIE (0x8) /* Interrupt Enable Register */
++#define VDOAIST (0xc) /* Interrupt Status Register */
++#define VDOAFP (0x10) /* Frame Parameters Register */
++#define VDOAIEBA00 (0x14) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA01 (0x18) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA02 (0x1c) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA10 (0x20) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA11 (0x24) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA12 (0x28) /* External Buffer n Frame m Address Register */
++#define VDOASL (0x2c) /* IPU Stride Line Register */
++#define VDOAIUBO (0x30) /* IPU Chroma Buffer Offset Register */
++#define VDOAVEBA0 (0x34) /* External Buffer m Address Register */
++#define VDOAVEBA1 (0x38) /* External Buffer m Address Register */
++#define VDOAVEBA2 (0x3c) /* External Buffer m Address Register */
++#define VDOAVUBO (0x40) /* VPU Chroma Buffer Offset */
++#define VDOASR (0x44) /* Status Register */
++#define VDOATD (0x48) /* Test Debug Register */
++
++
++enum {
++ VDOA_INIT = 0x1,
++ VDOA_GET = 0x2,
++ VDOA_SETUP = 0x4,
++ VDOA_GET_OBUF = 0x8,
++ VDOA_START = 0x10,
++ VDOA_INIRQ = 0x20,
++ VDOA_STOP = 0x40,
++ VDOA_PUT = VDOA_INIT,
++};
++
++enum {
++ VDOA_NULL = 0,
++ VDOA_FRAME = 1,
++ VDOA_PREV_FIELD = 2,
++ VDOA_CURR_FIELD = 3,
++ VDOA_NEXT_FIELD = 4,
++};
++
++#define CHECK_STATE(expect, retcode) \
++do { \
++ if (!((expect) & vdoa->state)) { \
++ dev_err(vdoa->dev, "ERR: %s state:0x%x, expect:0x%x.\n",\
++ __func__, vdoa->state, (expect)); \
++ retcode; \
++ } \
++} while (0)
++
++#define CHECK_NULL_PTR(ptr) \
++do { \
++ pr_debug("vdoa_ptr:0x%p in %s state:0x%x.\n", \
++ vdoa, __func__, vdoa->state); \
++ if (NULL == (ptr)) { \
++ pr_err("ERR vdoa: %s state:0x%x null ptr.\n", \
++ __func__, vdoa->state); \
++ } \
++} while (0)
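++
++/*
++ * The two macros above guard the vdoa_* entry points against calls made
++ * in an unexpected driver state (e.g. vdoa_setup() below accepts
++ * VDOA_GET, VDOA_GET_OBUF or VDOA_STOP).
++ */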
++
++struct vdoa_info {
++ int state;
++ struct device *dev;
++ struct clk *vdoa_clk;
++ void __iomem *reg_base;
++ struct gen_pool *iram_pool;
++ unsigned long iram_base;
++ unsigned long iram_paddr;
++ int irq;
++ int field;
++ struct completion comp;
++};
++
++static struct vdoa_info *g_vdoa;
++static unsigned long iram_size;
++static DEFINE_MUTEX(vdoa_lock);
++
++static inline void vdoa_read_register(struct vdoa_info *vdoa,
++ u32 reg, u32 *val)
++{
++ *val = ioread32(vdoa->reg_base + reg);
++ dev_dbg(vdoa->dev, "read_reg:0x%02x, val:0x%08x.\n", reg, *val);
++}
++
++static inline void vdoa_write_register(struct vdoa_info *vdoa,
++ u32 reg, u32 val)
++{
++ iowrite32(val, vdoa->reg_base + reg);
++ dev_dbg(vdoa->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n", reg, val);
++}
++
++static void dump_registers(struct vdoa_info *vdoa)
++{
++ int i;
++ u32 data;
++
++ for (i = VDOAC; i < VDOATD; i += 4)
++ vdoa_read_register(vdoa, i, &data);
++}
++
++int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params)
++{
++ int band_size;
++ int total_band_size = 0;
++ int ipu_stride;
++ u32 data;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET | VDOA_GET_OBUF | VDOA_STOP, return -EINVAL);
++ if (VDOA_GET == vdoa->state) {
++ dev_dbg(vdoa->dev, "w:%d, h:%d.\n",
++ params->width, params->height);
++ data = (params->band_lines == VDOAC_BAND_HEIGHT_32LINES) ? 2 :
++ ((params->band_lines == VDOAC_BAND_HEIGHT_16LINES) ?
++ 1 : 0);
++ data |= params->scan_order ? VDOAC_SCAN_ORDER_INTERLACED : 0;
++ data |= params->band_mode ? VDOAC_SYNC_BAND_MODE : 0;
++ data |= params->pfs ? VDOAC_PFS_YUYV : 0;
++ data |= params->ipu_num ? VDOAC_IPU_SEL_1 : 0;
++ vdoa_write_register(vdoa, VDOAC, data);
++
++ data = ((params->width & VDOAFP_FW_MASK) << VDOAFP_FW_SHIFT) |
++ ((params->height & VDOAFP_FH_MASK) << VDOAFP_FH_SHIFT);
++ vdoa_write_register(vdoa, VDOAFP, data);
++
++ ipu_stride = params->pfs ? params->width << 1 : params->width;
++ data = ((params->vpu_stride & VDOASL_VSLY_MASK) <<
++ VDOASL_VSLY_SHIFT) |
++ ((ipu_stride & VDOASL_ISLY_MASK) << VDOASL_ISLY_SHIFT);
++ vdoa_write_register(vdoa, VDOASL, data);
++
++ dev_dbg(vdoa->dev, "band_mode:%d, band_line:%d, base:0x%lx.\n",
++ params->band_mode, params->band_lines, vdoa->iram_paddr);
++ }
++ /*
++ * band size = (luma_per_line + chroma_per_line) * bandLines
++ * = width * (3/2 or 2) * bandLines
++ * double buffer mode used.
++ */
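++ /*
++ * Illustrative arithmetic (hypothetical figures, not from the
++ * original code): for a 1920-pixel-wide NV12 frame (pfs == 0)
++ * with band_lines == 32, band_size = (1920 * 3 / 2) * 32 =
++ * 92160 bytes; an interlaced setup then needs 6 * band_size =
++ * 552960 bytes (540 KiB) of IRAM to pass the size check below.
++ */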
++ if (params->pfs)
++ band_size = (params->width << 1) * params->band_lines;
++ else
++ band_size = ((params->width * 3) >> 1) *
++ params->band_lines;
++ if (params->interlaced) {
++ total_band_size = 6 * band_size; /* 3 frames*double buffer */
++ if (iram_size < total_band_size) {
++ dev_err(vdoa->dev, "iram_size:0x%lx is smaller than "
++ "request:0x%x!\n", iram_size, total_band_size);
++ return -EINVAL;
++ }
++ if (params->vfield_buf.prev_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA00,
++ vdoa->iram_paddr);
++ vdoa_write_register(vdoa, VDOAIEBA10,
++ vdoa->iram_paddr + band_size);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA00,
++ params->ieba0);
++ vdoa_write_register(vdoa, VDOAVEBA0,
++ params->vfield_buf.prev_veba);
++ vdoa->field = VDOA_PREV_FIELD;
++ }
++ if (params->vfield_buf.cur_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA01,
++ vdoa->iram_paddr + band_size * 2);
++ vdoa_write_register(vdoa, VDOAIEBA11,
++ vdoa->iram_paddr + band_size * 3);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA01,
++ params->ieba1);
++ vdoa_write_register(vdoa, VDOAVEBA1,
++ params->vfield_buf.cur_veba);
++ vdoa->field = VDOA_CURR_FIELD;
++ }
++ if (params->vfield_buf.next_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA02,
++ vdoa->iram_paddr + band_size * 4);
++ vdoa_write_register(vdoa, VDOAIEBA12,
++ vdoa->iram_paddr + band_size * 5);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA02,
++ params->ieba2);
++ vdoa_write_register(vdoa, VDOAVEBA2,
++ params->vfield_buf.next_veba);
++ vdoa->field = VDOA_NEXT_FIELD;
++ vdoa_read_register(vdoa, VDOAC, &data);
++ data |= VDOAC_THREE_FRAMES;
++ vdoa_write_register(vdoa, VDOAC, data);
++ }
++
++ if (!params->pfs)
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->band_lines);
++ vdoa_write_register(vdoa, VDOAVUBO,
++ params->vfield_buf.vubo);
++ dev_dbg(vdoa->dev, "total band_size:0x%x.\n", band_size*6);
++ } else if (params->band_mode) {
++ /* used for progressive frame resize on PrP channel */
++ BUG(); /* currently not supported */
++ /* progressive frame: band mode */
++ vdoa_write_register(vdoa, VDOAIEBA00, vdoa->iram_paddr);
++ vdoa_write_register(vdoa, VDOAIEBA10,
++ vdoa->iram_paddr + band_size);
++ if (!params->pfs)
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->band_lines);
++ dev_dbg(vdoa->dev, "total band_size:0x%x\n", band_size*2);
++ } else {
++ /* progressive frame: mem->mem, non-band mode */
++ vdoa->field = VDOA_FRAME;
++ vdoa_write_register(vdoa, VDOAVEBA0, params->vframe_buf.veba);
++ vdoa_write_register(vdoa, VDOAVUBO, params->vframe_buf.vubo);
++ vdoa_write_register(vdoa, VDOAIEBA00, params->ieba0);
++ if (!params->pfs)
++ /* note: iubo is relative value, based on ieba0 */
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->height);
++ }
++ vdoa->state = VDOA_SETUP;
++ return 0;
++}
++
++void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf)
++{
++ u32 data;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_SETUP, return);
++ vdoa->state = VDOA_GET_OBUF;
++ memset(buf, 0, sizeof(*buf));
++
++ vdoa_read_register(vdoa, VDOAC, &data);
++ switch (vdoa->field) {
++ case VDOA_FRAME:
++ case VDOA_PREV_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA00, &buf->ieba0);
++ if (data & VDOAC_SYNC_BAND_MODE)
++ vdoa_read_register(vdoa, VDOAIEBA10, &buf->ieba1);
++ break;
++ case VDOA_CURR_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA01, &buf->ieba0);
++ vdoa_read_register(vdoa, VDOAIEBA11, &buf->ieba1);
++ break;
++ case VDOA_NEXT_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA02, &buf->ieba0);
++ vdoa_read_register(vdoa, VDOAIEBA12, &buf->ieba1);
++ break;
++ default:
++ BUG();
++ break;
++ }
++ if (!(data & VDOAC_PFS_YUYV))
++ vdoa_read_register(vdoa, VDOAIUBO, &buf->iubo);
++}
++
++int vdoa_start(vdoa_handle_t handle, int timeout_ms)
++{
++ int ret;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET_OBUF, return -EINVAL);
++ vdoa->state = VDOA_START;
++ init_completion(&vdoa->comp);
++ vdoa_write_register(vdoa, VDOAIST,
++ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
++ vdoa_write_register(vdoa, VDOAIE,
++ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
++
++ enable_irq(vdoa->irq);
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_START_XFER);
++ dump_registers(vdoa);
++
++ ret = wait_for_completion_timeout(&vdoa->comp,
++ msecs_to_jiffies(timeout_ms));
++
++ return ret > 0 ? 0 : -ETIMEDOUT;
++}
++
++void vdoa_stop(vdoa_handle_t handle)
++{
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET | VDOA_START | VDOA_INIRQ, return);
++ vdoa->state = VDOA_STOP;
++
++ disable_irq(vdoa->irq);
++
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
++}
++
++void vdoa_get_handle(vdoa_handle_t *handle)
++{
++ struct vdoa_info *vdoa = g_vdoa;
++
++ CHECK_NULL_PTR(handle);
++ *handle = (vdoa_handle_t *)NULL;
++ CHECK_STATE(VDOA_INIT, return);
++ mutex_lock(&vdoa_lock);
++ clk_prepare_enable(vdoa->vdoa_clk);
++ vdoa->state = VDOA_GET;
++ vdoa->field = VDOA_NULL;
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
++
++ *handle = (vdoa_handle_t *)vdoa;
++}
++
++void vdoa_put_handle(vdoa_handle_t *handle)
++{
++ struct vdoa_info *vdoa = (struct vdoa_info *)(*handle);
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_STOP, return);
++ if (vdoa != g_vdoa)
++ BUG();
++
++ clk_disable_unprepare(vdoa->vdoa_clk);
++ vdoa->state = VDOA_PUT;
++ *handle = (vdoa_handle_t *)NULL;
++ mutex_unlock(&vdoa_lock);
++}
++
++static irqreturn_t vdoa_irq_handler(int irq, void *data)
++{
++ u32 status, mask, val;
++ struct vdoa_info *vdoa = data;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_START, return IRQ_HANDLED);
++ vdoa->state = VDOA_INIRQ;
++ vdoa_read_register(vdoa, VDOAIST, &status);
++ vdoa_read_register(vdoa, VDOAIE, &mask);
++ val = status & mask;
++ vdoa_write_register(vdoa, VDOAIST, val);
++ if (VDOAIEIST_TRANSFER_ERR & val)
++ dev_err(vdoa->dev, "vdoa Transfer err irq!\n");
++ if (VDOAIEIST_TRANSFER_END & val)
++ dev_dbg(vdoa->dev, "vdoa Transfer end irq!\n");
++ if (0 == val) {
++ dev_err(vdoa->dev, "vdoa unknown irq!\n");
++ BUG();
++ }
++
++ complete(&vdoa->comp);
++ return IRQ_HANDLED;
++}
++
++/* IRAM Size in Kbytes, example:vdoa_iram_size=64, 64KBytes */
++static int __init vdoa_iram_size_setup(char *options)
++{
++ int ret;
++
++ ret = strict_strtoul(options, 0, &iram_size);
++ if (ret)
++ iram_size = 0;
++ else
++ iram_size *= SZ_1K;
++
++ return 1;
++}
++__setup("vdoa_iram_size=", vdoa_iram_size_setup);
++
++static const struct of_device_id imx_vdoa_dt_ids[] = {
++ { .compatible = "fsl,imx6q-vdoa", },
++ { /* sentinel */ }
++};
++
++static int vdoa_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct vdoa_info *vdoa;
++ struct resource *res;
++ struct resource *res_irq;
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "can't get device resources\n");
++ return -ENOENT;
++ }
++
++ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (!res_irq) {
++ dev_err(dev, "failed to get irq resource\n");
++ return -ENOENT;
++ }
++
++ vdoa = devm_kzalloc(dev, sizeof(struct vdoa_info), GFP_KERNEL);
++ if (!vdoa)
++ return -ENOMEM;
++ vdoa->dev = dev;
++
++ vdoa->reg_base = devm_request_and_ioremap(&pdev->dev, res);
++ if (!vdoa->reg_base)
++ return -EBUSY;
++
++ vdoa->irq = res_irq->start;
++ ret = devm_request_irq(dev, vdoa->irq, vdoa_irq_handler, 0,
++ "vdoa", vdoa);
++ if (ret) {
++ dev_err(dev, "can't claim irq %d\n", vdoa->irq);
++ return ret;
++ }
++ disable_irq(vdoa->irq);
++
++ vdoa->vdoa_clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(vdoa->vdoa_clk)) {
++ dev_err(dev, "failed to get vdoa_clk\n");
++ return PTR_ERR(vdoa->vdoa_clk);
++ }
++
++ vdoa->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!vdoa->iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ return -ENOMEM;
++ }
++
++ if ((iram_size == 0) || (iram_size > MAX_VDOA_IRAM_SIZE))
++ iram_size = VDOA_IRAM_SIZE;
++
++ vdoa->iram_base = gen_pool_alloc(vdoa->iram_pool, iram_size);
++ if (!vdoa->iram_base) {
++ dev_err(&pdev->dev, "unable to alloc iram\n");
++ return -ENOMEM;
++ }
++
++ vdoa->iram_paddr = gen_pool_virt_to_phys(vdoa->iram_pool,
++ vdoa->iram_base);
++
++ dev_dbg(dev, "iram_base:0x%lx,iram_paddr:0x%lx,size:0x%lx\n",
++ vdoa->iram_base, vdoa->iram_paddr, iram_size);
++
++ vdoa->state = VDOA_INIT;
++ dev_set_drvdata(dev, vdoa);
++ g_vdoa = vdoa;
++ dev_info(dev, "i.MX Video Data Order Adapter(VDOA) driver probed\n");
++ return 0;
++}
++
++static int vdoa_remove(struct platform_device *pdev)
++{
++ struct vdoa_info *vdoa = dev_get_drvdata(&pdev->dev);
++
++ gen_pool_free(vdoa->iram_pool, vdoa->iram_base, iram_size);
++ kfree(vdoa);
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver vdoa_driver = {
++ .driver = {
++ .name = "mxc_vdoa",
++ .of_match_table = imx_vdoa_dt_ids,
++ },
++ .probe = vdoa_probe,
++ .remove = vdoa_remove,
++};
++
++static int __init vdoa_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&vdoa_driver);
++ if (err) {
++ pr_err("vdoa_driver register failed\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++static void __exit vdoa_cleanup(void)
++{
++ platform_driver_unregister(&vdoa_driver);
++}
++
++module_init(vdoa_init);
++module_exit(vdoa_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX Video Data Order Adapter(VDOA) driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/mxc/ipu3/vdoa.h linux-3.14.40/drivers/mxc/ipu3/vdoa.h
+--- linux-3.14.40.orig/drivers/mxc/ipu3/vdoa.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/ipu3/vdoa.h 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,69 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __VDOA_H__
++#define __VDOA_H__
++
++#define VDOA_PFS_YUYV (1)
++#define VDOA_PFS_NV12 (0)
++
++
++struct vfield_buf {
++ u32 prev_veba;
++ u32 cur_veba;
++ u32 next_veba;
++ u32 vubo;
++};
++
++struct vframe_buf {
++ u32 veba;
++ u32 vubo;
++};
++
++struct vdoa_params {
++ u32 width;
++ u32 height;
++ int vpu_stride;
++ int interlaced;
++ int scan_order;
++ int ipu_num;
++ int band_lines;
++ int band_mode;
++ int pfs;
++ u32 ieba0;
++ u32 ieba1;
++ u32 ieba2;
++ struct vframe_buf vframe_buf;
++ struct vfield_buf vfield_buf;
++};
++struct vdoa_ipu_buf {
++ u32 ieba0;
++ u32 ieba1;
++ u32 iubo;
++};
++
++struct vdoa_info;
++typedef void *vdoa_handle_t;
++
++int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params);
++void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf);
++int vdoa_start(vdoa_handle_t handle, int timeout_ms);
++void vdoa_stop(vdoa_handle_t handle);
++void vdoa_get_handle(vdoa_handle_t *handle);
++void vdoa_put_handle(vdoa_handle_t *handle);
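++
++/*
++ * Illustrative call sequence, inferred from the state checks in
++ * vdoa.c rather than from any original documentation (the 50 ms
++ * timeout is an arbitrary example value):
++ *
++ *   vdoa_handle_t handle;
++ *   struct vdoa_params params = { ... };
++ *   struct vdoa_ipu_buf buf;
++ *
++ *   vdoa_get_handle(&handle);
++ *   vdoa_setup(handle, &params);
++ *   vdoa_get_output_buf(handle, &buf);
++ *   vdoa_start(handle, 50);
++ *   vdoa_stop(handle);
++ *   vdoa_put_handle(&handle);
++ */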
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/Kconfig linux-3.14.40/drivers/mxc/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/Kconfig 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,24 @@
++# drivers/mxc/Kconfig
++
++if ARCH_MXC
++
++menu "MXC support drivers"
++
++config MXC_IPU
++ bool "Image Processing Unit Driver"
++ select MXC_IPU_V3
++ help
++ If you plan to use the Image Processing unit, say
++ Y here. IPU is needed by Framebuffer and V4L2 drivers.
++
++source "drivers/mxc/gpu-viv/Kconfig"
++source "drivers/mxc/ipu3/Kconfig"
++source "drivers/mxc/asrc/Kconfig"
++source "drivers/mxc/vpu/Kconfig"
++source "drivers/mxc/hdmi-cec/Kconfig"
++source "drivers/mxc/mipi/Kconfig"
++source "drivers/mxc/mlb/Kconfig"
++
++endmenu
++
++endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/Makefile linux-3.14.40/drivers/mxc/Makefile
+--- linux-3.14.40.orig/drivers/mxc/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/Makefile 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,11 @@
++ifeq ($(CONFIG_MXC_GPU_VIV_V5),y)
++obj-$(CONFIG_MXC_GPU_VIV) += gpu-viv/v5/
++else
++obj-$(CONFIG_MXC_GPU_VIV) += gpu-viv/v4/
++endif
++obj-$(CONFIG_MXC_IPU_V3) += ipu3/
++obj-$(CONFIG_MXC_ASRC) += asrc/
++obj-$(CONFIG_MXC_VPU) += vpu/
++obj-$(CONFIG_MXC_HDMI_CEC) += hdmi-cec/
++obj-$(CONFIG_MXC_MIPI_CSI2) += mipi/
++obj-$(CONFIG_MXC_MLB) += mlb/
+diff -Nur linux-3.14.40.orig/drivers/mxc/mipi/Kconfig linux-3.14.40/drivers/mxc/mipi/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/mipi/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mipi/Kconfig 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,14 @@
++#
++# MIPI configuration
++#
++
++menu "MXC MIPI Support"
++
++config MXC_MIPI_CSI2
++ tristate "MIPI CSI2 support"
++ depends on SOC_IMX6Q
++ default n
++ ---help---
++ Say Y to get the MIPI CSI2 support.
++
++endmenu
+diff -Nur linux-3.14.40.orig/drivers/mxc/mipi/Makefile linux-3.14.40/drivers/mxc/mipi/Makefile
+--- linux-3.14.40.orig/drivers/mxc/mipi/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mipi/Makefile 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,4 @@
++#
++# Makefile for the mipi interface driver
++#
++obj-$(CONFIG_MXC_MIPI_CSI2) += mxc_mipi_csi2.o
+diff -Nur linux-3.14.40.orig/drivers/mxc/mipi/mxc_mipi_csi2.c linux-3.14.40/drivers/mxc/mipi/mxc_mipi_csi2.c
+--- linux-3.14.40.orig/drivers/mxc/mipi/mxc_mipi_csi2.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mipi/mxc_mipi_csi2.c 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,540 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/slab.h>
++#include <linux/of.h>
++
++#include <linux/mipi_csi2.h>
++
++#include "mxc_mipi_csi2.h"
++
++static struct mipi_csi2_info *gmipi_csi2;
++
++void _mipi_csi2_lock(struct mipi_csi2_info *info)
++{
++ if (!in_irq() && !in_softirq())
++ mutex_lock(&info->mutex_lock);
++}
++
++void _mipi_csi2_unlock(struct mipi_csi2_info *info)
++{
++ if (!in_irq() && !in_softirq())
++ mutex_unlock(&info->mutex_lock);
++}
++
++static inline void mipi_csi2_write(struct mipi_csi2_info *info,
++ unsigned value, unsigned offset)
++{
++ writel(value, info->mipi_csi2_base + offset);
++}
++
++static inline unsigned int mipi_csi2_read(struct mipi_csi2_info *info,
++ unsigned offset)
++{
++ return readl(info->mipi_csi2_base + offset);
++}
++
++/*!
++ * This function is called to enable the mipi csi2 interface.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns the updated enable status
++ */
++bool mipi_csi2_enable(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++
++ if (!info->mipi_en) {
++ info->mipi_en = true;
++ clk_prepare_enable(info->cfg_clk);
++ clk_prepare_enable(info->dphy_clk);
++ } else
++ mipi_dbg("mipi csi2 already enabled!\n");
++
++ status = info->mipi_en;
++
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_enable);
++
++/*!
++ * This function is called to disable the mipi csi2 interface.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns the updated enable status
++ */
++bool mipi_csi2_disable(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++
++ if (info->mipi_en) {
++ info->mipi_en = false;
++ clk_disable_unprepare(info->dphy_clk);
++ clk_disable_unprepare(info->cfg_clk);
++ } else
++ mipi_dbg("mipi csi2 already disabled!\n");
++
++ status = info->mipi_en;
++
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_disable);
++
++/*!
++ * This function is called to get mipi csi2 disable/enable status.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns mipi csi2 status
++ */
++bool mipi_csi2_get_status(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++ status = info->mipi_en;
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_get_status);
++
++/*!
++ * This function is called to set mipi lanes.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns the value that was set
++ */
++unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info)
++{
++ unsigned int lanes;
++
++ _mipi_csi2_lock(info);
++ mipi_csi2_write(info, info->lanes - 1, MIPI_CSI2_N_LANES);
++ lanes = mipi_csi2_read(info, MIPI_CSI2_N_LANES);
++ _mipi_csi2_unlock(info);
++
++ return lanes;
++}
++EXPORT_SYMBOL(mipi_csi2_set_lanes);
++
++/*!
++ * This function is called to set mipi data type.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns the data type that was set
++ */
++unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
++ unsigned int datatype)
++{
++ unsigned int dtype;
++
++ _mipi_csi2_lock(info);
++ info->datatype = datatype;
++ dtype = info->datatype;
++ _mipi_csi2_unlock(info);
++
++ return dtype;
++}
++EXPORT_SYMBOL(mipi_csi2_set_datatype);
++
++/*!
++ * This function is called to get mipi data type.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns mipi data type
++ */
++unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info)
++{
++ unsigned int dtype;
++
++ _mipi_csi2_lock(info);
++ dtype = info->datatype;
++ _mipi_csi2_unlock(info);
++
++ return dtype;
++}
++EXPORT_SYMBOL(mipi_csi2_get_datatype);
++
++/*!
++ * This function is called to get mipi csi2 dphy status.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns dphy status
++ */
++unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info)
++{
++ unsigned int status;
++
++ _mipi_csi2_lock(info);
++ status = mipi_csi2_read(info, MIPI_CSI2_PHY_STATE);
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_dphy_status);
++
++/*!
++ * This function is called to get mipi csi2 error1 status.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns error1 value
++ */
++unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info)
++{
++ unsigned int err1;
++
++ _mipi_csi2_lock(info);
++ err1 = mipi_csi2_read(info, MIPI_CSI2_ERR1);
++ _mipi_csi2_unlock(info);
++
++ return err1;
++}
++EXPORT_SYMBOL(mipi_csi2_get_error1);
++
++/*!
++ * This function is called to get mipi csi2 error2 status.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns error2 value
++ */
++unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info)
++{
++ unsigned int err2;
++
++ _mipi_csi2_lock(info);
++ err2 = mipi_csi2_read(info, MIPI_CSI2_ERR2);
++ _mipi_csi2_unlock(info);
++
++ return err2;
++}
++EXPORT_SYMBOL(mipi_csi2_get_error2);
++
++/*!
++ * This function is called to enable mipi to ipu pixel clock.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns 0 on success or negative error code on fail
++ */
++int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info)
++{
++ return clk_prepare_enable(info->pixel_clk);
++}
++EXPORT_SYMBOL(mipi_csi2_pixelclk_enable);
++
++/*!
++ * This function is called to disable mipi to ipu pixel clock.
++ *
++ * @param info mipi csi2 handle
++ * @return None
++ */
++void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info)
++{
++ clk_disable_unprepare(info->pixel_clk);
++}
++EXPORT_SYMBOL(mipi_csi2_pixelclk_disable);
++
++/*!
++ * This function is called to power on mipi csi2.
++ *
++ * @param info mipi csi2 handle
++ * @return Returns 0 on success or negative error code on fail
++ */
++int mipi_csi2_reset(struct mipi_csi2_info *info)
++{
++ _mipi_csi2_lock(info);
++
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_PHY_SHUTDOWNZ);
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_DPHY_RSTZ);
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_CSI2_RESETN);
++
++ mipi_csi2_write(info, 0x00000001, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000002, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00010044, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000014, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000002, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_PHY_SHUTDOWNZ);
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_DPHY_RSTZ);
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_CSI2_RESETN);
++
++ _mipi_csi2_unlock(info);
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_csi2_reset);
++
++/*!
++ * This function is called to get mipi csi2 info.
++ *
++ * @return Returns mipi csi2 info struct pointer
++ */
++struct mipi_csi2_info *mipi_csi2_get_info(void)
++{
++ return gmipi_csi2;
++}
++EXPORT_SYMBOL(mipi_csi2_get_info);
++
++/*!
++ * This function is called to get mipi csi2 bind ipu num.
++ *
++ * @return Returns mipi csi2 bind ipu num
++ */
++int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info)
++{
++ int ipu_id;
++
++ _mipi_csi2_lock(info);
++ ipu_id = info->ipu_id;
++ _mipi_csi2_unlock(info);
++
++ return ipu_id;
++}
++EXPORT_SYMBOL(mipi_csi2_get_bind_ipu);
++
++/*!
++ * This function is called to get mipi csi2 bind csi num.
++ *
++ * @return Returns mipi csi2 bind csi num
++ */
++unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info)
++{
++ unsigned int csi_id;
++
++ _mipi_csi2_lock(info);
++ csi_id = info->csi_id;
++ _mipi_csi2_unlock(info);
++
++ return csi_id;
++}
++EXPORT_SYMBOL(mipi_csi2_get_bind_csi);
++
++/*!
++ * This function is called to get mipi csi2 virtual channel.
++ *
++ * @return Returns mipi csi2 virtual channel num
++ */
++unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info)
++{
++ unsigned int v_channel;
++
++ _mipi_csi2_lock(info);
++ v_channel = info->v_channel;
++ _mipi_csi2_unlock(info);
++
++ return v_channel;
++}
++EXPORT_SYMBOL(mipi_csi2_get_virtual_channel);
++
++/**
++ * This function is called by the driver framework to initialize the MIPI CSI2
++ * device.
++ *
++ * @param pdev The device structure for the MIPI CSI2 passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int mipi_csi2_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++ struct resource *res;
++ u32 mipi_csi2_dphy_ver;
++ int ret;
++
++ gmipi_csi2 = kmalloc(sizeof(struct mipi_csi2_info), GFP_KERNEL);
++ if (!gmipi_csi2) {
++ ret = -ENOMEM;
++ goto alloc_failed;
++ }
++
++ ret = of_property_read_u32(np, "ipu_id", &(gmipi_csi2->ipu_id));
++ if (ret) {
++ dev_err(&pdev->dev, "ipu_id missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "csi_id", &(gmipi_csi2->csi_id));
++ if (ret) {
++ dev_err(&pdev->dev, "csi_id missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "v_channel", &(gmipi_csi2->v_channel));
++ if (ret) {
++ dev_err(&pdev->dev, "v_channel missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "lanes", &(gmipi_csi2->lanes));
++ if (ret) {
++ dev_err(&pdev->dev, "lanes missing or invalid\n");
++ goto err;
++ }
++
++ if ((gmipi_csi2->ipu_id < 0) || (gmipi_csi2->ipu_id > 1) ||
++ (gmipi_csi2->csi_id > 1) || (gmipi_csi2->v_channel > 3) ||
++ (gmipi_csi2->lanes > 4)) {
++ dev_err(&pdev->dev, "invalid param for mipi csi2!\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* initialize mutex */
++ mutex_init(&gmipi_csi2->mutex_lock);
++
++ /* get mipi csi2 information */
++ gmipi_csi2->pdev = pdev;
++ gmipi_csi2->mipi_en = false;
++
++ gmipi_csi2->cfg_clk = devm_clk_get(dev, "cfg_clk");
++ if (IS_ERR(gmipi_csi2->cfg_clk)) {
++ dev_err(&pdev->dev, "failed to get cfg_clk\n");
++ ret = PTR_ERR(gmipi_csi2->cfg_clk);
++ goto err;
++ }
++
++ /* get mipi dphy clk */
++ gmipi_csi2->dphy_clk = devm_clk_get(dev, "dphy_clk");
++ if (IS_ERR(gmipi_csi2->dphy_clk)) {
++ dev_err(&pdev->dev, "failed to get dphy pll_ref_clk\n");
++ ret = PTR_ERR(gmipi_csi2->dphy_clk);
++ goto err;
++ }
++
++ /* get mipi to ipu pixel clk */
++ gmipi_csi2->pixel_clk = devm_clk_get(dev, "pixel_clk");
++ if (IS_ERR(gmipi_csi2->pixel_clk)) {
++ dev_err(&pdev->dev, "failed to get mipi pixel clk\n");
++ ret = PTR_ERR(gmipi_csi2->pixel_clk);
++ goto err;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ ret = -ENODEV;
++ goto err;
++ }
++
++ /* mipi register mapping */
++ gmipi_csi2->mipi_csi2_base = ioremap(res->start, PAGE_SIZE);
++ if (!gmipi_csi2->mipi_csi2_base) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ /* mipi dphy clk enable for register access */
++ clk_prepare_enable(gmipi_csi2->dphy_clk);
++ /* get mipi csi2 dphy version */
++ mipi_csi2_dphy_ver = mipi_csi2_read(gmipi_csi2, MIPI_CSI2_VERSION);
++
++ clk_disable_unprepare(gmipi_csi2->dphy_clk);
++
++ platform_set_drvdata(pdev, gmipi_csi2);
++
++ dev_info(&pdev->dev, "i.MX MIPI CSI2 driver probed\n");
++ dev_info(&pdev->dev, "i.MX MIPI CSI2 dphy version is 0x%x\n",
++ mipi_csi2_dphy_ver);
++
++ return 0;
++
++err:
++ kfree(gmipi_csi2);
++alloc_failed:
++ dev_err(&pdev->dev, "i.MX MIPI CSI2 driver probed - error\n");
++ return ret;
++}
++
++static int mipi_csi2_remove(struct platform_device *pdev)
++{
++ /* unmapping mipi register */
++ iounmap(gmipi_csi2->mipi_csi2_base);
++
++ kfree(gmipi_csi2);
++
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id imx_mipi_csi2_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mipi-csi2", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mipi_csi2_driver = {
++ .driver = {
++ .name = "mxc_mipi_csi2",
++ .of_match_table = imx_mipi_csi2_dt_ids,
++ },
++ .probe = mipi_csi2_probe,
++ .remove = mipi_csi2_remove,
++};
++
++static int __init mipi_csi2_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&mipi_csi2_driver);
++ if (err) {
++ pr_err("mipi_csi2_driver register failed\n");
++ return -ENODEV;
++ }
++
++ pr_info("MIPI CSI2 driver module loaded\n");
++
++ return 0;
++}
++
++static void __exit mipi_csi2_cleanup(void)
++{
++ platform_driver_unregister(&mipi_csi2_driver);
++}
++
++subsys_initcall(mipi_csi2_init);
++module_exit(mipi_csi2_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX MIPI CSI2 driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/mxc/mipi/mxc_mipi_csi2.h linux-3.14.40/drivers/mxc/mipi/mxc_mipi_csi2.h
+--- linux-3.14.40.orig/drivers/mxc/mipi/mxc_mipi_csi2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mipi/mxc_mipi_csi2.h 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MXC_MIPI_CSI2_H__
++#define __MXC_MIPI_CSI2_H__
++
++#ifdef DEBUG
++#define mipi_dbg(fmt, ...) \
++ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#else
++#define mipi_dbg(fmt, ...)
++#endif
++
++/* driver private data */
++struct mipi_csi2_info {
++ bool mipi_en;
++ int ipu_id;
++ unsigned int csi_id;
++ unsigned int v_channel;
++ unsigned int lanes;
++ unsigned int datatype;
++ struct clk *cfg_clk;
++ struct clk *dphy_clk;
++ struct clk *pixel_clk;
++ void __iomem *mipi_csi2_base;
++ struct platform_device *pdev;
++
++ struct mutex mutex_lock;
++};
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/mlb/Kconfig linux-3.14.40/drivers/mxc/mlb/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/mlb/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mlb/Kconfig 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,17 @@
++#
++# MLB150 configuration
++#
++
++menu "MXC Media Local Bus Driver"
++
++config MXC_MLB
++ boolean
++
++config MXC_MLB150
++ tristate "MLB150 support"
++ depends on SOC_IMX6Q
++ select MXC_MLB
++ ---help---
++ Say Y to get the MLB150 support.
++
++endmenu
+diff -Nur linux-3.14.40.orig/drivers/mxc/mlb/Makefile linux-3.14.40/drivers/mxc/mlb/Makefile
+--- linux-3.14.40.orig/drivers/mxc/mlb/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mlb/Makefile 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,5 @@
++#
++# Makefile for the i.MX6Q/DL MLB150 driver
++#
++
++obj-$(CONFIG_MXC_MLB150) += mxc_mlb150.o
+diff -Nur linux-3.14.40.orig/drivers/mxc/mlb/mxc_mlb150.c linux-3.14.40/drivers/mxc/mlb/mxc_mlb150.c
+--- linux-3.14.40.orig/drivers/mxc/mlb/mxc_mlb150.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/mlb/mxc_mlb150.c 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,2778 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/cdev.h>
++#include <linux/circ_buf.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/genalloc.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxc_mlb.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regulator/consumer.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++
++#define DRIVER_NAME "mxc_mlb150"
++
++/*
++ * MLB module memory map registers define
++ */
++#define REG_MLBC0 0x0
++#define MLBC0_MLBEN (0x1)
++#define MLBC0_MLBCLK_MASK (0x7 << 2)
++#define MLBC0_MLBCLK_SHIFT (2)
++#define MLBC0_MLBPEN (0x1 << 5)
++#define MLBC0_MLBLK (0x1 << 7)
++#define MLBC0_ASYRETRY (0x1 << 12)
++#define MLBC0_CTLRETRY (0x1 << 12)
++#define MLBC0_FCNT_MASK (0x7 << 15)
++#define MLBC0_FCNT_SHIFT (15)
++
++#define REG_MLBPC0 0x8
++#define MLBPC0_MCLKHYS (0x1 << 11)
++
++#define REG_MS0 0xC
++#define REG_MS1 0x14
++
++#define REG_MSS 0x20
++#define MSS_RSTSYSCMD (0x1)
++#define MSS_LKSYSCMD (0x1 << 1)
++#define MSS_ULKSYSCMD (0x1 << 2)
++#define MSS_CSSYSCMD (0x1 << 3)
++#define MSS_SWSYSCMD (0x1 << 4)
++#define MSS_SERVREQ (0x1 << 5)
++
++#define REG_MSD 0x24
++
++#define REG_MIEN 0x2C
++#define MIEN_ISOC_PE (0x1)
++#define MIEN_ISOC_BUFO (0x1 << 1)
++#define MIEN_SYNC_PE (0x1 << 16)
++#define MIEN_ARX_DONE (0x1 << 17)
++#define MIEN_ARX_PE (0x1 << 18)
++#define MIEN_ARX_BREAK (0x1 << 19)
++#define MIEN_ATX_DONE (0x1 << 20)
++#define MIEN_ATX_PE (0x1 << 21)
++#define MIEN_ATX_BREAK (0x1 << 22)
++#define MIEN_CRX_DONE (0x1 << 24)
++#define MIEN_CRX_PE (0x1 << 25)
++#define MIEN_CRX_BREAK (0x1 << 26)
++#define MIEN_CTX_DONE (0x1 << 27)
++#define MIEN_CTX_PE (0x1 << 28)
++#define MIEN_CTX_BREAK (0x1 << 29)
++
++#define REG_MLBPC2 0x34
++#define REG_MLBPC1 0x38
++#define MLBPC1_VAL (0x00000888)
++
++#define REG_MLBC1 0x3C
++#define MLBC1_LOCK (0x1 << 6)
++#define MLBC1_CLKM (0x1 << 7)
++#define MLBC1_NDA_MASK (0xFF << 8)
++#define MLBC1_NDA_SHIFT (8)
++
++#define REG_HCTL 0x80
++#define HCTL_RST0 (0x1)
++#define HCTL_RST1 (0x1 << 1)
++#define HCTL_EN (0x1 << 15)
++
++#define REG_HCMR0 0x88
++#define REG_HCMR1 0x8C
++#define REG_HCER0 0x90
++#define REG_HCER1 0x94
++#define REG_HCBR0 0x98
++#define REG_HCBR1 0x9C
++
++#define REG_MDAT0 0xC0
++#define REG_MDAT1 0xC4
++#define REG_MDAT2 0xC8
++#define REG_MDAT3 0xCC
++
++#define REG_MDWE0 0xD0
++#define REG_MDWE1 0xD4
++#define REG_MDWE2 0xD8
++#define REG_MDWE3 0xDC
++
++#define REG_MCTL 0xE0
++#define MCTL_XCMP (0x1)
++
++#define REG_MADR 0xE4
++#define MADR_WNR (0x1 << 31)
++#define MADR_TB (0x1 << 30)
++#define MADR_ADDR_MASK (0x7f << 8)
++#define MADR_ADDR_SHIFT (0)
++
++#define REG_ACTL 0x3C0
++#define ACTL_MPB (0x1 << 4)
++#define ACTL_DMAMODE (0x1 << 2)
++#define ACTL_SMX (0x1 << 1)
++#define ACTL_SCE (0x1)
++
++#define REG_ACSR0 0x3D0
++#define REG_ACSR1 0x3D4
++#define REG_ACMR0 0x3D8
++#define REG_ACMR1 0x3DC
++
++#define REG_CAT_MDATn(ch) (REG_MDAT0 + ((ch % 8) >> 1) * 4)
++#define REG_CAT_MDWEn(ch) (REG_MDWE0 + ((ch % 8) >> 1) * 4)
++
++#define INT_AHB0_CH_START (0)
++#define INT_AHB1_CH_START (32)
++
++#define LOGIC_CH_NUM (64)
++#define BUF_CDT_OFFSET (0x0)
++#define BUF_ADT_OFFSET (0x40)
++#define BUF_CAT_MLB_OFFSET (0x80)
++#define BUF_CAT_HBI_OFFSET (0x88)
++#define BUF_CTR_END_OFFSET (0x8F)
++
++#define CAT_MODE_RX (0x1 << 0)
++#define CAT_MODE_TX (0x1 << 1)
++#define CAT_MODE_INBOUND_DMA (0x1 << 8)
++#define CAT_MODE_OUTBOUND_DMA (0x1 << 9)
++
++#define CH_SYNC_DEFAULT_QUAD (1)
++#define CH_SYNC_MAX_QUAD (15)
++#define CH_SYNC_CDT_BUF_DEP (CH_SYNC_DEFAULT_QUAD * 4 * 4)
++#define CH_SYNC_ADT_BUF_MULTI (4)
++#define CH_SYNC_ADT_BUF_DEP (CH_SYNC_CDT_BUF_DEP * CH_SYNC_ADT_BUF_MULTI)
++#define CH_SYNC_BUF_SZ (CH_SYNC_MAX_QUAD * 4 * 4 * \
++ CH_SYNC_ADT_BUF_MULTI)
++#define CH_CTRL_CDT_BUF_DEP (64)
++#define CH_CTRL_ADT_BUF_DEP (CH_CTRL_CDT_BUF_DEP)
++#define CH_CTRL_BUF_SZ (CH_CTRL_ADT_BUF_DEP)
++#define CH_ASYNC_MDP_PACKET_LEN (1024)
++#define CH_ASYNC_MEP_PACKET_LEN (1536)
++#define CH_ASYNC_CDT_BUF_DEP (CH_ASYNC_MEP_PACKET_LEN)
++#define CH_ASYNC_ADT_BUF_DEP (CH_ASYNC_CDT_BUF_DEP)
++#define CH_ASYNC_BUF_SZ (CH_ASYNC_ADT_BUF_DEP)
++#define CH_ISOC_BLK_SIZE_188 (188)
++#define CH_ISOC_BLK_SIZE_196 (196)
++#define CH_ISOC_BLK_SIZE (CH_ISOC_BLK_SIZE_188)
++#define CH_ISOC_BLK_NUM (1)
++#define CH_ISOC_CDT_BUF_DEP (CH_ISOC_BLK_SIZE * CH_ISOC_BLK_NUM)
++#define CH_ISOC_ADT_BUF_DEP (CH_ISOC_CDT_BUF_DEP)
++#define CH_ISOC_BUF_SZ (1024)
++
++#define CH_SYNC_DBR_BUF_OFFSET (0x0)
++#define CH_CTRL_DBR_BUF_OFFSET (CH_SYNC_DBR_BUF_OFFSET + \
++ 2 * (CH_SYNC_MAX_QUAD * 4 * 4))
++#define CH_ASYNC_DBR_BUF_OFFSET (CH_CTRL_DBR_BUF_OFFSET + \
++ 2 * CH_CTRL_CDT_BUF_DEP)
++#define CH_ISOC_DBR_BUF_OFFSET (CH_ASYNC_DBR_BUF_OFFSET + \
++ 2 * CH_ASYNC_CDT_BUF_DEP)
++
++#define DBR_BUF_START 0x00000
++
++#define CDT_LEN (16)
++#define ADT_LEN (16)
++#define CAT_LEN (2)
++
++#define CDT_SZ (CDT_LEN * LOGIC_CH_NUM)
++#define ADT_SZ (ADT_LEN * LOGIC_CH_NUM)
++#define CAT_SZ (CAT_LEN * LOGIC_CH_NUM * 2)
++
++#define CDT_BASE(base) (base + BUF_CDT_OFFSET)
++#define ADT_BASE(base) (base + BUF_ADT_OFFSET)
++#define CAT_MLB_BASE(base) (base + BUF_CAT_MLB_OFFSET)
++#define CAT_HBI_BASE(base) (base + BUF_CAT_HBI_OFFSET)
++
++#define CDTn_ADDR(base, n) (base + BUF_CDT_OFFSET + n * CDT_LEN)
++#define ADTn_ADDR(base, n) (base + BUF_ADT_OFFSET + n * ADT_LEN)
++#define CATn_MLB_ADDR(base, n) (base + BUF_CAT_MLB_OFFSET + n * CAT_LEN)
++#define CATn_HBI_ADDR(base, n) (base + BUF_CAT_HBI_OFFSET + n * CAT_LEN)
++
++#define CAT_CL_SHIFT (0x0)
++#define CAT_CT_SHIFT (8)
++#define CAT_CE (0x1 << 11)
++#define CAT_RNW (0x1 << 12)
++#define CAT_MT (0x1 << 13)
++#define CAT_FCE (0x1 << 14)
++#define CAT_MFE (0x1 << 14)
++
++#define CDT_WSBC_SHIFT (14)
++#define CDT_WPC_SHIFT (11)
++#define CDT_RSBC_SHIFT (30)
++#define CDT_RPC_SHIFT (27)
++#define CDT_WPC_1_SHIFT (12)
++#define CDT_RPC_1_SHIFT (28)
++#define CDT_WPTR_SHIFT (0)
++#define CDT_SYNC_WSTS_MASK (0x0000f000)
++#define CDT_SYNC_WSTS_SHIFT (12)
++#define CDT_CTRL_ASYNC_WSTS_MASK (0x0000f000)
++#define CDT_CTRL_ASYNC_WSTS_SHIFT (12)
++#define CDT_ISOC_WSTS_MASK (0x0000e000)
++#define CDT_ISOC_WSTS_SHIFT (13)
++#define CDT_RPTR_SHIFT (16)
++#define CDT_SYNC_RSTS_MASK (0xf0000000)
++#define CDT_SYNC_RSTS_SHIFT (28)
++#define CDT_CTRL_ASYNC_RSTS_MASK (0xf0000000)
++#define CDT_CTRL_ASYNC_RSTS_SHIFT (28)
++#define CDT_ISOC_RSTS_MASK (0xe0000000)
++#define CDT_ISOC_RSTS_SHIFT (29)
++#define CDT_CTRL_ASYNC_WSTS_1 (0x1 << 14)
++#define CDT_CTRL_ASYNC_RSTS_1 (0x1 << 15)
++#define CDT_BD_SHIFT (0)
++#define CDT_BA_SHIFT (16)
++#define CDT_BS_SHIFT (0)
++#define CDT_BF_SHIFT (31)
++
++#define ADT_PG (0x1 << 13)
++#define ADT_LE (0x1 << 14)
++#define ADT_CE (0x1 << 15)
++#define ADT_BD1_SHIFT (0)
++#define ADT_ERR1 (0x1 << 13)
++#define ADT_DNE1 (0x1 << 14)
++#define ADT_RDY1 (0x1 << 15)
++#define ADT_BD2_SHIFT (16)
++#define ADT_ERR2 (0x1 << 29)
++#define ADT_DNE2 (0x1 << 30)
++#define ADT_RDY2 (0x1 << 31)
++#define ADT_BA1_SHIFT (0x0)
++#define ADT_BA2_SHIFT (0x0)
++#define ADT_PS1 (0x1 << 12)
++#define ADT_PS2 (0x1 << 28)
++#define ADT_MEP1 (0x1 << 11)
++#define ADT_MEP2 (0x1 << 27)
++
++#define MLB_MINOR_DEVICES 4
++#define MLB_CONTROL_DEV_NAME "ctrl"
++#define MLB_ASYNC_DEV_NAME "async"
++#define MLB_SYNC_DEV_NAME "sync"
++#define MLB_ISOC_DEV_NAME "isoc"
++
++#define TX_CHANNEL 0
++#define RX_CHANNEL 1
++
++#define TRANS_RING_NODES (1 << 3)
++
++enum MLB_CTYPE {
++ MLB_CTYPE_SYNC,
++ MLB_CTYPE_CTRL,
++ MLB_CTYPE_ASYNC,
++ MLB_CTYPE_ISOC,
++};
++
++enum CLK_SPEED {
++ CLK_256FS,
++ CLK_512FS,
++ CLK_1024FS,
++ CLK_2048FS,
++ CLK_3072FS,
++ CLK_4096FS,
++ CLK_6144FS,
++ CLK_8192FS,
++};
++
++struct mlb_ringbuf {
++ s8 *virt_bufs[TRANS_RING_NODES];
++ u32 phy_addrs[TRANS_RING_NODES];
++ s32 head;
++ s32 tail;
++ s32 unit_size;
++ s32 total_size;
++ rwlock_t rb_lock ____cacheline_aligned; /* ring index lock */
++};
++
++struct mlb_channel_info {
++ /* Input MLB channel address */
++ u32 address;
++ /* Internal AHB channel label */
++ u32 cl;
++ /* DBR buf head */
++ u32 dbr_buf_head;
++};
++
++struct mlb_dev_info {
++ /* device node name */
++ const char dev_name[20];
++ /* channel type */
++ const unsigned int channel_type;
++ /* ch fps */
++ enum CLK_SPEED fps;
++ /* channel info for tx/rx */
++ struct mlb_channel_info channels[2];
++ /* ring buffer */
++ u8 *rbuf_base_virt;
++ u32 rbuf_base_phy;
++ struct mlb_ringbuf rx_rbuf;
++ struct mlb_ringbuf tx_rbuf;
++ /* exception event */
++ unsigned long ex_event;
++ /* tx busy indicator */
++ unsigned long tx_busy;
++ /* channel started up or not */
++ atomic_t on;
++ /* device open count */
++ atomic_t opencnt;
++ /* wait queue head for channel */
++ wait_queue_head_t rx_wq;
++ wait_queue_head_t tx_wq;
++ /* TX OK */
++ s32 tx_ok;
++ /* spinlock for event access */
++ spinlock_t event_lock;
++ /*
++ * Block size for isoc mode
++ * This variable can be configured in ioctl
++ */
++ u32 isoc_blksz;
++ /*
++ * Number of quads for sync mode
++ * This variable can be configured in ioctl
++ */
++ u32 sync_quad;
++ /* Buffer depth in cdt */
++ u32 cdt_buf_dep;
++ /* Buffer depth in adt */
++ u32 adt_buf_dep;
++ /* Buffer size to hold data */
++ u32 buf_size;
++};
++
++struct mlb_data {
++ struct mlb_dev_info *devinfo;
++ struct clk *clk_mlb3p;
++ struct clk *clk_mlb6p;
++ struct cdev cdev;
++ struct class *class; /* device class */
++ dev_t firstdev;
++#ifdef CONFIG_REGULATOR
++ struct regulator *nvcc;
++#endif
++ void __iomem *membase; /* mlb module base address */
++ struct gen_pool *iram_pool;
++ u32 iram_size;
++ u32 irq_ahb0;
++ u32 irq_ahb1;
++ u32 irq_mlb;
++};
++
++/*
++ * For optimization, we use fixed channel label for
++ * input channels of each mode
++ * SYNC: CL = 0 for RX, CL = 64 for TX
++ * CTRL: CL = 1 for RX, CL = 65 for TX
++ * ASYNC: CL = 2 for RX, CL = 66 for TX
++ * ISOC: CL = 3 for RX, CL = 67 for TX
++ */
++#define SYNC_RX_CL_AHB0 0
++#define CTRL_RX_CL_AHB0 1
++#define ASYNC_RX_CL_AHB0 2
++#define ISOC_RX_CL_AHB0 3
++#define SYNC_TX_CL_AHB0 4
++#define CTRL_TX_CL_AHB0 5
++#define ASYNC_TX_CL_AHB0 6
++#define ISOC_TX_CL_AHB0 7
++
++#define SYNC_RX_CL_AHB1 32
++#define CTRL_RX_CL_AHB1 33
++#define ASYNC_RX_CL_AHB1 34
++#define ISOC_RX_CL_AHB1 35
++#define SYNC_TX_CL_AHB1 36
++#define CTRL_TX_CL_AHB1 37
++#define ASYNC_TX_CL_AHB1 38
++#define ISOC_TX_CL_AHB1 39
++
++#define SYNC_RX_CL SYNC_RX_CL_AHB0
++#define CTRL_RX_CL CTRL_RX_CL_AHB0
++#define ASYNC_RX_CL ASYNC_RX_CL_AHB0
++#define ISOC_RX_CL ISOC_RX_CL_AHB0
++
++#define SYNC_TX_CL SYNC_TX_CL_AHB0
++#define CTRL_TX_CL CTRL_TX_CL_AHB0
++#define ASYNC_TX_CL ASYNC_TX_CL_AHB0
++#define ISOC_TX_CL ISOC_TX_CL_AHB0
++
++static struct mlb_dev_info mlb_devinfo[MLB_MINOR_DEVICES] = {
++ {
++ .dev_name = MLB_SYNC_DEV_NAME,
++ .channel_type = MLB_CTYPE_SYNC,
++ .channels = {
++ [0] = {
++ .cl = SYNC_TX_CL,
++ .dbr_buf_head = CH_SYNC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = SYNC_RX_CL,
++ .dbr_buf_head = CH_SYNC_DBR_BUF_OFFSET
++ + CH_SYNC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_SYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[0].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_SYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[0].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_SYNC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_SYNC_ADT_BUF_DEP,
++ .buf_size = CH_SYNC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[0].event_lock),
++ },
++ {
++ .dev_name = MLB_CONTROL_DEV_NAME,
++ .channel_type = MLB_CTYPE_CTRL,
++ .channels = {
++ [0] = {
++ .cl = CTRL_TX_CL,
++ .dbr_buf_head = CH_CTRL_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = CTRL_RX_CL,
++ .dbr_buf_head = CH_CTRL_DBR_BUF_OFFSET
++ + CH_CTRL_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_CTRL_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[1].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_CTRL_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[1].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_CTRL_CDT_BUF_DEP,
++ .adt_buf_dep = CH_CTRL_ADT_BUF_DEP,
++ .buf_size = CH_CTRL_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[1].event_lock),
++ },
++ {
++ .dev_name = MLB_ASYNC_DEV_NAME,
++ .channel_type = MLB_CTYPE_ASYNC,
++ .channels = {
++ [0] = {
++ .cl = ASYNC_TX_CL,
++ .dbr_buf_head = CH_ASYNC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = ASYNC_RX_CL,
++ .dbr_buf_head = CH_ASYNC_DBR_BUF_OFFSET
++ + CH_ASYNC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_ASYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[2].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_ASYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[2].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_ASYNC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_ASYNC_ADT_BUF_DEP,
++ .buf_size = CH_ASYNC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[2].event_lock),
++ },
++ {
++ .dev_name = MLB_ISOC_DEV_NAME,
++ .channel_type = MLB_CTYPE_ISOC,
++ .channels = {
++ [0] = {
++ .cl = ISOC_TX_CL,
++ .dbr_buf_head = CH_ISOC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = ISOC_RX_CL,
++ .dbr_buf_head = CH_ISOC_DBR_BUF_OFFSET
++ + CH_ISOC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_ISOC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[3].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_ISOC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[3].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_ISOC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_ISOC_ADT_BUF_DEP,
++ .buf_size = CH_ISOC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[3].event_lock),
++ .isoc_blksz = CH_ISOC_BLK_SIZE_188,
++ },
++};
++
++static void __iomem *mlb_base;
++
++DEFINE_SPINLOCK(ctr_lock);
++
++#ifdef DEBUG
++#define DUMP_REG(reg) pr_debug(#reg": 0x%08x\n", __raw_readl(mlb_base + reg))
++
++static void mlb150_dev_dump_reg(void)
++{
++ pr_debug("mxc_mlb150: Dump registers:\n");
++ DUMP_REG(REG_MLBC0);
++ DUMP_REG(REG_MLBPC0);
++ DUMP_REG(REG_MS0);
++ DUMP_REG(REG_MS1);
++ DUMP_REG(REG_MSS);
++ DUMP_REG(REG_MSD);
++ DUMP_REG(REG_MIEN);
++ DUMP_REG(REG_MLBPC2);
++ DUMP_REG(REG_MLBPC1);
++ DUMP_REG(REG_MLBC1);
++ DUMP_REG(REG_HCTL);
++ DUMP_REG(REG_HCMR0);
++ DUMP_REG(REG_HCMR1);
++ DUMP_REG(REG_HCER0);
++ DUMP_REG(REG_HCER1);
++ DUMP_REG(REG_HCBR0);
++ DUMP_REG(REG_HCBR1);
++ DUMP_REG(REG_MDAT0);
++ DUMP_REG(REG_MDAT1);
++ DUMP_REG(REG_MDAT2);
++ DUMP_REG(REG_MDAT3);
++ DUMP_REG(REG_MDWE0);
++ DUMP_REG(REG_MDWE1);
++ DUMP_REG(REG_MDWE2);
++ DUMP_REG(REG_MDWE3);
++ DUMP_REG(REG_MCTL);
++ DUMP_REG(REG_MADR);
++ DUMP_REG(REG_ACTL);
++ DUMP_REG(REG_ACSR0);
++ DUMP_REG(REG_ACSR1);
++ DUMP_REG(REG_ACMR0);
++ DUMP_REG(REG_ACMR1);
++}
++
++static void mlb150_dev_dump_hex(const u8 *buf, u32 len)
++{
++ print_hex_dump(KERN_DEBUG, "CTR DUMP:",
++ DUMP_PREFIX_OFFSET, 8, 1, buf, len, 0);
++}
++#endif
++
++static inline void mlb150_dev_enable_ctr_write(u32 mdat0_bits_en,
++ u32 mdat1_bits_en, u32 mdat2_bits_en, u32 mdat3_bits_en)
++{
++ __raw_writel(mdat0_bits_en, mlb_base + REG_MDWE0);
++ __raw_writel(mdat1_bits_en, mlb_base + REG_MDWE1);
++ __raw_writel(mdat2_bits_en, mlb_base + REG_MDWE2);
++ __raw_writel(mdat3_bits_en, mlb_base + REG_MDWE3);
++}
++
++#ifdef DEBUG
++static inline u8 mlb150_dev_dbr_read(u32 dbr_addr)
++{
++ s32 timeout = 1000;
++ u8 dbr_val = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(MADR_TB | dbr_addr,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ return -ETIME;
++ }
++
++ dbr_val = __raw_readl(mlb_base + REG_MDAT0) & 0x000000ff;
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return dbr_val;
++}
++
++static inline s32 mlb150_dev_dbr_write(u32 dbr_addr, u8 dbr_val)
++{
++ s32 timeout = 1000;
++ u32 mdat0 = dbr_val & 0x000000ff;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(mdat0, mlb_base + REG_MDAT0);
++
++ __raw_writel(MADR_WNR | MADR_TB | dbr_addr,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ return -ETIME;
++ }
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_dbr_dump(u32 addr, u32 size)
++{
++ u8 *dump_buf = NULL;
++ u8 *buf_ptr = NULL;
++ s32 i;
++
++ dump_buf = kzalloc(size, GFP_KERNEL);
++ if (!dump_buf) {
++ pr_err("can't allocate enough memory\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0, buf_ptr = dump_buf;
++ i < size; ++i, ++buf_ptr)
++ *buf_ptr = mlb150_dev_dbr_read(addr + i);
++
++ mlb150_dev_dump_hex(dump_buf, size);
++
++ kfree(dump_buf);
++
++ return 0;
++}
++#endif
++
++static s32 mlb150_dev_ctr_read(u32 ctr_offset, u32 *ctr_val)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(ctr_offset, mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Read CTR timeout\n");
++ return -ETIME;
++ }
++
++ ctr_val[0] = __raw_readl(mlb_base + REG_MDAT0);
++ ctr_val[1] = __raw_readl(mlb_base + REG_MDAT1);
++ ctr_val[2] = __raw_readl(mlb_base + REG_MDAT2);
++ ctr_val[3] = __raw_readl(mlb_base + REG_MDAT3);
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return 0;
++}
++
++static s32 mlb150_dev_ctr_write(u32 ctr_offset, const u32 *ctr_val)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++
++ __raw_writel(ctr_val[0], mlb_base + REG_MDAT0);
++ __raw_writel(ctr_val[1], mlb_base + REG_MDAT1);
++ __raw_writel(ctr_val[2], mlb_base + REG_MDAT2);
++ __raw_writel(ctr_val[3], mlb_base + REG_MDAT3);
++
++ __raw_writel(MADR_WNR | ctr_offset,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Write CTR timeout\n");
++ return -ETIME;
++ }
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++#ifdef DEBUG_CTR
++ {
++ u32 ctr_rd[4] = { 0 };
++
++ if (!mlb150_dev_ctr_read(ctr_offset, ctr_rd)) {
++ if (ctr_val[0] == ctr_rd[0] &&
++ ctr_val[1] == ctr_rd[1] &&
++ ctr_val[2] == ctr_rd[2] &&
++ ctr_val[3] == ctr_rd[3])
++ return 0;
++ else {
++ pr_debug("mxc_mlb150: ctr write failed\n");
++ pr_debug("offset: 0x%x\n", ctr_offset);
++ pr_debug("Write: 0x%x 0x%x 0x%x 0x%x\n",
++ ctr_val[3], ctr_val[2],
++ ctr_val[1], ctr_val[0]);
++ pr_debug("Read: 0x%x 0x%x 0x%x 0x%x\n",
++ ctr_rd[3], ctr_rd[2],
++ ctr_rd[1], ctr_rd[0]);
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: ctr read failed\n");
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++#ifdef DEBUG
++static s32 mlb150_dev_cat_read(u32 ctr_offset, u32 ch, u16 *cat_val)
++{
++ u16 ctr_val[8] = { 0 };
++
++ if (mlb150_dev_ctr_read(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ /*
++ * The CTR entry is read back through a u16 view of the 32-bit
++ * data registers so the 16-bit CAT slot for this channel can
++ * be picked out directly.
++ */
++ *cat_val = ctr_val[ch % 8];
++
++ return 0;
++}
++#endif
++
++static s32 mlb150_dev_cat_write(u32 ctr_offset, u32 ch, const u16 cat_val)
++{
++ u16 ctr_val[8] = { 0 };
++
++ if (mlb150_dev_ctr_read(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ ctr_val[ch % 8] = cat_val;
++ if (mlb150_dev_ctr_write(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++#define mlb150_dev_cat_mlb_read(ch, cat_val) \
++ mlb150_dev_cat_read(BUF_CAT_MLB_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_mlb_write(ch, cat_val) \
++ mlb150_dev_cat_write(BUF_CAT_MLB_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_hbi_read(ch, cat_val) \
++ mlb150_dev_cat_read(BUF_CAT_HBI_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_hbi_write(ch, cat_val) \
++ mlb150_dev_cat_write(BUF_CAT_HBI_OFFSET + (ch >> 3), ch, cat_val)
++
++#define mlb150_dev_cdt_read(ch, cdt_val) \
++ mlb150_dev_ctr_read(BUF_CDT_OFFSET + ch, cdt_val)
++#define mlb150_dev_cdt_write(ch, cdt_val) \
++ mlb150_dev_ctr_write(BUF_CDT_OFFSET + ch, cdt_val)
++#define mlb150_dev_adt_read(ch, adt_val) \
++ mlb150_dev_ctr_read(BUF_ADT_OFFSET + ch, adt_val)
++#define mlb150_dev_adt_write(ch, adt_val) \
++ mlb150_dev_ctr_write(BUF_ADT_OFFSET + ch, adt_val)
++
++static s32 mlb150_dev_get_adt_sts(u32 ch)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++ u32 reg;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(BUF_ADT_OFFSET + ch,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Read CTR timeout\n");
++ return -ETIME;
++ }
++
++ reg = __raw_readl(mlb_base + REG_MDAT1);
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++#ifdef DEBUG_ADT
++ pr_debug("mxc_mlb150: Get ch %d adt sts: 0x%08x\n", ch, reg);
++#endif
++
++ return reg;
++}
++
++#ifdef DEBUG
++static void mlb150_dev_dump_ctr_tbl(u32 ch_start, u32 ch_end)
++{
++ u32 i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ pr_debug("mxc_mlb150: CDT Table");
++ for (i = BUF_CDT_OFFSET + ch_start;
++ i < BUF_CDT_OFFSET + ch_end;
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: ADT Table");
++ for (i = BUF_ADT_OFFSET + ch_start;
++ i < BUF_ADT_OFFSET + ch_end;
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: CAT MLB Table");
++ for (i = BUF_CAT_MLB_OFFSET + (ch_start >> 3);
++ i <= BUF_CAT_MLB_OFFSET + ((ch_end + 8) >> 3);
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: CAT HBI Table");
++ for (i = BUF_CAT_HBI_OFFSET + (ch_start >> 3);
++ i <= BUF_CAT_HBI_OFFSET + ((ch_end + 8) >> 3);
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++}
++#endif
++
++/*
++ * Initialize the MLB module device
++ */
++static inline void mlb150_dev_enable_dma_irq(u32 enable)
++{
++ u32 ch_rx_mask = (1 << SYNC_RX_CL_AHB0) | (1 << CTRL_RX_CL_AHB0)
++ | (1 << ASYNC_RX_CL_AHB0) | (1 << ISOC_RX_CL_AHB0)
++ | (1 << SYNC_TX_CL_AHB0) | (1 << CTRL_TX_CL_AHB0)
++ | (1 << ASYNC_TX_CL_AHB0) | (1 << ISOC_TX_CL_AHB0);
++ u32 ch_tx_mask = (1 << (SYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (SYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_TX_CL_AHB1 - INT_AHB1_CH_START));
++
++ if (enable) {
++ __raw_writel(ch_rx_mask, mlb_base + REG_ACMR0);
++ __raw_writel(ch_tx_mask, mlb_base + REG_ACMR1);
++ } else {
++ __raw_writel(0x0, mlb_base + REG_ACMR0);
++ __raw_writel(0x0, mlb_base + REG_ACMR1);
++ }
++}
++
++
++static void mlb150_dev_init_ir_amba_ahb(void)
++{
++ u32 reg = 0;
++
++ /*
++ * Step 1. Program the ACMRn registers to enable interrupts from all
++ * active DMA channels
++ */
++ mlb150_dev_enable_dma_irq(1);
++
++ /*
++ * Step 2. Select the status clear method:
++ * ACTL.SCE = 0, hardware clears on read
++ * ACTL.SCE = 1, software writes a '1' to clear
++ * We only support DMA MODE 1
++ */
++ reg = __raw_readl(mlb_base + REG_ACTL);
++ reg |= ACTL_DMAMODE;
++#ifdef MULTIPLE_PACKAGE_MODE
++ reg |= REG_ACTL_MPB;
++#endif
++
++ /*
++ * Step 3. Select 1 or 2 interrupt signals:
++ * ACTL.SMX = 0: one interrupt for channels 0 - 31 on ahb_init[0]
++ * and another interrupt for channels 32 - 63 on ahb_init[1]
++	 *	ACTL.SMX = 1: single interrupt for all channels on ahb_init[0]
++ */
++ reg &= ~ACTL_SMX;
++
++ __raw_writel(reg, mlb_base + REG_ACTL);
++}
++
++static inline void mlb150_dev_enable_ir_mlb(u32 enable)
++{
++ /*
++ * Step 1, Select the MSn to be cleared by software,
++ * writing a '0' to the appropriate bits
++ */
++ __raw_writel(0, mlb_base + REG_MS0);
++ __raw_writel(0, mlb_base + REG_MS1);
++
++ /*
++	 * Step 2, Program MIEN to enable protocol error
++ * interrupts for all active MLB channels
++ */
++ if (enable)
++ __raw_writel(MIEN_CTX_PE |
++ MIEN_CRX_PE | MIEN_ATX_PE |
++ MIEN_ARX_PE | MIEN_SYNC_PE |
++ MIEN_ISOC_PE,
++ mlb_base + REG_MIEN);
++ else
++ __raw_writel(0, mlb_base + REG_MIEN);
++}
++
++static inline void mlb150_enable_pll(struct mlb_data *drvdata)
++{
++ u32 c0_val;
++
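++	/*
++	 * Program MLBPC1 with MLBPEN cleared in MLBC0, enable the 6-pin
++	 * interface clock, then set MLBPEN again.
++	 */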
++ __raw_writel(MLBPC1_VAL,
++ drvdata->membase + REG_MLBPC1);
++
++ c0_val = __raw_readl(drvdata->membase + REG_MLBC0);
++ if (c0_val & MLBC0_MLBPEN) {
++ c0_val &= ~MLBC0_MLBPEN;
++ __raw_writel(c0_val,
++ drvdata->membase + REG_MLBC0);
++ }
++
++ clk_prepare_enable(drvdata->clk_mlb6p);
++
++ c0_val |= (MLBC0_MLBPEN);
++ __raw_writel(c0_val, drvdata->membase + REG_MLBC0);
++}
++
++static inline void mlb150_disable_pll(struct mlb_data *drvdata)
++{
++ u32 c0_val;
++
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ c0_val = __raw_readl(drvdata->membase + REG_MLBC0);
++
++ __raw_writel(0x0, drvdata->membase + REG_MLBPC1);
++
++ c0_val &= ~MLBC0_MLBPEN;
++ __raw_writel(c0_val, drvdata->membase + REG_MLBC0);
++}
++
++static void mlb150_dev_reset_cdt(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM); ++i)
++ mlb150_dev_ctr_write(BUF_CDT_OFFSET + i, ctr_val);
++}
++
++static s32 mlb150_dev_init_ch_cdt(struct mlb_dev_info *pdevinfo, u32 ch,
++ enum MLB_CTYPE ctype, u32 ch_func)
++{
++ u32 cdt_val[4] = { 0 };
++
++ /* a. Set the 14-bit base address (BA) */
++ pr_debug("mxc_mlb150: ctype: %d, ch: %d, dbr_buf_head: 0x%08x",
++ ctype, ch, pdevinfo->channels[ch_func].dbr_buf_head);
++ cdt_val[3] = (pdevinfo->channels[ch_func].dbr_buf_head)
++ << CDT_BA_SHIFT;
++ /*
++ * b. Set the 12-bit or 13-bit buffer depth (BD)
++ * BD = buffer depth in bytes - 1
++ * For synchronous channels: (BD + 1) = 4 * m * bpf
++ * For control channels: (BD + 1) >= max packet length (64)
++ * For asynchronous channels: (BD + 1) >= max packet length
++ * 1024 for a MOST Data packet (MDP);
++ * 1536 for a MOST Ethernet Packet (MEP)
++ * For isochronous channels: (BD + 1) mod (BS + 1) = 0
++	 *	(BS = block size in bytes - 1, programmed below for isoc)
++ */
++ if (MLB_CTYPE_ISOC == ctype)
++ cdt_val[1] |= (pdevinfo->isoc_blksz - 1);
++ /* BD */
++ cdt_val[3] |= (pdevinfo->cdt_buf_dep - 1) << CDT_BD_SHIFT;
++
++ pr_debug("mxc_mlb150: Set CDT val of channel %d, type: %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ ch, ctype, cdt_val[3], cdt_val[2], cdt_val[1], cdt_val[0]);
++
++ if (mlb150_dev_cdt_write(ch, cdt_val))
++ return -ETIME;
++
++#ifdef DEBUG_CTR
++ {
++ u32 cdt_rd[4] = { 0 };
++ if (!mlb150_dev_cdt_read(ch, cdt_rd)) {
++ pr_debug("mxc_mlb150: CDT val of channel %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ ch, cdt_rd[3], cdt_rd[2], cdt_rd[1], cdt_rd[0]);
++ if (cdt_rd[3] == cdt_val[3] &&
++ cdt_rd[2] == cdt_val[2] &&
++ cdt_rd[1] == cdt_val[1] &&
++ cdt_rd[0] == cdt_val[0]) {
++ pr_debug("mxc_mlb150: set cdt succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set cdt failed!\n");
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: Read CDT val of channel %d failed\n",
++ ch);
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static s32 mlb150_dev_init_ch_cat(u32 ch, u32 cl,
++ u32 cat_mode, enum MLB_CTYPE ctype)
++{
++ u16 cat_val = 0;
++#ifdef DEBUG_CTR
++ u16 cat_rd = 0;
++#endif
++
++ cat_val = CAT_CE | (ctype << CAT_CT_SHIFT) | cl;
++
++ if (cat_mode & CAT_MODE_OUTBOUND_DMA)
++ cat_val |= CAT_RNW;
++
++ if (MLB_CTYPE_SYNC == ctype)
++ cat_val |= CAT_MT;
++
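++	/*
++	 * The MLB-side CAT entry is indexed by the MLB channel address and
++	 * the HBI-side entry by the logical (AHB) channel: RX programs the
++	 * MLB entry for the inbound DMA and the HBI entry for the outbound
++	 * DMA, TX the other way round.
++	 */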
++ switch (cat_mode) {
++ case CAT_MODE_RX | CAT_MODE_INBOUND_DMA:
++ case CAT_MODE_TX | CAT_MODE_OUTBOUND_DMA:
++ pr_debug("mxc_mlb150: set CAT val of channel %d, type: %d: 0x%04x\n",
++ ch, ctype, cat_val);
++
++ if (mlb150_dev_cat_mlb_write(ch, cat_val))
++ return -ETIME;
++#ifdef DEBUG_CTR
++ if (!mlb150_dev_cat_mlb_read(ch, &cat_rd))
++ pr_debug("mxc_mlb150: CAT val of mlb channel %d: 0x%04x",
++ ch, cat_rd);
++ else {
++ pr_debug("mxc_mlb150: Read CAT of mlb channel %d failed\n",
++ ch);
++ return -EBADE;
++ }
++#endif
++ break;
++ case CAT_MODE_TX | CAT_MODE_INBOUND_DMA:
++ case CAT_MODE_RX | CAT_MODE_OUTBOUND_DMA:
++ pr_debug("mxc_mlb150: set CAT val of channel %d, type: %d: 0x%04x\n",
++ cl, ctype, cat_val);
++
++ if (mlb150_dev_cat_hbi_write(cl, cat_val))
++ return -ETIME;
++#ifdef DEBUG_CTR
++ if (!mlb150_dev_cat_hbi_read(cl, &cat_rd))
++ pr_debug("mxc_mlb150: CAT val of hbi channel %d: 0x%04x",
++ cl, cat_rd);
++ else {
++ pr_debug("mxc_mlb150: Read CAT of hbi channel %d failed\n",
++ cl);
++ return -EBADE;
++ }
++#endif
++ break;
++ default:
++		return -EBADRQC;
++ }
++
++#ifdef DEBUG_CTR
++ {
++ if (cat_val == cat_rd) {
++ pr_debug("mxc_mlb150: set cat succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set cat failed!\n");
++ return -EBADE;
++ }
++ }
++#endif
++ return 0;
++}
++
++static void mlb150_dev_reset_cat(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM >> 3); ++i) {
++ mlb150_dev_ctr_write(BUF_CAT_MLB_OFFSET + i, ctr_val);
++ mlb150_dev_ctr_write(BUF_CAT_HBI_OFFSET + i, ctr_val);
++ }
++}
++
++static void mlb150_dev_init_rfb(struct mlb_dev_info *pdevinfo, u32 rx_ch,
++ u32 tx_ch, enum MLB_CTYPE ctype)
++{
++ u32 rx_cl = pdevinfo->channels[RX_CHANNEL].cl;
++ u32 tx_cl = pdevinfo->channels[TX_CHANNEL].cl;
++ /* Step 1, Initialize all bits of CAT to '0' */
++ mlb150_dev_reset_cat();
++ mlb150_dev_reset_cdt();
++ /*
++ * Step 2, Initialize logical channel
++ * Step 3, Program the CDT for channel N
++ */
++ mlb150_dev_init_ch_cdt(pdevinfo, rx_cl, ctype, RX_CHANNEL);
++ mlb150_dev_init_ch_cdt(pdevinfo, tx_cl, ctype, TX_CHANNEL);
++
++ /* Step 4&5, Program the CAT for the inbound and outbound DMA */
++ mlb150_dev_init_ch_cat(rx_ch, rx_cl,
++ CAT_MODE_RX | CAT_MODE_INBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(rx_ch, rx_cl,
++ CAT_MODE_RX | CAT_MODE_OUTBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(tx_ch, tx_cl,
++ CAT_MODE_TX | CAT_MODE_INBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(tx_ch, tx_cl,
++ CAT_MODE_TX | CAT_MODE_OUTBOUND_DMA,
++ ctype);
++}
++
++static void mlb150_dev_reset_adt(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM); ++i)
++ mlb150_dev_ctr_write(BUF_ADT_OFFSET + i, ctr_val);
++}
++
++static void mlb150_dev_reset_whole_ctr(void)
++{
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++ mlb150_dev_reset_cdt();
++ mlb150_dev_reset_adt();
++ mlb150_dev_reset_cat();
++}
++
++#define CLR_REG(reg) __raw_writel(0x0, mlb_base + reg)
++
++static void mlb150_dev_reset_all_regs(void)
++{
++ CLR_REG(REG_MLBC0);
++ CLR_REG(REG_MLBPC0);
++ CLR_REG(REG_MS0);
++ CLR_REG(REG_MS1);
++ CLR_REG(REG_MSS);
++ CLR_REG(REG_MSD);
++ CLR_REG(REG_MIEN);
++ CLR_REG(REG_MLBPC2);
++ CLR_REG(REG_MLBPC1);
++ CLR_REG(REG_MLBC1);
++ CLR_REG(REG_HCTL);
++ CLR_REG(REG_HCMR0);
++ CLR_REG(REG_HCMR1);
++ CLR_REG(REG_HCER0);
++ CLR_REG(REG_HCER1);
++ CLR_REG(REG_HCBR0);
++ CLR_REG(REG_HCBR1);
++ CLR_REG(REG_MDAT0);
++ CLR_REG(REG_MDAT1);
++ CLR_REG(REG_MDAT2);
++ CLR_REG(REG_MDAT3);
++ CLR_REG(REG_MDWE0);
++ CLR_REG(REG_MDWE1);
++ CLR_REG(REG_MDWE2);
++ CLR_REG(REG_MDWE3);
++ CLR_REG(REG_MCTL);
++ CLR_REG(REG_MADR);
++ CLR_REG(REG_ACTL);
++ CLR_REG(REG_ACSR0);
++ CLR_REG(REG_ACSR1);
++ CLR_REG(REG_ACMR0);
++ CLR_REG(REG_ACMR1);
++}
++
++static inline s32 mlb150_dev_pipo_start(struct mlb_ringbuf *rbuf,
++ u32 ahb_ch, u32 buf_addr)
++{
++ u32 ctr_val[4] = { 0 };
++
++ ctr_val[1] |= ADT_RDY1;
++ ctr_val[2] = buf_addr;
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_pipo_next(u32 ahb_ch, enum MLB_CTYPE ctype,
++ u32 dne_sts, u32 buf_addr)
++{
++ u32 ctr_val[4] = { 0 };
++
++ if (MLB_CTYPE_ASYNC == ctype ||
++ MLB_CTYPE_CTRL == ctype) {
++ ctr_val[1] |= ADT_PS1;
++ ctr_val[1] |= ADT_PS2;
++ }
++
++ /*
++ * Clear DNE1 and ERR1
++ * Set the page ready bit (RDY1)
++ */
++ if (dne_sts & ADT_DNE1) {
++ ctr_val[1] |= ADT_RDY2;
++ ctr_val[3] = buf_addr;
++ } else {
++ ctr_val[1] |= ADT_RDY1;
++ ctr_val[2] = buf_addr;
++ }
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_pipo_stop(struct mlb_ringbuf *rbuf, u32 ahb_ch)
++{
++ u32 ctr_val[4] = { 0 };
++ unsigned long flags;
++
++ write_lock_irqsave(&rbuf->rb_lock, flags);
++ rbuf->head = rbuf->tail = 0;
++ write_unlock_irqrestore(&rbuf->rb_lock, flags);
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static s32 mlb150_dev_init_ch_amba_ahb(struct mlb_dev_info *pdevinfo,
++ struct mlb_channel_info *chinfo,
++ enum MLB_CTYPE ctype)
++{
++ u32 ctr_val[4] = { 0 };
++
++	/*
++	 * a. The 32-bit ping/pong base addresses (BA1/BA2) are left 0 here;
++	 * they are programmed when a buffer is queued to the channel
++	 */
++ ctr_val[3] = 0;
++ ctr_val[2] = 0;
++ ctr_val[1] = (pdevinfo->adt_buf_dep - 1) << ADT_BD1_SHIFT;
++ ctr_val[1] |= (pdevinfo->adt_buf_dep - 1) << ADT_BD2_SHIFT;
++ if (MLB_CTYPE_ASYNC == ctype ||
++ MLB_CTYPE_CTRL == ctype) {
++ ctr_val[1] |= ADT_PS1;
++ ctr_val[1] |= ADT_PS2;
++ }
++
++ ctr_val[0] |= (ADT_LE | ADT_CE);
++
++ pr_debug("mxc_mlb150: Set ADT val of channel %d, ctype: %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ chinfo->cl, ctype, ctr_val[3], ctr_val[2],
++ ctr_val[1], ctr_val[0]);
++
++ if (mlb150_dev_adt_write(chinfo->cl, ctr_val))
++ return -ETIME;
++
++#ifdef DEBUG_CTR
++ {
++ u32 ctr_rd[4] = { 0 };
++ if (!mlb150_dev_adt_read(chinfo->cl, ctr_rd)) {
++ pr_debug("mxc_mlb150: ADT val of channel %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ chinfo->cl, ctr_rd[3], ctr_rd[2],
++ ctr_rd[1], ctr_rd[0]);
++ if (ctr_rd[3] == ctr_val[3] &&
++ ctr_rd[2] == ctr_val[2] &&
++ ctr_rd[1] == ctr_val[1] &&
++ ctr_rd[0] == ctr_val[0]) {
++ pr_debug("mxc_mlb150: set adt succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set adt failed!\n");
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: Read ADT val of channel %d failed\n",
++ chinfo->cl);
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static void mlb150_dev_init_amba_ahb(struct mlb_dev_info *pdevinfo,
++ enum MLB_CTYPE ctype)
++{
++ struct mlb_channel_info *tx_chinfo = &pdevinfo->channels[TX_CHANNEL];
++ struct mlb_channel_info *rx_chinfo = &pdevinfo->channels[RX_CHANNEL];
++
++ /* Step 1, Initialize all bits of the ADT to '0' */
++ mlb150_dev_reset_adt();
++
++ /*
++ * Step 2, Select a logic channel
++ * Step 3, Program the AMBA AHB block ping page for channel N
++ * Step 4, Program the AMBA AHB block pong page for channel N
++ */
++ mlb150_dev_init_ch_amba_ahb(pdevinfo, rx_chinfo, ctype);
++ mlb150_dev_init_ch_amba_ahb(pdevinfo, tx_chinfo, ctype);
++}
++
++static void mlb150_dev_exit(void)
++{
++ u32 c0_val, hctl_val;
++
++ /* Disable EN bits */
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~(MLBC0_MLBEN | MLBC0_MLBPEN);
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ hctl_val = __raw_readl(mlb_base + REG_HCTL);
++ hctl_val &= ~HCTL_EN;
++ __raw_writel(hctl_val, mlb_base + REG_HCTL);
++
++ __raw_writel(0x0, mlb_base + REG_HCMR0);
++ __raw_writel(0x0, mlb_base + REG_HCMR1);
++
++ mlb150_dev_enable_dma_irq(0);
++ mlb150_dev_enable_ir_mlb(0);
++}
++
++static void mlb150_dev_init(void)
++{
++ u32 c0_val;
++ u32 ch_rx_mask = (1 << SYNC_RX_CL_AHB0) | (1 << CTRL_RX_CL_AHB0)
++ | (1 << ASYNC_RX_CL_AHB0) | (1 << ISOC_RX_CL_AHB0)
++ | (1 << SYNC_TX_CL_AHB0) | (1 << CTRL_TX_CL_AHB0)
++ | (1 << ASYNC_TX_CL_AHB0) | (1 << ISOC_TX_CL_AHB0);
++ u32 ch_tx_mask = (1 << (SYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (SYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_TX_CL_AHB1 - INT_AHB1_CH_START));
++
++ /* Disable EN bits */
++ mlb150_dev_exit();
++
++ /*
++ * Step 1. Initialize CTR and registers
++	 * a. Set all bits of the CTR (CAT, CDT, and ADT) to 0.
++ */
++ mlb150_dev_reset_whole_ctr();
++
++	/* b. Reset all MLB registers to their default (zero) values */
++ mlb150_dev_reset_all_regs();
++
++ /*
++ * Step 2, Configure the MediaLB interface
++ * Select pin mode and clock, 3-pin and 256fs
++ */
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~(MLBC0_MLBPEN | MLBC0_MLBCLK_MASK);
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ c0_val |= MLBC0_MLBEN;
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ /* Step 3, Configure the HBI interface */
++ __raw_writel(ch_rx_mask, mlb_base + REG_HCMR0);
++ __raw_writel(ch_tx_mask, mlb_base + REG_HCMR1);
++ __raw_writel(HCTL_EN, mlb_base + REG_HCTL);
++
++ mlb150_dev_init_ir_amba_ahb();
++
++ mlb150_dev_enable_ir_mlb(1);
++}
++
++static s32 mlb150_dev_unmute_syn_ch(u32 rx_ch, u32 rx_cl, u32 tx_ch, u32 tx_cl)
++{
++ u32 timeout = 10000;
++
++ /*
++ * Check that MediaLB clock is running (MLBC1.CLKM = 0)
++ * If MLBC1.CLKM = 1, clear the register bit, wait one
++ * APB or I/O clock cycle and repeat the check
++ */
++ while ((__raw_readl(mlb_base + REG_MLBC1) & MLBC1_CLKM)
++ && --timeout)
++ __raw_writel(~MLBC1_CLKM, mlb_base + REG_MLBC1);
++
++ if (0 == timeout)
++ return -ETIME;
++
++ timeout = 10000;
++ /* Poll for MLB lock (MLBC0.MLBLK = 1) */
++ while (!(__raw_readl(mlb_base + REG_MLBC0) & MLBC0_MLBLK)
++ && --timeout)
++ ;
++
++ if (0 == timeout)
++ return -ETIME;
++
++ /* Unmute synchronous channel(s) */
++ mlb150_dev_cat_mlb_write(rx_ch, CAT_CE | rx_cl);
++ mlb150_dev_cat_mlb_write(tx_ch,
++ CAT_CE | tx_cl | CAT_RNW);
++ mlb150_dev_cat_hbi_write(rx_cl,
++ CAT_CE | rx_cl | CAT_RNW);
++ mlb150_dev_cat_hbi_write(tx_cl, CAT_CE | tx_cl);
++
++ return 0;
++}
++
++/* In case the user calls channel shutdown, but rx or tx is not completed yet */
++static s32 mlb150_trans_complete_check(struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ s32 timeout = 1024;
++
++ while (timeout--) {
++ read_lock(&tx_rbuf->rb_lock);
++ if (!CIRC_CNT(tx_rbuf->head, tx_rbuf->tail, TRANS_RING_NODES)) {
++ read_unlock(&tx_rbuf->rb_lock);
++ break;
++ } else
++ read_unlock(&tx_rbuf->rb_lock);
++ }
++
++ if (timeout <= 0) {
++ pr_debug("TX complete check timeout!\n");
++ return -ETIME;
++ }
++
++ timeout = 1024;
++ while (timeout--) {
++ read_lock(&rx_rbuf->rb_lock);
++ if (!CIRC_CNT(rx_rbuf->head, rx_rbuf->tail, TRANS_RING_NODES)) {
++ read_unlock(&rx_rbuf->rb_lock);
++ break;
++ } else
++ read_unlock(&rx_rbuf->rb_lock);
++ }
++
++ if (timeout <= 0) {
++ pr_debug("RX complete check timeout!\n");
++ return -ETIME;
++ }
++
++ /*
++	 * The TX interrupt only indicates that the data has been sent
++	 * to the AHB bus, not that it has reached MITB. Thus we add
++	 * a delay here to let the transmission complete.
++ */
++ udelay(1000);
++
++ return 0;
++}
++
++/*
++ * Enable/Disable the MLB IRQ
++ */
++static void mxc_mlb150_irq_enable(struct mlb_data *drvdata, u8 enable)
++{
++ if (enable) {
++ enable_irq(drvdata->irq_ahb0);
++ enable_irq(drvdata->irq_ahb1);
++ enable_irq(drvdata->irq_mlb);
++ } else {
++ disable_irq(drvdata->irq_ahb0);
++ disable_irq(drvdata->irq_ahb1);
++ disable_irq(drvdata->irq_mlb);
++ }
++}
++
++/*
++ * Enable the MLB channel
++ */
++static s32 mlb_channel_enable(struct mlb_data *drvdata,
++ int chan_dev_id, int on)
++{
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_channel_info *tx_chinfo = &pdevinfo->channels[TX_CHANNEL];
++ struct mlb_channel_info *rx_chinfo = &pdevinfo->channels[RX_CHANNEL];
++ u32 tx_ch = tx_chinfo->address;
++ u32 rx_ch = rx_chinfo->address;
++ u32 tx_cl = tx_chinfo->cl;
++ u32 rx_cl = rx_chinfo->cl;
++ s32 ret = 0;
++
++ /*
++ * setup the direction, enable, channel type,
++ * mode select, channel address and mask buf start
++ */
++ if (on) {
++ u32 ctype = pdevinfo->channel_type;
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++ mlb150_dev_init_rfb(pdevinfo, rx_ch, tx_ch, ctype);
++
++ mlb150_dev_init_amba_ahb(pdevinfo, ctype);
++
++#ifdef DEBUG
++ mlb150_dev_dump_ctr_tbl(0, tx_chinfo->cl + 1);
++#endif
++		/* Synchronize and unmute synchronous channel */
++ if (MLB_CTYPE_SYNC == ctype) {
++ ret = mlb150_dev_unmute_syn_ch(rx_ch, rx_cl,
++ tx_ch, tx_cl);
++ if (ret)
++ return ret;
++ }
++
++ mlb150_dev_enable_ctr_write(0x0, ADT_RDY1 | ADT_DNE1 |
++ ADT_ERR1 | ADT_PS1 |
++ ADT_RDY2 | ADT_DNE2 | ADT_ERR2 | ADT_PS2,
++ 0xffffffff, 0xffffffff);
++
++ if (pdevinfo->fps >= CLK_2048FS)
++ mlb150_enable_pll(drvdata);
++
++ atomic_set(&pdevinfo->on, 1);
++
++#ifdef DEBUG
++ mlb150_dev_dump_reg();
++ mlb150_dev_dump_ctr_tbl(0, tx_chinfo->cl + 1);
++#endif
++ /* Init RX ADT */
++ mlb150_dev_pipo_start(&pdevinfo->rx_rbuf, rx_cl,
++ pdevinfo->rx_rbuf.phy_addrs[0]);
++ } else {
++ mlb150_dev_pipo_stop(&pdevinfo->rx_rbuf, rx_cl);
++
++ mlb150_dev_enable_dma_irq(0);
++ mlb150_dev_enable_ir_mlb(0);
++
++ mlb150_dev_reset_cat();
++
++ atomic_set(&pdevinfo->on, 0);
++
++ if (pdevinfo->fps >= CLK_2048FS)
++ mlb150_disable_pll(drvdata);
++ }
++
++ return 0;
++}
++
++/*
++ * MLB interrupt handler
++ */
++static void mlb_rx_isr(s32 ctype, u32 ahb_ch, struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ s32 head, tail, adt_sts;
++ u32 rx_buf_ptr;
++
++#ifdef DEBUG_RX
++ pr_debug("mxc_mlb150: mlb_rx_isr\n");
++#endif
++
++ read_lock(&rx_rbuf->rb_lock);
++
++ head = (rx_rbuf->head + 1) & (TRANS_RING_NODES - 1);
++ tail = ACCESS_ONCE(rx_rbuf->tail);
++ read_unlock(&rx_rbuf->rb_lock);
++
++ if (CIRC_SPACE(head, tail, TRANS_RING_NODES) >= 1) {
++ rx_buf_ptr = rx_rbuf->phy_addrs[head];
++
++ /* commit the item before incrementing the head */
++ smp_wmb();
++
++ write_lock(&rx_rbuf->rb_lock);
++ rx_rbuf->head = head;
++ write_unlock(&rx_rbuf->rb_lock);
++
++ /* wake up the reader */
++ wake_up_interruptible(&pdevinfo->rx_wq);
++ } else {
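++		/*
++		 * Ring full: hand the same slot back to the hardware without
++		 * advancing the head, so this packet is effectively dropped.
++		 */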
++ rx_buf_ptr = rx_rbuf->phy_addrs[head];
++		pr_debug("drop RX packet, due to no space, (%d,%d)\n",
++			head, tail);
++ }
++
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++ /* Set ADT for RX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, rx_buf_ptr);
++}
++
++static void mlb_tx_isr(s32 ctype, u32 ahb_ch, struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ s32 head, tail, adt_sts;
++ u32 tx_buf_ptr;
++
++ read_lock(&tx_rbuf->rb_lock);
++
++ head = ACCESS_ONCE(tx_rbuf->head);
++ tail = (tx_rbuf->tail + 1) & (TRANS_RING_NODES - 1);
++ read_unlock(&tx_rbuf->rb_lock);
++
++ smp_mb();
++ write_lock(&tx_rbuf->rb_lock);
++ tx_rbuf->tail = tail;
++ write_unlock(&tx_rbuf->rb_lock);
++
++ /* check the current tx buffer is available or not */
++ if (CIRC_CNT(head, tail, TRANS_RING_NODES) >= 1) {
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ tx_buf_ptr = tx_rbuf->phy_addrs[tail];
++
++ wake_up_interruptible(&pdevinfo->tx_wq);
++
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++ /* Set ADT for TX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, tx_buf_ptr);
++ }
++}
++
++static irqreturn_t mlb_ahb_isr(int irq, void *dev_id)
++{
++ u32 acsr0, hcer0;
++ u32 ch_mask = (1 << SYNC_RX_CL) | (1 << CTRL_RX_CL)
++ | (1 << ASYNC_RX_CL) | (1 << ISOC_RX_CL)
++ | (1 << SYNC_TX_CL) | (1 << CTRL_TX_CL)
++ | (1 << ASYNC_TX_CL) | (1 << ISOC_TX_CL);
++
++ /*
++ * Step 5, Read the ACSRn registers to determine which channel or
++ * channels are causing the interrupt
++ */
++ acsr0 = __raw_readl(mlb_base + REG_ACSR0);
++
++ hcer0 = __raw_readl(mlb_base + REG_HCER0);
++
++ /*
++ * Step 6, If ACTL.SCE = 1, write the result of step 5 back to ACSR0
++ * and ACSR1 to clear the interrupt
++ * We'll not set ACTL_SCE
++ */
++
++ if (ch_mask & hcer0)
++ pr_err("CH encounters an AHB error: 0x%x\n", hcer0);
++
++ if ((1 << SYNC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_SYNC, SYNC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_SYNC]);
++
++ if ((1 << CTRL_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_CTRL, CTRL_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_CTRL]);
++
++ if ((1 << ASYNC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_ASYNC, ASYNC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_ASYNC]);
++
++ if ((1 << ISOC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_ISOC, ISOC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_ISOC]);
++
++ if ((1 << SYNC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_SYNC, SYNC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_SYNC]);
++
++ if ((1 << CTRL_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_CTRL, CTRL_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_CTRL]);
++
++ if ((1 << ASYNC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_ASYNC, ASYNC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_ASYNC]);
++
++ if ((1 << ISOC_TX_CL) & acsr0)
++		mlb_tx_isr(MLB_CTYPE_ISOC, ISOC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_ISOC]);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mlb_isr(int irq, void *dev_id)
++{
++ u32 rx_int_sts, tx_int_sts, ms0,
++ ms1, tx_cis, rx_cis, ctype;
++ int minor;
++ u32 cdt_val[4] = { 0 };
++
++ /*
++ * Step 4, Read the MSn register to determine which channel(s)
++ * are causing the interrupt
++ */
++ ms0 = __raw_readl(mlb_base + REG_MS0);
++ ms1 = __raw_readl(mlb_base + REG_MS1);
++
++ /*
++	 * The MLB150_MS0 and MLB150_MS1 registers need to be cleared. The
++	 * spec says they should be cleared when the interrupt is enabled,
++	 * but in practice they must also be cleared in the ISR.
++ */
++ __raw_writel(0, mlb_base + REG_MS0);
++ __raw_writel(0, mlb_base + REG_MS1);
++
++ pr_debug("mxc_mlb150: mlb interrupt:0x%08x 0x%08x\n",
++ (u32)ms0, (u32)ms1);
++
++ for (minor = 0; minor < MLB_MINOR_DEVICES; minor++) {
++ struct mlb_dev_info *pdevinfo = &mlb_devinfo[minor];
++ u32 rx_mlb_ch = pdevinfo->channels[RX_CHANNEL].address;
++ u32 tx_mlb_ch = pdevinfo->channels[TX_CHANNEL].address;
++ u32 rx_mlb_cl = pdevinfo->channels[RX_CHANNEL].cl;
++ u32 tx_mlb_cl = pdevinfo->channels[TX_CHANNEL].cl;
++
++ tx_cis = rx_cis = 0;
++
++ ctype = pdevinfo->channel_type;
++ rx_int_sts = (rx_mlb_ch < 31) ? ms0 : ms1;
++ tx_int_sts = (tx_mlb_ch < 31) ? ms0 : ms1;
++
++ pr_debug("mxc_mlb150: channel interrupt: "
++ "tx %d: 0x%08x, rx %d: 0x%08x\n",
++ tx_mlb_ch, (u32)tx_int_sts, rx_mlb_ch, (u32)rx_int_sts);
++
++ /* Get tx channel interrupt status */
++ if (tx_int_sts & (1 << (tx_mlb_ch % 32))) {
++ mlb150_dev_cdt_read(tx_mlb_cl, cdt_val);
++ pr_debug("mxc_mlb150: TX_CH: %d, cdt_val[3]: 0x%08x, "
++ "cdt_val[2]: 0x%08x, "
++ "cdt_val[1]: 0x%08x, "
++ "cdt_val[0]: 0x%08x\n",
++ tx_mlb_ch, cdt_val[3], cdt_val[2],
++ cdt_val[1], cdt_val[0]);
++ switch (ctype) {
++ case MLB_CTYPE_SYNC:
++ tx_cis = (cdt_val[2] & ~CDT_SYNC_WSTS_MASK)
++ >> CDT_SYNC_WSTS_SHIFT;
++ /*
++ * Clear RSTS/WSTS errors to resume
++ * channel operation
++ * a. For synchronous channels: WSTS[3] = 0
++ */
++ cdt_val[2] &= ~(0x8 << CDT_SYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_CTRL:
++ case MLB_CTYPE_ASYNC:
++ tx_cis = (cdt_val[2] &
++ ~CDT_CTRL_ASYNC_WSTS_MASK)
++ >> CDT_CTRL_ASYNC_WSTS_SHIFT;
++ tx_cis = (cdt_val[3] & CDT_CTRL_ASYNC_WSTS_1) ?
++ (tx_cis | (0x1 << 4)) : tx_cis;
++ /*
++ * b. For async and ctrl channels:
++ * RSTS[4]/WSTS[4] = 0
++ * and RSTS[2]/WSTS[2] = 0
++ */
++ cdt_val[3] &= ~CDT_CTRL_ASYNC_WSTS_1;
++ cdt_val[2] &=
++ ~(0x4 << CDT_CTRL_ASYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_ISOC:
++ tx_cis = (cdt_val[2] & ~CDT_ISOC_WSTS_MASK)
++ >> CDT_ISOC_WSTS_SHIFT;
++ /* c. For isoc channels: WSTS[2:1] = 0x00 */
++ cdt_val[2] &= ~(0x6 << CDT_ISOC_WSTS_SHIFT);
++ break;
++ default:
++ break;
++ }
++ mlb150_dev_cdt_write(tx_mlb_ch, cdt_val);
++ }
++
++ /* Get rx channel interrupt status */
++ if (rx_int_sts & (1 << (rx_mlb_ch % 32))) {
++ mlb150_dev_cdt_read(rx_mlb_cl, cdt_val);
++ pr_debug("mxc_mlb150: RX_CH: %d, cdt_val[3]: 0x%08x, "
++ "cdt_val[2]: 0x%08x, "
++ "cdt_val[1]: 0x%08x, "
++ "cdt_val[0]: 0x%08x\n",
++ rx_mlb_ch, cdt_val[3], cdt_val[2],
++ cdt_val[1], cdt_val[0]);
++ switch (ctype) {
++			case MLB_CTYPE_SYNC:
++				rx_cis = (cdt_val[2] & ~CDT_SYNC_RSTS_MASK)
++					>> CDT_SYNC_RSTS_SHIFT;
++				cdt_val[2] &= ~(0x8 << CDT_SYNC_WSTS_SHIFT);
++				break;
++			case MLB_CTYPE_CTRL:
++			case MLB_CTYPE_ASYNC:
++				rx_cis =
++					(cdt_val[2] & ~CDT_CTRL_ASYNC_RSTS_MASK)
++					>> CDT_CTRL_ASYNC_RSTS_SHIFT;
++				rx_cis = (cdt_val[3] & CDT_CTRL_ASYNC_RSTS_1) ?
++					(rx_cis | (0x1 << 4)) : rx_cis;
++				cdt_val[3] &= ~CDT_CTRL_ASYNC_RSTS_1;
++				cdt_val[2] &=
++					~(0x4 << CDT_CTRL_ASYNC_RSTS_SHIFT);
++				break;
++			case MLB_CTYPE_ISOC:
++				rx_cis = (cdt_val[2] & ~CDT_ISOC_RSTS_MASK)
++					>> CDT_ISOC_RSTS_SHIFT;
++				cdt_val[2] &= ~(0x6 << CDT_ISOC_WSTS_SHIFT);
++ break;
++ default:
++ break;
++ }
++ mlb150_dev_cdt_write(rx_mlb_ch, cdt_val);
++ }
++
++ if (!tx_cis && !rx_cis)
++ continue;
++
++ /* fill exception event */
++ spin_lock(&pdevinfo->event_lock);
++ pdevinfo->ex_event |= (rx_cis << 16) | tx_cis;
++ spin_unlock(&pdevinfo->event_lock);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int mxc_mlb150_open(struct inode *inode, struct file *filp)
++{
++ int minor, ring_buf_size, buf_size, j, ret;
++ void __iomem *buf_addr;
++ ulong phy_addr;
++ struct mlb_dev_info *pdevinfo = NULL;
++ struct mlb_channel_info *pchinfo = NULL;
++ struct mlb_data *drvdata;
++
++ minor = MINOR(inode->i_rdev);
++ drvdata = container_of(inode->i_cdev, struct mlb_data, cdev);
++
++ if (minor < 0 || minor >= MLB_MINOR_DEVICES) {
++ pr_err("no device\n");
++ return -ENODEV;
++ }
++
++ /* open for each channel device */
++ if (atomic_cmpxchg(&mlb_devinfo[minor].opencnt, 0, 1) != 0) {
++ pr_err("busy\n");
++ return -EBUSY;
++ }
++
++ clk_prepare_enable(drvdata->clk_mlb3p);
++
++	/* initialize the MLB module */
++ mlb150_dev_init();
++
++ pdevinfo = &mlb_devinfo[minor];
++ pchinfo = &pdevinfo->channels[TX_CHANNEL];
++
++ ring_buf_size = pdevinfo->buf_size;
++ buf_size = ring_buf_size * (TRANS_RING_NODES * 2);
++ buf_addr = (void __iomem *)gen_pool_alloc(drvdata->iram_pool, buf_size);
++ if (buf_addr == NULL) {
++ ret = -ENOMEM;
++		pr_err("cannot allocate rx/tx buffers: %d\n", buf_size);
++ return ret;
++ }
++ phy_addr = gen_pool_virt_to_phys(drvdata->iram_pool, (ulong)buf_addr);
++ pr_debug("IRAM Range: Virt 0x%p - 0x%p, Phys 0x%x - 0x%x, size: 0x%x\n",
++ buf_addr, (buf_addr + buf_size - 1), (u32)phy_addr,
++ (u32)(phy_addr + buf_size - 1), buf_size);
++ pdevinfo->rbuf_base_virt = buf_addr;
++ pdevinfo->rbuf_base_phy = phy_addr;
++ drvdata->iram_size = buf_size;
++
++ memset(buf_addr, 0, buf_size);
++
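++	/*
++	 * The IRAM allocation holds 2 * TRANS_RING_NODES slots of
++	 * ring_buf_size bytes each: the first TRANS_RING_NODES slots form
++	 * the RX ring, the following TRANS_RING_NODES slots the TX ring.
++	 */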
++ for (j = 0; j < (TRANS_RING_NODES);
++ ++j, buf_addr += ring_buf_size, phy_addr += ring_buf_size) {
++ pdevinfo->rx_rbuf.virt_bufs[j] = buf_addr;
++ pdevinfo->rx_rbuf.phy_addrs[j] = phy_addr;
++ pr_debug("RX Ringbuf[%d]: 0x%p 0x%x\n",
++ j, buf_addr, (u32)phy_addr);
++ }
++ pdevinfo->rx_rbuf.unit_size = ring_buf_size;
++ pdevinfo->rx_rbuf.total_size = buf_size;
++ for (j = 0; j < (TRANS_RING_NODES);
++ ++j, buf_addr += ring_buf_size, phy_addr += ring_buf_size) {
++ pdevinfo->tx_rbuf.virt_bufs[j] = buf_addr;
++ pdevinfo->tx_rbuf.phy_addrs[j] = phy_addr;
++ pr_debug("TX Ringbuf[%d]: 0x%p 0x%x\n",
++ j, buf_addr, (u32)phy_addr);
++ }
++
++ pdevinfo->tx_rbuf.unit_size = ring_buf_size;
++ pdevinfo->tx_rbuf.total_size = buf_size;
++
++ /* reset the buffer read/write ptr */
++ pdevinfo->rx_rbuf.head = pdevinfo->rx_rbuf.tail = 0;
++ pdevinfo->tx_rbuf.head = pdevinfo->tx_rbuf.tail = 0;
++ pdevinfo->ex_event = 0;
++ pdevinfo->tx_ok = 0;
++
++ init_waitqueue_head(&pdevinfo->rx_wq);
++ init_waitqueue_head(&pdevinfo->tx_wq);
++
++ drvdata = container_of(inode->i_cdev, struct mlb_data, cdev);
++ drvdata->devinfo = pdevinfo;
++ mxc_mlb150_irq_enable(drvdata, 1);
++ filp->private_data = drvdata;
++
++ return 0;
++}
++
++static int mxc_mlb150_release(struct inode *inode, struct file *filp)
++{
++ int minor;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ minor = MINOR(inode->i_rdev);
++ mxc_mlb150_irq_enable(drvdata, 0);
++
++#ifdef DEBUG
++ mlb150_dev_dump_reg();
++ mlb150_dev_dump_ctr_tbl(0, pdevinfo->channels[TX_CHANNEL].cl + 1);
++#endif
++
++ gen_pool_free(drvdata->iram_pool,
++ (ulong)pdevinfo->rbuf_base_virt, drvdata->iram_size);
++
++ mlb150_dev_exit();
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ atomic_set(&pdevinfo->on, 0);
++
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++ /* decrease the open count */
++ atomic_set(&pdevinfo->opencnt, 0);
++
++ drvdata->devinfo = NULL;
++
++ return 0;
++}
++
++static long mxc_mlb150_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ void __user *argp = (void __user *)arg;
++ unsigned long flags, event;
++ int minor;
++
++ minor = MINOR(inode->i_rdev);
++
++ switch (cmd) {
++ case MLB_CHAN_SETADDR:
++ {
++ unsigned int caddr;
++ /* get channel address from user space */
++ if (copy_from_user(&caddr, argp, sizeof(caddr))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++ pdevinfo->channels[TX_CHANNEL].address =
++ (caddr >> 16) & 0xFFFF;
++ pdevinfo->channels[RX_CHANNEL].address = caddr & 0xFFFF;
++ pr_debug("mxc_mlb150: set ch addr, tx: %d, rx: %d\n",
++ pdevinfo->channels[TX_CHANNEL].address,
++ pdevinfo->channels[RX_CHANNEL].address);
++ break;
++ }
++
++ case MLB_CHAN_STARTUP:
++ if (atomic_read(&pdevinfo->on)) {
++			pr_debug("mxc_mlb150: channel already started up\n");
++ break;
++ }
++ if (mlb_channel_enable(drvdata, minor, 1))
++ return -EFAULT;
++ break;
++ case MLB_CHAN_SHUTDOWN:
++ if (atomic_read(&pdevinfo->on) == 0) {
++			pr_debug("mxc_mlb150: channel already shut down\n");
++ break;
++ }
++ mlb150_trans_complete_check(pdevinfo);
++ mlb_channel_enable(drvdata, minor, 0);
++ break;
++ case MLB_CHAN_GETEVENT:
++ /* get and clear the ex_event */
++ spin_lock_irqsave(&pdevinfo->event_lock, flags);
++ event = pdevinfo->ex_event;
++ pdevinfo->ex_event = 0;
++ spin_unlock_irqrestore(&pdevinfo->event_lock, flags);
++
++ if (event) {
++ if (copy_to_user(argp, &event, sizeof(event))) {
++ pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++ } else
++ return -EAGAIN;
++ break;
++ case MLB_SET_ISOC_BLKSIZE_188:
++ pdevinfo->isoc_blksz = 188;
++ pdevinfo->cdt_buf_dep = pdevinfo->adt_buf_dep =
++ pdevinfo->isoc_blksz * CH_ISOC_BLK_NUM;
++ break;
++ case MLB_SET_ISOC_BLKSIZE_196:
++ pdevinfo->isoc_blksz = 196;
++ pdevinfo->cdt_buf_dep = pdevinfo->adt_buf_dep =
++ pdevinfo->isoc_blksz * CH_ISOC_BLK_NUM;
++ break;
++ case MLB_SET_SYNC_QUAD:
++ {
++ u32 quad;
++
++ if (copy_from_user(&quad, argp, sizeof(quad))) {
++ pr_err("mxc_mlb150: get quad number "
++ "from user failed\n");
++ return -EFAULT;
++ }
++ if (quad <= 0 || quad > 3) {
++			pr_err("mxc_mlb150: invalid quadlet count, "
++				"quadlets in sync mode can "
++				"only be 1, 2 or 3\n");
++ return -EINVAL;
++ }
++ pdevinfo->sync_quad = quad;
++ /* Each quadlets is 4 bytes */
++ pdevinfo->cdt_buf_dep = quad * 4 * 4;
++ pdevinfo->adt_buf_dep =
++ pdevinfo->cdt_buf_dep * CH_SYNC_ADT_BUF_MULTI;
++ }
++ break;
++ case MLB_SET_FPS:
++ {
++ u32 fps, c0_val;
++
++ /* get fps from user space */
++ if (copy_from_user(&fps, argp, sizeof(fps))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~MLBC0_MLBCLK_MASK;
++
++ /* check fps value */
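++		/*
++		 * Map the requested MediaLB speed (frames per second) to the
++		 * MLBC0.MLBCLK clock-speed field.  Speeds of 2048 fs and
++		 * above additionally require the MLB PLL (clk_mlb6p), which
++		 * is enabled when the channel is started up.
++		 */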
++ switch (fps) {
++ case 256:
++ case 512:
++ case 1024:
++ pdevinfo->fps = fps >> 9;
++ c0_val &= ~MLBC0_MLBPEN;
++ c0_val |= (fps >> 9)
++ << MLBC0_MLBCLK_SHIFT;
++
++ if (1024 == fps) {
++ /*
++ * Invert output clock phase
++ * in 1024 fps
++ */
++ __raw_writel(0x1,
++ mlb_base + REG_MLBPC2);
++ }
++ break;
++ case 2048:
++ case 3072:
++ case 4096:
++ pdevinfo->fps = (fps >> 10) + 1;
++ c0_val |= ((fps >> 10) + 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ case 6144:
++ pdevinfo->fps = fps >> 10;
++ c0_val |= ((fps >> 10) + 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ case 8192:
++ pdevinfo->fps = (fps >> 10) - 1;
++ c0_val |= ((fps >> 10) - 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ default:
++ pr_debug("mxc_mlb150: invalid fps argument: %d\n",
++ fps);
++ return -EINVAL;
++ }
++
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ pr_debug("mxc_mlb150: set fps to %d, MLBC0: 0x%08x\n",
++ fps,
++ (u32)__raw_readl(mlb_base + REG_MLBC0));
++
++ break;
++ }
++
++ case MLB_GET_VER:
++ {
++ u32 version;
++
++ /* get MLB device module version */
++ version = 0x03030003;
++
++ pr_debug("mxc_mlb150: get version: 0x%08x\n",
++ version);
++
++ if (copy_to_user(argp, &version, sizeof(version))) {
++ pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++ break;
++ }
++
++ case MLB_SET_DEVADDR:
++ {
++ u32 c1_val;
++ u8 devaddr;
++
++ /* get MLB device address from user space */
++ if (copy_from_user
++ (&devaddr, argp, sizeof(unsigned char))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ c1_val = __raw_readl(mlb_base + REG_MLBC1);
++ c1_val &= ~MLBC1_NDA_MASK;
++ c1_val |= devaddr << MLBC1_NDA_SHIFT;
++ __raw_writel(c1_val, mlb_base + REG_MLBC1);
++ pr_debug("mxc_mlb150: set dev addr, dev addr: %d, "
++ "MLBC1: 0x%08x\n", devaddr,
++ (u32)__raw_readl(mlb_base + REG_MLBC1));
++
++ break;
++ }
++
++ case MLB_IRQ_DISABLE:
++ {
++ disable_irq(drvdata->irq_mlb);
++ break;
++ }
++
++ case MLB_IRQ_ENABLE:
++ {
++ enable_irq(drvdata->irq_mlb);
++ break;
++ }
++ default:
++ pr_info("mxc_mlb150: Invalid ioctl command\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/*
++ * MLB read routine
++ * Read the currently received data from the queued buffer,
++ * and release the buffer so the hw can fill it with new ingress data.
++ */
++static ssize_t mxc_mlb150_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ int size;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++
++ head = ACCESS_ONCE(rx_rbuf->head);
++ tail = rx_rbuf->tail;
++
++ read_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ /* check the current rx buffer is available or not */
++ if (0 == CIRC_CNT(head, tail, TRANS_RING_NODES)) {
++
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ do {
++ DEFINE_WAIT(__wait);
++
++ for (;;) {
++ prepare_to_wait(&pdevinfo->rx_wq,
++ &__wait, TASK_INTERRUPTIBLE);
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ if (CIRC_CNT(rx_rbuf->head, rx_rbuf->tail,
++ TRANS_RING_NODES) > 0) {
++ read_unlock_irqrestore(&rx_rbuf->rb_lock,
++ flags);
++ break;
++ }
++ read_unlock_irqrestore(&rx_rbuf->rb_lock,
++ flags);
++
++	 * Clear the DNE/ERR status bits and hand the next buffer to the
++	 * page that is now free: RDY2/BA2 if page 1 just completed
++	 * (DNE1 set), otherwise RDY1/BA1
++ continue;
++ }
++ return -ERESTARTSYS;
++ }
++ finish_wait(&pdevinfo->rx_wq, &__wait);
++ } while (0);
++ }
++
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ size = pdevinfo->adt_buf_dep;
++ if (size > count) {
++ /* the user buffer is too small */
++ pr_warning
++		pr_warning
++		    ("mxc_mlb150: received data size is bigger than the "
++			"user buffer, size: %d, count: %d\n", size, count);
++ }
++
++ /* extract one item from the buffer */
++ if (copy_to_user(buf, rx_rbuf->virt_bufs[tail], size)) {
++		pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++
++ /* finish reading descriptor before incrementing tail */
++ smp_mb();
++
++ write_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ rx_rbuf->tail = (tail + 1) & (TRANS_RING_NODES - 1);
++ write_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ *f_pos = 0;
++
++ return size;
++}
++
++/*
++ * MLB write routine
++ * Copy the user data to tx channel buffer,
++ * and prepare the channel current/next buffer ptr.
++ */
++static ssize_t mxc_mlb150_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ s32 ret = 0;
++ struct mlb_channel_info *pchinfo = NULL;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++ /*
++ * minor = MINOR(filp->f_dentry->d_inode->i_rdev);
++ */
++ pchinfo = &pdevinfo->channels[TX_CHANNEL];
++
++ if (count > pdevinfo->buf_size) {
++ /* too many data to write */
++		pr_warning("mxc_mlb150: write data exceeds the channel buffer\n");
++ return -EFBIG;
++ }
++
++ *f_pos = 0;
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++
++ head = tx_rbuf->head;
++ tail = ACCESS_ONCE(tx_rbuf->tail);
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++ if (0 == CIRC_SPACE(head, tail, TRANS_RING_NODES)) {
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ do {
++ DEFINE_WAIT(__wait);
++
++ for (;;) {
++ prepare_to_wait(&pdevinfo->tx_wq,
++ &__wait, TASK_INTERRUPTIBLE);
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ if (CIRC_SPACE(tx_rbuf->head, tx_rbuf->tail,
++ TRANS_RING_NODES) > 0) {
++ read_unlock_irqrestore(&tx_rbuf->rb_lock,
++ flags);
++ break;
++ }
++ read_unlock_irqrestore(&tx_rbuf->rb_lock,
++ flags);
++
++ if (!signal_pending(current)) {
++ schedule();
++ continue;
++ }
++ return -ERESTARTSYS;
++ }
++ finish_wait(&pdevinfo->tx_wq, &__wait);
++ } while (0);
++ }
++
++ if (copy_from_user((void *)tx_rbuf->virt_bufs[head], buf, count)) {
++		pr_err("mxc_mlb150: copy from user failed\n");
++ ret = -EFAULT;
++ goto out;
++ }
++
++ write_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ smp_wmb();
++ tx_rbuf->head = (head + 1) & (TRANS_RING_NODES - 1);
++ write_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
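++	/*
++	 * If the ring was empty before this write, no TX transfer is in
++	 * flight, so hand the new buffer to the ADT here; otherwise the TX
++	 * completion interrupt (mlb_tx_isr) will queue it.
++	 */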
++ if (0 == CIRC_CNT(head, tail, TRANS_RING_NODES)) {
++ u32 tx_buf_ptr, ahb_ch;
++ s32 adt_sts;
++ u32 ctype = pdevinfo->channel_type;
++
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ tx_buf_ptr = tx_rbuf->phy_addrs[tail];
++
++ ahb_ch = pdevinfo->channels[TX_CHANNEL].cl;
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++
++ /* Set ADT for TX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, tx_buf_ptr);
++ }
++
++ ret = count;
++out:
++ return ret;
++}
++
++static unsigned int mxc_mlb150_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ int minor;
++ unsigned int ret = 0;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++
++ minor = MINOR(filp->f_dentry->d_inode->i_rdev);
++
++ poll_wait(filp, &pdevinfo->rx_wq, wait);
++ poll_wait(filp, &pdevinfo->tx_wq, wait);
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ head = tx_rbuf->head;
++ tail = tx_rbuf->tail;
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++	/* check whether the tx buffer is available */
++ if (CIRC_SPACE(head, tail, TRANS_RING_NODES) >= 1)
++ ret |= POLLOUT | POLLWRNORM;
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ head = rx_rbuf->head;
++ tail = rx_rbuf->tail;
++ read_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ /* check the rx buffer filled or not */
++ if (CIRC_CNT(head, tail, TRANS_RING_NODES) >= 1)
++ ret |= POLLIN | POLLRDNORM;
++
++
++ /* check the exception event */
++ if (pdevinfo->ex_event)
++ ret |= POLLIN | POLLRDNORM;
++
++ return ret;
++}
++
++/*
++ * char dev file operations structure
++ */
++static const struct file_operations mxc_mlb150_fops = {
++
++ .owner = THIS_MODULE,
++ .open = mxc_mlb150_open,
++ .release = mxc_mlb150_release,
++ .unlocked_ioctl = mxc_mlb150_ioctl,
++ .poll = mxc_mlb150_poll,
++ .read = mxc_mlb150_read,
++ .write = mxc_mlb150_write,
++};
++
++static struct platform_device_id imx_mlb150_devtype[] = {
++ {
++ .name = "imx6q-mlb150",
++ .driver_data = 0,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_mlb150_devtype);
++
++static const struct of_device_id mlb150_imx_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mlb150", .data = &imx_mlb150_devtype[0], },
++ { /* sentinel */ }
++};
++
++/*
++ * This function is called whenever the MLB device is detected.
++ */
++static int mxc_mlb150_probe(struct platform_device *pdev)
++{
++ int ret, mlb_major, i;
++ struct mlb_data *drvdata;
++ struct resource *res;
++ struct device_node *np = pdev->dev.of_node;
++
++ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct mlb_data),
++ GFP_KERNEL);
++ if (!drvdata) {
++ dev_err(&pdev->dev, "can't allocate enough memory\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Register MLB lld as four character devices
++ */
++ ret = alloc_chrdev_region(&drvdata->firstdev, 0,
++ MLB_MINOR_DEVICES, "mxc_mlb150");
++ if (ret < 0) {
++ dev_err(&pdev->dev, "alloc region error\n");
++ goto err_reg;
++ }
++ mlb_major = MAJOR(drvdata->firstdev);
++ dev_dbg(&pdev->dev, "MLB device major: %d\n", mlb_major);
++
++ cdev_init(&drvdata->cdev, &mxc_mlb150_fops);
++ drvdata->cdev.owner = THIS_MODULE;
++
++ ret = cdev_add(&drvdata->cdev, drvdata->firstdev, MLB_MINOR_DEVICES);
++ if (ret) {
++ dev_err(&pdev->dev, "can't add cdev\n");
++ goto err_reg;
++ }
++
++ /* create class and device for udev information */
++ drvdata->class = class_create(THIS_MODULE, "mlb150");
++ if (IS_ERR(drvdata->class)) {
++ dev_err(&pdev->dev, "failed to create device class\n");
++ ret = -ENOMEM;
++ goto err_class;
++ }
++
++ for (i = 0; i < MLB_MINOR_DEVICES; i++) {
++ struct device *class_dev;
++
++ class_dev = device_create(drvdata->class, NULL,
++ MKDEV(mlb_major, i),
++ NULL, mlb_devinfo[i].dev_name);
++ if (IS_ERR(class_dev)) {
++ dev_err(&pdev->dev, "failed to create mlb150 %s"
++ " class device\n", mlb_devinfo[i].dev_name);
++ ret = -ENOMEM;
++ goto err_dev;
++ }
++ }
++
++ /* ahb0 irq */
++ drvdata->irq_ahb0 = platform_get_irq(pdev, 1);
++ if (drvdata->irq_ahb0 < 0) {
++ dev_err(&pdev->dev, "No ahb0 irq line provided\n");
++ goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "ahb0_irq: %d\n", drvdata->irq_ahb0);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_ahb0, mlb_ahb_isr,
++ 0, "mlb_ahb0", NULL)) {
++ dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_ahb0);
++ goto err_dev;
++ }
++
++ /* ahb1 irq */
++ drvdata->irq_ahb1 = platform_get_irq(pdev, 2);
++ if (drvdata->irq_ahb1 < 0) {
++ dev_err(&pdev->dev, "No ahb1 irq line provided\n");
++ goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "ahb1_irq: %d\n", drvdata->irq_ahb1);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_ahb1, mlb_ahb_isr,
++ 0, "mlb_ahb1", NULL)) {
++ dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_ahb1);
++ goto err_dev;
++ }
++
++ /* mlb irq */
++ drvdata->irq_mlb = platform_get_irq(pdev, 0);
++ if (drvdata->irq_mlb < 0) {
++ dev_err(&pdev->dev, "No mlb irq line provided\n");
++ goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "mlb_irq: %d\n", drvdata->irq_mlb);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_mlb, mlb_isr,
++ 0, "mlb", NULL)) {
++ dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_mlb);
++ goto err_dev;
++ }
++
++ /* ioremap from phy mlb to kernel space */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "can't get device resources\n");
++ ret = -ENOENT;
++ goto err_dev;
++ }
++ mlb_base = devm_request_and_ioremap(&pdev->dev, res);
++ dev_dbg(&pdev->dev, "mapped base address: 0x%08x\n", (u32)mlb_base);
++ if (IS_ERR(mlb_base)) {
++ dev_err(&pdev->dev,
++ "failed to get ioremap base\n");
++ ret = PTR_ERR(mlb_base);
++ goto err_dev;
++ }
++ drvdata->membase = mlb_base;
++
++#ifdef CONFIG_REGULATOR
++ drvdata->nvcc = devm_regulator_get(&pdev->dev, "reg_nvcc");
++ if (!IS_ERR(drvdata->nvcc)) {
++ regulator_set_voltage(drvdata->nvcc, 2500000, 2500000);
++		dev_err(&pdev->dev, "enable regulator\n");
++ ret = regulator_enable(drvdata->nvcc);
++ if (ret) {
++			dev_err(&pdev->dev, "failed to enable nvcc regulator\n");
++ goto err_dev;
++ }
++ }
++#endif
++
++ /* enable clock */
++ drvdata->clk_mlb3p = devm_clk_get(&pdev->dev, "mlb");
++ if (IS_ERR(drvdata->clk_mlb3p)) {
++ dev_err(&pdev->dev, "unable to get mlb clock\n");
++ ret = PTR_ERR(drvdata->clk_mlb3p);
++ goto err_dev;
++ }
++
++ drvdata->clk_mlb6p = devm_clk_get(&pdev->dev, "pll8_mlb");
++ if (IS_ERR(drvdata->clk_mlb6p)) {
++ dev_err(&pdev->dev, "unable to get mlb pll clock\n");
++ ret = PTR_ERR(drvdata->clk_mlb6p);
++ goto err_dev;
++ }
++
++
++ drvdata->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!drvdata->iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ ret = -ENOMEM;
++ goto err_dev;
++ }
++
++ drvdata->devinfo = NULL;
++ mxc_mlb150_irq_enable(drvdata, 0);
++ platform_set_drvdata(pdev, drvdata);
++ return 0;
++
++err_dev:
++ for (--i; i >= 0; i--)
++ device_destroy(drvdata->class, MKDEV(mlb_major, i));
++
++ class_destroy(drvdata->class);
++err_class:
++ cdev_del(&drvdata->cdev);
++err_reg:
++ unregister_chrdev_region(drvdata->firstdev, MLB_MINOR_DEVICES);
++
++ return ret;
++}
++
++static int mxc_mlb150_remove(struct platform_device *pdev)
++{
++ int i;
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt))
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++
++ /* disable mlb power */
++#ifdef CONFIG_REGULATOR
++ if (!IS_ERR(drvdata->nvcc))
++ regulator_disable(drvdata->nvcc);
++#endif
++
++ /* destroy mlb device class */
++ for (i = MLB_MINOR_DEVICES - 1; i >= 0; i--)
++ device_destroy(drvdata->class,
++ MKDEV(MAJOR(drvdata->firstdev), i));
++ class_destroy(drvdata->class);
++
++ cdev_del(&drvdata->cdev);
++
++ /* Unregister the two MLB devices */
++ unregister_chrdev_region(drvdata->firstdev, MLB_MINOR_DEVICES);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int mxc_mlb150_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt)) {
++ mlb150_dev_exit();
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++ }
++
++ return 0;
++}
++
++static int mxc_mlb150_resume(struct platform_device *pdev)
++{
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt)) {
++ clk_prepare_enable(drvdata->clk_mlb3p);
++ mlb150_dev_init();
++ }
++
++ if (pdevinfo && atomic_read(&pdevinfo->on) &&
++ (pdevinfo->fps >= CLK_2048FS))
++ clk_prepare_enable(drvdata->clk_mlb6p);
++
++ return 0;
++}
++#else
++#define mxc_mlb150_suspend NULL
++#define mxc_mlb150_resume NULL
++#endif
++
++/*
++ * platform driver structure for MLB
++ */
++static struct platform_driver mxc_mlb150_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = mlb150_imx_dt_ids,
++ },
++ .probe = mxc_mlb150_probe,
++ .remove = mxc_mlb150_remove,
++ .suspend = mxc_mlb150_suspend,
++ .resume = mxc_mlb150_resume,
++ .id_table = imx_mlb150_devtype,
++};
++
++static int __init mxc_mlb150_init(void)
++{
++ return platform_driver_register(&mxc_mlb150_driver);
++}
++
++static void __exit mxc_mlb150_exit(void)
++{
++ platform_driver_unregister(&mxc_mlb150_driver);
++}
++
++module_init(mxc_mlb150_init);
++module_exit(mxc_mlb150_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MLB150 low level driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/mxc/vpu/Kconfig linux-3.14.40/drivers/mxc/vpu/Kconfig
+--- linux-3.14.40.orig/drivers/mxc/vpu/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/vpu/Kconfig 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,31 @@
++#
++# Codec configuration
++#
++
++menu "MXC VPU(Video Processing Unit) support"
++
++config MXC_VPU
++ tristate "Support for MXC VPU(Video Processing Unit)"
++ depends on (SOC_IMX27 || SOC_IMX5 || SOC_IMX6Q)
++ default y
++ ---help---
++ The VPU codec device provides codec function for H.264/MPEG4/H.263,
++ as well as MPEG2/VC-1/DivX on some platforms.
++
++config MXC_VPU_DEBUG
++ bool "MXC VPU debugging"
++ depends on MXC_VPU != n
++ help
++ This is an option for the developers; most people should
++ say N here. This enables MXC VPU driver debugging.
++
++config MX6_VPU_352M
++ bool "MX6 VPU 352M"
++ depends on MXC_VPU
++ default n
++ help
++	  Increase the VPU frequency to 352MHz. This config disables dynamic
++	  bus frequency adjustment, and the lowest CPU setpoint will be 352MHz.
++	  This config is used for special VPU use cases.
++
++endmenu
+diff -Nur linux-3.14.40.orig/drivers/mxc/vpu/Makefile linux-3.14.40/drivers/mxc/vpu/Makefile
+--- linux-3.14.40.orig/drivers/mxc/vpu/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/vpu/Makefile 2015-05-01 14:57:59.615427001 -0500
+@@ -0,0 +1,9 @@
++#
++# Makefile for the VPU drivers.
++#
++
++obj-$(CONFIG_MXC_VPU) += mxc_vpu.o
++
++ifeq ($(CONFIG_MXC_VPU_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -Nur linux-3.14.40.orig/drivers/mxc/vpu/mxc_vpu.c linux-3.14.40/drivers/mxc/vpu/mxc_vpu.c
+--- linux-3.14.40.orig/drivers/mxc/vpu/mxc_vpu.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/mxc/vpu/mxc_vpu.c 2015-05-01 14:57:59.619427001 -0500
+@@ -0,0 +1,1342 @@
++/*
++ * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_vpu.c
++ *
++ * @brief VPU system initialization and file operation implementation
++ *
++ * @ingroup VPU
++ */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/kdev_t.h>
++#include <linux/dma-mapping.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++#include <linux/vmalloc.h>
++#include <linux/regulator/consumer.h>
++#include <linux/page-flags.h>
++#include <linux/mm_types.h>
++#include <linux/types.h>
++#include <linux/memblock.h>
++#include <linux/memory.h>
++#include <linux/version.h>
++#include <asm/page.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++#include <linux/module.h>
++#include <linux/pm_runtime.h>
++#include <linux/sizes.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <linux/iram_alloc.h>
++#include <mach/clock.h>
++#include <mach/hardware.h>
++#include <mach/mxc_vpu.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/genalloc.h>
++#include <linux/mxc_vpu.h>
++#include <linux/of.h>
++#include <linux/reset.h>
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++#include <mach/busfreq.h>
++#include <mach/common.h>
++#else
++#include <asm/sizes.h>
++#endif
++
++/* Define a new pgprot that combines uncached and XN (never executable) */
++#define pgprot_noncachedxn(prot) \
++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
++
++struct vpu_priv {
++ struct fasync_struct *async_queue;
++ struct work_struct work;
++ struct workqueue_struct *workqueue;
++ struct mutex lock;
++};
++
++/* To track the allocated memory buffer */
++struct memalloc_record {
++ struct list_head list;
++ struct vpu_mem_desc mem;
++};
++
++struct iram_setting {
++ u32 start;
++ u32 end;
++};
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static struct gen_pool *iram_pool;
++static u32 iram_base;
++#endif
++
++static LIST_HEAD(head);
++
++static int vpu_major;
++static int vpu_clk_usercount;
++static struct class *vpu_class;
++static struct vpu_priv vpu_data;
++static u8 open_count;
++static struct clk *vpu_clk;
++static struct vpu_mem_desc bitwork_mem = { 0 };
++static struct vpu_mem_desc pic_para_mem = { 0 };
++static struct vpu_mem_desc user_data_mem = { 0 };
++static struct vpu_mem_desc share_mem = { 0 };
++static struct vpu_mem_desc vshare_mem = { 0 };
++
++static void __iomem *vpu_base;
++static int vpu_ipi_irq;
++static u32 phy_vpu_base_addr;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++static phys_addr_t top_address_DRAM;
++static struct mxc_vpu_platform_data *vpu_plat;
++#endif
++
++static struct device *vpu_dev;
++
++/* IRAM setting */
++static struct iram_setting iram;
++
++/* implement the blocking ioctl */
++static int irq_status;
++static int codec_done;
++static wait_queue_head_t vpu_queue;
++
++#ifdef CONFIG_SOC_IMX6Q
++#define MXC_VPU_HAS_JPU
++#endif
++
++#ifdef MXC_VPU_HAS_JPU
++static int vpu_jpu_irq;
++#endif
++
++#ifdef CONFIG_PM
++static unsigned int regBk[64];
++static unsigned int pc_before_suspend;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static struct regulator *vpu_regulator;
++#endif
++static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
++
++#define READ_REG(x) readl_relaxed(vpu_base + x)
++#define WRITE_REG(val, x) writel_relaxed(val, vpu_base + x)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++/* redirect to static functions */
++static int cpu_is_mx6dl(void)
++{
++ int ret;
++ ret = of_machine_is_compatible("fsl,imx6dl");
++ return ret;
++}
++
++static int cpu_is_mx6q(void)
++{
++ int ret;
++ ret = of_machine_is_compatible("fsl,imx6q");
++ return ret;
++}
++#endif
++
++static void vpu_reset(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ device_reset(vpu_dev);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ imx_src_reset_vpu();
++#else
++ if (vpu_plat->reset)
++ vpu_plat->reset();
++#endif
++}
++
++static long vpu_power_get(bool on)
++{
++ long ret = 0;
++
++ if (on) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
++ ret = IS_ERR(vpu_regulator);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ vpu_regulator = devm_regulator_get(vpu_dev, "pu");
++ ret = IS_ERR(vpu_regulator);
++#endif
++ } else {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (!IS_ERR(vpu_regulator))
++ regulator_put(vpu_regulator);
++#endif
++ }
++ return ret;
++}
++
++static void vpu_power_up(bool on)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ int ret = 0;
++
++ if (on) {
++ if (!IS_ERR(vpu_regulator)) {
++ ret = regulator_enable(vpu_regulator);
++ if (ret)
++ dev_err(vpu_dev, "failed to power up vpu\n");
++ }
++ } else {
++ if (!IS_ERR(vpu_regulator)) {
++ ret = regulator_disable(vpu_regulator);
++ if (ret)
++ dev_err(vpu_dev, "failed to power down vpu\n");
++ }
++ }
++#else
++ imx_gpc_power_up_pu(on);
++#endif
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static int cpu_is_mx53(void)
++{
++ return 0;
++}
++
++static int cpu_is_mx51(void)
++{
++ return 0;
++}
++
++#define VM_RESERVED 0
++#endif
++
++/*!
++ * Private function to alloc dma buffer
++ * @return status 0 success.
++ */
++static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
++{
++ mem->cpu_addr = (unsigned long)
++ dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
++ (dma_addr_t *) (&mem->phy_addr),
++ GFP_DMA | GFP_KERNEL);
++ dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
++ if ((void *)(mem->cpu_addr) == NULL) {
++ dev_err(vpu_dev, "Physical memory allocation error!\n");
++ return -1;
++ }
++ return 0;
++}
++
++/*!
++ * Private function to free dma buffer
++ */
++static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
++{
++ if (mem->cpu_addr != 0) {
++ dma_free_coherent(0, PAGE_ALIGN(mem->size),
++ (void *)mem->cpu_addr, mem->phy_addr);
++ }
++}
++
++/*!
++ * Private function to free buffers
++ * @return 0 on success.
++ */
++static int vpu_free_buffers(void)
++{
++ struct memalloc_record *rec, *n;
++ struct vpu_mem_desc mem;
++
++ list_for_each_entry_safe(rec, n, &head, list) {
++ mem = rec->mem;
++ if (mem.cpu_addr != 0) {
++ vpu_free_dma_buffer(&mem);
++ dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
++ /* delete from list */
++ list_del(&rec->list);
++ kfree(rec);
++ }
++ }
++
++ return 0;
++}
++
++static inline void vpu_worker_callback(struct work_struct *w)
++{
++ struct vpu_priv *dev = container_of(w, struct vpu_priv,
++ work);
++
++ if (dev->async_queue)
++ kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
++
++ irq_status = 1;
++ /*
++ * The clock is gated on when decode/encode starts; gate it off
++ * when the codec is done.
++ */
++ if (codec_done)
++ codec_done = 0;
++
++ wake_up_interruptible(&vpu_queue);
++}
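++
++/*
++ * Interrupt flow: the IRQ handlers below only latch codec_done and queue
++ * this worker; the worker then sends SIGIO to fasync listeners, sets
++ * irq_status and wakes any process sleeping in the VPU_IOC_WAIT4INT ioctl.
++ */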
++
++/*!
++ * @brief vpu interrupt handler
++ */
++static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
++{
++ struct vpu_priv *dev = dev_id;
++ unsigned long reg;
++
++ reg = READ_REG(BIT_INT_REASON);
++ if (reg & 0x8)
++ codec_done = 1;
++ WRITE_REG(0x1, BIT_INT_CLEAR);
++
++ queue_work(dev->workqueue, &dev->work);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * @brief vpu jpu interrupt handler
++ */
++#ifdef MXC_VPU_HAS_JPU
++static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
++{
++ struct vpu_priv *dev = dev_id;
++ unsigned long reg;
++
++ reg = READ_REG(MJPEG_PIC_STATUS_REG);
++ if (reg & 0x3)
++ codec_done = 1;
++
++ queue_work(dev->workqueue, &dev->work);
++
++ return IRQ_HANDLED;
++}
++#endif
++
++/*!
++ * @brief Check whether the physical memory about to be passed to the VPU
++ * is valid. Passing a wrong address (such as a virtual address) to the
++ * VPU is known to hang the system.
++ *
++ * @return true if the address is a valid physical memory address,
++ * false otherwise.
++ */
++bool vpu_is_valid_phy_memory(u32 paddr)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (paddr > top_address_DRAM)
++ return false;
++#endif
++
++ return true;
++}
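++
++/*
++ * The check above is only meaningful on kernels before 3.5, where
++ * top_address_DRAM is captured at module init time; on newer kernels the
++ * address is always reported as valid. It backs the VPU_IOC_PHYMEM_CHECK
++ * ioctl below.
++ */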
++
++/*!
++ * @brief open function for vpu file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_open(struct inode *inode, struct file *filp)
++{
++
++ mutex_lock(&vpu_data.lock);
++
++ if (open_count++ == 0) {
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_get_sync(vpu_dev);
++#endif
++ vpu_power_up(true);
++
++#ifdef CONFIG_SOC_IMX6Q
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_CUR_PC))
++ dev_dbg(vpu_dev, "Not power off before vpu open!\n");
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++#endif
++ }
++
++ filp->private_data = (void *)(&vpu_data);
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++/*!
++ * @brief IO ctrl function for vpu file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long vpu_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0;
++
++ switch (cmd) {
++ case VPU_IOC_PHYMEM_ALLOC:
++ {
++ struct memalloc_record *rec;
++
++ rec = kzalloc(sizeof(*rec), GFP_KERNEL);
++ if (!rec)
++ return -ENOMEM;
++
++ ret = copy_from_user(&(rec->mem),
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ kfree(rec);
++ return -EFAULT;
++ }
++
++ dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
++ rec->mem.size);
++
++ ret = vpu_alloc_dma_buffer(&(rec->mem));
++ if (ret == -1) {
++ kfree(rec);
++ dev_err(vpu_dev,
++ "Physical memory allocation error!\n");
++ break;
++ }
++ ret = copy_to_user((void __user *)arg, &(rec->mem),
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ kfree(rec);
++ ret = -EFAULT;
++ break;
++ }
++
++ mutex_lock(&vpu_data.lock);
++ list_add(&rec->list, &head);
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ case VPU_IOC_PHYMEM_FREE:
++ {
++ struct memalloc_record *rec, *n;
++ struct vpu_mem_desc vpu_mem;
++
++ ret = copy_from_user(&vpu_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret)
++ return -EACCES;
++
++ dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = 0x%x\n",
++ vpu_mem.cpu_addr);
++ if ((void *)vpu_mem.cpu_addr != NULL)
++ vpu_free_dma_buffer(&vpu_mem);
++
++ mutex_lock(&vpu_data.lock);
++ list_for_each_entry_safe(rec, n, &head, list) {
++ if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
++ /* delete from list */
++ list_del(&rec->list);
++ kfree(rec);
++ break;
++ }
++ }
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ case VPU_IOC_WAIT4INT:
++ {
++ u_long timeout = (u_long) arg;
++ if (!wait_event_interruptible_timeout
++ (vpu_queue, irq_status != 0,
++ msecs_to_jiffies(timeout))) {
++ dev_warn(vpu_dev, "VPU blocking: timeout.\n");
++ ret = -ETIME;
++ } else if (signal_pending(current)) {
++ dev_warn(vpu_dev, "VPU interrupt received.\n");
++ ret = -ERESTARTSYS;
++ } else
++ irq_status = 0;
++ break;
++ }
++ case VPU_IOC_IRAM_SETTING:
++ {
++ ret = copy_to_user((void __user *)arg, &iram,
++ sizeof(struct iram_setting));
++ if (ret)
++ ret = -EFAULT;
++
++ break;
++ }
++ case VPU_IOC_CLKGATE_SETTING:
++ {
++ u32 clkgate_en;
++
++ if (get_user(clkgate_en, (u32 __user *) arg))
++ return -EFAULT;
++
++ if (clkgate_en) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ atomic_inc(&clk_cnt_from_ioc);
++ } else {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ atomic_dec(&clk_cnt_from_ioc);
++ }
++
++ break;
++ }
++ case VPU_IOC_GET_SHARE_MEM:
++ {
++ mutex_lock(&vpu_data.lock);
++ if (share_mem.cpu_addr != 0) {
++ ret = copy_to_user((void __user *)arg,
++ &share_mem,
++ sizeof(struct vpu_mem_desc));
++ mutex_unlock(&vpu_data.lock);
++ break;
++ } else {
++ if (copy_from_user(&share_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc))) {
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++ if (vpu_alloc_dma_buffer(&share_mem) == -1)
++ ret = -EFAULT;
++ else {
++ if (copy_to_user((void __user *)arg,
++ &share_mem,
++ sizeof(struct
++ vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ }
++ mutex_unlock(&vpu_data.lock);
++ break;
++ }
++ case VPU_IOC_REQ_VSHARE_MEM:
++ {
++ mutex_lock(&vpu_data.lock);
++ if (vshare_mem.cpu_addr != 0) {
++ ret = copy_to_user((void __user *)arg,
++ &vshare_mem,
++ sizeof(struct vpu_mem_desc));
++ mutex_unlock(&vpu_data.lock);
++ break;
++ } else {
++ if (copy_from_user(&vshare_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct
++ vpu_mem_desc))) {
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++ /* vmalloc shared memory if not allocated */
++ if (!vshare_mem.cpu_addr)
++ vshare_mem.cpu_addr =
++ (unsigned long)
++ vmalloc_user(vshare_mem.size);
++ if (copy_to_user
++ ((void __user *)arg, &vshare_mem,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ mutex_unlock(&vpu_data.lock);
++ break;
++ }
++ case VPU_IOC_GET_WORK_ADDR:
++ {
++ if (bitwork_mem.cpu_addr != 0) {
++ ret =
++ copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct vpu_mem_desc));
++ break;
++ } else {
++ if (copy_from_user(&bitwork_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc)))
++ return -EFAULT;
++
++ if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
++ ret = -EFAULT;
++ else if (copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct
++ vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ break;
++ }
++ /*
++ * The following two ioctls are used when the user allocates the
++ * working buffer and registers it with the vpu driver.
++ */
++ case VPU_IOC_QUERY_BITWORK_MEM:
++ {
++ if (copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ break;
++ }
++ case VPU_IOC_SET_BITWORK_MEM:
++ {
++ if (copy_from_user(&bitwork_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ break;
++ }
++ case VPU_IOC_SYS_SW_RESET:
++ {
++ vpu_reset();
++ break;
++ }
++ case VPU_IOC_REG_DUMP:
++ break;
++ case VPU_IOC_PHYMEM_DUMP:
++ break;
++ case VPU_IOC_PHYMEM_CHECK:
++ {
++ struct vpu_mem_desc check_memory;
++ ret = copy_from_user(&check_memory,
++ (void __user *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret != 0) {
++ dev_err(vpu_dev, "copy from user failure:%d\n", ret);
++ ret = -EFAULT;
++ break;
++ }
++ ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
++
++ dev_dbg(vpu_dev, "vpu: memory phy:0x%x %s phy memory\n",
++ check_memory.phy_addr, (ret ? "is" : "isn't"));
++ /* borrow .size to pass back the result. */
++ check_memory.size = ret;
++ ret = copy_to_user((void __user *)arg, &check_memory,
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ ret = -EFAULT;
++ break;
++ }
++ break;
++ }
++ case VPU_IOC_LOCK_DEV:
++ {
++ u32 lock_en;
++
++ if (get_user(lock_en, (u32 __user *) arg))
++ return -EFAULT;
++
++ if (lock_en)
++ mutex_lock(&vpu_data.lock);
++ else
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ default:
++ {
++ dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
++ ret = -EINVAL;
++ break;
++ }
++ }
++ return ret;
++}
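++
++/*
++ * A typical user-space sequence against this ioctl interface looks roughly
++ * like the sketch below (the device node name and struct layout are assumed
++ * from the names used in this file, not taken from a library header):
++ *
++ *	fd = open("/dev/mxc_vpu", O_RDWR);
++ *	struct vpu_mem_desc mem = { .size = frame_size };
++ *	ioctl(fd, VPU_IOC_PHYMEM_ALLOC, &mem);      - driver fills phy_addr
++ *	buf = mmap(NULL, frame_size, PROT_READ | PROT_WRITE,
++ *		   MAP_SHARED, fd, mem.phy_addr);    - non-zero pgoff path
++ *	ioctl(fd, VPU_IOC_CLKGATE_SETTING, &on);    - gate the clock on
++ *	ioctl(fd, VPU_IOC_WAIT4INT, timeout_ms);    - wait for the codec irq
++ */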
++
++/*!
++ * @brief Release function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_release(struct inode *inode, struct file *filp)
++{
++ int i;
++ unsigned long timeout;
++
++ mutex_lock(&vpu_data.lock);
++
++ if (open_count > 0 && !(--open_count)) {
++
++ /* Wait for the VPU to go to the idle state */
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_CUR_PC)) {
++
++ timeout = jiffies + HZ;
++ while (READ_REG(BIT_BUSY_FLAG)) {
++ msleep(1);
++ if (time_after(jiffies, timeout)) {
++ dev_warn(vpu_dev, "VPU timeout during release\n");
++ break;
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ /* Clean up interrupt */
++ cancel_work_sync(&vpu_data.work);
++ flush_workqueue(vpu_data.workqueue);
++ irq_status = 0;
++
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_BUSY_FLAG)) {
++
++ if (cpu_is_mx51() || cpu_is_mx53()) {
++ dev_err(vpu_dev,
++ "fatal error: can't gate/power off when VPU is busy\n");
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++
++#ifdef CONFIG_SOC_IMX6Q
++ if (cpu_is_mx6dl() || cpu_is_mx6q()) {
++ WRITE_REG(0x11, 0x10F0);
++ timeout = jiffies + HZ;
++ while (READ_REG(0x10F4) != 0x77) {
++ msleep(1);
++ if (time_after(jiffies, timeout))
++ break;
++ }
++
++ if (READ_REG(0x10F4) != 0x77) {
++ dev_err(vpu_dev,
++ "fatal error: can't gate/power off when VPU is busy\n");
++ WRITE_REG(0x0, 0x10F0);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ } else
++ vpu_reset();
++ }
++#endif
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ vpu_free_buffers();
++
++ /* Free shared memory when vpu device is idle */
++ vpu_free_dma_buffer(&share_mem);
++ share_mem.cpu_addr = 0;
++ vfree((void *)vshare_mem.cpu_addr);
++ vshare_mem.cpu_addr = 0;
++
++ vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ atomic_dec(&clk_cnt_from_ioc);
++ }
++
++ vpu_power_up(false);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_put_sync_suspend(vpu_dev);
++#endif
++
++ }
++ mutex_unlock(&vpu_data.lock);
++
++ return 0;
++}
++
++/*!
++ * @brief fasync function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_fasync(int fd, struct file *filp, int mode)
++{
++ struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
++ return fasync_helper(fd, filp, mode, &dev->async_queue);
++}
++
++/*!
++ * @brief memory map function of hardware registers for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
++{
++ unsigned long pfn;
++
++ vm->vm_flags |= VM_IO | VM_RESERVED;
++ /*
++ * Since vpu registers have been mapped with ioremap() at probe
++ * which L_PTE_XN is 1, and the same physical address must be
++ * mapped multiple times with same type, so set L_PTE_XN to 1 here.
++ * Otherwise, there may be unexpected result in video codec.
++ */
++ vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
++ pfn = phy_vpu_base_addr >> PAGE_SHIFT;
++ dev_dbg(vpu_dev, "size=0x%x, page no.=0x%x\n",
++ (int)(vm->vm_end - vm->vm_start), (int)pfn);
++ return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
++ vm->vm_page_prot) ? -EAGAIN : 0;
++}
++
++/*!
++ * @brief memory map function of DMA memory for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
++{
++ int request_size;
++ request_size = vm->vm_end - vm->vm_start;
++
++ dev_dbg(vpu_dev, "start=0x%x, pgoff=0x%x, size=0x%x\n",
++ (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
++ request_size);
++
++ vm->vm_flags |= VM_IO | VM_RESERVED;
++ vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
++
++ return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
++ request_size, vm->vm_page_prot) ? -EAGAIN : 0;
++
++}
++
++/*!
++ * @brief memory map function of vmalloc'ed shared memory
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
++{
++ int ret = -EINVAL;
++
++ ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
++ vm->vm_flags |= VM_IO;
++
++ return ret;
++}
++
++/*!
++ * @brief memory map interface for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
++{
++ unsigned long offset;
++
++ offset = vshare_mem.cpu_addr >> PAGE_SHIFT;
++
++ if (vm->vm_pgoff && (vm->vm_pgoff == offset))
++ return vpu_map_vshare_mem(fp, vm);
++ else if (vm->vm_pgoff)
++ return vpu_map_dma_mem(fp, vm);
++ else
++ return vpu_map_hwregs(fp, vm);
++}
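++
++/*
++ * mmap dispatch above: an offset equal to vshare_mem.cpu_addr >> PAGE_SHIFT
++ * selects the vmalloc'ed share memory, any other non-zero offset is treated
++ * as a physical page frame and remapped as DMA memory, and offset zero maps
++ * the VPU hardware registers.
++ */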
++
++const struct file_operations vpu_fops = {
++ .owner = THIS_MODULE,
++ .open = vpu_open,
++ .unlocked_ioctl = vpu_ioctl,
++ .release = vpu_release,
++ .fasync = vpu_fasync,
++ .mmap = vpu_mmap,
++};
++
++/*!
++ * This function is called by the driver framework to initialize the vpu device.
++ * @param pdev The platform device structure for the vpu, passed in by the framework.
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ unsigned long addr = 0;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ struct device_node *np = pdev->dev.of_node;
++ u32 iramsize;
++
++ err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
++ if (!err && iramsize)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ {
++ iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ return -ENOMEM;
++ }
++
++ iram_base = gen_pool_alloc(iram_pool, iramsize);
++ if (!iram_base) {
++ dev_err(&pdev->dev, "unable to alloc iram\n");
++ return -ENOMEM;
++ }
++
++ addr = gen_pool_virt_to_phys(iram_pool, iram_base);
++ }
++#else
++ iram_alloc(iramsize, &addr);
++#endif
++ if (addr == 0)
++ iram.start = iram.end = 0;
++ else {
++ iram.start = addr;
++ iram.end = addr + iramsize - 1;
++ }
++#else
++
++ vpu_plat = pdev->dev.platform_data;
++
++ if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
++ iram_alloc(vpu_plat->iram_size, &addr);
++ if (addr == 0)
++ iram.start = iram.end = 0;
++ else {
++ iram.start = addr;
++ iram.end = addr + vpu_plat->iram_size - 1;
++ }
++#endif
++
++ vpu_dev = &pdev->dev;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
++ if (!res) {
++ dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
++ return -ENODEV;
++ }
++ phy_vpu_base_addr = res->start;
++ vpu_base = ioremap(res->start, res->end - res->start);
++
++ vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
++ if (vpu_major < 0) {
++ dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
++ err = -EBUSY;
++ goto error;
++ }
++
++ vpu_class = class_create(THIS_MODULE, "mxc_vpu");
++ if (IS_ERR(vpu_class)) {
++ err = PTR_ERR(vpu_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
++ NULL, "mxc_vpu");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ vpu_clk = clk_get(&pdev->dev, "vpu_clk");
++ if (IS_ERR(vpu_clk)) {
++ err = -ENOENT;
++ goto err_out_class;
++ }
++
++ vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
++ if (vpu_ipi_irq < 0) {
++ dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
++ err = -ENXIO;
++ goto err_out_class;
++ }
++ err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
++ (void *)(&vpu_data));
++ if (err)
++ goto err_out_class;
++ if (vpu_power_get(true)) {
++ if (!(cpu_is_mx51() || cpu_is_mx53())) {
++ dev_err(vpu_dev, "failed to get vpu power\n");
++ goto err_out_class;
++ } else {
++ /* regulator_get will return an error on MX5x,
++ * just ignore it everywhere */
++ dev_warn(vpu_dev, "failed to get vpu power\n");
++ }
++ }
++
++#ifdef MXC_VPU_HAS_JPU
++ vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
++ if (vpu_jpu_irq < 0) {
++ dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
++ err = -ENXIO;
++ free_irq(vpu_ipi_irq, &vpu_data);
++ goto err_out_class;
++ }
++ err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
++ "VPU_JPG_IRQ", (void *)(&vpu_data));
++ if (err) {
++ free_irq(vpu_ipi_irq, &vpu_data);
++ goto err_out_class;
++ }
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_enable(&pdev->dev);
++#endif
++
++ vpu_data.workqueue = create_workqueue("vpu_wq");
++ INIT_WORK(&vpu_data.work, vpu_worker_callback);
++ mutex_init(&vpu_data.lock);
++ dev_info(vpu_dev, "VPU initialized\n");
++ goto out;
++
++err_out_class:
++ device_destroy(vpu_class, MKDEV(vpu_major, 0));
++ class_destroy(vpu_class);
++err_out_chrdev:
++ unregister_chrdev(vpu_major, "mxc_vpu");
++error:
++ iounmap(vpu_base);
++out:
++ return err;
++}
++
++static int vpu_dev_remove(struct platform_device *pdev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_disable(&pdev->dev);
++#endif
++ free_irq(vpu_ipi_irq, &vpu_data);
++#ifdef MXC_VPU_HAS_JPU
++ free_irq(vpu_jpu_irq, &vpu_data);
++#endif
++ cancel_work_sync(&vpu_data.work);
++ flush_workqueue(vpu_data.workqueue);
++ destroy_workqueue(vpu_data.workqueue);
++
++ iounmap(vpu_base);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ if (iram.start)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
++#else
++ iram_free(iram.start, iram.end-iram.start+1);
++#endif
++#else
++ if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
++ iram_free(iram.start, vpu_plat->iram_size);
++#endif
++
++ vpu_power_get(false);
++ return 0;
++}
++
++#ifdef CONFIG_PM
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_suspend(struct device *dev)
++#else
++static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
++#endif
++{
++ int i;
++ unsigned long timeout;
++
++ mutex_lock(&vpu_data.lock);
++ if (open_count == 0) {
++ /* VPU is released (all instances are freed),
++ * clock is already off, context is no longer needed,
++ * power is already off on MX6,
++ * gate power on MX51 */
++ if (cpu_is_mx51()) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(1);
++#endif
++ }
++ } else {
++ /* Wait for the VPU to go to the idle state; assume it cannot
++ * become idle any more after about 1 sec */
++ timeout = jiffies + HZ;
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ while (READ_REG(BIT_BUSY_FLAG)) {
++ msleep(1);
++ if (time_after(jiffies, timeout)) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EAGAIN;
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ /* Make sure clock is disabled before suspend */
++ vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++ if (cpu_is_mx53()) {
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++ }
++
++ if (bitwork_mem.cpu_addr != 0) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ /* Save 64 registers from BIT_CODE_BUF_ADDR */
++ for (i = 0; i < 64; i++)
++ regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
++ pc_before_suspend = READ_REG(BIT_CUR_PC);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(1);
++#endif
++
++ /* If the VPU was working before suspend, disable the
++ * regulator to keep the use count right. */
++ vpu_power_up(false);
++ }
++
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_resume(struct device *dev)
++#else
++static int vpu_resume(struct platform_device *pdev)
++#endif
++{
++ int i;
++
++ mutex_lock(&vpu_data.lock);
++ if (open_count == 0) {
++ /* VPU is released (all instances are freed),
++ * clock should be kept off, context is no longer needed,
++ * power should be kept off on MX6,
++ * disable power gating on MX51 */
++ if (cpu_is_mx51()) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(0);
++#endif
++ }
++ } else {
++ if (cpu_is_mx53())
++ goto recover_clk;
++
++ /* If the VPU was working before suspend, enable the
++ * regulator to keep the use count right. */
++ vpu_power_up(true);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(0);
++#endif
++
++ if (bitwork_mem.cpu_addr != 0) {
++ u32 *p = (u32 *) bitwork_mem.cpu_addr;
++ u32 data, pc;
++ u16 data_hi;
++ u16 data_lo;
++
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++
++ pc = READ_REG(BIT_CUR_PC);
++ if (pc) {
++ dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ goto recover_clk;
++ }
++
++ /* Restore registers */
++ for (i = 0; i < 64; i++)
++ WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
++
++ WRITE_REG(0x0, BIT_RESET_CTRL);
++ WRITE_REG(0x0, BIT_CODE_RUN);
++ /* The MX6 RTL has a bug: MBC_SET_SUBBLK_EN is not initialized on reset */
++#ifdef CONFIG_SOC_IMX6Q
++ WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
++#endif
++
++ /*
++ * Re-load boot code, from the codebuffer in external RAM.
++ * Thankfully, we only need 4096 bytes, same for all platforms.
++ */
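++ /*
++ * Each write to BIT_CODE_DOWN carries a 16-bit code word in the
++ * low half and its destination index in the high half, so every
++ * u32 read from the code buffer is split into two such writes.
++ */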
++ for (i = 0; i < 2048; i += 4) {
++ data = p[(i / 2) + 1];
++ data_hi = (data >> 16) & 0xFFFF;
++ data_lo = data & 0xFFFF;
++ WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
++ WRITE_REG(((i + 1) << 16) | data_lo,
++ BIT_CODE_DOWN);
++
++ data = p[i / 2];
++ data_hi = (data >> 16) & 0xFFFF;
++ data_lo = data & 0xFFFF;
++ WRITE_REG(((i + 2) << 16) | data_hi,
++ BIT_CODE_DOWN);
++ WRITE_REG(((i + 3) << 16) | data_lo,
++ BIT_CODE_DOWN);
++ }
++
++ if (pc_before_suspend) {
++ WRITE_REG(0x1, BIT_BUSY_FLAG);
++ WRITE_REG(0x1, BIT_CODE_RUN);
++ while (READ_REG(BIT_BUSY_FLAG))
++ ;
++ } else {
++ dev_warn(vpu_dev, "PC=0 before suspend\n");
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++recover_clk:
++ /* Recover vpu clock */
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ }
++ }
++
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int vpu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static const struct dev_pm_ops vpu_pm_ops = {
++ SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
++};
++#endif
++
++#else
++#define vpu_suspend NULL
++#define vpu_resume NULL
++#endif /* !CONFIG_PM */
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static const struct of_device_id vpu_of_match[] = {
++ { .compatible = "fsl,imx6-vpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, vpu_of_match);
++#endif
++
++/*! Driver definition
++ *
++ */
++static struct platform_driver mxcvpu_driver = {
++ .driver = {
++ .name = "mxc_vpu",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ .of_match_table = vpu_of_match,
++#ifdef CONFIG_PM
++ .pm = &vpu_pm_ops,
++#endif
++#endif
++ },
++ .probe = vpu_dev_probe,
++ .remove = vpu_dev_remove,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ .suspend = vpu_suspend,
++ .resume = vpu_resume,
++#endif
++};
++
++static int __init vpu_init(void)
++{
++ int ret = platform_driver_register(&mxcvpu_driver);
++
++ init_waitqueue_head(&vpu_queue);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ memblock_analyze();
++ top_address_DRAM = memblock_end_of_DRAM_with_reserved();
++#endif
++
++ return ret;
++}
++
++static void __exit vpu_exit(void)
++{
++ if (vpu_major > 0) {
++ device_destroy(vpu_class, MKDEV(vpu_major, 0));
++ class_destroy(vpu_class);
++ unregister_chrdev(vpu_major, "mxc_vpu");
++ vpu_major = 0;
++ }
++
++ vpu_free_dma_buffer(&bitwork_mem);
++ vpu_free_dma_buffer(&pic_para_mem);
++ vpu_free_dma_buffer(&user_data_mem);
++
++ /* reset VPU state */
++ vpu_power_up(true);
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ vpu_reset();
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ vpu_power_up(false);
++
++ clk_put(vpu_clk);
++
++ platform_driver_unregister(&mxcvpu_driver);
++ return;
++}
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++
++module_init(vpu_init);
++module_exit(vpu_exit);
+diff -Nur linux-3.14.40.orig/drivers/net/bonding/bonding.h linux-3.14.40/drivers/net/bonding/bonding.h
+--- linux-3.14.40.orig/drivers/net/bonding/bonding.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/bonding/bonding.h 2015-05-01 14:57:59.631427001 -0500
+@@ -188,7 +188,8 @@
+ struct net_device *dev; /* first - useful for panic debug */
+ struct bonding *bond; /* our master */
+ int delay;
+- unsigned long jiffies;
++ /* all three in jiffies */
++ unsigned long last_link_up;
+ unsigned long last_arp_rx;
+ unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
+ s8 link; /* one of BOND_LINK_XXXX */
+diff -Nur linux-3.14.40.orig/drivers/net/bonding/bond_main.c linux-3.14.40/drivers/net/bonding/bond_main.c
+--- linux-3.14.40.orig/drivers/net/bonding/bond_main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/bonding/bond_main.c 2015-05-01 14:57:59.647427001 -0500
+@@ -798,7 +798,7 @@
+ return;
+
+ if (new_active) {
+- new_active->jiffies = jiffies;
++ new_active->last_link_up = jiffies;
+
+ if (new_active->link == BOND_LINK_BACK) {
+ if (USES_PRIMARY(bond->params.mode)) {
+@@ -1457,7 +1457,7 @@
+ }
+
+ if (new_slave->link != BOND_LINK_DOWN)
+- new_slave->jiffies = jiffies;
++ new_slave->last_link_up = jiffies;
+ pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+@@ -1908,7 +1908,7 @@
+ * recovered before downdelay expired
+ */
+ slave->link = BOND_LINK_UP;
+- slave->jiffies = jiffies;
++ slave->last_link_up = jiffies;
+ pr_info("%s: link status up again after %d ms for interface %s.\n",
+ bond->dev->name,
+ (bond->params.downdelay - slave->delay) *
+@@ -1983,7 +1983,7 @@
+
+ case BOND_LINK_UP:
+ slave->link = BOND_LINK_UP;
+- slave->jiffies = jiffies;
++ slave->last_link_up = jiffies;
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ /* prevent it from being the active one */
+@@ -2268,6 +2268,7 @@
+ struct slave *slave)
+ {
+ struct arphdr *arp = (struct arphdr *)skb->data;
++ struct slave *curr_active_slave;
+ unsigned char *arp_ptr;
+ __be32 sip, tip;
+ int alen;
+@@ -2312,6 +2313,8 @@
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ &sip, &tip);
+
++ curr_active_slave = rcu_dereference(bond->curr_active_slave);
++
+ /*
+ * Backup slaves won't see the ARP reply, but do come through
+ * here for each ARP probe (so we swap the sip/tip to validate
+@@ -2325,11 +2328,12 @@
+ * is done to avoid endless looping when we can't reach the
+ * arp_ip_target and fool ourselves with our own arp requests.
+ */
++
+ if (bond_is_active_slave(slave))
+ bond_validate_arp(bond, slave, sip, tip);
+- else if (bond->curr_active_slave &&
+- time_after(slave_last_rx(bond, bond->curr_active_slave),
+- bond->curr_active_slave->jiffies))
++ else if (curr_active_slave &&
++ time_after(slave_last_rx(bond, curr_active_slave),
++ curr_active_slave->last_link_up))
+ bond_validate_arp(bond, slave, tip, sip);
+
+ out_unlock:
+@@ -2376,9 +2380,9 @@
+ oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
+ /* see if any of the previous devices are up now (i.e. they have
+ * xmt and rcv traffic). the curr_active_slave does not come into
+- * the picture unless it is null. also, slave->jiffies is not needed
+- * here because we send an arp on each slave and give a slave as
+- * long as it needs to get the tx/rx within the delta.
++ * the picture unless it is null. also, slave->last_link_up is not
++ * needed here because we send an arp on each slave and give a slave
++ * as long as it needs to get the tx/rx within the delta.
+ * TODO: what about up/down delay in arp mode? it wasn't here before
+ * so it can wait
+ */
+@@ -2505,7 +2509,7 @@
+ * active. This avoids bouncing, as the last receive
+ * times need a full ARP monitor cycle to be updated.
+ */
+- if (bond_time_in_interval(bond, slave->jiffies, 2))
++ if (bond_time_in_interval(bond, slave->last_link_up, 2))
+ continue;
+
+ /*
+@@ -2698,7 +2702,7 @@
+ new_slave->link = BOND_LINK_BACK;
+ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
+ bond_arp_send_all(bond, new_slave);
+- new_slave->jiffies = jiffies;
++ new_slave->last_link_up = jiffies;
+ rcu_assign_pointer(bond->current_arp_slave, new_slave);
+
+ check_state:
+diff -Nur linux-3.14.40.orig/drivers/net/can/flexcan.c linux-3.14.40/drivers/net/can/flexcan.c
+--- linux-3.14.40.orig/drivers/net/can/flexcan.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/can/flexcan.c 2015-05-01 14:57:59.663427001 -0500
+@@ -125,7 +125,8 @@
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+ /* FLEXCAN interrupt flag register (IFLAG) bits */
+-#define FLEXCAN_TX_BUF_ID 8
++#define FLEXCAN_RESERVED_BUF_ID 8
++#define FLEXCAN_TX_BUF_ID 13
+ #define FLEXCAN_IFLAG_BUF(x) BIT(x)
+ #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
+ #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
+@@ -162,6 +163,7 @@
+ */
+ #define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
+ #define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */
++#define FLEXCAN_HAS_ERR005829 BIT(3) /* have errata ERR005829 */
+
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -221,7 +223,7 @@
+ };
+ static struct flexcan_devtype_data fsl_imx28_devtype_data;
+ static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+- .features = FLEXCAN_HAS_V10_FEATURES,
++ .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_ERR005829,
+ };
+
+ static const struct can_bittiming_const flexcan_bittiming_const = {
+@@ -428,6 +430,11 @@
+ flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
+ flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
++ if (priv->devtype_data->features & FLEXCAN_HAS_ERR005829) {
++ writel(0x0, &regs->cantxfg[FLEXCAN_RESERVED_BUF_ID].can_ctrl);
++ writel(0x0, &regs->cantxfg[FLEXCAN_RESERVED_BUF_ID].can_ctrl);
++ }
++
+ return NETDEV_TX_OK;
+ }
+
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/adi/bfin_mac.c linux-3.14.40/drivers/net/ethernet/adi/bfin_mac.c
+--- linux-3.14.40.orig/drivers/net/ethernet/adi/bfin_mac.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/adi/bfin_mac.c 2015-05-01 14:57:59.671427001 -0500
+@@ -1040,6 +1040,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = bfin_ptp_adjfreq,
+ .adjtime = bfin_ptp_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/broadcom/tg3.c linux-3.14.40/drivers/net/ethernet/broadcom/tg3.c
+--- linux-3.14.40.orig/drivers/net/ethernet/broadcom/tg3.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/broadcom/tg3.c 2015-05-01 14:57:59.715427001 -0500
+@@ -6322,6 +6322,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 1,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = tg3_ptp_adjfreq,
+ .adjtime = tg3_ptp_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/cadence/macb.c linux-3.14.40/drivers/net/ethernet/cadence/macb.c
+--- linux-3.14.40.orig/drivers/net/ethernet/cadence/macb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/cadence/macb.c 2015-05-01 14:57:59.719427001 -0500
+@@ -604,25 +604,16 @@
+ {
+ unsigned int entry;
+ struct sk_buff *skb;
+- struct macb_dma_desc *desc;
+ dma_addr_t paddr;
+
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+- u32 addr, ctrl;
+-
+ entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+- desc = &bp->rx_ring[entry];
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+- addr = desc->addr;
+- ctrl = desc->ctrl;
+ bp->rx_prepared_head++;
+
+- if ((addr & MACB_BIT(RX_USED)))
+- continue;
+-
+ if (bp->rx_skbuff[entry] == NULL) {
+ /* allocate sk_buff for this free entry in ring */
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
+@@ -703,7 +694,6 @@
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+- desc->addr &= ~MACB_BIT(RX_USED);
+ bp->rx_tail++;
+ count++;
+
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/chelsio/cxgb4vf/sge.c linux-3.14.40/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+--- linux-3.14.40.orig/drivers/net/ethernet/chelsio/cxgb4vf/sge.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/chelsio/cxgb4vf/sge.c 2015-05-01 14:57:59.727427001 -0500
+@@ -1510,7 +1510,8 @@
+ {
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *pkt = (void *)rsp;
+- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
++ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
++ (rspq->netdev->features & NETIF_F_RXCSUM);
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+
+ /*
+@@ -1538,8 +1539,8 @@
+ skb_record_rx_queue(skb, rspq->idx);
+ rxq->stats.pkts++;
+
+- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
++ if (csum_ok && !pkt->err_vec &&
++ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (!pkt->ip_frag)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else {
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/fec.h linux-3.14.40/drivers/net/ethernet/freescale/fec.h
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/fec.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/fec.h 2015-05-01 14:57:59.731427001 -0500
+@@ -221,7 +221,7 @@
+ #define BD_ENET_TX_RCMASK ((ushort)0x003c)
+ #define BD_ENET_TX_UN ((ushort)0x0002)
+ #define BD_ENET_TX_CSL ((ushort)0x0001)
+-#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
++#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
+
+ /*enhanced buffer descriptor control/status used by Ethernet transmit*/
+ #define BD_ENET_TX_INT 0x40000000
+@@ -246,8 +246,8 @@
+ #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+ #define FEC_ENET_TX_FRSIZE 2048
+ #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+-#define TX_RING_SIZE 16 /* Must be power of two */
+-#define TX_RING_MOD_MASK 15 /* for this to work */
++#define TX_RING_SIZE 512 /* Must be power of two */
++#define TX_RING_MOD_MASK 511 /* for this to work */
+
+ #define BD_ENET_RX_INT 0x00800000
+ #define BD_ENET_RX_PTP ((ushort)0x0400)
+@@ -256,12 +256,6 @@
+ #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+ #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+
+-struct fec_enet_delayed_work {
+- struct delayed_work delay_work;
+- bool timeout;
+- bool trig_tx;
+-};
+-
+ /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+@@ -296,12 +290,18 @@
+ /* The ring entries to be free()ed */
+ struct bufdesc *dirty_tx;
+
++ unsigned short bufdesc_size;
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
++ unsigned short tx_stop_threshold;
++ unsigned short tx_wake_threshold;
++
++ /* Software TSO */
++ char *tso_hdrs;
++ dma_addr_t tso_hdrs_dma;
+
+ struct platform_device *pdev;
+
+- int opened;
+ int dev_id;
+
+ /* Phylib and MDIO interface */
+@@ -321,6 +321,8 @@
+ struct napi_struct napi;
+ int csum_flags;
+
++ struct work_struct tx_timeout_work;
++
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ unsigned long last_overflow_check;
+@@ -333,7 +335,6 @@
+ int hwts_rx_en;
+ int hwts_tx_en;
+ struct timer_list time_keep;
+- struct fec_enet_delayed_work delay_work;
+ struct regulator *reg_phy;
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/fec_main.c linux-3.14.40/drivers/net/ethernet/freescale/fec_main.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/fec_main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/fec_main.c 2015-05-01 14:57:59.739427001 -0500
+@@ -36,6 +36,7 @@
+ #include <linux/in.h>
+ #include <linux/ip.h>
+ #include <net/ip.h>
++#include <net/tso.h>
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+ #include <linux/icmp.h>
+@@ -54,6 +55,10 @@
+ #include <linux/of_net.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/if_vlan.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/busfreq-imx6.h>
++#include <linux/pm_runtime.h>
++#include <linux/pm_qos.h>
+
+ #include <asm/cacheflush.h>
+
+@@ -91,6 +96,8 @@
+ #define FEC_QUIRK_HAS_CSUM (1 << 5)
+ /* Controller has hardware vlan support */
+ #define FEC_QUIRK_HAS_VLAN (1 << 6)
++/* Controller is FEC-MAC */
++#define FEC_QUIRK_FEC_MAC (1 << 7)
+ /* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+@@ -100,7 +107,13 @@
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+-#define FEC_QUIRK_ERR006358 (1 << 7)
++#define FEC_QUIRK_ERR006358 (1 << 8)
++/*
++ * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
++ * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
++ * to wait mode.
++ */
++#define FEC_QUIRK_BUG_WAITMODE (1 << 9)
+
+ static struct platform_device_id fec_devtype[] = {
+ {
+@@ -109,7 +122,7 @@
+ .driver_data = 0,
+ }, {
+ .name = "imx25-fec",
+- .driver_data = FEC_QUIRK_USE_GASKET,
++ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_FEC_MAC,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = 0,
+@@ -120,7 +133,8 @@
+ .name = "imx6q-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
++ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
++ FEC_QUIRK_BUG_WAITMODE,
+ }, {
+ .name = "mvf600-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC,
+@@ -172,10 +186,6 @@
+ #endif
+ #endif /* CONFIG_M5272 */
+
+-#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
+-#error "FEC: descriptor ring size constants too large"
+-#endif
+-
+ /* Interrupt events/masks. */
+ #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+ #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+@@ -231,6 +241,15 @@
+ #define FEC_PAUSE_FLAG_AUTONEG 0x1
+ #define FEC_PAUSE_FLAG_ENABLE 0x2
+
++#define TSO_HEADER_SIZE 128
++/* Max number of allowed TCP segments for software TSO */
++#define FEC_MAX_TSO_SEGS 100
++#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
++
++#define IS_TSO_HEADER(txq, addr) \
++ ((addr >= txq->tso_hdrs_dma) && \
++ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
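++
++/*
++ * TSO headers are built in a dedicated DMA area (tso_hdrs/tso_hdrs_dma,
++ * presumably tx_ring_size slots of TSO_HEADER_SIZE bytes each, matching the
++ * bound above); the macro lets callers recognize addresses from that
++ * pre-mapped area so they are not unmapped per packet.
++ */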
++
+ static int mii_cnt;
+
+ static inline
+@@ -286,6 +305,22 @@
+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
+ }
+
++static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
++ struct fec_enet_private *fep)
++{
++ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
++}
++
++static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
++{
++ int entries;
++
++ entries = ((const char *)fep->dirty_tx -
++ (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
++
++ return entries > 0 ? entries : entries + fep->tx_ring_size;
++}
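++
++/*
++ * Free descriptors are counted above as the byte distance from cur_tx back
++ * to dirty_tx divided by the descriptor size, minus one so the ring never
++ * fills completely; a non-positive result wraps around by tx_ring_size.
++ */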
++
+ static void *swap_buffer(void *bufaddr, int len)
+ {
+ int i;
+@@ -297,6 +332,32 @@
+ return bufaddr;
+ }
+
++static void fec_dump(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ struct bufdesc *bdp = fep->tx_bd_base;
++ unsigned int index = 0;
++
++ netdev_info(ndev, "TX ring dump\n");
++ pr_info("Nr SC addr len SKB\n");
++
++ do {
++ pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
++ index,
++ bdp == fep->cur_tx ? 'S' : ' ',
++ bdp == fep->dirty_tx ? 'H' : ' ',
++ bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
++ fep->tx_skbuff[index]);
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ index++;
++ } while (bdp != fep->tx_bd_base);
++}
++
++static inline bool is_ipv4_pkt(struct sk_buff *skb)
++{
++ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
++}
++
+ static int
+ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+ {
+@@ -307,137 +368,419 @@
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
++ if (is_ipv4_pkt(skb))
++ ip_hdr(skb)->check = 0;
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+ }
+
+-static netdev_tx_t
+-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static int
++fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+- struct bufdesc *bdp, *bdp_pre;
+- void *bufaddr;
+- unsigned short status;
++ struct bufdesc *bdp = fep->cur_tx;
++ struct bufdesc_ex *ebdp;
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int frag, frag_len;
++ unsigned short status;
++ unsigned int estatus = 0;
++ skb_frag_t *this_frag;
+ unsigned int index;
++ void *bufaddr;
++ dma_addr_t addr;
++ int i;
+
+- /* Fill in a Tx ring entry */
++ for (frag = 0; frag < nr_frags; frag++) {
++ this_frag = &skb_shinfo(skb)->frags[frag];
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ ebdp = (struct bufdesc_ex *)bdp;
++
++ status = bdp->cbd_sc;
++ status &= ~BD_ENET_TX_STATS;
++ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
++ frag_len = skb_shinfo(skb)->frags[frag].size;
++
++ /* Handle the last BD specially */
++ if (frag == nr_frags - 1) {
++ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
++ if (fep->bufdesc_ex) {
++ estatus |= BD_ENET_TX_INT;
++ if (unlikely(skb_shinfo(skb)->tx_flags &
++ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
++ estatus |= BD_ENET_TX_TS;
++ }
++ }
++
++ if (fep->bufdesc_ex) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
++ }
++
++ bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
++
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], bufaddr, frag_len);
++ bufaddr = fep->tx_bounce[index];
++
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(bufaddr, frag_len);
++ }
++
++ addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "Tx DMA memory map failed\n");
++ goto dma_mapping_error;
++ }
++
++ bdp->cbd_bufaddr = addr;
++ bdp->cbd_datlen = frag_len;
++ bdp->cbd_sc = status;
++ }
++
++ fep->cur_tx = bdp;
++
++ return 0;
++
++dma_mapping_error:
+ bdp = fep->cur_tx;
++ for (i = 0; i < frag; i++) {
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
++ bdp->cbd_datlen, DMA_TO_DEVICE);
++ }
++ return NETDEV_TX_OK;
++}
+
+- status = bdp->cbd_sc;
++static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ struct bufdesc *bdp, *last_bdp;
++ void *bufaddr;
++ dma_addr_t addr;
++ unsigned short status;
++ unsigned short buflen;
++ unsigned int estatus = 0;
++ unsigned int index;
++ int entries_free;
++ int ret;
+
+- if (status & BD_ENET_TX_READY) {
+- /* Ooops. All transmit buffers are full. Bail out.
+- * This should not happen, since ndev->tbusy should be set.
+- */
+- netdev_err(ndev, "tx queue full!\n");
+- return NETDEV_TX_BUSY;
++ entries_free = fec_enet_get_free_txdesc_num(fep);
++ if (entries_free < MAX_SKB_FRAGS + 1) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "NOT enough BD for SG!\n");
++ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+- kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+- /* Clear all of the status flags */
++ /* Fill in a Tx ring entry */
++ bdp = fep->cur_tx;
++ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ /* Set buffer length and buffer pointer */
+ bufaddr = skb->data;
+- bdp->cbd_datlen = skb->len;
++ buflen = skb_headlen(skb);
+
+- /*
+- * On some FEC implementations data must be aligned on
+- * 4-byte boundaries. Use bounce buffers to copy data
+- * and get it aligned. Ugh.
+- */
+- if (fep->bufdesc_ex)
+- index = (struct bufdesc_ex *)bdp -
+- (struct bufdesc_ex *)fep->tx_bd_base;
+- else
+- index = bdp - fep->tx_bd_base;
+-
+- if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
+- memcpy(fep->tx_bounce[index], skb->data, skb->len);
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], skb->data, buflen);
+ bufaddr = fep->tx_bounce[index];
+- }
+-
+- /*
+- * Some design made an incorrect assumption on endian mode of
+- * the system that it's running on. As the result, driver has to
+- * swap every frame going to and coming from the controller.
+- */
+- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+- swap_buffer(bufaddr, skb->len);
+
+- /* Save skb pointer */
+- fep->tx_skbuff[index] = skb;
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(bufaddr, buflen);
++ }
+
+- /* Push the data cache so the CPM does not get stale memory
+- * data.
+- */
+- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+- skb->len, DMA_TO_DEVICE);
+- if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+- bdp->cbd_bufaddr = 0;
+- fep->tx_skbuff[index] = NULL;
++ /* Push the data cache so the CPM does not get stale memory data. */
++ addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
+
++ if (nr_frags) {
++ ret = fec_enet_txq_submit_frag_skb(skb, ndev);
++ if (ret)
++ return ret;
++ } else {
++ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
++ if (fep->bufdesc_ex) {
++ estatus = BD_ENET_TX_INT;
++ if (unlikely(skb_shinfo(skb)->tx_flags &
++ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
++ estatus |= BD_ENET_TX_TS;
++ }
++ }
++
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+- ebdp->cbd_bdu = 0;
++
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+- fep->hwts_tx_en)) {
+- ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
++ fep->hwts_tx_en))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+- } else {
+- ebdp->cbd_esc = BD_ENET_TX_INT;
+
+- /* Enable protocol checksum flags
+- * We do not bother with the IP Checksum bits as they
+- * are done by the kernel
+- */
+- if (skb->ip_summed == CHECKSUM_PARTIAL)
+- ebdp->cbd_esc |= BD_ENET_TX_PINS;
+- }
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
+ }
+
++ last_bdp = fep->cur_tx;
++ index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
++ /* Save skb pointer */
++ fep->tx_skbuff[index] = skb;
++
++ bdp->cbd_datlen = buflen;
++ bdp->cbd_bufaddr = addr;
++
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
++ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
+- bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+- if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+- !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+- fep->delay_work.trig_tx = true;
+- schedule_delayed_work(&(fep->delay_work.delay_work),
+- msecs_to_jiffies(1));
+- }
+-
+ /* If this was the last BD in the ring, start at the beginning again. */
+- bdp = fec_enet_get_nextdesc(bdp, fep);
++ bdp = fec_enet_get_nextdesc(last_bdp, fep);
+
+ skb_tx_timestamp(skb);
+
+ fep->cur_tx = bdp;
+
+- if (fep->cur_tx == fep->dirty_tx)
+- netif_stop_queue(ndev);
++ /* Trigger transmission start */
++ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
++
++ return 0;
++}
++
++static int
++fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
++ struct bufdesc *bdp, int index, char *data,
++ int size, bool last_tcp, bool is_last)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
++ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
++ unsigned short status;
++ unsigned int estatus = 0;
++ dma_addr_t addr;
++
++ status = bdp->cbd_sc;
++ status &= ~BD_ENET_TX_STATS;
++
++ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
++
++ if (((unsigned long) data) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], data, size);
++ data = fep->tx_bounce[index];
++
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(data, size);
++ }
++
++ addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "Tx DMA memory map failed\n");
++ return NETDEV_TX_BUSY;
++ }
++
++ bdp->cbd_datlen = size;
++ bdp->cbd_bufaddr = addr;
++
++ if (fep->bufdesc_ex) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
++ }
++
++ /* Handle the last BD specially */
++ if (last_tcp)
++ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
++ if (is_last) {
++ status |= BD_ENET_TX_INTR;
++ if (fep->bufdesc_ex)
++ ebdp->cbd_esc |= BD_ENET_TX_INT;
++ }
++
++ bdp->cbd_sc = status;
++
++ return 0;
++}
++
++static int
++fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
++ struct bufdesc *bdp, int index)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
++ void *bufaddr;
++ unsigned long dmabuf;
++ unsigned short status;
++ unsigned int estatus = 0;
++
++ status = bdp->cbd_sc;
++ status &= ~BD_ENET_TX_STATS;
++ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
++
++ bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
++ dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
++ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
++ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
++ memcpy(fep->tx_bounce[index], skb->data, hdr_len);
++ bufaddr = fep->tx_bounce[index];
++
++ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
++ swap_buffer(bufaddr, hdr_len);
++
++ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
++ hdr_len, DMA_TO_DEVICE);
++ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "Tx DMA memory map failed\n");
++ return NETDEV_TX_BUSY;
++ }
++ }
++
++ bdp->cbd_bufaddr = dmabuf;
++ bdp->cbd_datlen = hdr_len;
++
++ if (fep->bufdesc_ex) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
++ ebdp->cbd_bdu = 0;
++ ebdp->cbd_esc = estatus;
++ }
++
++ bdp->cbd_sc = status;
++
++ return 0;
++}
++
++static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++ int total_len, data_left;
++ struct bufdesc *bdp = fep->cur_tx;
++ struct tso_t tso;
++ unsigned int index = 0;
++ int ret;
++
++ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
++ dev_kfree_skb_any(skb);
++ if (net_ratelimit())
++ netdev_err(ndev, "NOT enough BD for TSO!\n");
++ return NETDEV_TX_OK;
++ }
++
++ /* Protocol checksum off-load for TCP and UDP. */
++ if (fec_enet_clear_csum(skb, ndev)) {
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++ }
++
++ /* Initialize the TSO handler, and prepare the first payload */
++ tso_start(skb, &tso);
++
++ total_len = skb->len - hdr_len;
++ while (total_len > 0) {
++ char *hdr;
++
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
++ total_len -= data_left;
++
++ /* prepare packet headers: MAC + IP + TCP */
++ hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
++ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
++ ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
++ if (ret)
++ goto err_release;
++
++ while (data_left > 0) {
++ int size;
++
++ size = min_t(int, tso.size, data_left);
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
++ ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
++ size, size == data_left,
++ total_len == 0);
++ if (ret)
++ goto err_release;
++
++ data_left -= size;
++ tso_build_data(skb, &tso, size);
++ }
++
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ }
++
++ /* Save skb pointer */
++ fep->tx_skbuff[index] = skb;
++
++ skb_tx_timestamp(skb);
++ fep->cur_tx = bdp;
+
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
++ return 0;
++
++err_release:
++ /* TODO: Release all used data descriptors for TSO */
++ return ret;
++}
++
++static netdev_tx_t
++fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ int entries_free;
++ int ret;
++
++ if (skb_is_gso(skb))
++ ret = fec_enet_txq_submit_tso(skb, ndev);
++ else
++ ret = fec_enet_txq_submit_skb(skb, ndev);
++ if (ret)
++ return ret;
++
++ entries_free = fec_enet_get_free_txdesc_num(fep);
++ if (entries_free <= fep->tx_stop_threshold)
++ netif_stop_queue(ndev);
++
+ return NETDEV_TX_OK;
+ }
+
+@@ -474,7 +817,7 @@
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+- if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
++ if (fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
+ }
+@@ -488,12 +831,13 @@
+ fep->dirty_tx = bdp;
+ }
+
+-/* This function is called to start or restart the FEC during a link
+- * change. This only happens when switching between half and full
+- * duplex.
++/*
++ * This function is called to start or restart the FEC during a link
++ * change, transmit timeout, or to reconfigure the FEC. The network
++ * packet processing for this device must be stopped before this call.
+ */
+ static void
+-fec_restart(struct net_device *ndev, int duplex)
++fec_restart(struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+@@ -504,13 +848,6 @@
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 ecntl = 0x2; /* ETHEREN */
+
+- if (netif_running(ndev)) {
+- netif_device_detach(ndev);
+- napi_disable(&fep->napi);
+- netif_stop_queue(ndev);
+- netif_tx_lock_bh(ndev);
+- }
+-
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+@@ -519,7 +856,8 @@
+ * enet-mac reset will reset mac address registers too,
+ * so need to reconfigure it.
+ */
+- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
++ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC ||
++ id_entry->driver_data & FEC_QUIRK_FEC_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+@@ -551,7 +889,7 @@
+ }
+
+ /* Enable MII mode */
+- if (duplex) {
++ if (fep->full_duplex == DUPLEX_FULL) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+@@ -560,8 +898,6 @@
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+- fep->full_duplex = duplex;
+-
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+@@ -679,13 +1015,6 @@
+
+ /* Enable interrupts we wish to service */
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+-
+- if (netif_running(ndev)) {
+- netif_tx_unlock_bh(ndev);
+- netif_wake_queue(ndev);
+- napi_enable(&fep->napi);
+- netif_device_attach(ndev);
+- }
+ }
+
+ static void
+@@ -723,29 +1052,44 @@
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ fec_dump(ndev);
++
+ ndev->stats.tx_errors++;
+
+- fep->delay_work.timeout = true;
+- schedule_delayed_work(&(fep->delay_work.delay_work), 0);
++ schedule_work(&fep->tx_timeout_work);
+ }
+
+-static void fec_enet_work(struct work_struct *work)
++static void fec_enet_timeout_work(struct work_struct *work)
+ {
+ struct fec_enet_private *fep =
+- container_of(work,
+- struct fec_enet_private,
+- delay_work.delay_work.work);
+-
+- if (fep->delay_work.timeout) {
+- fep->delay_work.timeout = false;
+- fec_restart(fep->netdev, fep->full_duplex);
+- netif_wake_queue(fep->netdev);
+- }
++ container_of(work, struct fec_enet_private, tx_timeout_work);
++ struct net_device *ndev = fep->netdev;
+
+- if (fep->delay_work.trig_tx) {
+- fep->delay_work.trig_tx = false;
+- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
++ rtnl_lock();
++ if (netif_device_present(ndev) || netif_running(ndev)) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
++ fec_restart(ndev);
++ netif_wake_queue(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
+ }
++ rtnl_unlock();
++}
++
++static void
++fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
++ struct skb_shared_hwtstamps *hwtstamps)
++{
++ unsigned long flags;
++ u64 ns;
++
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
++ ns = timecounter_cyc2time(&fep->tc, ts);
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++
++ memset(hwtstamps, 0, sizeof(*hwtstamps));
++ hwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ static void
+@@ -756,6 +1100,7 @@
+ unsigned short status;
+ struct sk_buff *skb;
+ int index = 0;
++ int entries_free;
+
+ fep = netdev_priv(ndev);
+ bdp = fep->dirty_tx;
+@@ -769,16 +1114,18 @@
+ if (bdp == fep->cur_tx)
+ break;
+
+- if (fep->bufdesc_ex)
+- index = (struct bufdesc_ex *)bdp -
+- (struct bufdesc_ex *)fep->tx_bd_base;
+- else
+- index = bdp - fep->tx_bd_base;
++ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+
+ skb = fep->tx_skbuff[index];
+- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
+- DMA_TO_DEVICE);
++ fep->tx_skbuff[index] = NULL;
++ if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
++ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
++ bdp->cbd_datlen, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
++ if (!skb) {
++ bdp = fec_enet_get_nextdesc(bdp, fep);
++ continue;
++ }
+
+ /* Check for errors. */
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+@@ -797,26 +1144,18 @@
+ ndev->stats.tx_carrier_errors++;
+ } else {
+ ndev->stats.tx_packets++;
+- ndev->stats.tx_bytes += bdp->cbd_datlen;
++ ndev->stats.tx_bytes += skb->len;
+ }
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
+ struct skb_shared_hwtstamps shhwtstamps;
+- unsigned long flags;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+- spin_lock_irqsave(&fep->tmreg_lock, flags);
+- shhwtstamps.hwtstamp = ns_to_ktime(
+- timecounter_cyc2time(&fep->tc, ebdp->ts));
+- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++ fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+- if (status & BD_ENET_TX_READY)
+- netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
+-
+ /* Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+@@ -825,7 +1164,6 @@
+
+ /* Free the sk buffer associated with this last transmit */
+ dev_kfree_skb_any(skb);
+- fep->tx_skbuff[index] = NULL;
+
+ fep->dirty_tx = bdp;
+
+@@ -834,14 +1172,17 @@
+
+ /* Since we have freed up a buffer, the ring is no longer full
+ */
+- if (fep->dirty_tx != fep->cur_tx) {
+- if (netif_queue_stopped(ndev))
++ if (netif_queue_stopped(ndev)) {
++ entries_free = fec_enet_get_free_txdesc_num(fep);
++ if (entries_free >= fep->tx_wake_threshold)
+ netif_wake_queue(ndev);
+ }
+ }
+- return;
+-}
+
++ /* ERR006538: Keep the transmitter going */
++ if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
++ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
++}
+
+ /* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+@@ -876,8 +1217,11 @@
+
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+
+- if (pkt_received >= budget)
++ if (pkt_received >= budget) {
++ /* overwhelmed take a breath */
++ udelay(210);
+ break;
++ }
+ pkt_received++;
+
+ /* Since we have allocated space to hold a complete frame,
+@@ -886,8 +1230,7 @@
+ if ((status & BD_ENET_RX_LAST) == 0)
+ netdev_err(ndev, "rcv is not +last\n");
+
+- if (!fep->opened)
+- goto rx_processing_done;
++ writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+@@ -920,11 +1263,7 @@
+ pkt_len = bdp->cbd_datlen;
+ ndev->stats.rx_bytes += pkt_len;
+
+- if (fep->bufdesc_ex)
+- index = (struct bufdesc_ex *)bdp -
+- (struct bufdesc_ex *)fep->rx_bd_base;
+- else
+- index = bdp - fep->rx_bd_base;
++ index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
+ data = fep->rx_skbuff[index]->data;
+ dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+@@ -975,18 +1314,9 @@
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* Get receive timestamp from the skb */
+- if (fep->hwts_rx_en && fep->bufdesc_ex) {
+- struct skb_shared_hwtstamps *shhwtstamps =
+- skb_hwtstamps(skb);
+- unsigned long flags;
+-
+- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+-
+- spin_lock_irqsave(&fep->tmreg_lock, flags);
+- shhwtstamps->hwtstamp = ns_to_ktime(
+- timecounter_cyc2time(&fep->tc, ebdp->ts));
+- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+- }
++ if (fep->hwts_rx_en && fep->bufdesc_ex)
++ fec_enet_hwtstamp(fep, ebdp->ts,
++ skb_hwtstamps(skb));
+
+ if (fep->bufdesc_ex &&
+ (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+@@ -1044,29 +1374,25 @@
+ {
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+- do {
+- int_events = readl(fep->hwp + FEC_IEVENT);
+- writel(int_events, fep->hwp + FEC_IEVENT);
++ int_events = readl(fep->hwp + FEC_IEVENT);
++ writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+
+- if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
+- ret = IRQ_HANDLED;
++ if (int_events & napi_mask) {
++ ret = IRQ_HANDLED;
+
+- /* Disable the RX interrupt */
+- if (napi_schedule_prep(&fep->napi)) {
+- writel(FEC_RX_DISABLED_IMASK,
+- fep->hwp + FEC_IMASK);
+- __napi_schedule(&fep->napi);
+- }
+- }
++ /* Disable the NAPI interrupts */
++ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
++ napi_schedule(&fep->napi);
++ }
+
+- if (int_events & FEC_ENET_MII) {
+- ret = IRQ_HANDLED;
+- complete(&fep->mdio_done);
+- }
+- } while (int_events);
++ if (int_events & FEC_ENET_MII) {
++ ret = IRQ_HANDLED;
++ complete(&fep->mdio_done);
++ }
+
+ return ret;
+ }
+@@ -1074,8 +1400,16 @@
+ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+ {
+ struct net_device *ndev = napi->dev;
+- int pkts = fec_enet_rx(ndev, budget);
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ int pkts;
++
++ /*
++ * Clear any pending transmit or receive interrupts before
++ * processing the rings to avoid racing with the hardware.
++ */
++ writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
++
++ pkts = fec_enet_rx(ndev, budget);
+
+ fec_enet_tx(ndev);
+
+@@ -1173,14 +1507,23 @@
+ return;
+ }
+
+- if (phy_dev->link) {
++ /*
++ * If the netdev is down, or is going down, we're not interested
++ * in link state events, so just mark our idea of the link as down
++ * and ignore the event.
++ */
++ if (!netif_running(ndev) || !netif_device_present(ndev)) {
++ fep->link = 0;
++ } else if (phy_dev->link) {
+ if (!fep->link) {
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+
+- if (fep->full_duplex != phy_dev->duplex)
++ if (fep->full_duplex != phy_dev->duplex) {
++ fep->full_duplex = phy_dev->duplex;
+ status_change = 1;
++ }
+
+ if (phy_dev->speed != fep->speed) {
+ fep->speed = phy_dev->speed;
+@@ -1188,11 +1531,21 @@
+ }
+
+ /* if any of the above changed restart the FEC */
+- if (status_change)
+- fec_restart(ndev, phy_dev->duplex);
++ if (status_change) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
++ fec_restart(ndev);
++ netif_wake_queue(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
++ }
+ } else {
+ if (fep->link) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
+ fec_stop(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+@@ -1255,9 +1608,51 @@
+ return 0;
+ }
+
+-static int fec_enet_mdio_reset(struct mii_bus *bus)
++static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ int ret;
++
++ if (enable) {
++ pm_runtime_enable(&fep->pdev->dev);
++
++ ret = clk_prepare_enable(fep->clk_ahb);
++ if (ret)
++ return ret;
++ ret = clk_prepare_enable(fep->clk_ipg);
++ if (ret)
++ goto failed_clk_ipg;
++ if (fep->clk_enet_out) {
++ ret = clk_prepare_enable(fep->clk_enet_out);
++ if (ret)
++ goto failed_clk_enet_out;
++ }
++ if (fep->clk_ptp) {
++ ret = clk_prepare_enable(fep->clk_ptp);
++ if (ret)
++ goto failed_clk_ptp;
++ }
++ } else {
++ clk_disable_unprepare(fep->clk_ahb);
++ clk_disable_unprepare(fep->clk_ipg);
++ if (fep->clk_enet_out)
++ clk_disable_unprepare(fep->clk_enet_out);
++ if (fep->clk_ptp)
++ clk_disable_unprepare(fep->clk_ptp);
++
++ pm_runtime_disable(&fep->pdev->dev);
++ }
++
+ return 0;
++failed_clk_ptp:
++ if (fep->clk_enet_out)
++ clk_disable_unprepare(fep->clk_enet_out);
++failed_clk_enet_out:
++ clk_disable_unprepare(fep->clk_ipg);
++failed_clk_ipg:
++ clk_disable_unprepare(fep->clk_ahb);
++
++ return ret;
+ }
+
+ static int fec_enet_mii_probe(struct net_device *ndev)
+@@ -1304,6 +1699,7 @@
+ /* mask with MAC supported features */
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
+ phy_dev->supported &= PHY_GBIT_FEATURES;
++ phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
+ #if !defined(CONFIG_M5272)
+ phy_dev->supported |= SUPPORTED_Pause;
+ #endif
+@@ -1369,7 +1765,7 @@
+ * Reference Manual has an error on this, and gets fixed on i.MX6Q
+ * document.
+ */
+- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
++ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fep->phy_speed--;
+ fep->phy_speed <<= 1;
+@@ -1384,7 +1780,6 @@
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+- fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, fep->dev_id + 1);
+ fep->mii_bus->priv = fep;
+@@ -1508,6 +1903,9 @@
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ if (!fep->phy_dev)
++ return -ENODEV;
++
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+ "hardware only support enable/disable both tx and rx");
+@@ -1533,8 +1931,14 @@
+ fec_stop(ndev);
+ phy_start_aneg(fep->phy_dev);
+ }
+- if (netif_running(ndev))
+- fec_restart(ndev, 0);
++ if (netif_running(ndev)) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
++ fec_restart(ndev);
++ netif_wake_queue(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
++ }
+
+ return 0;
+ }
+@@ -1651,21 +2055,19 @@
+ }
+
+ static const struct ethtool_ops fec_enet_ethtool_ops = {
+-#if !defined(CONFIG_M5272)
+- .get_pauseparam = fec_enet_get_pauseparam,
+- .set_pauseparam = fec_enet_set_pauseparam,
+-#endif
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+- .get_link = ethtool_op_get_link,
+- .get_ts_info = fec_enet_get_ts_info,
+ .nway_reset = fec_enet_nway_reset,
++ .get_link = ethtool_op_get_link,
+ #ifndef CONFIG_M5272
+- .get_ethtool_stats = fec_enet_get_ethtool_stats,
++ .get_pauseparam = fec_enet_get_pauseparam,
++ .set_pauseparam = fec_enet_set_pauseparam,
+ .get_strings = fec_enet_get_strings,
++ .get_ethtool_stats = fec_enet_get_ethtool_stats,
+ .get_sset_count = fec_enet_get_sset_count,
+ #endif
++ .get_ts_info = fec_enet_get_ts_info,
+ };
+
+ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+@@ -1699,18 +2101,23 @@
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < fep->rx_ring_size; i++) {
+ skb = fep->rx_skbuff[i];
+-
+- if (bdp->cbd_bufaddr)
++ fep->rx_skbuff[i] = NULL;
++ if (skb) {
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+- if (skb)
+ dev_kfree_skb(skb);
++ }
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ }
+
+ bdp = fep->tx_bd_base;
+- for (i = 0; i < fep->tx_ring_size; i++)
++ for (i = 0; i < fep->tx_ring_size; i++) {
+ kfree(fep->tx_bounce[i]);
++ fep->tx_bounce[i] = NULL;
++ skb = fep->tx_skbuff[i];
++ fep->tx_skbuff[i] = NULL;
++ dev_kfree_skb(skb);
++ }
+ }
+
+ static int fec_enet_alloc_buffers(struct net_device *ndev)
+@@ -1722,21 +2129,23 @@
+
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < fep->rx_ring_size; i++) {
++ dma_addr_t addr;
++
+ skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+- if (!skb) {
+- fec_enet_free_buffers(ndev);
+- return -ENOMEM;
+- }
+- fep->rx_skbuff[i] = skb;
++ if (!skb)
++ goto err_alloc;
+
+- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
++ addr = dma_map_single(&fep->pdev->dev, skb->data,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+- fec_enet_free_buffers(ndev);
++ if (dma_mapping_error(&fep->pdev->dev, addr)) {
++ dev_kfree_skb(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Rx DMA memory map failed\n");
+- return -ENOMEM;
++ goto err_alloc;
+ }
++
++ fep->rx_skbuff[i] = skb;
++ bdp->cbd_bufaddr = addr;
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+@@ -1754,6 +2163,8 @@
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < fep->tx_ring_size; i++) {
+ fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
++ if (!fep->tx_bounce[i])
++ goto err_alloc;
+
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+@@ -1771,14 +2182,35 @@
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ return 0;
++
++ err_alloc:
++ fec_enet_free_buffers(ndev);
++ return -ENOMEM;
+ }
+
+ static int
+ fec_enet_open(struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
++ const struct platform_device_id *id_entry =
++ platform_get_device_id(fep->pdev);
+ int ret;
+
++ if (id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE)
++ pm_qos_add_request(&ndev->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY,
++ 0);
++ else
++ pm_qos_add_request(&ndev->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY,
++ PM_QOS_DEFAULT_VALUE);
++
++
++ pinctrl_pm_select_default_state(&fep->pdev->dev);
++ ret = fec_enet_clk_enable(ndev, true);
++ if (ret)
++ return ret;
++
+ /* I should reset the ring buffers here, but I don't yet know
+ * a simple way to do that.
+ */
+@@ -1794,10 +2226,12 @@
+ return ret;
+ }
+
++ pm_runtime_get_sync(&fep->pdev->dev);
++
++ fec_restart(ndev);
+ napi_enable(&fep->napi);
+ phy_start(fep->phy_dev);
+ netif_start_queue(ndev);
+- fep->opened = 1;
+ return 0;
+ }
+
+@@ -1806,17 +2240,22 @@
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+- /* Don't know what to do yet. */
+- napi_disable(&fep->napi);
+- fep->opened = 0;
+- netif_stop_queue(ndev);
+- fec_stop(ndev);
++ phy_stop(fep->phy_dev);
+
+- if (fep->phy_dev) {
+- phy_stop(fep->phy_dev);
+- phy_disconnect(fep->phy_dev);
++ if (netif_device_present(ndev)) {
++ napi_disable(&fep->napi);
++ netif_tx_disable(ndev);
++ fec_stop(ndev);
+ }
+
++ phy_disconnect(fep->phy_dev);
++ fep->phy_dev = NULL;
++
++ fec_enet_clk_enable(ndev, false);
++ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
++ pm_qos_remove_request(&ndev->pm_qos_req);
++ pm_runtime_put_sync_suspend(&fep->pdev->dev);
++
+ fec_enet_free_buffers(ndev);
+
+ return 0;
+@@ -1904,10 +2343,11 @@
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+- if (!is_valid_ether_addr(addr->sa_data))
+- return -EADDRNOTAVAIL;
+-
+- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
++ if (addr) {
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
++ }
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+@@ -1940,12 +2380,21 @@
+ }
+ #endif
+
++#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
++
+ static int fec_set_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
++ /* Quiesce the device if necessary */
++ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(netdev);
++ fec_stop(netdev);
++ }
++
+ netdev->features = features;
+
+ /* Receive checksum has been changed */
+@@ -1954,14 +2403,14 @@
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ else
+ fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
++ }
+
+- if (netif_running(netdev)) {
+- fec_stop(netdev);
+- fec_restart(netdev, fep->phy_dev->duplex);
+- netif_wake_queue(netdev);
+- } else {
+- fec_restart(netdev, fep->phy_dev->duplex);
+- }
++ /* Resume the device after updates */
++ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
++ fec_restart(netdev);
++ netif_wake_queue(netdev);
++ netif_tx_unlock_bh(netdev);
++ napi_enable(&fep->napi);
+ }
+
+ return 0;
+@@ -1993,23 +2442,43 @@
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *cbd_base;
++ int bd_size;
++
++ /* init the tx & rx ring size */
++ fep->tx_ring_size = TX_RING_SIZE;
++ fep->rx_ring_size = RX_RING_SIZE;
++
++ fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
++ fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
++
++ if (fep->bufdesc_ex)
++ fep->bufdesc_size = sizeof(struct bufdesc_ex);
++ else
++ fep->bufdesc_size = sizeof(struct bufdesc);
++ bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
++ fep->bufdesc_size;
+
+ /* Allocate memory for buffer descriptors. */
+- cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
++ cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
+ GFP_KERNEL);
+ if (!cbd_base)
+ return -ENOMEM;
+
++ fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
++ &fep->tso_hdrs_dma, GFP_KERNEL);
++ if (!fep->tso_hdrs) {
++ dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
++ return -ENOMEM;
++ }
++
+ memset(cbd_base, 0, PAGE_SIZE);
+
+ fep->netdev = ndev;
+
+ /* Get the Ethernet address */
+ fec_get_mac(ndev);
+-
+- /* init the tx & rx ring size */
+- fep->tx_ring_size = TX_RING_SIZE;
+- fep->rx_ring_size = RX_RING_SIZE;
++ /* make sure MAC we just acquired is programmed into the hw */
++ fec_set_mac_address(ndev, NULL);
+
+ /* Set receive and transmit descriptor base. */
+ fep->rx_bd_base = cbd_base;
+@@ -2027,22 +2496,22 @@
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+
+- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
++ if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
+ /* enable hw VLAN support */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+- }
+
+ if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
++ ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
++
+ /* enable hw accelerator */
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+- | NETIF_F_RXCSUM);
+- ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+- | NETIF_F_RXCSUM);
++ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ }
+
+- fec_restart(ndev, 0);
++ ndev->hw_features = ndev->features;
++
++ fec_restart(ndev);
+
+ return 0;
+ }
+@@ -2117,6 +2586,9 @@
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+ #endif
+
++ /* Select default pin state */
++ pinctrl_pm_select_default_state(&pdev->dev);
++
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(fep->hwp)) {
+@@ -2167,26 +2639,10 @@
+ fep->bufdesc_ex = 0;
+ }
+
+- ret = clk_prepare_enable(fep->clk_ahb);
++ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ goto failed_clk;
+
+- ret = clk_prepare_enable(fep->clk_ipg);
+- if (ret)
+- goto failed_clk_ipg;
+-
+- if (fep->clk_enet_out) {
+- ret = clk_prepare_enable(fep->clk_enet_out);
+- if (ret)
+- goto failed_clk_enet_out;
+- }
+-
+- if (fep->clk_ptp) {
+- ret = clk_prepare_enable(fep->clk_ptp);
+- if (ret)
+- goto failed_clk_ptp;
+- }
+-
+ fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(fep->reg_phy)) {
+ ret = regulator_enable(fep->reg_phy);
+@@ -2228,6 +2684,8 @@
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
++ fec_enet_clk_enable(ndev, false);
++ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ ret = register_netdev(ndev);
+ if (ret)
+@@ -2236,7 +2694,7 @@
+ if (fep->bufdesc_ex && fep->ptp_clock)
+ netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+
+- INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
++ INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+ return 0;
+
+ failed_register:
+@@ -2246,16 +2704,10 @@
+ failed_init:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
++ if (fep->ptp_clock)
++ ptp_clock_unregister(fep->ptp_clock);
+ failed_regulator:
+- if (fep->clk_ptp)
+- clk_disable_unprepare(fep->clk_ptp);
+-failed_clk_ptp:
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+-failed_clk_enet_out:
+- clk_disable_unprepare(fep->clk_ipg);
+-failed_clk_ipg:
+- clk_disable_unprepare(fep->clk_ahb);
++ fec_enet_clk_enable(ndev, false);
+ failed_clk:
+ failed_ioremap:
+ free_netdev(ndev);
+@@ -2269,42 +2721,40 @@
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+- cancel_delayed_work_sync(&(fep->delay_work.delay_work));
++ cancel_work_sync(&fep->tx_timeout_work);
+ unregister_netdev(ndev);
+ fec_enet_mii_remove(fep);
+ del_timer_sync(&fep->time_keep);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+- if (fep->clk_ptp)
+- clk_disable_unprepare(fep->clk_ptp);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+- clk_disable_unprepare(fep->clk_ipg);
+- clk_disable_unprepare(fep->clk_ahb);
++ fec_enet_clk_enable(ndev, false);
+ free_netdev(ndev);
+
+ return 0;
+ }
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ static int
+ fec_suspend(struct device *dev)
+ {
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ rtnl_lock();
+ if (netif_running(ndev)) {
+- fec_stop(ndev);
++ phy_stop(fep->phy_dev);
++ napi_disable(&fep->napi);
++ netif_tx_lock_bh(ndev);
+ netif_device_detach(ndev);
++ netif_tx_unlock_bh(ndev);
++ fec_stop(ndev);
+ }
+- if (fep->clk_ptp)
+- clk_disable_unprepare(fep->clk_ptp);
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+- clk_disable_unprepare(fep->clk_ipg);
+- clk_disable_unprepare(fep->clk_ahb);
++ rtnl_unlock();
++
++ fec_enet_clk_enable(ndev, false);
++ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+@@ -2325,48 +2775,49 @@
+ return ret;
+ }
+
+- ret = clk_prepare_enable(fep->clk_ahb);
+- if (ret)
+- goto failed_clk_ahb;
+-
+- ret = clk_prepare_enable(fep->clk_ipg);
++ pinctrl_pm_select_default_state(&fep->pdev->dev);
++ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+- goto failed_clk_ipg;
+-
+- if (fep->clk_enet_out) {
+- ret = clk_prepare_enable(fep->clk_enet_out);
+- if (ret)
+- goto failed_clk_enet_out;
+- }
+-
+- if (fep->clk_ptp) {
+- ret = clk_prepare_enable(fep->clk_ptp);
+- if (ret)
+- goto failed_clk_ptp;
+- }
++ goto failed_clk;
+
++ rtnl_lock();
+ if (netif_running(ndev)) {
+- fec_restart(ndev, fep->full_duplex);
++ fec_restart(ndev);
++ netif_tx_lock_bh(ndev);
+ netif_device_attach(ndev);
++ netif_tx_unlock_bh(ndev);
++ napi_enable(&fep->napi);
++ phy_start(fep->phy_dev);
+ }
++ rtnl_unlock();
+
+ return 0;
+
+-failed_clk_ptp:
+- if (fep->clk_enet_out)
+- clk_disable_unprepare(fep->clk_enet_out);
+-failed_clk_enet_out:
+- clk_disable_unprepare(fep->clk_ipg);
+-failed_clk_ipg:
+- clk_disable_unprepare(fep->clk_ahb);
+-failed_clk_ahb:
++failed_clk:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
+ }
++
++static int fec_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int fec_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static const struct dev_pm_ops fec_pm_ops = {
++ SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
++};
++
+ #endif /* CONFIG_PM_SLEEP */
+
+-static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+
+ static struct platform_driver fec_driver = {
+ .driver = {
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/fec_ptp.c linux-3.14.40/drivers/net/ethernet/freescale/fec_ptp.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/fec_ptp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/fec_ptp.c 2015-05-01 14:57:59.751427001 -0500
+@@ -372,6 +372,7 @@
+ fep->ptp_caps.n_alarm = 0;
+ fep->ptp_caps.n_ext_ts = 0;
+ fep->ptp_caps.n_per_out = 0;
++ fep->ptp_caps.n_pins = 0;
+ fep->ptp_caps.pps = 0;
+ fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjtime = fec_ptp_adjtime;
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c linux-3.14.40/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 2015-05-01 14:57:59.755427001 -0500
+@@ -91,6 +91,9 @@
+ u16 pkt_len, sc;
+ int curidx;
+
++ if (budget <= 0)
++ return received;
++
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+@@ -789,10 +792,6 @@
+ phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
+ iface);
+ if (!phydev) {
+- phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
+- iface);
+- }
+- if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+@@ -1026,9 +1025,16 @@
+ fpi->use_napi = 1;
+ fpi->napi_weight = 17;
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+- if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
+- NULL)))
+- goto out_free_fpi;
++ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
++ err = of_phy_register_fixed_link(ofdev->dev.of_node);
++ if (err)
++ goto out_free_fpi;
++
++ /* In the case of a fixed PHY, the DT node associated
++ * to the PHY is the Ethernet MAC DT node.
++ */
++ fpi->phy_node = ofdev->dev.of_node;
++ }
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
+ phy_connection_type = of_get_property(ofdev->dev.of_node,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/fs_enet/mii-fec.c linux-3.14.40/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/fs_enet/mii-fec.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/fs_enet/mii-fec.c 2015-05-01 14:57:59.803427001 -0500
+@@ -95,12 +95,6 @@
+
+ }
+
+-static int fs_enet_fec_mii_reset(struct mii_bus *bus)
+-{
+- /* nothing here - for now */
+- return 0;
+-}
+-
+ static struct of_device_id fs_enet_mdio_fec_match[];
+ static int fs_enet_mdio_probe(struct platform_device *ofdev)
+ {
+@@ -128,7 +122,6 @@
+ new_bus->name = "FEC MII Bus";
+ new_bus->read = &fs_enet_fec_mii_read;
+ new_bus->write = &fs_enet_fec_mii_write;
+- new_bus->reset = &fs_enet_fec_mii_reset;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ if (ret)
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.14.40/drivers/net/ethernet/freescale/gianfar.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/gianfar.c 2015-05-01 14:57:59.803427001 -0500
+@@ -9,7 +9,7 @@
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
++ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+@@ -121,7 +121,7 @@
+ static irqreturn_t gfar_transmit(int irq, void *dev_id);
+ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
+ static void adjust_link(struct net_device *dev);
+-static void init_registers(struct net_device *dev);
++static noinline void gfar_update_link_state(struct gfar_private *priv);
+ static int init_phy(struct net_device *dev);
+ static int gfar_probe(struct platform_device *ofdev);
+ static int gfar_remove(struct platform_device *ofdev);
+@@ -129,8 +129,10 @@
+ static void gfar_set_multi(struct net_device *dev);
+ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+ static void gfar_configure_serdes(struct net_device *dev);
+-static int gfar_poll(struct napi_struct *napi, int budget);
+-static int gfar_poll_sq(struct napi_struct *napi, int budget);
++static int gfar_poll_rx(struct napi_struct *napi, int budget);
++static int gfar_poll_tx(struct napi_struct *napi, int budget);
++static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
++static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static void gfar_netpoll(struct net_device *dev);
+ #endif
+@@ -138,9 +140,7 @@
+ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
+-void gfar_halt(struct net_device *dev);
+-static void gfar_halt_nodisable(struct net_device *dev);
+-void gfar_start(struct net_device *dev);
++static void gfar_halt_nodisable(struct gfar_private *priv);
+ static void gfar_clear_exact_match(struct net_device *dev);
+ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr);
+@@ -332,72 +332,76 @@
+ }
+ }
+
+-static void gfar_init_mac(struct net_device *ndev)
++static void gfar_rx_buff_size_config(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(ndev);
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- u32 rctrl = 0;
+- u32 tctrl = 0;
+- u32 attrs = 0;
+-
+- /* write the tx/rx base registers */
+- gfar_init_tx_rx_base(priv);
+-
+- /* Configure the coalescing support */
+- gfar_configure_coalescing_all(priv);
++ int frame_size = priv->ndev->mtu + ETH_HLEN;
+
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
++ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
++ priv->uses_rxfcb = 1;
++
++ if (priv->hwts_rx_en)
++ priv->uses_rxfcb = 1;
++
++ if (priv->uses_rxfcb)
++ frame_size += GMAC_FCB_LEN;
++
++ frame_size += priv->padding;
++
++ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
++ INCREMENTAL_BUFFER_SIZE;
++
++ priv->rx_buffer_size = frame_size;
++}
++
++static void gfar_mac_rx_config(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 rctrl = 0;
++
+ if (priv->rx_filer_enable) {
+ rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+- gfar_write(&regs->rir0, DEFAULT_RIR0);
++ if (priv->poll_mode == GFAR_SQ_POLLING)
++ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
++ else /* GFAR_MQ_POLLING */
++ gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
+ }
+
+ /* Restore PROMISC mode */
+- if (ndev->flags & IFF_PROMISC)
++ if (priv->ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+- if (ndev->features & NETIF_F_RXCSUM) {
++ if (priv->ndev->features & NETIF_F_RXCSUM)
+ rctrl |= RCTRL_CHECKSUMMING;
+- priv->uses_rxfcb = 1;
+- }
+-
+- if (priv->extended_hash) {
+- rctrl |= RCTRL_EXTHASH;
+
+- gfar_clear_exact_match(ndev);
+- rctrl |= RCTRL_EMEN;
+- }
++ if (priv->extended_hash)
++ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
+
+ if (priv->padding) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(priv->padding);
+ }
+
+- /* Insert receive time stamps into padding alignment bytes */
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+- rctrl &= ~RCTRL_PAL_MASK;
+- rctrl |= RCTRL_PADDING(8);
+- priv->padding = 8;
+- }
+-
+ /* Enable HW time stamping if requested from user space */
+- if (priv->hwts_rx_en) {
++ if (priv->hwts_rx_en)
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+- priv->uses_rxfcb = 1;
+- }
+
+- if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
++ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+- priv->uses_rxfcb = 1;
+- }
+
+ /* Init rctrl based on our settings */
+ gfar_write(&regs->rctrl, rctrl);
++}
+
+- if (ndev->features & NETIF_F_IP_CSUM)
++static void gfar_mac_tx_config(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 tctrl = 0;
++
++ if (priv->ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+ if (priv->prio_sched_en)
+@@ -408,30 +412,51 @@
+ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+ }
+
+- gfar_write(&regs->tctrl, tctrl);
++ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
++ tctrl |= TCTRL_VLINS;
+
+- /* Set the extraction length and index */
+- attrs = ATTRELI_EL(priv->rx_stash_size) |
+- ATTRELI_EI(priv->rx_stash_index);
++ gfar_write(&regs->tctrl, tctrl);
++}
+
+- gfar_write(&regs->attreli, attrs);
++static void gfar_configure_coalescing(struct gfar_private *priv,
++ unsigned long tx_mask, unsigned long rx_mask)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 __iomem *baddr;
+
+- /* Start with defaults, and add stashing or locking
+- * depending on the approprate variables
+- */
+- attrs = ATTR_INIT_SETTINGS;
++ if (priv->mode == MQ_MG_MODE) {
++ int i = 0;
+
+- if (priv->bd_stash_en)
+- attrs |= ATTR_BDSTASH;
++ baddr = &regs->txic0;
++ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
++ gfar_write(baddr + i, 0);
++ if (likely(priv->tx_queue[i]->txcoalescing))
++ gfar_write(baddr + i, priv->tx_queue[i]->txic);
++ }
+
+- if (priv->rx_stash_size != 0)
+- attrs |= ATTR_BUFSTASH;
++ baddr = &regs->rxic0;
++ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
++ gfar_write(baddr + i, 0);
++ if (likely(priv->rx_queue[i]->rxcoalescing))
++ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
++ }
++ } else {
++ /* Backward compatible case -- even if we enable
++ * multiple queues, there's only single reg to program
++ */
++ gfar_write(&regs->txic, 0);
++ if (likely(priv->tx_queue[0]->txcoalescing))
++ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+- gfar_write(&regs->attr, attrs);
++ gfar_write(&regs->rxic, 0);
++ if (unlikely(priv->rx_queue[0]->rxcoalescing))
++ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
++ }
++}
+
+- gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
+- gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
+- gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
++void gfar_configure_coalescing_all(struct gfar_private *priv)
++{
++ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ }
+
+ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+@@ -479,12 +504,27 @@
+ #endif
+ };
+
+-void lock_rx_qs(struct gfar_private *priv)
++static void gfar_ints_disable(struct gfar_private *priv)
+ {
+ int i;
++ for (i = 0; i < priv->num_grps; i++) {
++ struct gfar __iomem *regs = priv->gfargrp[i].regs;
++ /* Clear IEVENT */
++ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+- for (i = 0; i < priv->num_rx_queues; i++)
+- spin_lock(&priv->rx_queue[i]->rxlock);
++ /* Initialize IMASK */
++ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
++ }
++}
++
++static void gfar_ints_enable(struct gfar_private *priv)
++{
++ int i;
++ for (i = 0; i < priv->num_grps; i++) {
++ struct gfar __iomem *regs = priv->gfargrp[i].regs;
++ /* Unmask the interrupts we look for */
++ gfar_write(&regs->imask, IMASK_DEFAULT);
++ }
+ }
+
+ void lock_tx_qs(struct gfar_private *priv)
+@@ -495,23 +535,50 @@
+ spin_lock(&priv->tx_queue[i]->txlock);
+ }
+
+-void unlock_rx_qs(struct gfar_private *priv)
++void unlock_tx_qs(struct gfar_private *priv)
+ {
+ int i;
+
+- for (i = 0; i < priv->num_rx_queues; i++)
+- spin_unlock(&priv->rx_queue[i]->rxlock);
++ for (i = 0; i < priv->num_tx_queues; i++)
++ spin_unlock(&priv->tx_queue[i]->txlock);
+ }
+
+-void unlock_tx_qs(struct gfar_private *priv)
++static int gfar_alloc_tx_queues(struct gfar_private *priv)
+ {
+ int i;
+
+- for (i = 0; i < priv->num_tx_queues; i++)
+- spin_unlock(&priv->tx_queue[i]->txlock);
++ for (i = 0; i < priv->num_tx_queues; i++) {
++ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
++ GFP_KERNEL);
++ if (!priv->tx_queue[i])
++ return -ENOMEM;
++
++ priv->tx_queue[i]->tx_skbuff = NULL;
++ priv->tx_queue[i]->qindex = i;
++ priv->tx_queue[i]->dev = priv->ndev;
++ spin_lock_init(&(priv->tx_queue[i]->txlock));
++ }
++ return 0;
++}
++
++static int gfar_alloc_rx_queues(struct gfar_private *priv)
++{
++ int i;
++
++ for (i = 0; i < priv->num_rx_queues; i++) {
++ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
++ GFP_KERNEL);
++ if (!priv->rx_queue[i])
++ return -ENOMEM;
++
++ priv->rx_queue[i]->rx_skbuff = NULL;
++ priv->rx_queue[i]->qindex = i;
++ priv->rx_queue[i]->dev = priv->ndev;
++ }
++ return 0;
+ }
+
+-static void free_tx_pointers(struct gfar_private *priv)
++static void gfar_free_tx_queues(struct gfar_private *priv)
+ {
+ int i;
+
+@@ -519,7 +586,7 @@
+ kfree(priv->tx_queue[i]);
+ }
+
+-static void free_rx_pointers(struct gfar_private *priv)
++static void gfar_free_rx_queues(struct gfar_private *priv)
+ {
+ int i;
+
+@@ -553,23 +620,26 @@
+ {
+ int i;
+
+- for (i = 0; i < priv->num_grps; i++)
+- napi_disable(&priv->gfargrp[i].napi);
++ for (i = 0; i < priv->num_grps; i++) {
++ napi_disable(&priv->gfargrp[i].napi_rx);
++ napi_disable(&priv->gfargrp[i].napi_tx);
++ }
+ }
+
+ static void enable_napi(struct gfar_private *priv)
+ {
+ int i;
+
+- for (i = 0; i < priv->num_grps; i++)
+- napi_enable(&priv->gfargrp[i].napi);
++ for (i = 0; i < priv->num_grps; i++) {
++ napi_enable(&priv->gfargrp[i].napi_rx);
++ napi_enable(&priv->gfargrp[i].napi_tx);
++ }
+ }
+
+ static int gfar_parse_group(struct device_node *np,
+ struct gfar_private *priv, const char *model)
+ {
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
+- u32 *queue_mask;
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+@@ -598,16 +668,52 @@
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
+ if (priv->mode == MQ_MG_MODE) {
+- queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+- grp->rx_bit_map = queue_mask ?
+- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+- queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+- grp->tx_bit_map = queue_mask ?
+- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
++ u32 *rxq_mask, *txq_mask;
++ rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
++ txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
++
++ if (priv->poll_mode == GFAR_SQ_POLLING) {
++ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
++ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
++ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
++ } else { /* GFAR_MQ_POLLING */
++ grp->rx_bit_map = rxq_mask ?
++ *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
++ grp->tx_bit_map = txq_mask ?
++ *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
++ }
+ } else {
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
+ }
++
++ /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
++ * right to left, so we need to revert the 8 bits to get the q index
++ */
++ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
++ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
++
++ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
++ * also assign queues to groups
++ */
++ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
++ if (!grp->rx_queue)
++ grp->rx_queue = priv->rx_queue[i];
++ grp->num_rx_queues++;
++ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
++ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
++ priv->rx_queue[i]->grp = grp;
++ }
++
++ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
++ if (!grp->tx_queue)
++ grp->tx_queue = priv->tx_queue[i];
++ grp->num_tx_queues++;
++ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
++ priv->tqueue |= (TQUEUE_EN0 >> i);
++ priv->tx_queue[i]->grp = grp;
++ }
++
+ priv->num_grps++;
+
+ return 0;
+@@ -628,13 +734,45 @@
+ const u32 *stash_idx;
+ unsigned int num_tx_qs, num_rx_qs;
+ u32 *tx_queues, *rx_queues;
++ unsigned short mode, poll_mode;
+
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+- /* parse the num of tx and rx queues */
++ if (of_device_is_compatible(np, "fsl,etsec2")) {
++ mode = MQ_MG_MODE;
++ poll_mode = GFAR_SQ_POLLING;
++ } else {
++ mode = SQ_SG_MODE;
++ poll_mode = GFAR_SQ_POLLING;
++ }
++
++ /* parse the num of HW tx and rx queues */
+ tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+- num_tx_qs = tx_queues ? *tx_queues : 1;
++ rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
++
++ if (mode == SQ_SG_MODE) {
++ num_tx_qs = 1;
++ num_rx_qs = 1;
++ } else { /* MQ_MG_MODE */
++ /* get the actual number of supported groups */
++ unsigned int num_grps = of_get_available_child_count(np);
++
++ if (num_grps == 0 || num_grps > MAXGROUPS) {
++ dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
++ num_grps);
++ pr_err("Cannot do alloc_etherdev, aborting\n");
++ return -EINVAL;
++ }
++
++ if (poll_mode == GFAR_SQ_POLLING) {
++ num_tx_qs = num_grps; /* one txq per int group */
++ num_rx_qs = num_grps; /* one rxq per int group */
++ } else { /* GFAR_MQ_POLLING */
++ num_tx_qs = tx_queues ? *tx_queues : 1;
++ num_rx_qs = rx_queues ? *rx_queues : 1;
++ }
++ }
+
+ if (num_tx_qs > MAX_TX_QS) {
+ pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+@@ -643,9 +781,6 @@
+ return -EINVAL;
+ }
+
+- rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+- num_rx_qs = rx_queues ? *rx_queues : 1;
+-
+ if (num_rx_qs > MAX_RX_QS) {
+ pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_rx_qs, MAX_RX_QS);
+@@ -661,10 +796,20 @@
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+
++ priv->mode = mode;
++ priv->poll_mode = poll_mode;
++
+ priv->num_tx_queues = num_tx_qs;
+ netif_set_real_num_rx_queues(dev, num_rx_qs);
+ priv->num_rx_queues = num_rx_qs;
+- priv->num_grps = 0x0;
++
++ err = gfar_alloc_tx_queues(priv);
++ if (err)
++ goto tx_alloc_failed;
++
++ err = gfar_alloc_rx_queues(priv);
++ if (err)
++ goto rx_alloc_failed;
+
+ /* Init Rx queue filer rule set linked list */
+ INIT_LIST_HEAD(&priv->rx_list.list);
+@@ -677,52 +822,18 @@
+ priv->gfargrp[i].regs = NULL;
+
+ /* Parse and initialize group specific information */
+- if (of_device_is_compatible(np, "fsl,etsec2")) {
+- priv->mode = MQ_MG_MODE;
++ if (priv->mode == MQ_MG_MODE) {
+ for_each_child_of_node(np, child) {
+ err = gfar_parse_group(child, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+- } else {
+- priv->mode = SQ_SG_MODE;
++ } else { /* SQ_SG_MODE */
+ err = gfar_parse_group(np, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+
+- for (i = 0; i < priv->num_tx_queues; i++)
+- priv->tx_queue[i] = NULL;
+- for (i = 0; i < priv->num_rx_queues; i++)
+- priv->rx_queue[i] = NULL;
+-
+- for (i = 0; i < priv->num_tx_queues; i++) {
+- priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+- GFP_KERNEL);
+- if (!priv->tx_queue[i]) {
+- err = -ENOMEM;
+- goto tx_alloc_failed;
+- }
+- priv->tx_queue[i]->tx_skbuff = NULL;
+- priv->tx_queue[i]->qindex = i;
+- priv->tx_queue[i]->dev = dev;
+- spin_lock_init(&(priv->tx_queue[i]->txlock));
+- }
+-
+- for (i = 0; i < priv->num_rx_queues; i++) {
+- priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+- GFP_KERNEL);
+- if (!priv->rx_queue[i]) {
+- err = -ENOMEM;
+- goto rx_alloc_failed;
+- }
+- priv->rx_queue[i]->rx_skbuff = NULL;
+- priv->rx_queue[i]->qindex = i;
+- priv->rx_queue[i]->dev = dev;
+- spin_lock_init(&(priv->rx_queue[i]->rxlock));
+- }
+-
+-
+ stash = of_get_property(np, "bd-stash", NULL);
+
+ if (stash) {
+@@ -749,17 +860,16 @@
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ if (model && !strcasecmp(model, "TSEC"))
+- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
++ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
+ if (model && !strcasecmp(model, "eTSEC"))
+- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
++ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+- FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+@@ -779,17 +889,28 @@
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
++ /* In the case of a fixed PHY, the DT node associated
++ * to the PHY is the Ethernet MAC DT node.
++ */
++ if (of_phy_is_fixed_link(np)) {
++ err = of_phy_register_fixed_link(np);
++ if (err)
++ goto err_grp_init;
++
++ priv->phy_node = np;
++ }
++
+ /* Find the TBI PHY. If it's not there, we don't support SGMII */
+ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ return 0;
+
+-rx_alloc_failed:
+- free_rx_pointers(priv);
+-tx_alloc_failed:
+- free_tx_pointers(priv);
+ err_grp_init:
+ unmap_group_regs(priv);
++rx_alloc_failed:
++ gfar_free_rx_queues(priv);
++tx_alloc_failed:
++ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+ return err;
+ }
+@@ -822,18 +943,16 @@
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (priv->hwts_rx_en) {
+- stop_gfar(netdev);
+ priv->hwts_rx_en = 0;
+- startup_gfar(netdev);
++ reset_gfar(netdev);
+ }
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ if (!priv->hwts_rx_en) {
+- stop_gfar(netdev);
+ priv->hwts_rx_en = 1;
+- startup_gfar(netdev);
++ reset_gfar(netdev);
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+@@ -875,19 +994,6 @@
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+ }
+
+-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+-{
+- unsigned int new_bit_map = 0x0;
+- int mask = 0x1 << (max_qs - 1), i;
+-
+- for (i = 0; i < max_qs; i++) {
+- if (bit_map & mask)
+- new_bit_map = new_bit_map + (1 << i);
+- mask = mask >> 0x1;
+- }
+- return new_bit_map;
+-}
+-
+ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
+ {
+@@ -1005,100 +1111,141 @@
+ priv->errata);
+ }
+
+-/* Set up the ethernet device structure, private data,
+- * and anything else we need before we start
+- */
+-static int gfar_probe(struct platform_device *ofdev)
++void gfar_mac_reset(struct gfar_private *priv)
+ {
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+- struct net_device *dev = NULL;
+- struct gfar_private *priv = NULL;
+- struct gfar __iomem *regs = NULL;
+- int err = 0, i, grp_idx = 0;
+- u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+- u32 isrg = 0;
+- u32 __iomem *baddr;
+-
+- err = gfar_of_init(ofdev, &dev);
+-
+- if (err)
+- return err;
+-
+- priv = netdev_priv(dev);
+- priv->ndev = dev;
+- priv->ofdev = ofdev;
+- priv->dev = &ofdev->dev;
+- SET_NETDEV_DEV(dev, &ofdev->dev);
+-
+- spin_lock_init(&priv->bflock);
+- INIT_WORK(&priv->reset_task, gfar_reset_task);
+-
+- platform_set_drvdata(ofdev, priv);
+- regs = priv->gfargrp[0].regs;
+-
+- gfar_detect_errata(priv);
+-
+- /* Stop the DMA engine now, in case it was running before
+- * (The firmware could have used it, and left it running).
+- */
+- gfar_halt(dev);
+
+ /* Reset MAC layer */
+ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ /* We need to delay at least 3 TX clocks */
+- udelay(2);
++ udelay(3);
+
+- tempval = 0;
+- if (!priv->pause_aneg_en && priv->tx_pause_en)
+- tempval |= MACCFG1_TX_FLOW;
+- if (!priv->pause_aneg_en && priv->rx_pause_en)
+- tempval |= MACCFG1_RX_FLOW;
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
+- gfar_write(&regs->maccfg1, tempval);
++ gfar_write(&regs->maccfg1, 0);
+
+- /* Initialize MACCFG2. */
+- tempval = MACCFG2_INIT_SETTINGS;
+- if (gfar_has_errata(priv, GFAR_ERRATA_74))
+- tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+- gfar_write(&regs->maccfg2, tempval);
++ udelay(3);
+
+- /* Initialize ECNTRL */
+- gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
++ /* Compute rx_buff_size based on config flags */
++ gfar_rx_buff_size_config(priv);
+
+- /* Set the dev->base_addr to the gfar reg region */
+- dev->base_addr = (unsigned long) regs;
++ /* Initialize the max receive frame/buffer lengths */
++ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
++ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+- /* Fill in the dev structure */
+- dev->watchdog_timeo = TX_TIMEOUT;
+- dev->mtu = 1500;
+- dev->netdev_ops = &gfar_netdev_ops;
+- dev->ethtool_ops = &gfar_ethtool_ops;
++ /* Initialize the Minimum Frame Length Register */
++ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+
+- /* Register for napi ...We are registering NAPI for each grp */
+- if (priv->mode == SQ_SG_MODE)
+- netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
+- GFAR_DEV_WEIGHT);
+- else
+- for (i = 0; i < priv->num_grps; i++)
+- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+- GFAR_DEV_WEIGHT);
++ /* Initialize MACCFG2. */
++ tempval = MACCFG2_INIT_SETTINGS;
+
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+- NETIF_F_RXCSUM;
+- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+- }
++ /* If the mtu is larger than the max size for standard
++ * ethernet frames (ie, a jumbo frame), then set maccfg2
++ * to allow huge frames, and to check the length
++ */
++ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
++ gfar_has_errata(priv, GFAR_ERRATA_74))
++ tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+- NETIF_F_HW_VLAN_CTAG_RX;
+- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
++ gfar_write(&regs->maccfg2, tempval);
++
++ /* Clear mac addr hash registers */
++ gfar_write(&regs->igaddr0, 0);
++ gfar_write(&regs->igaddr1, 0);
++ gfar_write(&regs->igaddr2, 0);
++ gfar_write(&regs->igaddr3, 0);
++ gfar_write(&regs->igaddr4, 0);
++ gfar_write(&regs->igaddr5, 0);
++ gfar_write(&regs->igaddr6, 0);
++ gfar_write(&regs->igaddr7, 0);
++
++ gfar_write(&regs->gaddr0, 0);
++ gfar_write(&regs->gaddr1, 0);
++ gfar_write(&regs->gaddr2, 0);
++ gfar_write(&regs->gaddr3, 0);
++ gfar_write(&regs->gaddr4, 0);
++ gfar_write(&regs->gaddr5, 0);
++ gfar_write(&regs->gaddr6, 0);
++ gfar_write(&regs->gaddr7, 0);
++
++ if (priv->extended_hash)
++ gfar_clear_exact_match(priv->ndev);
++
++ gfar_mac_rx_config(priv);
++
++ gfar_mac_tx_config(priv);
++
++ gfar_set_mac_address(priv->ndev);
++
++ gfar_set_multi(priv->ndev);
++
++ /* clear ievent and imask before configuring coalescing */
++ gfar_ints_disable(priv);
++
++ /* Configure the coalescing support */
++ gfar_configure_coalescing_all(priv);
++}
++
++static void gfar_hw_init(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 attrs;
++
++ /* Stop the DMA engine now, in case it was running before
++ * (The firmware could have used it, and left it running).
++ */
++ gfar_halt(priv);
++
++ gfar_mac_reset(priv);
++
++ /* Zero out the rmon mib registers if it has them */
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
++ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
++
++ /* Mask off the CAM interrupts */
++ gfar_write(&regs->rmon.cam1, 0xffffffff);
++ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
++ /* Initialize ECNTRL */
++ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
++
++ /* Set the extraction length and index */
++ attrs = ATTRELI_EL(priv->rx_stash_size) |
++ ATTRELI_EI(priv->rx_stash_index);
++
++ gfar_write(&regs->attreli, attrs);
++
++ /* Start with defaults, and add stashing
++ * depending on driver parameters
++ */
++ attrs = ATTR_INIT_SETTINGS;
++
++ if (priv->bd_stash_en)
++ attrs |= ATTR_BDSTASH;
++
++ if (priv->rx_stash_size != 0)
++ attrs |= ATTR_BUFSTASH;
++
++ gfar_write(&regs->attr, attrs);
++
++ /* FIFO configs */
++ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
++ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
++ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
++
++ /* Program the interrupt steering regs, only for MG devices */
++ if (priv->num_grps > 1)
++ gfar_write_isrg(priv);
++}
++
++static void gfar_init_addr_hash_table(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+ priv->extended_hash = 1;
+ priv->hash_width = 9;
+@@ -1133,68 +1280,81 @@
+ priv->hash_regs[6] = &regs->gaddr6;
+ priv->hash_regs[7] = &regs->gaddr7;
+ }
++}
+
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
+- priv->padding = DEFAULT_PADDING;
+- else
+- priv->padding = 0;
++/* Set up the ethernet device structure, private data,
++ * and anything else we need before we start
++ */
++static int gfar_probe(struct platform_device *ofdev)
++{
++ struct net_device *dev = NULL;
++ struct gfar_private *priv = NULL;
++ int err = 0, i;
+
+- if (dev->features & NETIF_F_IP_CSUM ||
+- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+- dev->needed_headroom = GMAC_FCB_LEN;
++ err = gfar_of_init(ofdev, &dev);
+
+- /* Program the isrg regs only if number of grps > 1 */
+- if (priv->num_grps > 1) {
+- baddr = &regs->isrg0;
+- for (i = 0; i < priv->num_grps; i++) {
+- isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+- isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+- gfar_write(baddr, isrg);
+- baddr++;
+- isrg = 0x0;
++ if (err)
++ return err;
++
++ priv = netdev_priv(dev);
++ priv->ndev = dev;
++ priv->ofdev = ofdev;
++ priv->dev = &ofdev->dev;
++ SET_NETDEV_DEV(dev, &ofdev->dev);
++
++ spin_lock_init(&priv->bflock);
++ INIT_WORK(&priv->reset_task, gfar_reset_task);
++
++ platform_set_drvdata(ofdev, priv);
++
++ gfar_detect_errata(priv);
++
++ /* Set the dev->base_addr to the gfar reg region */
++ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
++
++ /* Fill in the dev structure */
++ dev->watchdog_timeo = TX_TIMEOUT;
++ dev->mtu = 1500;
++ dev->netdev_ops = &gfar_netdev_ops;
++ dev->ethtool_ops = &gfar_ethtool_ops;
++
++ /* Register for napi ...We are registering NAPI for each grp */
++ for (i = 0; i < priv->num_grps; i++) {
++ if (priv->poll_mode == GFAR_SQ_POLLING) {
++ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
++ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
++ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
++ gfar_poll_tx_sq, 2);
++ } else {
++ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
++ gfar_poll_rx, GFAR_DEV_WEIGHT);
++ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
++ gfar_poll_tx, 2);
+ }
+ }
+
+- /* Need to reverse the bit maps as bit_map's MSB is q0
+- * but, for_each_set_bit parses from right to left, which
+- * basically reverses the queue numbers
+- */
+- for (i = 0; i< priv->num_grps; i++) {
+- priv->gfargrp[i].tx_bit_map =
+- reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+- priv->gfargrp[i].rx_bit_map =
+- reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
++ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
++ NETIF_F_RXCSUM;
++ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
++ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ }
+
+- /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+- * also assign queues to groups
+- */
+- for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+- priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+-
+- for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+- priv->num_rx_queues) {
+- priv->gfargrp[grp_idx].num_rx_queues++;
+- priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+- rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+- rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+- }
+- priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+-
+- for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+- priv->num_tx_queues) {
+- priv->gfargrp[grp_idx].num_tx_queues++;
+- priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+- tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+- tqueue = tqueue | (TQUEUE_EN0 >> i);
+- }
+- priv->gfargrp[grp_idx].rstat = rstat;
+- priv->gfargrp[grp_idx].tstat = tstat;
+- rstat = tstat =0;
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
++ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_RX;
++ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+- gfar_write(&regs->rqueue, rqueue);
+- gfar_write(&regs->tqueue, tqueue);
++ gfar_init_addr_hash_table(priv);
++
++ /* Insert receive time stamps into padding alignment bytes */
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
++ priv->padding = 8;
++
++ if (dev->features & NETIF_F_IP_CSUM ||
++ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
++ dev->needed_headroom = GMAC_FCB_LEN;
+
+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+
+@@ -1220,6 +1380,10 @@
+ if (priv->num_tx_queues == 1)
+ priv->prio_sched_en = 1;
+
++ set_bit(GFAR_DOWN, &priv->state);
++
++ gfar_hw_init(priv);
++
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+@@ -1251,9 +1415,6 @@
+ /* Initialize the filer table */
+ gfar_init_filer_table(priv);
+
+- /* Create all the sysfs files */
+- gfar_init_sysfs(dev);
+-
+ /* Print out the device info */
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+@@ -1272,8 +1433,8 @@
+
+ register_fail:
+ unmap_group_regs(priv);
+- free_tx_pointers(priv);
+- free_rx_pointers(priv);
++ gfar_free_rx_queues(priv);
++ gfar_free_tx_queues(priv);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ if (priv->tbi_node)
+@@ -1293,6 +1454,8 @@
+
+ unregister_netdev(priv->ndev);
+ unmap_group_regs(priv);
++ gfar_free_rx_queues(priv);
++ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+
+ return 0;
+@@ -1318,9 +1481,8 @@
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+- lock_rx_qs(priv);
+
+- gfar_halt_nodisable(ndev);
++ gfar_halt_nodisable(priv);
+
+ /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ tempval = gfar_read(&regs->maccfg1);
+@@ -1332,7 +1494,6 @@
+
+ gfar_write(&regs->maccfg1, tempval);
+
+- unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+@@ -1378,15 +1539,13 @@
+ */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+- lock_rx_qs(priv);
+
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+- gfar_start(ndev);
++ gfar_start(priv);
+
+- unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+@@ -1413,10 +1572,11 @@
+ return -ENOMEM;
+ }
+
+- init_registers(ndev);
+- gfar_set_mac_address(ndev);
+- gfar_init_mac(ndev);
+- gfar_start(ndev);
++ gfar_mac_reset(priv);
++
++ gfar_init_tx_rx_base(priv);
++
++ gfar_start(priv);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+@@ -1511,9 +1671,6 @@
+
+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+- if (!priv->phydev)
+- priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+- interface);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+@@ -1574,57 +1731,6 @@
+ BMCR_SPEED1000);
+ }
+
+-static void init_registers(struct net_device *dev)
+-{
+- struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = NULL;
+- int i;
+-
+- for (i = 0; i < priv->num_grps; i++) {
+- regs = priv->gfargrp[i].regs;
+- /* Clear IEVENT */
+- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+-
+- /* Initialize IMASK */
+- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+- }
+-
+- regs = priv->gfargrp[0].regs;
+- /* Init hash registers to zero */
+- gfar_write(&regs->igaddr0, 0);
+- gfar_write(&regs->igaddr1, 0);
+- gfar_write(&regs->igaddr2, 0);
+- gfar_write(&regs->igaddr3, 0);
+- gfar_write(&regs->igaddr4, 0);
+- gfar_write(&regs->igaddr5, 0);
+- gfar_write(&regs->igaddr6, 0);
+- gfar_write(&regs->igaddr7, 0);
+-
+- gfar_write(&regs->gaddr0, 0);
+- gfar_write(&regs->gaddr1, 0);
+- gfar_write(&regs->gaddr2, 0);
+- gfar_write(&regs->gaddr3, 0);
+- gfar_write(&regs->gaddr4, 0);
+- gfar_write(&regs->gaddr5, 0);
+- gfar_write(&regs->gaddr6, 0);
+- gfar_write(&regs->gaddr7, 0);
+-
+- /* Zero out the rmon mib registers if it has them */
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+- memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
+-
+- /* Mask off the CAM interrupts */
+- gfar_write(&regs->rmon.cam1, 0xffffffff);
+- gfar_write(&regs->rmon.cam2, 0xffffffff);
+- }
+-
+- /* Initialize the max receive buffer length */
+- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+-
+- /* Initialize the Minimum Frame Length Register */
+- gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+-}
+-
+ static int __gfar_is_rx_idle(struct gfar_private *priv)
+ {
+ u32 res;
+@@ -1648,23 +1754,13 @@
+ }
+
+ /* Halt the receive and transmit queues */
+-static void gfar_halt_nodisable(struct net_device *dev)
++static void gfar_halt_nodisable(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = NULL;
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+- int i;
+-
+- for (i = 0; i < priv->num_grps; i++) {
+- regs = priv->gfargrp[i].regs;
+- /* Mask all interrupts */
+- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+- /* Clear all interrupts */
+- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+- }
++ gfar_ints_disable(priv);
+
+- regs = priv->gfargrp[0].regs;
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&regs->dmactrl);
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+@@ -1685,56 +1781,41 @@
+ }
+
+ /* Halt the receive and transmit queues */
+-void gfar_halt(struct net_device *dev)
++void gfar_halt(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+- gfar_halt_nodisable(dev);
++ /* Disable the Rx/Tx hw queues */
++ gfar_write(&regs->rqueue, 0);
++ gfar_write(&regs->tqueue, 0);
+
+- /* Disable Rx and Tx */
++ mdelay(10);
++
++ gfar_halt_nodisable(priv);
++
++ /* Disable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+ }
+
+-static void free_grp_irqs(struct gfar_priv_grp *grp)
+-{
+- free_irq(gfar_irq(grp, TX)->irq, grp);
+- free_irq(gfar_irq(grp, RX)->irq, grp);
+- free_irq(gfar_irq(grp, ER)->irq, grp);
+-}
+-
+ void stop_gfar(struct net_device *dev)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- unsigned long flags;
+- int i;
+-
+- phy_stop(priv->phydev);
+
++ netif_tx_stop_all_queues(dev);
+
+- /* Lock it down */
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+- lock_rx_qs(priv);
++ smp_mb__before_clear_bit();
++ set_bit(GFAR_DOWN, &priv->state);
++ smp_mb__after_clear_bit();
+
+- gfar_halt(dev);
++ disable_napi(priv);
+
+- unlock_rx_qs(priv);
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ /* disable ints and gracefully shut down Rx/Tx DMA */
++ gfar_halt(priv);
+
+- /* Free the IRQs */
+- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+- for (i = 0; i < priv->num_grps; i++)
+- free_grp_irqs(&priv->gfargrp[i]);
+- } else {
+- for (i = 0; i < priv->num_grps; i++)
+- free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+- &priv->gfargrp[i]);
+- }
++ phy_stop(priv->phydev);
+
+ free_skb_resources(priv);
+ }
+@@ -1825,17 +1906,15 @@
+ priv->tx_queue[0]->tx_bd_dma_base);
+ }
+
+-void gfar_start(struct net_device *dev)
++void gfar_start(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+- /* Enable Rx and Tx in MACCFG1 */
+- tempval = gfar_read(&regs->maccfg1);
+- tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+- gfar_write(&regs->maccfg1, tempval);
++ /* Enable Rx/Tx hw queues */
++ gfar_write(&regs->rqueue, priv->rqueue);
++ gfar_write(&regs->tqueue, priv->tqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+@@ -1852,52 +1931,23 @@
+ /* Clear THLT/RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+- /* Unmask the interrupts we look for */
+- gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
+
+- dev->trans_start = jiffies; /* prevent tx timeout */
+-}
+-
+-static void gfar_configure_coalescing(struct gfar_private *priv,
+- unsigned long tx_mask, unsigned long rx_mask)
+-{
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- u32 __iomem *baddr;
++ /* Enable Rx/Tx DMA */
++ tempval = gfar_read(&regs->maccfg1);
++ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
++ gfar_write(&regs->maccfg1, tempval);
+
+- if (priv->mode == MQ_MG_MODE) {
+- int i = 0;
++ gfar_ints_enable(priv);
+
+- baddr = &regs->txic0;
+- for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+- gfar_write(baddr + i, 0);
+- if (likely(priv->tx_queue[i]->txcoalescing))
+- gfar_write(baddr + i, priv->tx_queue[i]->txic);
+- }
+-
+- baddr = &regs->rxic0;
+- for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+- gfar_write(baddr + i, 0);
+- if (likely(priv->rx_queue[i]->rxcoalescing))
+- gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+- }
+- } else {
+- /* Backward compatible case -- even if we enable
+- * multiple queues, there's only single reg to program
+- */
+- gfar_write(&regs->txic, 0);
+- if (likely(priv->tx_queue[0]->txcoalescing))
+- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+-
+- gfar_write(&regs->rxic, 0);
+- if (unlikely(priv->rx_queue[0]->rxcoalescing))
+- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+- }
++ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+ }
+
+-void gfar_configure_coalescing_all(struct gfar_private *priv)
++static void free_grp_irqs(struct gfar_priv_grp *grp)
+ {
+- gfar_configure_coalescing(priv, 0xFF, 0xFF);
++ free_irq(gfar_irq(grp, TX)->irq, grp);
++ free_irq(gfar_irq(grp, RX)->irq, grp);
++ free_irq(gfar_irq(grp, ER)->irq, grp);
+ }
+
+ static int register_grp_irqs(struct gfar_priv_grp *grp)
+@@ -1956,46 +2006,65 @@
+
+ }
+
+-/* Bring the controller up and running */
+-int startup_gfar(struct net_device *ndev)
++static void gfar_free_irq(struct gfar_private *priv)
+ {
+- struct gfar_private *priv = netdev_priv(ndev);
+- struct gfar __iomem *regs = NULL;
+- int err, i, j;
++ int i;
+
+- for (i = 0; i < priv->num_grps; i++) {
+- regs= priv->gfargrp[i].regs;
+- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
++ /* Free the IRQs */
++ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
++ for (i = 0; i < priv->num_grps; i++)
++ free_grp_irqs(&priv->gfargrp[i]);
++ } else {
++ for (i = 0; i < priv->num_grps; i++)
++ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
++ &priv->gfargrp[i]);
+ }
++}
+
+- regs= priv->gfargrp[0].regs;
+- err = gfar_alloc_skb_resources(ndev);
+- if (err)
+- return err;
+-
+- gfar_init_mac(ndev);
++static int gfar_request_irq(struct gfar_private *priv)
++{
++ int err, i, j;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ err = register_grp_irqs(&priv->gfargrp[i]);
+ if (err) {
+ for (j = 0; j < i; j++)
+ free_grp_irqs(&priv->gfargrp[j]);
+- goto irq_fail;
++ return err;
+ }
+ }
+
+- /* Start the controller */
+- gfar_start(ndev);
++ return 0;
++}
++
++/* Bring the controller up and running */
++int startup_gfar(struct net_device *ndev)
++{
++ struct gfar_private *priv = netdev_priv(ndev);
++ int err;
++
++ gfar_mac_reset(priv);
++
++ err = gfar_alloc_skb_resources(ndev);
++ if (err)
++ return err;
++
++ gfar_init_tx_rx_base(priv);
++
++ smp_mb__before_clear_bit();
++ clear_bit(GFAR_DOWN, &priv->state);
++ smp_mb__after_clear_bit();
++
++ /* Start Rx/Tx DMA and enable the interrupts */
++ gfar_start(priv);
+
+ phy_start(priv->phydev);
+
+- gfar_configure_coalescing_all(priv);
++ enable_napi(priv);
+
+- return 0;
++ netif_tx_wake_all_queues(ndev);
+
+-irq_fail:
+- free_skb_resources(priv);
+- return err;
++ return 0;
+ }
+
+ /* Called when something needs to use the ethernet device
+@@ -2006,27 +2075,17 @@
+ struct gfar_private *priv = netdev_priv(dev);
+ int err;
+
+- enable_napi(priv);
+-
+- /* Initialize a bunch of registers */
+- init_registers(dev);
+-
+- gfar_set_mac_address(dev);
+-
+ err = init_phy(dev);
++ if (err)
++ return err;
+
+- if (err) {
+- disable_napi(priv);
++ err = gfar_request_irq(priv);
++ if (err)
+ return err;
+- }
+
+ err = startup_gfar(dev);
+- if (err) {
+- disable_napi(priv);
++ if (err)
+ return err;
+- }
+-
+- netif_tx_start_all_queues(dev);
+
+ device_set_wakeup_enable(&dev->dev, priv->wol_en);
+
+@@ -2351,8 +2410,6 @@
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+
+- disable_napi(priv);
+-
+ cancel_work_sync(&priv->reset_task);
+ stop_gfar(dev);
+
+@@ -2360,7 +2417,7 @@
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+
+- netif_tx_stop_all_queues(dev);
++ gfar_free_irq(priv);
+
+ return 0;
+ }
+@@ -2373,77 +2430,9 @@
+ return 0;
+ }
+
+-/* Check if rx parser should be activated */
+-void gfar_check_rx_parser_mode(struct gfar_private *priv)
+-{
+- struct gfar __iomem *regs;
+- u32 tempval;
+-
+- regs = priv->gfargrp[0].regs;
+-
+- tempval = gfar_read(&regs->rctrl);
+- /* If parse is no longer required, then disable parser */
+- if (tempval & RCTRL_REQ_PARSER) {
+- tempval |= RCTRL_PRSDEP_INIT;
+- priv->uses_rxfcb = 1;
+- } else {
+- tempval &= ~RCTRL_PRSDEP_INIT;
+- priv->uses_rxfcb = 0;
+- }
+- gfar_write(&regs->rctrl, tempval);
+-}
+-
+-/* Enables and disables VLAN insertion/extraction */
+-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
+-{
+- struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = NULL;
+- unsigned long flags;
+- u32 tempval;
+-
+- regs = priv->gfargrp[0].regs;
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- if (features & NETIF_F_HW_VLAN_CTAG_TX) {
+- /* Enable VLAN tag insertion */
+- tempval = gfar_read(&regs->tctrl);
+- tempval |= TCTRL_VLINS;
+- gfar_write(&regs->tctrl, tempval);
+- } else {
+- /* Disable VLAN tag insertion */
+- tempval = gfar_read(&regs->tctrl);
+- tempval &= ~TCTRL_VLINS;
+- gfar_write(&regs->tctrl, tempval);
+- }
+-
+- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+- /* Enable VLAN tag extraction */
+- tempval = gfar_read(&regs->rctrl);
+- tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
+- gfar_write(&regs->rctrl, tempval);
+- priv->uses_rxfcb = 1;
+- } else {
+- /* Disable VLAN tag extraction */
+- tempval = gfar_read(&regs->rctrl);
+- tempval &= ~RCTRL_VLEX;
+- gfar_write(&regs->rctrl, tempval);
+-
+- gfar_check_rx_parser_mode(priv);
+- }
+-
+- gfar_change_mtu(dev, dev->mtu);
+-
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-}
+-
+ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+ {
+- int tempsize, tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- int oldsize = priv->rx_buffer_size;
+ int frame_size = new_mtu + ETH_HLEN;
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+@@ -2451,45 +2440,33 @@
+ return -EINVAL;
+ }
+
+- if (priv->uses_rxfcb)
+- frame_size += GMAC_FCB_LEN;
+-
+- frame_size += priv->padding;
+-
+- tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+- INCREMENTAL_BUFFER_SIZE;
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- /* Only stop and start the controller if it isn't already
+- * stopped, and we changed something
+- */
+- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
++ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+
+- priv->rx_buffer_size = tempsize;
+-
+ dev->mtu = new_mtu;
+
+- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
++ if (dev->flags & IFF_UP)
++ startup_gfar(dev);
+
+- /* If the mtu is larger than the max size for standard
+- * ethernet frames (ie, a jumbo frame), then set maccfg2
+- * to allow huge frames, and to check the length
+- */
+- tempval = gfar_read(&regs->maccfg2);
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+- gfar_has_errata(priv, GFAR_ERRATA_74))
+- tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+- else
+- tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
++ return 0;
++}
+
+- gfar_write(&regs->maccfg2, tempval);
++void reset_gfar(struct net_device *ndev)
++{
++ struct gfar_private *priv = netdev_priv(ndev);
+
+- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+- startup_gfar(dev);
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- return 0;
++ stop_gfar(ndev);
++ startup_gfar(ndev);
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+ }
+
+ /* gfar_reset_task gets scheduled when a packet has not been
+@@ -2501,16 +2478,7 @@
+ {
+ struct gfar_private *priv = container_of(work, struct gfar_private,
+ reset_task);
+- struct net_device *dev = priv->ndev;
+-
+- if (dev->flags & IFF_UP) {
+- netif_tx_stop_all_queues(dev);
+- stop_gfar(dev);
+- startup_gfar(dev);
+- netif_tx_start_all_queues(dev);
+- }
+-
+- netif_tx_schedule_all(dev);
++ reset_gfar(priv->ndev);
+ }
+
+ static void gfar_timeout(struct net_device *dev)
+@@ -2623,8 +2591,10 @@
+ }
+
+ /* If we freed a buffer, we can restart transmission, if necessary */
+- if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
+- netif_wake_subqueue(dev, tqi);
++ if (tx_queue->num_txbdfree &&
++ netif_tx_queue_stopped(txq) &&
++ !(test_bit(GFAR_DOWN, &priv->state)))
++ netif_wake_subqueue(priv->ndev, tqi);
+
+ /* Update dirty indicators */
+ tx_queue->skb_dirtytx = skb_dirtytx;
+@@ -2633,31 +2603,6 @@
+ netdev_tx_completed_queue(txq, howmany, bytes_sent);
+ }
+
+-static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(&gfargrp->grplock, flags);
+- if (napi_schedule_prep(&gfargrp->napi)) {
+- gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+- __napi_schedule(&gfargrp->napi);
+- } else {
+- /* Clear IEVENT, so interrupts aren't called again
+- * because of the packets that have already arrived.
+- */
+- gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
+- }
+- spin_unlock_irqrestore(&gfargrp->grplock, flags);
+-
+-}
+-
+-/* Interrupt Handler for Transmit complete */
+-static irqreturn_t gfar_transmit(int irq, void *grp_id)
+-{
+- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+- return IRQ_HANDLED;
+-}
+-
+ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb)
+ {
+@@ -2728,7 +2673,48 @@
+
+ irqreturn_t gfar_receive(int irq, void *grp_id)
+ {
+- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
++ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
++ unsigned long flags;
++ u32 imask;
++
++ if (likely(napi_schedule_prep(&grp->napi_rx))) {
++ spin_lock_irqsave(&grp->grplock, flags);
++ imask = gfar_read(&grp->regs->imask);
++ imask &= IMASK_RX_DISABLED;
++ gfar_write(&grp->regs->imask, imask);
++ spin_unlock_irqrestore(&grp->grplock, flags);
++ __napi_schedule(&grp->napi_rx);
++ } else {
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived.
++ */
++ gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/* Interrupt Handler for Transmit complete */
++static irqreturn_t gfar_transmit(int irq, void *grp_id)
++{
++ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
++ unsigned long flags;
++ u32 imask;
++
++ if (likely(napi_schedule_prep(&grp->napi_tx))) {
++ spin_lock_irqsave(&grp->grplock, flags);
++ imask = gfar_read(&grp->regs->imask);
++ imask &= IMASK_TX_DISABLED;
++ gfar_write(&grp->regs->imask, imask);
++ spin_unlock_irqrestore(&grp->grplock, flags);
++ __napi_schedule(&grp->napi_tx);
++ } else {
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived.
++ */
++ gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+@@ -2852,7 +2838,7 @@
+ rx_queue->stats.rx_bytes += pkt_len;
+ skb_record_rx_queue(skb, rx_queue->qindex);
+ gfar_process_frame(dev, skb, amount_pull,
+- &rx_queue->grp->napi);
++ &rx_queue->grp->napi_rx);
+
+ } else {
+ netif_warn(priv, rx_err, dev, "Missing skb!\n");
+@@ -2881,66 +2867,81 @@
+ return howmany;
+ }
+
+-static int gfar_poll_sq(struct napi_struct *napi, int budget)
++static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
+ {
+ struct gfar_priv_grp *gfargrp =
+- container_of(napi, struct gfar_priv_grp, napi);
++ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar __iomem *regs = gfargrp->regs;
+- struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
+- struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
++ struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
+ int work_done = 0;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+-
+- /* run Tx cleanup to completion */
+- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+- gfar_clean_tx_ring(tx_queue);
++ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ work_done = gfar_clean_rx_ring(rx_queue, budget);
+
+ if (work_done < budget) {
++ u32 imask;
+ napi_complete(napi);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+- gfar_write(&regs->imask, IMASK_DEFAULT);
+-
+- /* If we are coalescing interrupts, update the timer
+- * Otherwise, clear it
+- */
+- gfar_write(&regs->txic, 0);
+- if (likely(tx_queue->txcoalescing))
+- gfar_write(&regs->txic, tx_queue->txic);
+-
+- gfar_write(&regs->rxic, 0);
+- if (unlikely(rx_queue->rxcoalescing))
+- gfar_write(&regs->rxic, rx_queue->rxic);
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_RX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+ }
+
+-static int gfar_poll(struct napi_struct *napi, int budget)
++static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
++{
++ struct gfar_priv_grp *gfargrp =
++ container_of(napi, struct gfar_priv_grp, napi_tx);
++ struct gfar __iomem *regs = gfargrp->regs;
++ struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
++ u32 imask;
++
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived
++ */
++ gfar_write(&regs->ievent, IEVENT_TX_MASK);
++
++ /* run Tx cleanup to completion */
++ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
++ gfar_clean_tx_ring(tx_queue);
++
++ napi_complete(napi);
++
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_TX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
++
++ return 0;
++}
++
++static int gfar_poll_rx(struct napi_struct *napi, int budget)
+ {
+ struct gfar_priv_grp *gfargrp =
+- container_of(napi, struct gfar_priv_grp, napi);
++ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+- struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int work_done = 0, work_done_per_q = 0;
+ int i, budget_per_q = 0;
+- int has_tx_work = 0;
+ unsigned long rstat_rxf;
+ int num_act_queues;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
++ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+@@ -2948,15 +2949,6 @@
+ if (num_act_queues)
+ budget_per_q = budget/num_act_queues;
+
+- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+- tx_queue = priv->tx_queue[i];
+- /* run Tx cleanup to completion */
+- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+- gfar_clean_tx_ring(tx_queue);
+- has_tx_work = 1;
+- }
+- }
+-
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+@@ -2979,25 +2971,62 @@
+ }
+ }
+
+- if (!num_act_queues && !has_tx_work) {
+-
++ if (!num_act_queues) {
++ u32 imask;
+ napi_complete(napi);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+- gfar_write(&regs->imask, IMASK_DEFAULT);
+-
+- /* If we are coalescing interrupts, update the timer
+- * Otherwise, clear it
+- */
+- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+- gfargrp->tx_bit_map);
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_RX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+ }
+
++static int gfar_poll_tx(struct napi_struct *napi, int budget)
++{
++ struct gfar_priv_grp *gfargrp =
++ container_of(napi, struct gfar_priv_grp, napi_tx);
++ struct gfar_private *priv = gfargrp->priv;
++ struct gfar __iomem *regs = gfargrp->regs;
++ struct gfar_priv_tx_q *tx_queue = NULL;
++ int has_tx_work = 0;
++ int i;
++
++ /* Clear IEVENT, so interrupts aren't called again
++ * because of the packets that have already arrived
++ */
++ gfar_write(&regs->ievent, IEVENT_TX_MASK);
++
++ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
++ tx_queue = priv->tx_queue[i];
++ /* run Tx cleanup to completion */
++ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
++ gfar_clean_tx_ring(tx_queue);
++ has_tx_work = 1;
++ }
++ }
++
++ if (!has_tx_work) {
++ u32 imask;
++ napi_complete(napi);
++
++ spin_lock_irq(&gfargrp->grplock);
++ imask = gfar_read(&regs->imask);
++ imask |= IMASK_TX_DEFAULT;
++ gfar_write(&regs->imask, imask);
++ spin_unlock_irq(&gfargrp->grplock);
++ }
++
++ return 0;
++}
++
++
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ /* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+@@ -3056,41 +3085,6 @@
+ return IRQ_HANDLED;
+ }
+
+-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+-{
+- struct phy_device *phydev = priv->phydev;
+- u32 val = 0;
+-
+- if (!phydev->duplex)
+- return val;
+-
+- if (!priv->pause_aneg_en) {
+- if (priv->tx_pause_en)
+- val |= MACCFG1_TX_FLOW;
+- if (priv->rx_pause_en)
+- val |= MACCFG1_RX_FLOW;
+- } else {
+- u16 lcl_adv, rmt_adv;
+- u8 flowctrl;
+- /* get link partner capabilities */
+- rmt_adv = 0;
+- if (phydev->pause)
+- rmt_adv = LPA_PAUSE_CAP;
+- if (phydev->asym_pause)
+- rmt_adv |= LPA_PAUSE_ASYM;
+-
+- lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+-
+- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+- if (flowctrl & FLOW_CTRL_TX)
+- val |= MACCFG1_TX_FLOW;
+- if (flowctrl & FLOW_CTRL_RX)
+- val |= MACCFG1_RX_FLOW;
+- }
+-
+- return val;
+-}
+-
+ /* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+@@ -3100,86 +3094,12 @@
+ static void adjust_link(struct net_device *dev)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned long flags;
+ struct phy_device *phydev = priv->phydev;
+- int new_state = 0;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- if (phydev->link) {
+- u32 tempval1 = gfar_read(&regs->maccfg1);
+- u32 tempval = gfar_read(&regs->maccfg2);
+- u32 ecntrl = gfar_read(&regs->ecntrl);
+-
+- /* Now we make sure that we can be in full duplex mode.
+- * If not, we operate in half-duplex mode.
+- */
+- if (phydev->duplex != priv->oldduplex) {
+- new_state = 1;
+- if (!(phydev->duplex))
+- tempval &= ~(MACCFG2_FULL_DUPLEX);
+- else
+- tempval |= MACCFG2_FULL_DUPLEX;
+-
+- priv->oldduplex = phydev->duplex;
+- }
+-
+- if (phydev->speed != priv->oldspeed) {
+- new_state = 1;
+- switch (phydev->speed) {
+- case 1000:
+- tempval =
+- ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+-
+- ecntrl &= ~(ECNTRL_R100);
+- break;
+- case 100:
+- case 10:
+- tempval =
+- ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+-
+- /* Reduced mode distinguishes
+- * between 10 and 100
+- */
+- if (phydev->speed == SPEED_100)
+- ecntrl |= ECNTRL_R100;
+- else
+- ecntrl &= ~(ECNTRL_R100);
+- break;
+- default:
+- netif_warn(priv, link, dev,
+- "Ack! Speed (%d) is not 10/100/1000!\n",
+- phydev->speed);
+- break;
+- }
+-
+- priv->oldspeed = phydev->speed;
+- }
+-
+- tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+- tempval1 |= gfar_get_flowctrl_cfg(priv);
+-
+- gfar_write(&regs->maccfg1, tempval1);
+- gfar_write(&regs->maccfg2, tempval);
+- gfar_write(&regs->ecntrl, ecntrl);
+-
+- if (!priv->oldlink) {
+- new_state = 1;
+- priv->oldlink = 1;
+- }
+- } else if (priv->oldlink) {
+- new_state = 1;
+- priv->oldlink = 0;
+- priv->oldspeed = 0;
+- priv->oldduplex = -1;
+- }
+
+- if (new_state && netif_msg_link(priv))
+- phy_print_status(phydev);
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ if (unlikely(phydev->link != priv->oldlink ||
++ phydev->duplex != priv->oldduplex ||
++ phydev->speed != priv->oldspeed))
++ gfar_update_link_state(priv);
+ }
+
+ /* Update the hash table based on the current list of multicast
+@@ -3425,6 +3345,114 @@
+ return IRQ_HANDLED;
+ }
+
++static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
++{
++ struct phy_device *phydev = priv->phydev;
++ u32 val = 0;
++
++ if (!phydev->duplex)
++ return val;
++
++ if (!priv->pause_aneg_en) {
++ if (priv->tx_pause_en)
++ val |= MACCFG1_TX_FLOW;
++ if (priv->rx_pause_en)
++ val |= MACCFG1_RX_FLOW;
++ } else {
++ u16 lcl_adv, rmt_adv;
++ u8 flowctrl;
++ /* get link partner capabilities */
++ rmt_adv = 0;
++ if (phydev->pause)
++ rmt_adv = LPA_PAUSE_CAP;
++ if (phydev->asym_pause)
++ rmt_adv |= LPA_PAUSE_ASYM;
++
++ lcl_adv = mii_advertise_flowctrl(phydev->advertising);
++
++ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
++ if (flowctrl & FLOW_CTRL_TX)
++ val |= MACCFG1_TX_FLOW;
++ if (flowctrl & FLOW_CTRL_RX)
++ val |= MACCFG1_RX_FLOW;
++ }
++
++ return val;
++}
++
++static noinline void gfar_update_link_state(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ struct phy_device *phydev = priv->phydev;
++
++ if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
++ return;
++
++ if (phydev->link) {
++ u32 tempval1 = gfar_read(&regs->maccfg1);
++ u32 tempval = gfar_read(&regs->maccfg2);
++ u32 ecntrl = gfar_read(&regs->ecntrl);
++
++ if (phydev->duplex != priv->oldduplex) {
++ if (!(phydev->duplex))
++ tempval &= ~(MACCFG2_FULL_DUPLEX);
++ else
++ tempval |= MACCFG2_FULL_DUPLEX;
++
++ priv->oldduplex = phydev->duplex;
++ }
++
++ if (phydev->speed != priv->oldspeed) {
++ switch (phydev->speed) {
++ case 1000:
++ tempval =
++ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
++
++ ecntrl &= ~(ECNTRL_R100);
++ break;
++ case 100:
++ case 10:
++ tempval =
++ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
++
++ /* Reduced mode distinguishes
++ * between 10 and 100
++ */
++ if (phydev->speed == SPEED_100)
++ ecntrl |= ECNTRL_R100;
++ else
++ ecntrl &= ~(ECNTRL_R100);
++ break;
++ default:
++ netif_warn(priv, link, priv->ndev,
++ "Ack! Speed (%d) is not 10/100/1000!\n",
++ phydev->speed);
++ break;
++ }
++
++ priv->oldspeed = phydev->speed;
++ }
++
++ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
++ tempval1 |= gfar_get_flowctrl_cfg(priv);
++
++ gfar_write(&regs->maccfg1, tempval1);
++ gfar_write(&regs->maccfg2, tempval);
++ gfar_write(&regs->ecntrl, ecntrl);
++
++ if (!priv->oldlink)
++ priv->oldlink = 1;
++
++ } else if (priv->oldlink) {
++ priv->oldlink = 0;
++ priv->oldspeed = 0;
++ priv->oldduplex = -1;
++ }
++
++ if (netif_msg_link(priv))
++ phy_print_status(phydev);
++}
++
+ static struct of_device_id gfar_match[] =
+ {
+ {
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar_ethtool.c linux-3.14.40/drivers/net/ethernet/freescale/gianfar_ethtool.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar_ethtool.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/gianfar_ethtool.c 2015-05-01 14:57:59.827427001 -0500
+@@ -44,10 +44,6 @@
+
+ #include "gianfar.h"
+
+-extern void gfar_start(struct net_device *dev);
+-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+- int rx_work_limit);
+-
+ #define GFAR_MAX_COAL_USECS 0xffff
+ #define GFAR_MAX_COAL_FRAMES 0xff
+ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+@@ -364,25 +360,11 @@
+ struct ethtool_coalesce *cvals)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- int i = 0;
++ int i, err = 0;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+- /* Set up rx coalescing */
+- /* As of now, we will enable/disable coalescing for all
+- * queues together in case of eTSEC2, this will be modified
+- * along with the ethtool interface
+- */
+- if ((cvals->rx_coalesce_usecs == 0) ||
+- (cvals->rx_max_coalesced_frames == 0)) {
+- for (i = 0; i < priv->num_rx_queues; i++)
+- priv->rx_queue[i]->rxcoalescing = 0;
+- } else {
+- for (i = 0; i < priv->num_rx_queues; i++)
+- priv->rx_queue[i]->rxcoalescing = 1;
+- }
+-
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+@@ -399,6 +381,32 @@
+ return -EINVAL;
+ }
+
++ /* Check the bounds of the values */
++ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
++ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
++ GFAR_MAX_COAL_USECS);
++ return -EINVAL;
++ }
++
++ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
++ netdev_info(dev, "Coalescing is limited to %d frames\n",
++ GFAR_MAX_COAL_FRAMES);
++ return -EINVAL;
++ }
++
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
++
++ /* Set up rx coalescing */
++ if ((cvals->rx_coalesce_usecs == 0) ||
++ (cvals->rx_max_coalesced_frames == 0)) {
++ for (i = 0; i < priv->num_rx_queues; i++)
++ priv->rx_queue[i]->rxcoalescing = 0;
++ } else {
++ for (i = 0; i < priv->num_rx_queues; i++)
++ priv->rx_queue[i]->rxcoalescing = 1;
++ }
++
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rxic = mk_ic_value(
+ cvals->rx_max_coalesced_frames,
+@@ -415,28 +423,22 @@
+ priv->tx_queue[i]->txcoalescing = 1;
+ }
+
+- /* Check the bounds of the values */
+- if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+- netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+- GFAR_MAX_COAL_USECS);
+- return -EINVAL;
+- }
+-
+- if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+- netdev_info(dev, "Coalescing is limited to %d frames\n",
+- GFAR_MAX_COAL_FRAMES);
+- return -EINVAL;
+- }
+-
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->txic = mk_ic_value(
+ cvals->tx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+ }
+
+- gfar_configure_coalescing_all(priv);
++ if (dev->flags & IFF_UP) {
++ stop_gfar(dev);
++ err = startup_gfar(dev);
++ } else {
++ gfar_mac_reset(priv);
++ }
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+- return 0;
++ return err;
+ }
+
+ /* Fills in rvals with the current ring parameters. Currently,
+@@ -467,15 +469,13 @@
+ }
+
+ /* Change the current ring parameters, stopping the controller if
+- * necessary so that we don't mess things up while we're in
+- * motion. We wait for the ring to be clean before reallocating
+- * the rings.
++ * necessary so that we don't mess things up while we're in motion.
+ */
+ static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
+- int err = 0, i = 0;
++ int err = 0, i;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+@@ -493,44 +493,25 @@
+ return -EINVAL;
+ }
+
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- if (dev->flags & IFF_UP) {
+- unsigned long flags;
+-
+- /* Halt TX and RX, and process the frames which
+- * have already been received
+- */
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+- lock_rx_qs(priv);
+-
+- gfar_halt(dev);
+-
+- unlock_rx_qs(priv);
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- for (i = 0; i < priv->num_rx_queues; i++)
+- gfar_clean_rx_ring(priv->rx_queue[i],
+- priv->rx_queue[i]->rx_ring_size);
+-
+- /* Now we take down the rings to rebuild them */
++ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+- }
+
+- /* Change the size */
+- for (i = 0; i < priv->num_rx_queues; i++) {
++ /* Change the sizes */
++ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
++
++ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+- priv->tx_queue[i]->num_txbdfree =
+- priv->tx_queue[i]->tx_ring_size;
+- }
+
+ /* Rebuild the rings with the new size */
+- if (dev->flags & IFF_UP) {
++ if (dev->flags & IFF_UP)
+ err = startup_gfar(dev);
+- netif_tx_wake_all_queues(dev);
+- }
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
++
+ return err;
+ }
+
+@@ -552,6 +533,9 @@
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
++ if (!phydev)
++ return -ENODEV;
++
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+@@ -608,43 +592,29 @@
+
+ int gfar_set_features(struct net_device *dev, netdev_features_t features)
+ {
+- struct gfar_private *priv = netdev_priv(dev);
+- unsigned long flags;
+- int err = 0, i = 0;
+ netdev_features_t changed = dev->features ^ features;
++ struct gfar_private *priv = netdev_priv(dev);
++ int err = 0;
+
+- if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
+- gfar_vlan_mode(dev, features);
+-
+- if (!(changed & NETIF_F_RXCSUM))
++ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
++ NETIF_F_RXCSUM)))
+ return 0;
+
+- if (dev->flags & IFF_UP) {
+- /* Halt TX and RX, and process the frames which
+- * have already been received
+- */
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+- lock_rx_qs(priv);
+-
+- gfar_halt(dev);
+-
+- unlock_tx_qs(priv);
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
++ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
++ cpu_relax();
+
+- for (i = 0; i < priv->num_rx_queues; i++)
+- gfar_clean_rx_ring(priv->rx_queue[i],
+- priv->rx_queue[i]->rx_ring_size);
++ dev->features = features;
+
++ if (dev->flags & IFF_UP) {
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+-
+- dev->features = features;
+-
+ err = startup_gfar(dev);
+- netif_tx_wake_all_queues(dev);
++ } else {
++ gfar_mac_reset(priv);
+ }
++
++ clear_bit_unlock(GFAR_RESETTING, &priv->state);
++
+ return err;
+ }
+
+@@ -1610,9 +1580,6 @@
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+- /* Avoid inconsistent filer table to be processed */
+- lock_rx_qs(priv);
+-
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ i++)
+@@ -1625,8 +1592,6 @@
+ */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+- unlock_rx_qs(priv);
+-
+ return 0;
+ }
+
+@@ -1831,6 +1796,9 @@
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
++ if (test_bit(GFAR_RESETTING, &priv->state))
++ return -EBUSY;
++
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar.h linux-3.14.40/drivers/net/ethernet/freescale/gianfar.h
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/gianfar.h 2015-05-01 14:57:59.827427001 -0500
+@@ -9,7 +9,7 @@
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
++ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+@@ -377,8 +377,11 @@
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
+ | IMASK_PERR)
+-#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
+- & IMASK_DEFAULT)
++#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
++#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
++
++#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
++#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
+
+ /* Fifo management */
+ #define FIFO_TX_THR_MASK 0x01ff
+@@ -409,7 +412,9 @@
+
+ /* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+-#define DEFAULT_RIR0 0x05397700
++#define DEFAULT_8RXQ_RIR0 0x05397700
++/* Map even hash values to Q0, and odd ones to Q1 */
++#define DEFAULT_2RXQ_RIR0 0x04104100
+
+ /* RQFCR register bits */
+ #define RQFCR_GPI 0x80000000
+@@ -880,7 +885,6 @@
+ #define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
+ #define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
+ #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
+-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
+ #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
+ #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
+ #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+@@ -892,8 +896,8 @@
+ #define DEFAULT_MAPPING 0xFF
+ #endif
+
+-#define ISRG_SHIFT_TX 0x10
+-#define ISRG_SHIFT_RX 0x18
++#define ISRG_RR0 0x80000000
++#define ISRG_TR0 0x00800000
+
+ /* The same driver can operate in two modes */
+ /* SQ_SG_MODE: Single Queue Single Group Mode
+@@ -905,6 +909,22 @@
+ MQ_MG_MODE
+ };
+
++/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
++ * The driver supports a single pair of RX/Tx queues
++ * per interrupt group (Rx/Tx int line). MQ_MG mode
++ * devices have 2 interrupt groups, so the device will
++ * have a total of 2 Tx and 2 Rx queues in this case.
++ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
++ * The driver supports all the 8 Rx and Tx HW queues
++ * each queue mapped by the Device Tree to one of
++ * the 2 interrupt groups. This mode implies significant
++ * processing overhead (CPU and controller level).
++ */
++enum gfar_poll_mode {
++ GFAR_SQ_POLLING = 0,
++ GFAR_MQ_POLLING
++};
++
+ /*
+ * Per TX queue stats
+ */
+@@ -966,7 +986,6 @@
+
+ /**
+ * struct gfar_priv_rx_q - per rx queue structure
+- * @rxlock: per queue rx spin lock
+ * @rx_skbuff: skb pointers
+ * @skb_currx: currently use skb pointer
+ * @rx_bd_base: First rx buffer descriptor
+@@ -979,8 +998,7 @@
+ */
+
+ struct gfar_priv_rx_q {
+- spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+- struct sk_buff ** rx_skbuff;
++ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
+ dma_addr_t rx_bd_dma_base;
+ struct rxbd8 *rx_bd_base;
+ struct rxbd8 *cur_rx;
+@@ -1016,17 +1034,20 @@
+ */
+
+ struct gfar_priv_grp {
+- spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+- struct napi_struct napi;
+- struct gfar_private *priv;
++ spinlock_t grplock __aligned(SMP_CACHE_BYTES);
++ struct napi_struct napi_rx;
++ struct napi_struct napi_tx;
+ struct gfar __iomem *regs;
+- unsigned int rstat;
+- unsigned long num_rx_queues;
+- unsigned long rx_bit_map;
+- /* cacheline 3 */
++ struct gfar_priv_tx_q *tx_queue;
++ struct gfar_priv_rx_q *rx_queue;
+ unsigned int tstat;
++ unsigned int rstat;
++
++ struct gfar_private *priv;
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
++ unsigned long num_rx_queues;
++ unsigned long rx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
+ };
+@@ -1041,6 +1062,11 @@
+ GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
+ };
+
++enum gfar_dev_state {
++ GFAR_DOWN = 1,
++ GFAR_RESETTING
++};
++
+ /* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+@@ -1051,8 +1077,6 @@
+ * the buffer descriptor determines the actual condition.
+ */
+ struct gfar_private {
+- unsigned int num_rx_queues;
+-
+ struct device *dev;
+ struct net_device *ndev;
+ enum gfar_errata errata;
+@@ -1060,6 +1084,7 @@
+
+ u16 uses_rxfcb;
+ u16 padding;
++ u32 device_flags;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+@@ -1069,10 +1094,12 @@
+ struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+- u32 device_flags;
++ unsigned long state;
+
+- unsigned int mode;
++ unsigned short mode;
++ unsigned short poll_mode;
+ unsigned int num_tx_queues;
++ unsigned int num_rx_queues;
+ unsigned int num_grps;
+
+ /* Network Statistics */
+@@ -1113,6 +1140,9 @@
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
++ u32 rqueue;
++ u32 tqueue;
++
+ /* RX per device parameters */
+ unsigned int rx_stash_size;
+ unsigned int rx_stash_index;
+@@ -1127,11 +1157,6 @@
+ u32 __iomem *hash_regs[16];
+ int hash_width;
+
+- /* global parameters */
+- unsigned int fifo_threshold;
+- unsigned int fifo_starve;
+- unsigned int fifo_starve_off;
+-
+ /*Filer table*/
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+@@ -1176,21 +1201,42 @@
+ *fpr = gfar_read(&regs->rqfpr);
+ }
+
+-void lock_rx_qs(struct gfar_private *priv);
+-void lock_tx_qs(struct gfar_private *priv);
+-void unlock_rx_qs(struct gfar_private *priv);
+-void unlock_tx_qs(struct gfar_private *priv);
++static inline void gfar_write_isrg(struct gfar_private *priv)
++{
++ struct gfar __iomem *regs = priv->gfargrp[0].regs;
++ u32 __iomem *baddr = &regs->isrg0;
++ u32 isrg = 0;
++ int grp_idx, i;
++
++ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
++ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
++
++ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
++ isrg |= (ISRG_RR0 >> i);
++ }
++
++ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
++ isrg |= (ISRG_TR0 >> i);
++ }
++
++ gfar_write(baddr, isrg);
++
++ baddr++;
++ isrg = 0;
++ }
++}
++
+ irqreturn_t gfar_receive(int irq, void *dev_id);
+ int startup_gfar(struct net_device *dev);
+ void stop_gfar(struct net_device *dev);
+-void gfar_halt(struct net_device *dev);
++void reset_gfar(struct net_device *dev);
++void gfar_mac_reset(struct gfar_private *priv);
++void gfar_halt(struct gfar_private *priv);
++void gfar_start(struct gfar_private *priv);
+ void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+ u32 regnum, u32 read);
+ void gfar_configure_coalescing_all(struct gfar_private *priv);
+-void gfar_init_sysfs(struct net_device *dev);
+ int gfar_set_features(struct net_device *dev, netdev_features_t features);
+-void gfar_check_rx_parser_mode(struct gfar_private *priv);
+-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+
+ extern const struct ethtool_ops gfar_ethtool_ops;
+
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar_ptp.c linux-3.14.40/drivers/net/ethernet/freescale/gianfar_ptp.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar_ptp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/gianfar_ptp.c 2015-05-01 14:57:59.827427001 -0500
+@@ -414,6 +414,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = ptp_gianfar_adjfreq,
+ .adjtime = ptp_gianfar_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar_sysfs.c linux-3.14.40/drivers/net/ethernet/freescale/gianfar_sysfs.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/gianfar_sysfs.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/gianfar_sysfs.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,340 +0,0 @@
+-/*
+- * drivers/net/ethernet/freescale/gianfar_sysfs.c
+- *
+- * Gianfar Ethernet Driver
+- * This driver is designed for the non-CPM ethernet controllers
+- * on the 85xx and 83xx family of integrated processors
+- * Based on 8260_io/fcc_enet.c
+- *
+- * Author: Andy Fleming
+- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+- *
+- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms of the GNU General Public License as published by the
+- * Free Software Foundation; either version 2 of the License, or (at your
+- * option) any later version.
+- *
+- * Sysfs file creation and management
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/unistd.h>
+-#include <linux/delay.h>
+-#include <linux/etherdevice.h>
+-#include <linux/spinlock.h>
+-#include <linux/mm.h>
+-#include <linux/device.h>
+-
+-#include <asm/uaccess.h>
+-#include <linux/module.h>
+-
+-#include "gianfar.h"
+-
+-static ssize_t gfar_show_bd_stash(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
+-}
+-
+-static ssize_t gfar_set_bd_stash(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- int new_setting = 0;
+- u32 temp;
+- unsigned long flags;
+-
+- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
+- return count;
+-
+-
+- /* Find out the new setting */
+- if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
+- new_setting = 1;
+- else if (!strncmp("off", buf, count - 1) ||
+- !strncmp("0", buf, count - 1))
+- new_setting = 0;
+- else
+- return count;
+-
+-
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- /* Set the new stashing value */
+- priv->bd_stash_en = new_setting;
+-
+- temp = gfar_read(&regs->attr);
+-
+- if (new_setting)
+- temp |= ATTR_BDSTASH;
+- else
+- temp &= ~(ATTR_BDSTASH);
+-
+- gfar_write(&regs->attr, temp);
+-
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
+-
+-static ssize_t gfar_show_rx_stash_size(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->rx_stash_size);
+-}
+-
+-static ssize_t gfar_set_rx_stash_size(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int length = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
+- return count;
+-
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- if (length > priv->rx_buffer_size)
+- goto out;
+-
+- if (length == priv->rx_stash_size)
+- goto out;
+-
+- priv->rx_stash_size = length;
+-
+- temp = gfar_read(&regs->attreli);
+- temp &= ~ATTRELI_EL_MASK;
+- temp |= ATTRELI_EL(length);
+- gfar_write(&regs->attreli, temp);
+-
+- /* Turn stashing on/off as appropriate */
+- temp = gfar_read(&regs->attr);
+-
+- if (length)
+- temp |= ATTR_BUFSTASH;
+- else
+- temp &= ~(ATTR_BUFSTASH);
+-
+- gfar_write(&regs->attr, temp);
+-
+-out:
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
+- gfar_set_rx_stash_size);
+-
+-/* Stashing will only be enabled when rx_stash_size != 0 */
+-static ssize_t gfar_show_rx_stash_index(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->rx_stash_index);
+-}
+-
+-static ssize_t gfar_set_rx_stash_index(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned short index = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
+- return count;
+-
+- local_irq_save(flags);
+- lock_rx_qs(priv);
+-
+- if (index > priv->rx_stash_size)
+- goto out;
+-
+- if (index == priv->rx_stash_index)
+- goto out;
+-
+- priv->rx_stash_index = index;
+-
+- temp = gfar_read(&regs->attreli);
+- temp &= ~ATTRELI_EI_MASK;
+- temp |= ATTRELI_EI(index);
+- gfar_write(&regs->attreli, temp);
+-
+-out:
+- unlock_rx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
+- gfar_set_rx_stash_index);
+-
+-static ssize_t gfar_show_fifo_threshold(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->fifo_threshold);
+-}
+-
+-static ssize_t gfar_set_fifo_threshold(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int length = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (length > GFAR_MAX_FIFO_THRESHOLD)
+- return count;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- priv->fifo_threshold = length;
+-
+- temp = gfar_read(&regs->fifo_tx_thr);
+- temp &= ~FIFO_TX_THR_MASK;
+- temp |= length;
+- gfar_write(&regs->fifo_tx_thr, temp);
+-
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
+- gfar_set_fifo_threshold);
+-
+-static ssize_t gfar_show_fifo_starve(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->fifo_starve);
+-}
+-
+-static ssize_t gfar_set_fifo_starve(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int num = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (num > GFAR_MAX_FIFO_STARVE)
+- return count;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- priv->fifo_starve = num;
+-
+- temp = gfar_read(&regs->fifo_tx_starve);
+- temp &= ~FIFO_TX_STARVE_MASK;
+- temp |= num;
+- gfar_write(&regs->fifo_tx_starve, temp);
+-
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
+- gfar_set_fifo_starve);
+-
+-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+-
+- return sprintf(buf, "%d\n", priv->fifo_starve_off);
+-}
+-
+-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+- struct gfar __iomem *regs = priv->gfargrp[0].regs;
+- unsigned int num = simple_strtoul(buf, NULL, 0);
+- u32 temp;
+- unsigned long flags;
+-
+- if (num > GFAR_MAX_FIFO_STARVE_OFF)
+- return count;
+-
+- local_irq_save(flags);
+- lock_tx_qs(priv);
+-
+- priv->fifo_starve_off = num;
+-
+- temp = gfar_read(&regs->fifo_tx_starve_shutoff);
+- temp &= ~FIFO_TX_STARVE_OFF_MASK;
+- temp |= num;
+- gfar_write(&regs->fifo_tx_starve_shutoff, temp);
+-
+- unlock_tx_qs(priv);
+- local_irq_restore(flags);
+-
+- return count;
+-}
+-
+-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
+- gfar_set_fifo_starve_off);
+-
+-void gfar_init_sysfs(struct net_device *dev)
+-{
+- struct gfar_private *priv = netdev_priv(dev);
+- int rc;
+-
+- /* Initialize the default values */
+- priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
+- priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
+- priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
+-
+- /* Create our sysfs files */
+- rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
+- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
+- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
+- rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
+- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
+- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
+- if (rc)
+- dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
+-}
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/Kconfig linux-3.14.40/drivers/net/ethernet/freescale/Kconfig
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/Kconfig 2015-05-01 14:57:59.827427001 -0500
+@@ -67,6 +67,7 @@
+ tristate "Freescale XGMAC MDIO"
+ depends on FSL_SOC
+ select PHYLIB
++ select OF_MDIO
+ ---help---
+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/Makefile linux-3.14.40/drivers/net/ethernet/freescale/Makefile
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/Makefile 2015-05-01 14:57:59.843427001 -0500
+@@ -14,7 +14,6 @@
+ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+ obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
+ gianfar_driver-objs := gianfar.o \
+- gianfar_ethtool.o \
+- gianfar_sysfs.o
++ gianfar_ethtool.o
+ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/ucc_geth.c linux-3.14.40/drivers/net/ethernet/freescale/ucc_geth.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/ucc_geth.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/ucc_geth.c 2015-05-01 14:57:59.847427001 -0500
+@@ -1728,9 +1728,6 @@
+
+ phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
+ priv->phy_interface);
+- if (!phydev)
+- phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+- priv->phy_interface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+@@ -3261,7 +3258,7 @@
+
+ dev->stats.tx_packets++;
+
+- dev_kfree_skb(skb);
++ dev_consume_skb_any(skb);
+
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+@@ -3790,6 +3787,17 @@
+ ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
++ if (!ug_info->phy_node) {
++ /* In the case of a fixed PHY, the DT node associated
++ * to the PHY is the Ethernet MAC DT node.
++ */
++ if (of_phy_is_fixed_link(np)) {
++ err = of_phy_register_fixed_link(np);
++ if (err)
++ return err;
++ }
++ ug_info->phy_node = np;
++ }
+
+ /* Find the TBI PHY node. If it's not there, we don't support SGMII */
+ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/freescale/xgmac_mdio.c linux-3.14.40/drivers/net/ethernet/freescale/xgmac_mdio.c
+--- linux-3.14.40.orig/drivers/net/ethernet/freescale/xgmac_mdio.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/freescale/xgmac_mdio.c 2015-05-01 14:57:59.879427001 -0500
+@@ -162,7 +162,9 @@
+
+ /* Return all Fs if nothing was there */
+ if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+- dev_err(&bus->dev, "MDIO read error\n");
++ dev_err(&bus->dev,
++ "Error while reading PHY%d reg at %d.%d\n",
++ phy_id, dev_addr, regnum);
+ return 0xffff;
+ }
+
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/intel/e1000e/ptp.c linux-3.14.40/drivers/net/ethernet/intel/e1000e/ptp.c
+--- linux-3.14.40.orig/drivers/net/ethernet/intel/e1000e/ptp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/intel/e1000e/ptp.c 2015-05-01 14:57:59.895427001 -0500
+@@ -191,6 +191,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = e1000e_phc_adjfreq,
+ .adjtime = e1000e_phc_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/mellanox/mlx4/en_clock.c linux-3.14.40/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+--- linux-3.14.40.orig/drivers/net/ethernet/mellanox/mlx4/en_clock.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/mellanox/mlx4/en_clock.c 2015-05-01 14:57:59.903427001 -0500
+@@ -276,6 +276,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = mlx4_en_phc_adjfreq,
+ .adjtime = mlx4_en_phc_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/sfc/ptp.c linux-3.14.40/drivers/net/ethernet/sfc/ptp.c
+--- linux-3.14.40.orig/drivers/net/ethernet/sfc/ptp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/sfc/ptp.c 2015-05-01 14:57:59.911427001 -0500
+@@ -1208,6 +1208,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c linux-3.14.40/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+--- linux-3.14.40.orig/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 2015-05-01 14:57:59.935427001 -0500
+@@ -164,6 +164,7 @@
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/ti/cpts.c linux-3.14.40/drivers/net/ethernet/ti/cpts.c
+--- linux-3.14.40.orig/drivers/net/ethernet/ti/cpts.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/ti/cpts.c 2015-05-01 14:57:59.959427001 -0500
+@@ -217,6 +217,7 @@
+ .name = "CTPS timer",
+ .max_adj = 1000000,
+ .n_ext_ts = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = cpts_ptp_adjfreq,
+ .adjtime = cpts_ptp_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ethernet/tile/tilegx.c linux-3.14.40/drivers/net/ethernet/tile/tilegx.c
+--- linux-3.14.40.orig/drivers/net/ethernet/tile/tilegx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ethernet/tile/tilegx.c 2015-05-01 14:57:59.971427001 -0500
+@@ -870,6 +870,7 @@
+ .name = "mPIPE clock",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_mpipe_adjfreq,
+ .adjtime = ptp_mpipe_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/net/ieee802154/Kconfig linux-3.14.40/drivers/net/ieee802154/Kconfig
+--- linux-3.14.40.orig/drivers/net/ieee802154/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/ieee802154/Kconfig 2015-05-01 14:57:59.999427001 -0500
+@@ -15,9 +15,9 @@
+ depends on IEEE802154_DRIVERS
+ ---help---
+ Say Y here to enable the fake driver that serves as an example
+- of HardMAC device driver.
++ of HardMAC device driver.
+
+- This driver can also be built as a module. To do so say M here.
++ This driver can also be built as a module. To do so say M here.
+ The module will be called 'fakehard'.
+
+ config IEEE802154_FAKELB
+@@ -31,17 +31,17 @@
+ The module will be called 'fakelb'.
+
+ config IEEE802154_AT86RF230
+- depends on IEEE802154_DRIVERS && MAC802154
+- tristate "AT86RF230/231 transceiver driver"
+- depends on SPI
++ depends on IEEE802154_DRIVERS && MAC802154
++ tristate "AT86RF230/231 transceiver driver"
++ depends on SPI
+
+ config IEEE802154_MRF24J40
+- tristate "Microchip MRF24J40 transceiver driver"
+- depends on IEEE802154_DRIVERS && MAC802154
+- depends on SPI
+- ---help---
+- Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
+- controller.
++ tristate "Microchip MRF24J40 transceiver driver"
++ depends on IEEE802154_DRIVERS && MAC802154
++ depends on SPI
++ ---help---
++ Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
++ controller.
+
+- This driver can also be built as a module. To do so, say M here.
+- the module will be called 'mrf24j40'.
++ This driver can also be built as a module. To do so, say M here.
++ the module will be called 'mrf24j40'.
+diff -Nur linux-3.14.40.orig/drivers/net/phy/at803x.c linux-3.14.40/drivers/net/phy/at803x.c
+--- linux-3.14.40.orig/drivers/net/phy/at803x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/phy/at803x.c 2015-05-01 14:58:00.019427001 -0500
+@@ -27,6 +27,9 @@
+ #define AT803X_MMD_ACCESS_CONTROL 0x0D
+ #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
+ #define AT803X_FUNC_DATA 0x4003
++#define AT803X_INER 0x0012
++#define AT803X_INER_INIT 0xec00
++#define AT803X_INSR 0x0013
+ #define AT803X_DEBUG_ADDR 0x1D
+ #define AT803X_DEBUG_DATA 0x1E
+ #define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
+@@ -141,41 +144,11 @@
+
+ static int at803x_config_init(struct phy_device *phydev)
+ {
+- int val;
+ int ret;
+- u32 features;
+
+- features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
+- SUPPORTED_FIBRE | SUPPORTED_BNC;
+-
+- val = phy_read(phydev, MII_BMSR);
+- if (val < 0)
+- return val;
+-
+- if (val & BMSR_ANEGCAPABLE)
+- features |= SUPPORTED_Autoneg;
+- if (val & BMSR_100FULL)
+- features |= SUPPORTED_100baseT_Full;
+- if (val & BMSR_100HALF)
+- features |= SUPPORTED_100baseT_Half;
+- if (val & BMSR_10FULL)
+- features |= SUPPORTED_10baseT_Full;
+- if (val & BMSR_10HALF)
+- features |= SUPPORTED_10baseT_Half;
+-
+- if (val & BMSR_ESTATEN) {
+- val = phy_read(phydev, MII_ESTATUS);
+- if (val < 0)
+- return val;
+-
+- if (val & ESTATUS_1000_TFULL)
+- features |= SUPPORTED_1000baseT_Full;
+- if (val & ESTATUS_1000_THALF)
+- features |= SUPPORTED_1000baseT_Half;
+- }
+-
+- phydev->supported = features;
+- phydev->advertising = features;
++ ret = genphy_config_init(phydev);
++ if (ret < 0)
++ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR,
+@@ -191,6 +164,31 @@
+ return 0;
+ }
+
++static int at803x_ack_interrupt(struct phy_device *phydev)
++{
++ int err;
++
++ err = phy_read(phydev, AT803X_INSR);
++
++ return (err < 0) ? err : 0;
++}
++
++static int at803x_config_intr(struct phy_device *phydev)
++{
++ int err;
++ int value;
++
++ value = phy_read(phydev, AT803X_INER);
++
++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
++ err = phy_write(phydev, AT803X_INER,
++ value | AT803X_INER_INIT);
++ else
++ err = phy_write(phydev, AT803X_INER, 0);
++
++ return err;
++}
++
+ static struct phy_driver at803x_driver[] = {
+ {
+ /* ATHEROS 8035 */
+@@ -240,6 +238,8 @@
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
++ .ack_interrupt = &at803x_ack_interrupt,
++ .config_intr = &at803x_config_intr,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+@@ -253,8 +253,7 @@
+
+ static void __exit atheros_exit(void)
+ {
+- return phy_drivers_unregister(at803x_driver,
+- ARRAY_SIZE(at803x_driver));
++ phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
+ }
+
+ module_init(atheros_init);
+diff -Nur linux-3.14.40.orig/drivers/net/phy/phy_device.c linux-3.14.40/drivers/net/phy/phy_device.c
+--- linux-3.14.40.orig/drivers/net/phy/phy_device.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/phy/phy_device.c 2015-05-01 14:58:00.035427001 -0500
+@@ -1029,7 +1029,7 @@
+ return 0;
+ }
+
+-static int genphy_config_init(struct phy_device *phydev)
++int genphy_config_init(struct phy_device *phydev)
+ {
+ int val;
+ u32 features;
+@@ -1075,6 +1075,8 @@
+ return 0;
+ }
+
++EXPORT_SYMBOL(genphy_config_init);
++
+ static int gen10g_config_init(struct phy_device *phydev)
+ {
+ /* Temporarily just say we support everything */
+diff -Nur linux-3.14.40.orig/drivers/net/phy/smsc.c linux-3.14.40/drivers/net/phy/smsc.c
+--- linux-3.14.40.orig/drivers/net/phy/smsc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/phy/smsc.c 2015-05-01 14:58:00.059427001 -0500
+@@ -249,8 +249,7 @@
+
+ static void __exit smsc_exit(void)
+ {
+- return phy_drivers_unregister(smsc_phy_driver,
+- ARRAY_SIZE(smsc_phy_driver));
++ phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
+ }
+
+ MODULE_DESCRIPTION("SMSC PHY driver");
+diff -Nur linux-3.14.40.orig/drivers/net/phy/vitesse.c linux-3.14.40/drivers/net/phy/vitesse.c
+--- linux-3.14.40.orig/drivers/net/phy/vitesse.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/phy/vitesse.c 2015-05-01 14:58:00.079427001 -0500
+@@ -319,8 +319,7 @@
+
+ static void __exit vsc82xx_exit(void)
+ {
+- return phy_drivers_unregister(vsc82xx_driver,
+- ARRAY_SIZE(vsc82xx_driver));
++ phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
+ }
+
+ module_init(vsc82xx_init);
+diff -Nur linux-3.14.40.orig/drivers/net/veth.c linux-3.14.40/drivers/net/veth.c
+--- linux-3.14.40.orig/drivers/net/veth.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/veth.c 2015-05-01 14:58:00.091427001 -0500
+@@ -14,6 +14,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/u64_stats_sync.h>
+
++#include <net/rtnetlink.h>
+ #include <net/dst.h>
+ #include <net/xfrm.h>
+ #include <linux/veth.h>
+@@ -336,10 +337,9 @@
+
+ nla_peer = data[VETH_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+- err = nla_parse(peer_tb, IFLA_MAX,
+- nla_data(nla_peer) + sizeof(struct ifinfomsg),
+- nla_len(nla_peer) - sizeof(struct ifinfomsg),
+- ifla_policy);
++ err = rtnl_nla_parse_ifla(peer_tb,
++ nla_data(nla_peer) + sizeof(struct ifinfomsg),
++ nla_len(nla_peer) - sizeof(struct ifinfomsg));
+ if (err < 0)
+ return err;
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ath/ar5523/ar5523.c linux-3.14.40/drivers/net/wireless/ath/ar5523/ar5523.c
+--- linux-3.14.40.orig/drivers/net/wireless/ath/ar5523/ar5523.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ath/ar5523/ar5523.c 2015-05-01 14:58:00.107427001 -0500
+@@ -1090,7 +1090,8 @@
+ return ret;
+ }
+
+-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ar5523 *ar = hw->priv;
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ath/ath10k/mac.c linux-3.14.40/drivers/net/wireless/ath/ath10k/mac.c
+--- linux-3.14.40.orig/drivers/net/wireless/ath/ath10k/mac.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ath/ath10k/mac.c 2015-05-01 14:58:00.131427001 -0500
+@@ -3183,7 +3183,8 @@
+ return ret;
+ }
+
+-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ath10k *ar = hw->priv;
+ bool skip;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ath/ath6kl/cfg80211.c linux-3.14.40/drivers/net/wireless/ath/ath6kl/cfg80211.c
+--- linux-3.14.40.orig/drivers/net/wireless/ath/ath6kl/cfg80211.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ath/ath6kl/cfg80211.c 2015-05-01 14:58:00.143427001 -0500
+@@ -790,7 +790,7 @@
+ if (nw_type & ADHOC_NETWORK) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+ nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
+ cfg80211_put_bss(ar->wiphy, bss);
+ return;
+ }
+@@ -861,13 +861,9 @@
+ }
+
+ if (vif->nw_type & ADHOC_NETWORK) {
+- if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
++ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+- return;
+- }
+- memset(bssid, 0, ETH_ALEN);
+- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ return;
+ }
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ath/ath6kl/sdio.c linux-3.14.40/drivers/net/wireless/ath/ath6kl/sdio.c
+--- linux-3.14.40.orig/drivers/net/wireless/ath/ath6kl/sdio.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ath/ath6kl/sdio.c 2015-05-01 14:58:03.959427001 -0500
+@@ -222,6 +222,7 @@
+ struct mmc_data *data)
+ {
+ struct scatterlist *sg;
++ struct hif_scatter_item *scat_list;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+@@ -240,14 +241,14 @@
+ sg = scat_req->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
++ scat_list = &scat_req->scat_list[0];
++
+ /* assemble SG list */
+- for (i = 0; i < scat_req->scat_entries; i++, sg++) {
++ for (i = 0; i < scat_req->scat_entries; i++, sg++, scat_list++) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+- i, scat_req->scat_list[i].buf,
+- scat_req->scat_list[i].len);
++ i, scat_list->buf, scat_list->len);
+
+- sg_set_buf(sg, scat_req->scat_list[i].buf,
+- scat_req->scat_list[i].len);
++ sg_set_buf(sg, scat_list->buf, scat_list->len);
+ }
+
+ /* set scatter-gather table for request */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ath/ath9k/main.c linux-3.14.40/drivers/net/wireless/ath/ath9k/main.c
+--- linux-3.14.40.orig/drivers/net/wireless/ath/ath9k/main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ath/ath9k/main.c 2015-05-01 14:58:03.971427001 -0500
+@@ -1883,7 +1883,8 @@
+ return !!npend;
+ }
+
+-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ath/carl9170/main.c linux-3.14.40/drivers/net/wireless/ath/carl9170/main.c
+--- linux-3.14.40.orig/drivers/net/wireless/ath/carl9170/main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ath/carl9170/main.c 2015-05-01 14:58:03.975427001 -0500
+@@ -1707,7 +1707,9 @@
+ return 0;
+ }
+
+-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void carl9170_op_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct ar9170 *ar = hw->priv;
+ unsigned int vid;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c 2015-05-01 14:58:03.987427001 -0500
+@@ -43,7 +43,6 @@
+ #include "dhd_bus.h"
+ #include "dhd_dbg.h"
+ #include "sdio_host.h"
+-#include "sdio_chip.h"
+
+ #define SDIOH_API_ACCESS_RETRY_LIMIT 2
+
+@@ -54,6 +53,12 @@
+ /* Maximum milliseconds to wait for F2 to come up */
+ #define SDIO_WAIT_F2RDY 3000
+
++#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
++#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
++
++static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
++module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
++MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+
+ static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
+ {
+@@ -264,26 +269,17 @@
+ break;
+ }
+
+- if (ret) {
+- /*
+- * SleepCSR register access can fail when
+- * waking up the device so reduce this noise
+- * in the logs.
+- */
+- if (addr != SBSDIO_FUNC1_SLEEPCSR)
+- brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+- write ? "write" : "read", fn, addr, ret);
+- else
+- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+- write ? "write" : "read", fn, addr, ret);
+- }
++ if (ret)
++ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
++ write ? "write" : "read", fn, addr, ret);
++
+ return ret;
+ }
+
+ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 regsz, void *data, bool write)
+ {
+- u8 func_num;
++ u8 func;
+ s32 retry = 0;
+ int ret;
+
+@@ -297,9 +293,9 @@
+ * The rest: function 1 silicon backplane core registers
+ */
+ if ((addr & ~REG_F0_REG_MASK) == 0)
+- func_num = SDIO_FUNC_0;
++ func = SDIO_FUNC_0;
+ else
+- func_num = SDIO_FUNC_1;
++ func = SDIO_FUNC_1;
+
+ do {
+ if (!write)
+@@ -307,16 +303,26 @@
+ /* for retry wait for 1 ms till bus get settled down */
+ if (retry)
+ usleep_range(1000, 2000);
+- ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
++ ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
+ data, write);
+ } while (ret != 0 && ret != -ENOMEDIUM &&
+ retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+ if (ret == -ENOMEDIUM)
+ brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+- else if (ret != 0)
+- brcmf_err("failed with %d\n", ret);
+-
++ else if (ret != 0) {
++ /*
++ * SleepCSR register access can fail when
++ * waking up the device so reduce this noise
++ * in the logs.
++ */
++ if (addr != SBSDIO_FUNC1_SLEEPCSR)
++ brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
++ write ? "write" : "read", func, addr, ret);
++ else
++ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
++ write ? "write" : "read", func, addr, ret);
++ }
+ return ret;
+ }
+
+@@ -488,7 +494,6 @@
+ struct mmc_request mmc_req;
+ struct mmc_command mmc_cmd;
+ struct mmc_data mmc_dat;
+- struct sg_table st;
+ struct scatterlist *sgl;
+ int ret = 0;
+
+@@ -533,16 +538,11 @@
+ pkt_offset = 0;
+ pkt_next = target_list->next;
+
+- if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
+- ret = -ENOMEM;
+- goto exit;
+- }
+-
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+- mmc_dat.sg = st.sgl;
++ mmc_dat.sg = sdiodev->sgtable.sgl;
+ mmc_dat.blksz = func_blk_sz;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+@@ -558,7 +558,7 @@
+ while (seg_sz) {
+ req_sz = 0;
+ sg_cnt = 0;
+- sgl = st.sgl;
++ sgl = sdiodev->sgtable.sgl;
+ /* prep sg table */
+ while (pkt_next != (struct sk_buff *)target_list) {
+ pkt_data = pkt_next->data + pkt_offset;
+@@ -640,7 +640,7 @@
+ }
+
+ exit:
+- sg_free_table(&st);
++ sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
+ while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
+ brcmu_pkt_buf_free_skb(pkt_next);
+
+@@ -827,7 +827,7 @@
+ }
+ if (!write)
+ memcpy(data, pkt->data, dsize);
+- skb_trim(pkt, dsize);
++ skb_trim(pkt, 0);
+
+ /* Adjust for next transfer (if any) */
+ size -= dsize;
+@@ -864,6 +864,29 @@
+ return 0;
+ }
+
++static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
++{
++ uint nents;
++ int err;
++
++ if (!sdiodev->sg_support)
++ return;
++
++ nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
++ nents += (nents >> 4) + 1;
++
++ WARN_ON(nents > sdiodev->max_segment_count);
++
++ brcmf_dbg(TRACE, "nents=%d\n", nents);
++ err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
++ if (err < 0) {
++ brcmf_err("allocation failed: disable scatter-gather");
++ sdiodev->sg_support = false;
++ }
++
++ sdiodev->txglomsz = brcmf_sdiod_txglomsz;
++}
++
+ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
+ {
+ if (sdiodev->bus) {
+@@ -881,6 +904,7 @@
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
++ sg_free_table(&sdiodev->sgtable);
+ sdiodev->sbwad = 0;
+
+ return 0;
+@@ -936,6 +960,11 @@
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
+
++ /* allocate scatter-gather table. sg support
++ * will be disabled upon allocation failure.
++ */
++ brcmf_sdiod_sgtable_alloc(sdiodev);
++
+ /* try to attach to the target device */
+ sdiodev->bus = brcmf_sdio_probe(sdiodev);
+ if (!sdiodev->bus) {
+@@ -960,6 +989,7 @@
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+ SDIO_DEVICE_ID_BROADCOM_4335_4339)},
++ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
+ { /* end: all zeroes */ },
+ };
+ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+@@ -1073,9 +1103,7 @@
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ int ret = 0;
+
+- brcmf_dbg(SDIO, "\n");
+-
+- atomic_set(&sdiodev->suspend, true);
++ brcmf_dbg(SDIO, "Enter\n");
+
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+@@ -1083,9 +1111,12 @@
+ return -EINVAL;
+ }
+
++ atomic_set(&sdiodev->suspend, true);
++
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ brcmf_err("Failed to set pm_flags\n");
++ atomic_set(&sdiodev->suspend, false);
+ return ret;
+ }
+
+@@ -1099,6 +1130,7 @@
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
++ brcmf_dbg(SDIO, "Enter\n");
+ brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ atomic_set(&sdiodev->suspend, false);
+ return 0;
+@@ -1115,14 +1147,15 @@
+ .remove = brcmf_ops_sdio_remove,
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .id_table = brcmf_sdmmc_ids,
+-#ifdef CONFIG_PM_SLEEP
+ .drv = {
++ .owner = THIS_MODULE,
++#ifdef CONFIG_PM_SLEEP
+ .pm = &brcmf_sdio_pm_ops,
+- },
+ #endif /* CONFIG_PM_SLEEP */
++ },
+ };
+
+-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
++static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
+ {
+ brcmf_dbg(SDIO, "Enter\n");
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/chip.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/chip.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/chip.c 2015-05-01 14:58:03.987427001 -0500
+@@ -0,0 +1,1035 @@
++/*
++ * Copyright (c) 2014 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/list.h>
++#include <linux/ssb/ssb_regs.h>
++#include <linux/bcma/bcma.h>
++#include <linux/bcma/bcma_regs.h>
++
++#include <defs.h>
++#include <soc.h>
++#include <brcm_hw_ids.h>
++#include <brcmu_utils.h>
++#include <chipcommon.h>
++#include "dhd_dbg.h"
++#include "chip.h"
++
++/* SOC Interconnect types (aka chip types) */
++#define SOCI_SB 0
++#define SOCI_AI 1
++
++/* PL-368 DMP definitions */
++#define DMP_DESC_TYPE_MSK 0x0000000F
++#define DMP_DESC_EMPTY 0x00000000
++#define DMP_DESC_VALID 0x00000001
++#define DMP_DESC_COMPONENT 0x00000001
++#define DMP_DESC_MASTER_PORT 0x00000003
++#define DMP_DESC_ADDRESS 0x00000005
++#define DMP_DESC_ADDRSIZE_GT32 0x00000008
++#define DMP_DESC_EOT 0x0000000F
++
++#define DMP_COMP_DESIGNER 0xFFF00000
++#define DMP_COMP_DESIGNER_S 20
++#define DMP_COMP_PARTNUM 0x000FFF00
++#define DMP_COMP_PARTNUM_S 8
++#define DMP_COMP_CLASS 0x000000F0
++#define DMP_COMP_CLASS_S 4
++#define DMP_COMP_REVISION 0xFF000000
++#define DMP_COMP_REVISION_S 24
++#define DMP_COMP_NUM_SWRAP 0x00F80000
++#define DMP_COMP_NUM_SWRAP_S 19
++#define DMP_COMP_NUM_MWRAP 0x0007C000
++#define DMP_COMP_NUM_MWRAP_S 14
++#define DMP_COMP_NUM_SPORT 0x00003E00
++#define DMP_COMP_NUM_SPORT_S 9
++#define DMP_COMP_NUM_MPORT 0x000001F0
++#define DMP_COMP_NUM_MPORT_S 4
++
++#define DMP_MASTER_PORT_UID 0x0000FF00
++#define DMP_MASTER_PORT_UID_S 8
++#define DMP_MASTER_PORT_NUM 0x000000F0
++#define DMP_MASTER_PORT_NUM_S 4
++
++#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
++#define DMP_SLAVE_ADDR_BASE_S 12
++#define DMP_SLAVE_PORT_NUM 0x00000F00
++#define DMP_SLAVE_PORT_NUM_S 8
++#define DMP_SLAVE_TYPE 0x000000C0
++#define DMP_SLAVE_TYPE_S 6
++#define DMP_SLAVE_TYPE_SLAVE 0
++#define DMP_SLAVE_TYPE_BRIDGE 1
++#define DMP_SLAVE_TYPE_SWRAP 2
++#define DMP_SLAVE_TYPE_MWRAP 3
++#define DMP_SLAVE_SIZE_TYPE 0x00000030
++#define DMP_SLAVE_SIZE_TYPE_S 4
++#define DMP_SLAVE_SIZE_4K 0
++#define DMP_SLAVE_SIZE_8K 1
++#define DMP_SLAVE_SIZE_16K 2
++#define DMP_SLAVE_SIZE_DESC 3
++
++/* EROM CompIdentB */
++#define CIB_REV_MASK 0xff000000
++#define CIB_REV_SHIFT 24
++
++/* ARM CR4 core specific control flag bits */
++#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
++
++/* D11 core specific control flag bits */
++#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
++#define D11_BCMA_IOCTL_PHYRESET 0x0008
++
++/* chip core base & ramsize */
++/* bcm4329 */
++/* SDIO device core, ID 0x829 */
++#define BCM4329_CORE_BUS_BASE 0x18011000
++/* internal memory core, ID 0x80e */
++#define BCM4329_CORE_SOCRAM_BASE 0x18003000
++/* ARM Cortex M3 core, ID 0x82a */
++#define BCM4329_CORE_ARM_BASE 0x18002000
++#define BCM4329_RAMSIZE 0x48000
++
++/* bcm43143 */
++/* SDIO device core */
++#define BCM43143_CORE_BUS_BASE 0x18002000
++/* internal memory core */
++#define BCM43143_CORE_SOCRAM_BASE 0x18004000
++/* ARM Cortex M3 core, ID 0x82a */
++#define BCM43143_CORE_ARM_BASE 0x18003000
++#define BCM43143_RAMSIZE 0x70000
++
++#define CORE_SB(base, field) \
++ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
++#define SBCOREREV(sbidh) \
++ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
++ ((sbidh) & SSB_IDHIGH_RCLO))
++
++struct sbconfig {
++ u32 PAD[2];
++ u32 sbipsflag; /* initiator port ocp slave flag */
++ u32 PAD[3];
++ u32 sbtpsflag; /* target port ocp slave flag */
++ u32 PAD[11];
++ u32 sbtmerrloga; /* (sonics >= 2.3) */
++ u32 PAD;
++ u32 sbtmerrlog; /* (sonics >= 2.3) */
++ u32 PAD[3];
++ u32 sbadmatch3; /* address match3 */
++ u32 PAD;
++ u32 sbadmatch2; /* address match2 */
++ u32 PAD;
++ u32 sbadmatch1; /* address match1 */
++ u32 PAD[7];
++ u32 sbimstate; /* initiator agent state */
++ u32 sbintvec; /* interrupt mask */
++ u32 sbtmstatelow; /* target state */
++ u32 sbtmstatehigh; /* target state */
++ u32 sbbwa0; /* bandwidth allocation table0 */
++ u32 PAD;
++ u32 sbimconfiglow; /* initiator configuration */
++ u32 sbimconfighigh; /* initiator configuration */
++ u32 sbadmatch0; /* address match0 */
++ u32 PAD;
++ u32 sbtmconfiglow; /* target configuration */
++ u32 sbtmconfighigh; /* target configuration */
++ u32 sbbconfig; /* broadcast configuration */
++ u32 PAD;
++ u32 sbbstate; /* broadcast state */
++ u32 PAD[3];
++ u32 sbactcnfg; /* activate configuration */
++ u32 PAD[3];
++ u32 sbflagst; /* current sbflags */
++ u32 PAD[3];
++ u32 sbidlow; /* identification */
++ u32 sbidhigh; /* identification */
++};
++
++struct brcmf_core_priv {
++ struct brcmf_core pub;
++ u32 wrapbase;
++ struct list_head list;
++ struct brcmf_chip_priv *chip;
++};
++
++/* ARM CR4 core specific control flag bits */
++#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
++
++/* D11 core specific control flag bits */
++#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
++#define D11_BCMA_IOCTL_PHYRESET 0x0008
++
++struct brcmf_chip_priv {
++ struct brcmf_chip pub;
++ const struct brcmf_buscore_ops *ops;
++ void *ctx;
++ /* assured first core is chipcommon, second core is buscore */
++ struct list_head cores;
++ u16 num_cores;
++
++ bool (*iscoreup)(struct brcmf_core_priv *core);
++ void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
++ u32 reset);
++ void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
++ u32 postreset);
++};
++
++static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
++ struct brcmf_core *core)
++{
++ u32 regdata;
++
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
++ core->rev = SBCOREREV(regdata);
++}
++
++static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++ u32 address;
++
++ ci = core->chip;
++ address = CORE_SB(core->pub.base, sbtmstatelow);
++ regdata = ci->ops->read32(ci->ctx, address);
++ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
++ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
++ return SSB_TMSLOW_CLOCK == regdata;
++}
++
++static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++ bool ret;
++
++ ci = core->chip;
++ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
++
++ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
++ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
++
++ return ret;
++}
++
++static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
++ u32 prereset, u32 reset)
++{
++ struct brcmf_chip_priv *ci;
++ u32 val, base;
++
++ ci = core->chip;
++ base = core->pub.base;
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ if (val & SSB_TMSLOW_RESET)
++ return;
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ if ((val & SSB_TMSLOW_CLOCK) != 0) {
++ /*
++ * set target reject and spin until busy is clear
++ * (preserve core-specific bits)
++ */
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ val | SSB_TMSLOW_REJECT);
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++ SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
++ & SSB_TMSHIGH_BUSY), 100000);
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
++ if (val & SSB_TMSHIGH_BUSY)
++ brcmf_err("core state still busy\n");
++
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
++ if (val & SSB_IDLOW_INITIATOR) {
++ val = ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate));
++ val |= SSB_IMSTATE_REJECT;
++ ci->ops->write32(ci->ctx,
++ CORE_SB(base, sbimstate), val);
++ val = ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate));
++ udelay(1);
++ SPINWAIT((ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate)) &
++ SSB_IMSTATE_BUSY), 100000);
++ }
++
++ /* set reset and reject while enabling the clocks */
++ val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
++ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(10);
++
++ /* clear the initiator reject bit */
++ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
++ if (val & SSB_IDLOW_INITIATOR) {
++ val = ci->ops->read32(ci->ctx,
++ CORE_SB(base, sbimstate));
++ val &= ~SSB_IMSTATE_REJECT;
++ ci->ops->write32(ci->ctx,
++ CORE_SB(base, sbimstate), val);
++ }
++ }
++
++ /* leave reset and reject asserted */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
++ udelay(1);
++}
++
++static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
++ u32 prereset, u32 reset)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++
++ ci = core->chip;
++
++ /* if core is already in reset, skip reset */
++ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
++ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
++ goto in_reset_configure;
++
++ /* configure reset */
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
++ prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
++ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++
++ /* put in reset */
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
++ BCMA_RESET_CTL_RESET);
++ usleep_range(10, 20);
++
++ /* wait till reset is 1 */
++ SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
++ BCMA_RESET_CTL_RESET, 300);
++
++in_reset_configure:
++ /* in-reset configure */
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
++ reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
++ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++}
++
++static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
++ u32 reset, u32 postreset)
++{
++ struct brcmf_chip_priv *ci;
++ u32 regdata;
++ u32 base;
++
++ ci = core->chip;
++ base = core->pub.base;
++ /*
++ * Must do the disable sequence first to work for
++ * arbitrary current core state.
++ */
++ brcmf_chip_sb_coredisable(core, 0, 0);
++
++ /*
++ * Now do the initialization sequence.
++ * set reset while enabling the clock and
++ * forcing them on throughout the core
++ */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
++ SSB_TMSLOW_RESET);
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++
++ /* clear any serror */
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
++ if (regdata & SSB_TMSHIGH_SERR)
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
++
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
++ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
++ regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
++ }
++
++ /* clear reset and allow it to propagate throughout the core */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++
++ /* leave clock enabled */
++ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
++ SSB_TMSLOW_CLOCK);
++ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
++ udelay(1);
++}
++
++static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
++ u32 reset, u32 postreset)
++{
++ struct brcmf_chip_priv *ci;
++ int count;
++
++ ci = core->chip;
++
++ /* must disable first to work for arbitrary current core state */
++ brcmf_chip_ai_coredisable(core, prereset, reset);
++
++ count = 0;
++ while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
++ BCMA_RESET_CTL_RESET) {
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
++ count++;
++ if (count > 50)
++ break;
++ usleep_range(40, 60);
++ }
++
++ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
++ postreset | BCMA_IOCTL_CLK);
++ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
++}
++
++static char *brcmf_chip_name(uint chipid, char *buf, uint len)
++{
++ const char *fmt;
++
++ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
++ snprintf(buf, len, fmt, chipid);
++ return buf;
++}
++
++static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
++ u16 coreid, u32 base,
++ u32 wrapbase)
++{
++ struct brcmf_core_priv *core;
++
++ core = kzalloc(sizeof(*core), GFP_KERNEL);
++ if (!core)
++ return ERR_PTR(-ENOMEM);
++
++ core->pub.id = coreid;
++ core->pub.base = base;
++ core->chip = ci;
++ core->wrapbase = wrapbase;
++
++ list_add_tail(&core->list, &ci->cores);
++ return &core->pub;
++}
++
++#ifdef DEBUG
++/* safety check for chipinfo */
++static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
++{
++ struct brcmf_core_priv *core;
++ bool need_socram = false;
++ bool has_socram = false;
++ int idx = 1;
++
++ list_for_each_entry(core, &ci->cores, list) {
++ brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
++ idx++, core->pub.id, core->pub.rev, core->pub.base,
++ core->wrapbase);
++
++ switch (core->pub.id) {
++ case BCMA_CORE_ARM_CM3:
++ need_socram = true;
++ break;
++ case BCMA_CORE_INTERNAL_MEM:
++ has_socram = true;
++ break;
++ case BCMA_CORE_ARM_CR4:
++ if (ci->pub.rambase == 0) {
++ brcmf_err("RAM base not provided with ARM CR4 core\n");
++ return -ENOMEM;
++ }
++ break;
++ default:
++ break;
++ }
++ }
++
++ /* check RAM core presence for ARM CM3 core */
++ if (need_socram && !has_socram) {
++ brcmf_err("RAM core not provided with ARM CM3 core\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++#else /* DEBUG */
++static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
++{
++ return 0;
++}
++#endif
++
++static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
++{
++ switch (ci->pub.chip) {
++ case BCM4329_CHIP_ID:
++ ci->pub.ramsize = BCM4329_RAMSIZE;
++ break;
++ case BCM43143_CHIP_ID:
++ ci->pub.ramsize = BCM43143_RAMSIZE;
++ break;
++ case BCM43241_CHIP_ID:
++ ci->pub.ramsize = 0x90000;
++ break;
++ case BCM4330_CHIP_ID:
++ ci->pub.ramsize = 0x48000;
++ break;
++ case BCM4334_CHIP_ID:
++ ci->pub.ramsize = 0x80000;
++ break;
++ case BCM4335_CHIP_ID:
++ ci->pub.ramsize = 0xc0000;
++ ci->pub.rambase = 0x180000;
++ break;
++ case BCM43362_CHIP_ID:
++ ci->pub.ramsize = 0x3c000;
++ break;
++ case BCM4339_CHIP_ID:
++ case BCM4354_CHIP_ID:
++ ci->pub.ramsize = 0xc0000;
++ ci->pub.rambase = 0x180000;
++ break;
++ default:
++ brcmf_err("unknown chip: %s\n", ci->pub.name);
++ break;
++ }
++}
++
++static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
++ u8 *type)
++{
++ u32 val;
++
++ /* read next descriptor */
++ val = ci->ops->read32(ci->ctx, *eromaddr);
++ *eromaddr += 4;
++
++ if (!type)
++ return val;
++
++ /* determine descriptor type */
++ *type = (val & DMP_DESC_TYPE_MSK);
++ if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
++ *type = DMP_DESC_ADDRESS;
++
++ return val;
++}
++
++static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
++ u32 *regbase, u32 *wrapbase)
++{
++ u8 desc;
++ u32 val;
++ u8 mpnum = 0;
++ u8 stype, sztype, wraptype;
++
++ *regbase = 0;
++ *wrapbase = 0;
++
++ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
++ if (desc == DMP_DESC_MASTER_PORT) {
++ mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
++ wraptype = DMP_SLAVE_TYPE_MWRAP;
++ } else if (desc == DMP_DESC_ADDRESS) {
++ /* revert erom address */
++ *eromaddr -= 4;
++ wraptype = DMP_SLAVE_TYPE_SWRAP;
++ } else {
++ *eromaddr -= 4;
++ return -EILSEQ;
++ }
++
++ do {
++ /* locate address descriptor */
++ do {
++ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
++ /* unexpected table end */
++ if (desc == DMP_DESC_EOT) {
++ *eromaddr -= 4;
++ return -EFAULT;
++ }
++ } while (desc != DMP_DESC_ADDRESS);
++
++ /* skip upper 32-bit address descriptor */
++ if (val & DMP_DESC_ADDRSIZE_GT32)
++ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
++
++ sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
++
++ /* next size descriptor can be skipped */
++ if (sztype == DMP_SLAVE_SIZE_DESC) {
++ val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
++ /* skip upper size descriptor if present */
++ if (val & DMP_DESC_ADDRSIZE_GT32)
++ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
++ }
++
++ /* only look for 4K register regions */
++ if (sztype != DMP_SLAVE_SIZE_4K)
++ continue;
++
++ stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
++
++ /* only regular slave and wrapper */
++ if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
++ *regbase = val & DMP_SLAVE_ADDR_BASE;
++ if (*wrapbase == 0 && stype == wraptype)
++ *wrapbase = val & DMP_SLAVE_ADDR_BASE;
++ } while (*regbase == 0 || *wrapbase == 0);
++
++ return 0;
++}
++
++static
++int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
++{
++ struct brcmf_core *core;
++ u32 eromaddr;
++ u8 desc_type = 0;
++ u32 val;
++ u16 id;
++ u8 nmp, nsp, nmw, nsw, rev;
++ u32 base, wrap;
++ int err;
++
++ eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
++
++ while (desc_type != DMP_DESC_EOT) {
++ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
++ if (!(val & DMP_DESC_VALID))
++ continue;
++
++ if (desc_type == DMP_DESC_EMPTY)
++ continue;
++
++ /* need a component descriptor */
++ if (desc_type != DMP_DESC_COMPONENT)
++ continue;
++
++ id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
++
++ /* next descriptor must be component as well */
++ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
++ if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
++ return -EFAULT;
++
++ /* only look at cores with master port(s) */
++ nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
++ nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
++ nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
++ nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
++ rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
++
++ /* need core with ports */
++ if (nmw + nsw == 0)
++ continue;
++
++ /* try to obtain register address info */
++ err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
++ if (err)
++ continue;
++
++ /* finally a core to be added */
++ core = brcmf_chip_add_core(ci, id, base, wrap);
++ if (IS_ERR(core))
++ return PTR_ERR(core);
++
++ core->rev = rev;
++ }
++
++ return 0;
++}
++
++static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
++{
++ struct brcmf_core *core;
++ u32 regdata;
++ u32 socitype;
++
++ /* Get CC core rev
++ * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
++ * For different chiptypes or old sdio hosts w/o chipcommon,
++ * other ways of recognition should be added here.
++ */
++ regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
++ ci->pub.chip = regdata & CID_ID_MASK;
++ ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
++ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
++
++ brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
++ brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
++ socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
++ ci->pub.chiprev);
++
++ if (socitype == SOCI_SB) {
++ if (ci->pub.chip != BCM4329_CHIP_ID) {
++ brcmf_err("SB chip is not supported\n");
++ return -ENODEV;
++ }
++ ci->iscoreup = brcmf_chip_sb_iscoreup;
++ ci->coredisable = brcmf_chip_sb_coredisable;
++ ci->resetcore = brcmf_chip_sb_resetcore;
++
++ core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
++ SI_ENUM_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
++ BCM4329_CORE_BUS_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
++ BCM4329_CORE_SOCRAM_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
++ BCM4329_CORE_ARM_BASE, 0);
++ brcmf_chip_sb_corerev(ci, core);
++
++ core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
++ brcmf_chip_sb_corerev(ci, core);
++ } else if (socitype == SOCI_AI) {
++ ci->iscoreup = brcmf_chip_ai_iscoreup;
++ ci->coredisable = brcmf_chip_ai_coredisable;
++ ci->resetcore = brcmf_chip_ai_resetcore;
++
++ brcmf_chip_dmp_erom_scan(ci);
++ } else {
++ brcmf_err("chip backplane type %u is not supported\n",
++ socitype);
++ return -ENODEV;
++ }
++
++ brcmf_chip_get_raminfo(ci);
++
++ return brcmf_chip_cores_check(ci);
++}
++
++static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
++{
++ struct brcmf_core *core;
++ struct brcmf_core_priv *cr4;
++ u32 val;
++
++
++ core = brcmf_chip_get_core(&chip->pub, id);
++ if (!core)
++ return;
++
++ switch (id) {
++ case BCMA_CORE_ARM_CM3:
++ brcmf_chip_coredisable(core, 0, 0);
++ break;
++ case BCMA_CORE_ARM_CR4:
++ cr4 = container_of(core, struct brcmf_core_priv, pub);
++
++ /* clear all IOCTL bits except HALT bit */
++ val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
++ val &= ARMCR4_BCMA_IOCTL_CPUHALT;
++ brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
++ ARMCR4_BCMA_IOCTL_CPUHALT);
++ break;
++ default:
++ brcmf_err("unknown id: %u\n", id);
++ break;
++ }
++}
++
++static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_chip *pub;
++ struct brcmf_core_priv *cc;
++ u32 base;
++ u32 val;
++ int ret = 0;
++
++ pub = &chip->pub;
++ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
++ base = cc->pub.base;
++
++ /* get chipcommon capabilities */
++ pub->cc_caps = chip->ops->read32(chip->ctx,
++ CORE_CC_REG(base, capabilities));
++
++ /* get pmu caps & rev */
++ if (pub->cc_caps & CC_CAP_PMU) {
++ val = chip->ops->read32(chip->ctx,
++ CORE_CC_REG(base, pmucapabilities));
++ pub->pmurev = val & PCAP_REV_MASK;
++ pub->pmucaps = val;
++ }
++
++ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
++ cc->pub.rev, pub->pmurev, pub->pmucaps);
++
++ /* execute bus core specific setup */
++ if (chip->ops->setup)
++ ret = chip->ops->setup(chip->ctx, pub);
++
++ /*
++ * Make sure any on-chip ARM is off (in case strapping is wrong),
++ * or downloaded code was already running.
++ */
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
++ return ret;
++}
++
++struct brcmf_chip *brcmf_chip_attach(void *ctx,
++ const struct brcmf_buscore_ops *ops)
++{
++ struct brcmf_chip_priv *chip;
++ int err = 0;
++
++ if (WARN_ON(!ops->read32))
++ err = -EINVAL;
++ if (WARN_ON(!ops->write32))
++ err = -EINVAL;
++ if (WARN_ON(!ops->prepare))
++ err = -EINVAL;
++ if (WARN_ON(!ops->exit_dl))
++ err = -EINVAL;
++ if (err < 0)
++ return ERR_PTR(-EINVAL);
++
++ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
++ if (!chip)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&chip->cores);
++ chip->num_cores = 0;
++ chip->ops = ops;
++ chip->ctx = ctx;
++
++ err = ops->prepare(ctx);
++ if (err < 0)
++ goto fail;
++
++ err = brcmf_chip_recognition(chip);
++ if (err < 0)
++ goto fail;
++
++ err = brcmf_chip_setup(chip);
++ if (err < 0)
++ goto fail;
++
++ return &chip->pub;
++
++fail:
++ brcmf_chip_detach(&chip->pub);
++ return ERR_PTR(err);
++}
++
++void brcmf_chip_detach(struct brcmf_chip *pub)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core_priv *core;
++ struct brcmf_core_priv *tmp;
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ list_for_each_entry_safe(core, tmp, &chip->cores, list) {
++ list_del(&core->list);
++ kfree(core);
++ }
++ kfree(chip);
++}
++
++struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core_priv *core;
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ list_for_each_entry(core, &chip->cores, list)
++ if (core->pub.id == coreid)
++ return &core->pub;
++
++ return NULL;
++}
++
++struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core_priv *cc;
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
++ if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
++ return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
++ return &cc->pub;
++}
++
++bool brcmf_chip_iscoreup(struct brcmf_core *pub)
++{
++ struct brcmf_core_priv *core;
++
++ core = container_of(pub, struct brcmf_core_priv, pub);
++ return core->chip->iscoreup(core);
++}
++
++void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
++{
++ struct brcmf_core_priv *core;
++
++ core = container_of(pub, struct brcmf_core_priv, pub);
++ core->chip->coredisable(core, prereset, reset);
++}
++
++void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
++ u32 postreset)
++{
++ struct brcmf_core_priv *core;
++
++ core = container_of(pub, struct brcmf_core_priv, pub);
++ core->chip->resetcore(core, prereset, reset, postreset);
++}
++
++static void
++brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_core *core;
++
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
++ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN);
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
++ brcmf_chip_resetcore(core, 0, 0, 0);
++}
++
++static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_core *core;
++
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
++ if (!brcmf_chip_iscoreup(core)) {
++ brcmf_err("SOCRAM core is down after reset?\n");
++ return false;
++ }
++
++ chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
++
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
++ brcmf_chip_resetcore(core, 0, 0, 0);
++
++ return true;
++}
++
++static inline void
++brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
++{
++ struct brcmf_core *core;
++
++ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
++
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
++ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN,
++ D11_BCMA_IOCTL_PHYCLOCKEN);
++}
++
++static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
++{
++ struct brcmf_core *core;
++
++ chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
++
++ /* restore ARM */
++ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
++ brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
++
++ return true;
++}
++
++void brcmf_chip_enter_download(struct brcmf_chip *pub)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core *arm;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
++ if (arm) {
++ brcmf_chip_cr4_enterdl(chip);
++ return;
++ }
++
++ brcmf_chip_cm3_enterdl(chip);
++}
++
++bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
++{
++ struct brcmf_chip_priv *chip;
++ struct brcmf_core *arm;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
++ if (arm)
++ return brcmf_chip_cr4_exitdl(chip, rstvec);
++
++ return brcmf_chip_cm3_exitdl(chip);
++}
++
++bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
++{
++ u32 base, addr, reg, pmu_cc3_mask = ~0;
++ struct brcmf_chip_priv *chip;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ /* old chips with PMU version less than 17 don't support save restore */
++ if (pub->pmurev < 17)
++ return false;
++
++ base = brcmf_chip_get_chipcommon(pub)->base;
++ chip = container_of(pub, struct brcmf_chip_priv, pub);
++
++ switch (pub->chip) {
++ case BCM4354_CHIP_ID:
++ /* explicitly check SR engine enable bit */
++ pmu_cc3_mask = BIT(2);
++ /* fall-through */
++ case BCM43241_CHIP_ID:
++ case BCM4335_CHIP_ID:
++ case BCM4339_CHIP_ID:
++ /* read PMU chipcontrol register 3 */
++ addr = CORE_CC_REG(base, chipcontrol_addr);
++ chip->ops->write32(chip->ctx, addr, 3);
++ addr = CORE_CC_REG(base, chipcontrol_data);
++ reg = chip->ops->read32(chip->ctx, addr);
++ return (reg & pmu_cc3_mask) != 0;
++ default:
++ addr = CORE_CC_REG(base, pmucapabilities_ext);
++ reg = chip->ops->read32(chip->ctx, addr);
++ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
++ return false;
++
++ addr = CORE_CC_REG(base, retention_ctl);
++ reg = chip->ops->read32(chip->ctx, addr);
++ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
++ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
++ }
++}
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/chip.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/chip.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/chip.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/chip.h 2015-05-01 14:58:03.987427001 -0500
+@@ -0,0 +1,91 @@
++/*
++ * Copyright (c) 2014 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#ifndef BRCMF_CHIP_H
++#define BRCMF_CHIP_H
++
++#include <linux/types.h>
++
++#define CORE_CC_REG(base, field) \
++ (base + offsetof(struct chipcregs, field))
++
++/**
++ * struct brcmf_chip - chip level information.
++ *
++ * @chip: chip identifier.
++ * @chiprev: chip revision.
++ * @cc_caps: chipcommon core capabilities.
++ * @pmucaps: PMU capabilities.
++ * @pmurev: PMU revision.
++ * @rambase: RAM base address (only applicable for ARM CR4 chips).
++ * @ramsize: amount of RAM on chip.
++ * @name: string representation of the chip identifier.
++ */
++struct brcmf_chip {
++ u32 chip;
++ u32 chiprev;
++ u32 cc_caps;
++ u32 pmucaps;
++ u32 pmurev;
++ u32 rambase;
++ u32 ramsize;
++ char name[8];
++};
++
++/**
++ * struct brcmf_core - core related information.
++ *
++ * @id: core identifier.
++ * @rev: core revision.
++ * @base: base address of core register space.
++ */
++struct brcmf_core {
++ u16 id;
++ u16 rev;
++ u32 base;
++};
++
++/**
++ * struct brcmf_buscore_ops - buscore specific callbacks.
++ *
++ * @read32: read 32-bit value over bus.
++ * @write32: write 32-bit value over bus.
++ * @prepare: prepare bus for core configuration.
++ * @setup: bus-specific core setup.
++ * @exit_dl: exit download state.
++ * The callback should use the provided @rstvec when non-zero.
++ */
++struct brcmf_buscore_ops {
++ u32 (*read32)(void *ctx, u32 addr);
++ void (*write32)(void *ctx, u32 addr, u32 value);
++ int (*prepare)(void *ctx);
++ int (*setup)(void *ctx, struct brcmf_chip *chip);
++ void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
++};
++
++struct brcmf_chip *brcmf_chip_attach(void *ctx,
++ const struct brcmf_buscore_ops *ops);
++void brcmf_chip_detach(struct brcmf_chip *chip);
++struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
++struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
++bool brcmf_chip_iscoreup(struct brcmf_core *core);
++void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
++void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
++ u32 postreset);
++void brcmf_chip_enter_download(struct brcmf_chip *ci);
++bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
++bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
++
++#endif /* BRCMF_CHIP_H */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h 2015-05-01 14:58:03.987427001 -0500
+@@ -63,7 +63,6 @@
+ */
+ struct brcmf_bus_ops {
+ int (*preinit)(struct device *dev);
+- int (*init)(struct device *dev);
+ void (*stop)(struct device *dev);
+ int (*txdata)(struct device *dev, struct sk_buff *skb);
+ int (*txctl)(struct device *dev, unsigned char *msg, uint len);
+@@ -99,6 +98,7 @@
+ unsigned long tx_realloc;
+ u32 chip;
+ u32 chiprev;
++ bool always_use_fws_queue;
+
+ struct brcmf_bus_ops *ops;
+ };
+@@ -113,11 +113,6 @@
+ return bus->ops->preinit(bus->dev);
+ }
+
+-static inline int brcmf_bus_init(struct brcmf_bus *bus)
+-{
+- return bus->ops->init(bus->dev);
+-}
+-
+ static inline void brcmf_bus_stop(struct brcmf_bus *bus)
+ {
+ bus->ops->stop(bus->dev);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c 2015-05-01 14:58:04.015427001 -0500
+@@ -32,6 +32,9 @@
+ #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
+ #define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
+
++/* boost value for RSSI_DELTA in preferred join selection */
++#define BRCMF_JOIN_PREF_RSSI_BOOST 8
++
+
+ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
+ struct sk_buff *pkt, int prec)
+@@ -246,6 +249,7 @@
+ {
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ u8 buf[BRCMF_DCMD_SMLEN];
++ struct brcmf_join_pref_params join_pref_params[2];
+ char *ptr;
+ s32 err;
+
+@@ -298,6 +302,20 @@
+ goto done;
+ }
+
++	/* Set up join_pref to select target by RSSI (with boost on 5GHz) */
++ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
++ join_pref_params[0].len = 2;
++ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
++ join_pref_params[0].band = WLC_BAND_5G;
++ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
++ join_pref_params[1].len = 2;
++ join_pref_params[1].rssi_gain = 0;
++ join_pref_params[1].band = 0;
++ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
++ sizeof(join_pref_params));
++ if (err)
++ brcmf_err("Set join_pref error (%d)\n", err);
++
+ /* Setup event_msgs, enable E_IF */
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd.h 2015-05-01 14:58:04.015427001 -0500
+@@ -186,7 +186,7 @@
+ void brcmf_txflowblock_if(struct brcmf_if *ifp,
+ enum brcmf_netif_stop_reason reason, bool state);
+ u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
++void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
+ bool success);
+
+ /* Sets dongle media info (drv_version, mac address). */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c 2015-05-01 14:58:04.015427001 -0500
+@@ -190,7 +190,7 @@
+ int ret;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
+- struct ethhdr *eh;
++ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+
+ brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
+
+@@ -236,6 +236,9 @@
+ goto done;
+ }
+
++ if (eh->h_proto == htons(ETH_P_PAE))
++ atomic_inc(&ifp->pend_8021x_cnt);
++
+ ret = brcmf_fws_process_skb(ifp, skb);
+
+ done:
+@@ -511,7 +514,7 @@
+
+ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+ {
+- struct brcmf_if *ifp;
++ struct brcmf_if *ifp = NULL;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_skb_reorder_data *rd;
+@@ -522,7 +525,7 @@
+
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+- ifp = drvr->iflist[ifidx];
++ if (!ret) ifp = drvr->iflist[ifidx];
+
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+@@ -538,31 +541,26 @@
+ brcmf_netif_rx(ifp, skb);
+ }
+
+-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
++void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
+ bool success)
+ {
+ struct brcmf_if *ifp;
+ struct ethhdr *eh;
+- u8 ifidx;
+ u16 type;
+- int res;
+-
+- res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
+
+ ifp = drvr->iflist[ifidx];
+ if (!ifp)
+ goto done;
+
+- if (res == 0) {
+- eh = (struct ethhdr *)(txp->data);
+- type = ntohs(eh->h_proto);
+-
+- if (type == ETH_P_PAE) {
+- atomic_dec(&ifp->pend_8021x_cnt);
+- if (waitqueue_active(&ifp->pend_8021x_wait))
+- wake_up(&ifp->pend_8021x_wait);
+- }
++ eh = (struct ethhdr *)(txp->data);
++ type = ntohs(eh->h_proto);
++
++ if (type == ETH_P_PAE) {
++ atomic_dec(&ifp->pend_8021x_cnt);
++ if (waitqueue_active(&ifp->pend_8021x_wait))
++ wake_up(&ifp->pend_8021x_wait);
+ }
++
+ if (!success)
+ ifp->stats.tx_errors++;
+ done:
+@@ -573,13 +571,17 @@
+ {
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
++ u8 ifidx;
+
+ /* await txstatus signal for firmware if active */
+ if (brcmf_fws_fc_active(drvr->fws)) {
+ if (!success)
+ brcmf_fws_bustxfail(drvr->fws, txp);
+ } else {
+- brcmf_txfinalize(drvr, txp, success);
++ if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
++ brcmu_pkt_buf_free_skb(txp);
++ else
++ brcmf_txfinalize(drvr, txp, ifidx, success);
+ }
+ }
+
+@@ -914,13 +916,6 @@
+
+ brcmf_dbg(TRACE, "\n");
+
+- /* Bring up the bus */
+- ret = brcmf_bus_init(bus_if);
+- if (ret != 0) {
+- brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
+- return ret;
+- }
+-
+ /* add primary networking interface */
+ ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
+ if (IS_ERR(ifp))
+@@ -1040,12 +1035,12 @@
+
+ brcmf_cfg80211_detach(drvr->config);
+
++ brcmf_fws_deinit(drvr);
++
+ brcmf_bus_detach(drvr);
+
+ brcmf_proto_detach(drvr);
+
+- brcmf_fws_deinit(drvr);
+-
+ brcmf_debugfs_detach(drvr);
+ bus_if->drvr = NULL;
+ kfree(drvr);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c 2015-05-01 14:58:04.019427001 -0500
+@@ -23,6 +23,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+ #include <linux/mmc/sdio.h>
++#include <linux/mmc/sdio_ids.h>
+ #include <linux/mmc/sdio_func.h>
+ #include <linux/mmc/card.h>
+ #include <linux/semaphore.h>
+@@ -40,8 +41,8 @@
+ #include <brcm_hw_ids.h>
+ #include <soc.h>
+ #include "sdio_host.h"
+-#include "sdio_chip.h"
+-#include "nvram.h"
++#include "chip.h"
++#include "firmware.h"
+
+ #define DCMD_RESP_TIMEOUT 2000 /* In milli second */
+
+@@ -112,8 +113,6 @@
+ #define BRCMF_TXBOUND 20 /* Default for max tx frames in
+ one scheduling */
+
+-#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
+-
+ #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+ #define MEMBLOCK 2048 /* Block size used for downloading
+@@ -156,6 +155,34 @@
+ /* manfid tuple length, include tuple, link bytes */
+ #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+
++#define CORE_BUS_REG(base, field) \
++ (base + offsetof(struct sdpcmd_regs, field))
++
++/* SDIO function 1 register CHIPCLKCSR */
++/* Force ALP request to backplane */
++#define SBSDIO_FORCE_ALP 0x01
++/* Force HT request to backplane */
++#define SBSDIO_FORCE_HT 0x02
++/* Force ILP request to backplane */
++#define SBSDIO_FORCE_ILP 0x04
++/* Make ALP ready (power up xtal) */
++#define SBSDIO_ALP_AVAIL_REQ 0x08
++/* Make HT ready (power up PLL) */
++#define SBSDIO_HT_AVAIL_REQ 0x10
++/* Squelch clock requests from HW */
++#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
++/* Status: ALP is ready */
++#define SBSDIO_ALP_AVAIL 0x40
++/* Status: HT is ready */
++#define SBSDIO_HT_AVAIL 0x80
++#define SBSDIO_CSR_MASK 0x1F
++#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
++#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
++#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
++#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
++#define SBSDIO_CLKAV(regval, alponly) \
++ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
++
+ /* intstatus */
+ #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+ #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+@@ -276,7 +303,6 @@
+ /* Flags for SDH calls */
+ #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+-#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
+ #define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ * when idle
+ */
+@@ -433,10 +459,11 @@
+ bool alp_only; /* Don't use HT clock (ALP only) */
+
+ u8 *ctrl_frame_buf;
+- u32 ctrl_frame_len;
++ u16 ctrl_frame_len;
+ bool ctrl_frame_stat;
+
+- spinlock_t txqlock;
++ spinlock_t txq_lock; /* protect bus->txq */
++ struct semaphore tx_seq_lock; /* protect bus->tx_seq */
+ wait_queue_head_t ctrl_wait;
+ wait_queue_head_t dcmd_resp_wait;
+
+@@ -483,16 +510,58 @@
+
+ #define ALIGNMENT 4
+
+-static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+-module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
+-MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+-
+ enum brcmf_sdio_frmtype {
+ BRCMF_SDIO_FT_NORMAL,
+ BRCMF_SDIO_FT_SUPER,
+ BRCMF_SDIO_FT_SUB,
+ };
+
++#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
++
++/* SDIO Pad drive strength to select value mappings */
++struct sdiod_drive_str {
++ u8 strength; /* Pad Drive Strength in mA */
++ u8 sel; /* Chip-specific select value */
++};
++
++/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
++static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
++ {32, 0x6},
++ {26, 0x7},
++ {22, 0x4},
++ {16, 0x5},
++ {12, 0x2},
++ {8, 0x3},
++ {4, 0x0},
++ {0, 0x1}
++};
++
++/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
++static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
++ {6, 0x7},
++ {5, 0x6},
++ {4, 0x5},
++ {3, 0x4},
++ {2, 0x2},
++ {1, 0x1},
++ {0, 0x0}
++};
++
++/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
++static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
++ {3, 0x3},
++ {2, 0x2},
++ {1, 0x1},
++ {0, 0x0} };
++
++/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
++static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
++ {16, 0x7},
++ {12, 0x5},
++ {8, 0x3},
++ {4, 0x1}
++};
++
+ #define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
+ #define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
+ #define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
+@@ -511,6 +580,8 @@
+ #define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
+ #define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
+ #define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
++#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
++#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
+
+ MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+ MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+@@ -530,6 +601,8 @@
+ MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
+ MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
+ MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
++MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
++MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
+
+ struct brcmf_firmware_names {
+ u32 chipid;
+@@ -555,46 +628,32 @@
+ { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+ { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+ { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+- { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
++ { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
++ { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
+ };
+
+-
+-static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
+- enum brcmf_firmware_type type)
++static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
++ enum brcmf_firmware_type type)
+ {
+- const struct firmware *fw;
+- const char *name;
+- int err, i;
++ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+- if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+- brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
++ if (brcmf_fwname_data[i].chipid == ci->chip &&
++ brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
+ switch (type) {
+ case BRCMF_FIRMWARE_BIN:
+- name = brcmf_fwname_data[i].bin;
+- break;
++ return brcmf_fwname_data[i].bin;
+ case BRCMF_FIRMWARE_NVRAM:
+- name = brcmf_fwname_data[i].nv;
+- break;
++ return brcmf_fwname_data[i].nv;
+ default:
+ brcmf_err("invalid firmware type (%d)\n", type);
+ return NULL;
+ }
+- goto found;
+ }
+ }
+ brcmf_err("Unknown chipid %d [%d]\n",
+- bus->ci->chip, bus->ci->chiprev);
++ ci->chip, ci->chiprev);
+ return NULL;
+-
+-found:
+- err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+- if ((err) || (!fw)) {
+- brcmf_err("fail to request firmware %s (%d)\n", name, err);
+- return NULL;
+- }
+-
+- return fw;
+ }
+
+ static void pkt_align(struct sk_buff *p, int len, int align)
+@@ -618,27 +677,24 @@
+ * Reads a register in the SDIO hardware block. This block occupies a series of
+ * adresses on the 32 bit backplane bus.
+ */
+-static int
+-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
++static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+ {
+- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
++ struct brcmf_core *core;
+ int ret;
+
+- *regvar = brcmf_sdiod_regrl(bus->sdiodev,
+- bus->ci->c_inf[idx].base + offset, &ret);
++ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
++ *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
+
+ return ret;
+ }
+
+-static int
+-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
++static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+ {
+- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
++ struct brcmf_core *core;
+ int ret;
+
+- brcmf_sdiod_regwl(bus->sdiodev,
+- bus->ci->c_inf[idx].base + reg_offset,
+- regval, &ret);
++ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
++ brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
+
+ return ret;
+ }
+@@ -650,16 +706,12 @@
+ int err = 0;
+ int try_cnt = 0;
+
+- brcmf_dbg(TRACE, "Enter\n");
++ brcmf_dbg(TRACE, "Enter: on=%d\n", on);
+
+ wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+ /* 1st KSO write goes to AOS wake up core if device is asleep */
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
+- if (err) {
+- brcmf_err("SDIO_AOS KSO write error: %d\n", err);
+- return err;
+- }
+
+ if (on) {
+ /* device WAKEUP through KSO:
+@@ -689,18 +741,22 @@
+ &err);
+ if (((rd_val & bmask) == cmp_val) && !err)
+ break;
+- brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
+- try_cnt, MAX_KSO_ATTEMPTS, err);
++
+ udelay(KSO_WAIT_US);
+ brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ wr_val, &err);
+ } while (try_cnt++ < MAX_KSO_ATTEMPTS);
+
++ if (try_cnt > 2)
++ brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
++ rd_val, err);
++
++ if (try_cnt > MAX_KSO_ATTEMPTS)
++ brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
++
+ return err;
+ }
+
+-#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
+-
+ #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+ /* Turn backplane clock on or off */
+@@ -799,7 +855,6 @@
+ }
+ #endif /* defined (DEBUG) */
+
+- bus->activity = true;
+ } else {
+ clkreq = 0;
+
+@@ -899,8 +954,9 @@
+ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+ {
+ int err = 0;
+- brcmf_dbg(TRACE, "Enter\n");
+- brcmf_dbg(SDIO, "request %s currently %s\n",
++ u8 clkcsr;
++
++ brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE"));
+
+@@ -917,8 +973,20 @@
+ atomic_read(&bus->ipend) > 0 ||
+ (!atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+- data_ok(bus)))
+- return -EBUSY;
++ data_ok(bus))) {
++ err = -EBUSY;
++ goto done;
++ }
++
++ clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR,
++ &err);
++ if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
++ brcmf_dbg(SDIO, "no clock, set ALP\n");
++ brcmf_sdiod_regwb(bus->sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR,
++ SBSDIO_ALP_AVAIL_REQ, &err);
++ }
+ err = brcmf_sdio_kso_control(bus, false);
+ /* disable watchdog */
+ if (!err)
+@@ -935,7 +1003,7 @@
+ } else {
+ brcmf_err("error while changing bus sleep state %d\n",
+ err);
+- return err;
++ goto done;
+ }
+ }
+
+@@ -947,11 +1015,92 @@
+ } else {
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
+ }
+-
++done:
++ brcmf_dbg(SDIO, "Exit: err=%d\n", err);
+ return err;
+
+ }
+
++#ifdef DEBUG
++static inline bool brcmf_sdio_valid_shared_address(u32 addr)
++{
++ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
++}
++
++static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
++ struct sdpcm_shared *sh)
++{
++ u32 addr;
++ int rv;
++ u32 shaddr = 0;
++ struct sdpcm_shared_le sh_le;
++ __le32 addr_le;
++
++ shaddr = bus->ci->rambase + bus->ramsize - 4;
++
++ /*
++ * Read last word in socram to determine
++ * address of sdpcm_shared structure
++ */
++ sdio_claim_host(bus->sdiodev->func[1]);
++ brcmf_sdio_bus_sleep(bus, false, false);
++ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
++ sdio_release_host(bus->sdiodev->func[1]);
++ if (rv < 0)
++ return rv;
++
++ addr = le32_to_cpu(addr_le);
++
++ brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
++
++ /*
++ * Check if addr is valid.
++ * NVRAM length at the end of memory should have been overwritten.
++ */
++ if (!brcmf_sdio_valid_shared_address(addr)) {
++ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
++ addr);
++ return -EINVAL;
++ }
++
++ /* Read hndrte_shared structure */
++ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
++ sizeof(struct sdpcm_shared_le));
++ if (rv < 0)
++ return rv;
++
++ /* Endianness */
++ sh->flags = le32_to_cpu(sh_le.flags);
++ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
++ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
++ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
++ sh->assert_line = le32_to_cpu(sh_le.assert_line);
++ sh->console_addr = le32_to_cpu(sh_le.console_addr);
++ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
++
++ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
++ brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
++ SDPCM_SHARED_VERSION,
++ sh->flags & SDPCM_SHARED_VERSION_MASK);
++ return -EPROTO;
++ }
++
++ return 0;
++}
++
++static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
++{
++ struct sdpcm_shared sh;
++
++ if (brcmf_sdio_readshared(bus, &sh) == 0)
++ bus->console_addr = sh.console_addr;
++}
++#else
++static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
++{
++}
++#endif /* DEBUG */
++
+ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
+ {
+ u32 intstatus = 0;
+@@ -995,6 +1144,12 @@
+ else
+ brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
+ bus->sdpcm_ver);
++
++ /*
++ * Retrieve console state address now that firmware should have
++ * updated it.
++ */
++ brcmf_sdio_get_console_addr(bus);
+ }
+
+ /*
+@@ -1083,6 +1238,28 @@
+ bus->cur_read.len = 0;
+ }
+
++static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
++{
++ struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
++ u8 i, hi, lo;
++
++ /* On failure, abort the command and terminate the frame */
++ brcmf_err("sdio error, abort command and terminate frame\n");
++ bus->sdcnt.tx_sderrs++;
++
++ brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
++ bus->sdcnt.f1regdata++;
++
++ for (i = 0; i < 3; i++) {
++ hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
++ lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
++ bus->sdcnt.f1regdata += 2;
++ if ((hi == 0) && (lo == 0))
++ break;
++ }
++}
++
+ /* return total length of buffer chain */
+ static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
+ {
+@@ -1955,7 +2132,7 @@
+ memcpy(pkt_pad->data,
+ pkt->data + pkt->len - tail_chop,
+ tail_chop);
+- *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
++ *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ skb_trim(pkt, pkt->len - tail_chop);
+ skb_trim(pkt_pad, tail_pad + tail_chop);
+ __skb_queue_after(pktq, pkt, pkt_pad);
+@@ -2003,7 +2180,7 @@
+ * already properly aligned and does not
+ * need an sdpcm header.
+ */
+- if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
++ if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+ continue;
+
+ /* align packet data pointer */
+@@ -2037,10 +2214,10 @@
+ if (BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+- brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
++ brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
+ "Tx Frame:\n");
+ else if (BRCMF_HDRS_ON())
+- brcmf_dbg_hex_dump(true, pkt_next,
++ brcmf_dbg_hex_dump(true, pkt_next->data,
+ head_pad + bus->tx_hdrlen,
+ "Tx Header:\n");
+ }
+@@ -2067,11 +2244,11 @@
+ u8 *hdr;
+ u32 dat_offset;
+ u16 tail_pad;
+- u32 dummy_flags, chop_len;
++ u16 dummy_flags, chop_len;
+ struct sk_buff *pkt_next, *tmp, *pkt_prev;
+
+ skb_queue_walk_safe(pktq, pkt_next, tmp) {
+- dummy_flags = *(u32 *)(pkt_next->cb);
++ dummy_flags = *(u16 *)(pkt_next->cb);
+ if (dummy_flags & ALIGN_SKB_FLAG) {
+ chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
+ if (chop_len) {
+@@ -2100,7 +2277,6 @@
+ uint chan)
+ {
+ int ret;
+- int i;
+ struct sk_buff *pkt_next, *tmp;
+
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -2113,28 +2289,9 @@
+ ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
+ bus->sdcnt.f2txdata++;
+
+- if (ret < 0) {
+- /* On failure, abort the command and terminate the frame */
+- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+- ret);
+- bus->sdcnt.tx_sderrs++;
++ if (ret < 0)
++ brcmf_sdio_txfail(bus);
+
+- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+- SFC_WF_TERM, NULL);
+- bus->sdcnt.f1regdata++;
+-
+- for (i = 0; i < 3; i++) {
+- u8 hi, lo;
+- hi = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+- lo = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+- bus->sdcnt.f1regdata += 2;
+- if ((hi == 0) && (lo == 0))
+- break;
+- }
+- }
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ done:
+@@ -2164,13 +2321,15 @@
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
+ pkt_num = 1;
+- __skb_queue_head_init(&pktq);
++ if (down_interruptible(&bus->tx_seq_lock))
++ return cnt;
+ if (bus->txglom)
+ pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
+- brcmf_sdio_txglomsz);
++ bus->sdiodev->txglomsz);
+ pkt_num = min_t(u32, pkt_num,
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
+- spin_lock_bh(&bus->txqlock);
++ __skb_queue_head_init(&pktq);
++ spin_lock_bh(&bus->txq_lock);
+ for (i = 0; i < pkt_num; i++) {
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
+ &prec_out);
+@@ -2178,15 +2337,19 @@
+ break;
+ __skb_queue_tail(&pktq, pkt);
+ }
+- spin_unlock_bh(&bus->txqlock);
+- if (i == 0)
++ spin_unlock_bh(&bus->txq_lock);
++ if (i == 0) {
++ up(&bus->tx_seq_lock);
+ break;
++ }
+
+ ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
++ up(&bus->tx_seq_lock);
++
+ cnt += i;
+
+ /* In poll mode, need to check for other events */
+- if (!bus->intr && cnt) {
++ if (!bus->intr) {
+ /* Check device status, signal pending interrupt */
+ sdio_claim_host(bus->sdiodev->func[1]);
+ ret = r_sdreg32(bus, &intstatus,
+@@ -2211,6 +2374,68 @@
+ return cnt;
+ }
+
++static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
++{
++ u8 doff;
++ u16 pad;
++ uint retries = 0;
++ struct brcmf_sdio_hdrinfo hd_info = {0};
++ int ret;
++
++ brcmf_dbg(TRACE, "Enter\n");
++
++ /* Back the pointer to make room for bus header */
++ frame -= bus->tx_hdrlen;
++ len += bus->tx_hdrlen;
++
++ /* Add alignment padding (optional for ctl frames) */
++ doff = ((unsigned long)frame % bus->head_align);
++ if (doff) {
++ frame -= doff;
++ len += doff;
++ memset(frame + bus->tx_hdrlen, 0, doff);
++ }
++
++ /* Round send length to next SDIO block */
++ pad = 0;
++ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
++ pad = bus->blocksize - (len % bus->blocksize);
++ if ((pad > bus->roundup) || (pad >= bus->blocksize))
++ pad = 0;
++ } else if (len % bus->head_align) {
++ pad = bus->head_align - (len % bus->head_align);
++ }
++ len += pad;
++
++ hd_info.len = len - pad;
++ hd_info.channel = SDPCM_CONTROL_CHANNEL;
++ hd_info.dat_offset = doff + bus->tx_hdrlen;
++ hd_info.seq_num = bus->tx_seq;
++ hd_info.lastfrm = true;
++ hd_info.tail_pad = pad;
++ brcmf_sdio_hdpack(bus, frame, &hd_info);
++
++ if (bus->txglom)
++ brcmf_sdio_update_hwhdr(frame, len);
++
++ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
++ frame, len, "Tx Frame:\n");
++ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
++ BRCMF_HDRS_ON(),
++ frame, min_t(u16, len, 16), "TxHdr:\n");
++
++ do {
++ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
++
++ if (ret < 0)
++ brcmf_sdio_txfail(bus);
++ else
++ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
++ } while (ret < 0 && retries++ < TXRETRIES);
++
++ return ret;
++}
++
+ static void brcmf_sdio_bus_stop(struct device *dev)
+ {
+ u32 local_hostintmask;
+@@ -2292,21 +2517,29 @@
+ }
+ }
+
++static void atomic_orr(int val, atomic_t *v)
++{
++ int old_val;
++
++ old_val = atomic_read(v);
++ while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
++ old_val = atomic_read(v);
++}
++
+ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
+ {
+- u8 idx;
++ struct brcmf_core *buscore;
+ u32 addr;
+ unsigned long val;
+- int n, ret;
++ int ret;
+
+- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+- addr = bus->ci->c_inf[idx].base +
+- offsetof(struct sdpcmd_regs, intstatus);
++ buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
++ addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+
+ val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+ bus->sdcnt.f1regdata++;
+ if (ret != 0)
+- val = 0;
++ return ret;
+
+ val &= bus->hostintmask;
+ atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
+@@ -2315,13 +2548,7 @@
+ if (val) {
+ brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+ bus->sdcnt.f1regdata++;
+- }
+-
+- if (ret) {
+- atomic_set(&bus->intstatus, 0);
+- } else if (val) {
+- for_each_set_bit(n, &val, 32)
+- set_bit(n, (unsigned long *)&bus->intstatus.counter);
++ atomic_orr(val, &bus->intstatus);
+ }
+
+ return ret;
+@@ -2331,10 +2558,9 @@
+ {
+ u32 newstatus = 0;
+ unsigned long intstatus;
+- uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
+ uint txlimit = bus->txbound; /* Tx frames to send before resched */
+- uint framecnt = 0; /* Temporary counter of tx/rx frames */
+- int err = 0, n;
++ uint framecnt; /* Temporary counter of tx/rx frames */
++ int err = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+@@ -2431,70 +2657,38 @@
+ intstatus &= ~I_HMB_FRAME_IND;
+
+ /* On frame indication, read available frames */
+- if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
+- framecnt = brcmf_sdio_readframes(bus, rxlimit);
++ if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
++ brcmf_sdio_readframes(bus, bus->rxbound);
+ if (!bus->rxpending)
+ intstatus &= ~I_HMB_FRAME_IND;
+- rxlimit -= min(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+- if (intstatus) {
+- for_each_set_bit(n, &intstatus, 32)
+- set_bit(n, (unsigned long *)&bus->intstatus.counter);
+- }
++ if (intstatus)
++ atomic_orr(intstatus, &bus->intstatus);
+
+ brcmf_sdio_clrintr(bus);
+
+- if (data_ok(bus) && bus->ctrl_frame_stat &&
+- (bus->clkstate == CLK_AVAIL)) {
+- int i;
+-
+- sdio_claim_host(bus->sdiodev->func[1]);
+- err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
+- (u32)bus->ctrl_frame_len);
+-
+- if (err < 0) {
+- /* On failure, abort the command and
+- terminate the frame */
+- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+- err);
+- bus->sdcnt.tx_sderrs++;
+-
+- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+-
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+- SFC_WF_TERM, &err);
+- bus->sdcnt.f1regdata++;
+-
+- for (i = 0; i < 3; i++) {
+- u8 hi, lo;
+- hi = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCHI,
+- &err);
+- lo = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCLO,
+- &err);
+- bus->sdcnt.f1regdata += 2;
+- if ((hi == 0) && (lo == 0))
+- break;
+- }
++ if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
++ (down_interruptible(&bus->tx_seq_lock) == 0)) {
++ if (data_ok(bus)) {
++ sdio_claim_host(bus->sdiodev->func[1]);
++ err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
++ bus->ctrl_frame_len);
++ sdio_release_host(bus->sdiodev->func[1]);
+
+- } else {
+- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
++ bus->ctrl_frame_stat = false;
++ brcmf_sdio_wait_event_wakeup(bus);
+ }
+- sdio_release_host(bus->sdiodev->func[1]);
+- bus->ctrl_frame_stat = false;
+- brcmf_sdio_wait_event_wakeup(bus);
++ up(&bus->tx_seq_lock);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+- else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
+- brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
+- && data_ok(bus)) {
++ if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
++ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
++ data_ok(bus)) {
+ framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
+ txlimit;
+- framecnt = brcmf_sdio_sendfromq(bus, framecnt);
+- txlimit -= framecnt;
++ brcmf_sdio_sendfromq(bus, framecnt);
+ }
+
+ if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
+@@ -2504,19 +2698,9 @@
+ atomic_read(&bus->ipend) > 0 ||
+ (!atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+- data_ok(bus)) || PKT_AVAILABLE()) {
++ data_ok(bus))) {
+ atomic_inc(&bus->dpc_tskcnt);
+ }
+-
+- /* If we're done for now, turn off clock request. */
+- if ((bus->clkstate != CLK_PENDING)
+- && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
+- bus->activity = false;
+- brcmf_dbg(SDIO, "idle state\n");
+- sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_sdio_bus_sleep(bus, true, false);
+- sdio_release_host(bus->sdiodev->func[1]);
+- }
+ }
+
+ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
+@@ -2531,15 +2715,12 @@
+ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
+ {
+ int ret = -EBADE;
+- uint datalen, prec;
++ uint prec;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+- ulong flags;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+
+- datalen = pkt->len;
++ brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
+
+ /* Add space for the header */
+ skb_push(pkt, bus->tx_hdrlen);
+@@ -2553,7 +2734,9 @@
+ bus->sdcnt.fcqueued++;
+
+ /* Priority based enq */
+- spin_lock_irqsave(&bus->txqlock, flags);
++ spin_lock_bh(&bus->txq_lock);
++ /* reset bus_flags in packet cb */
++ *(u16 *)(pkt->cb) = 0;
+ if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
+ skb_pull(pkt, bus->tx_hdrlen);
+ brcmf_err("out of bus->txq !!!\n");
+@@ -2566,7 +2749,7 @@
+ bus->txoff = true;
+ brcmf_txflowblock(bus->sdiodev->dev, true);
+ }
+- spin_unlock_irqrestore(&bus->txqlock, flags);
++ spin_unlock_bh(&bus->txq_lock);
+
+ #ifdef DEBUG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+@@ -2661,110 +2844,27 @@
+ }
+ #endif /* DEBUG */
+
+-static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
+-{
+- int i;
+- int ret;
+-
+- bus->ctrl_frame_stat = false;
+- ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
+-
+- if (ret < 0) {
+- /* On failure, abort the command and terminate the frame */
+- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+- ret);
+- bus->sdcnt.tx_sderrs++;
+-
+- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+-
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+- SFC_WF_TERM, NULL);
+- bus->sdcnt.f1regdata++;
+-
+- for (i = 0; i < 3; i++) {
+- u8 hi, lo;
+- hi = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+- lo = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+- bus->sdcnt.f1regdata += 2;
+- if (hi == 0 && lo == 0)
+- break;
+- }
+- return ret;
+- }
+-
+- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+-
+- return ret;
+-}
+-
+ static int
+ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+ {
+- u8 *frame;
+- u16 len, pad;
+- uint retries = 0;
+- u8 doff = 0;
+- int ret = -1;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+- struct brcmf_sdio_hdrinfo hd_info = {0};
++ int ret = -1;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+- /* Back the pointer to make a room for bus header */
+- frame = msg - bus->tx_hdrlen;
+- len = (msglen += bus->tx_hdrlen);
++ if (down_interruptible(&bus->tx_seq_lock))
++ return -EINTR;
+
+- /* Add alignment padding (optional for ctl frames) */
+- doff = ((unsigned long)frame % bus->head_align);
+- if (doff) {
+- frame -= doff;
+- len += doff;
+- msglen += doff;
+- memset(frame, 0, doff + bus->tx_hdrlen);
+- }
+- /* precondition: doff < bus->head_align */
+- doff += bus->tx_hdrlen;
+-
+- /* Round send length to next SDIO block */
+- pad = 0;
+- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+- pad = bus->blocksize - (len % bus->blocksize);
+- if ((pad > bus->roundup) || (pad >= bus->blocksize))
+- pad = 0;
+- } else if (len % bus->head_align) {
+- pad = bus->head_align - (len % bus->head_align);
+- }
+- len += pad;
+-
+- /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
+-
+- /* Make sure backplane clock is on */
+- sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_sdio_bus_sleep(bus, false, false);
+- sdio_release_host(bus->sdiodev->func[1]);
+-
+- hd_info.len = (u16)msglen;
+- hd_info.channel = SDPCM_CONTROL_CHANNEL;
+- hd_info.dat_offset = doff;
+- hd_info.seq_num = bus->tx_seq;
+- hd_info.lastfrm = true;
+- hd_info.tail_pad = pad;
+- brcmf_sdio_hdpack(bus, frame, &hd_info);
+-
+- if (bus->txglom)
+- brcmf_sdio_update_hwhdr(frame, len);
+-
+- if (!data_ok(bus)) {
+- brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+- bus->tx_max, bus->tx_seq);
+- bus->ctrl_frame_stat = true;
+- /* Send from dpc */
+- bus->ctrl_frame_buf = frame;
+- bus->ctrl_frame_len = len;
++ if (!data_ok(bus)) {
++ brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
++ bus->tx_max, bus->tx_seq);
++ up(&bus->tx_seq_lock);
++ /* Send from dpc */
++ bus->ctrl_frame_buf = msg;
++ bus->ctrl_frame_len = msglen;
++ bus->ctrl_frame_stat = true;
+
+ wait_event_interruptible_timeout(bus->ctrl_wait,
+ !bus->ctrl_frame_stat,
+@@ -2775,31 +2875,18 @@
+ ret = 0;
+ } else {
+ brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
++ bus->ctrl_frame_stat = false;
++ if (down_interruptible(&bus->tx_seq_lock))
++ return -EINTR;
+ ret = -1;
+ }
+ }
+-
+ if (ret == -1) {
+- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+- frame, len, "Tx Frame:\n");
+- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+- BRCMF_HDRS_ON(),
+- frame, min_t(u16, len, 16), "TxHdr:\n");
+-
+- do {
+- sdio_claim_host(bus->sdiodev->func[1]);
+- ret = brcmf_sdio_tx_frame(bus, frame, len);
+- sdio_release_host(bus->sdiodev->func[1]);
+- } while (ret < 0 && retries++ < TXRETRIES);
+- }
+-
+- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
+- atomic_read(&bus->dpc_tskcnt) == 0) {
+- bus->activity = false;
+ sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_dbg(INFO, "idle\n");
+- brcmf_sdio_clkctl(bus, CLK_NONE, true);
++ brcmf_sdio_bus_sleep(bus, false, false);
++ ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
+ sdio_release_host(bus->sdiodev->func[1]);
++ up(&bus->tx_seq_lock);
+ }
+
+ if (ret)
+@@ -2811,72 +2898,6 @@
+ }
+
+ #ifdef DEBUG
+-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+-{
+- return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+-}
+-
+-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+- struct sdpcm_shared *sh)
+-{
+- u32 addr;
+- int rv;
+- u32 shaddr = 0;
+- struct sdpcm_shared_le sh_le;
+- __le32 addr_le;
+-
+- shaddr = bus->ci->rambase + bus->ramsize - 4;
+-
+- /*
+- * Read last word in socram to determine
+- * address of sdpcm_shared structure
+- */
+- sdio_claim_host(bus->sdiodev->func[1]);
+- brcmf_sdio_bus_sleep(bus, false, false);
+- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+- sdio_release_host(bus->sdiodev->func[1]);
+- if (rv < 0)
+- return rv;
+-
+- addr = le32_to_cpu(addr_le);
+-
+- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+-
+- /*
+- * Check if addr is valid.
+- * NVRAM length at the end of memory should have been overwritten.
+- */
+- if (!brcmf_sdio_valid_shared_address(addr)) {
+- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
+- addr);
+- return -EINVAL;
+- }
+-
+- /* Read hndrte_shared structure */
+- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+- sizeof(struct sdpcm_shared_le));
+- if (rv < 0)
+- return rv;
+-
+- /* Endianness */
+- sh->flags = le32_to_cpu(sh_le.flags);
+- sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+- sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+- sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+- sh->assert_line = le32_to_cpu(sh_le.assert_line);
+- sh->console_addr = le32_to_cpu(sh_le.console_addr);
+- sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+-
+- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+- brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+- SDPCM_SHARED_VERSION,
+- sh->flags & SDPCM_SHARED_VERSION_MASK);
+- return -EPROTO;
+- }
+-
+- return 0;
+-}
+-
+ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh, char __user *data,
+ size_t count)
+@@ -3106,6 +3127,8 @@
+ debugfs_create_file("forensics", S_IRUGO, dentry, bus,
+ &brcmf_sdio_forensic_ops);
+ brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
++ debugfs_create_u32("console_interval", 0644, dentry,
++ &bus->console_interval);
+ }
+ #else
+ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
+@@ -3224,51 +3247,29 @@
+ const struct firmware *fw)
+ {
+ int err;
+- int offset;
+- int address;
+- int len;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+- err = 0;
+- offset = 0;
+- address = bus->ci->rambase;
+- while (offset < fw->size) {
+- len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+- fw->size - offset;
+- err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
+- (u8 *)&fw->data[offset], len);
+- if (err) {
+- brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+- err, len, address);
+- return err;
+- }
+- offset += len;
+- address += len;
+- }
+- if (!err)
+- if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+- (u8 *)fw->data, fw->size))
+- err = -EIO;
++ err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
++ (u8 *)fw->data, fw->size);
++ if (err)
++ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
++ err, (int)fw->size, bus->ci->rambase);
++ else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
++ (u8 *)fw->data, fw->size))
++ err = -EIO;
+
+ return err;
+ }
+
+ static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
+- const struct firmware *nv)
++ void *vars, u32 varsz)
+ {
+- void *vars;
+- u32 varsz;
+ int address;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+- vars = brcmf_nvram_strip(nv, &varsz);
+-
+- if (vars == NULL)
+- return -EINVAL;
+-
+ address = bus->ci->ramsize - varsz + bus->ci->rambase;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
+ if (err)
+@@ -3277,28 +3278,21 @@
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
+ err = -EIO;
+
+- brcmf_nvram_free(vars);
+-
+ return err;
+ }
+
+-static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
++static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
++ const struct firmware *fw,
++ void *nvram, u32 nvlen)
+ {
+ int bcmerror = -EFAULT;
+- const struct firmware *fw;
+ u32 rstvec;
+
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+
+ /* Keep arm in reset */
+- brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+-
+- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
+- if (fw == NULL) {
+- bcmerror = -ENOENT;
+- goto err;
+- }
++ brcmf_chip_enter_download(bus->ci);
+
+ rstvec = get_unaligned_le32(fw->data);
+ brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+@@ -3307,24 +3301,19 @@
+ release_firmware(fw);
+ if (bcmerror) {
+ brcmf_err("dongle image file download failed\n");
++ brcmf_fw_nvram_free(nvram);
+ goto err;
+ }
+
+- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+- if (fw == NULL) {
+- bcmerror = -ENOENT;
+- goto err;
+- }
+-
+- bcmerror = brcmf_sdio_download_nvram(bus, fw);
+- release_firmware(fw);
++ bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
++ brcmf_fw_nvram_free(nvram);
+ if (bcmerror) {
+ brcmf_err("dongle nvram file download failed\n");
+ goto err;
+ }
+
+ /* Take arm out of reset */
+- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
++ if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
+ brcmf_err("error getting out of ARM core reset\n");
+ goto err;
+ }
+@@ -3339,40 +3328,6 @@
+ return bcmerror;
+ }
+
+-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
+-{
+- u32 addr, reg, pmu_cc3_mask = ~0;
+- int err;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- /* old chips with PMU version less than 17 don't support save restore */
+- if (bus->ci->pmurev < 17)
+- return false;
+-
+- switch (bus->ci->chip) {
+- case BCM43241_CHIP_ID:
+- case BCM4335_CHIP_ID:
+- case BCM4339_CHIP_ID:
+- /* read PMU chipcontrol register 3 */
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
+- brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
+- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+- return (reg & pmu_cc3_mask) != 0;
+- default:
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
+- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
+- if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+- return false;
+-
+- addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
+- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+- return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+- PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+- }
+-}
+-
+ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
+ {
+ int err = 0;
+@@ -3424,7 +3379,7 @@
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* KSO bit added in SDIO core rev 12 */
+- if (bus->ci->c_inf[1].rev < 12)
++ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+ return 0;
+
+ val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+@@ -3455,15 +3410,13 @@
+ struct brcmf_sdio *bus = sdiodev->bus;
+ uint pad_size;
+ u32 value;
+- u8 idx;
+ int err;
+
+ /* the commands below use the terms tx and rx from
+ * a device perspective, ie. bus:txglom affects the
+ * bus transfers from device to host.
+ */
+- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+- if (bus->ci->c_inf[idx].rev < 12) {
++ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+ /* for sdio core rev < 12, disable txgloming */
+ value = 0;
+ err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
+@@ -3503,97 +3456,6 @@
+ return err;
+ }
+
+-static int brcmf_sdio_bus_init(struct device *dev)
+-{
+- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+- struct brcmf_sdio *bus = sdiodev->bus;
+- int err, ret = 0;
+- u8 saveclk;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- /* try to download image and nvram to the dongle */
+- if (bus_if->state == BRCMF_BUS_DOWN) {
+- bus->alp_only = true;
+- err = brcmf_sdio_download_firmware(bus);
+- if (err)
+- return err;
+- bus->alp_only = false;
+- }
+-
+- if (!bus->sdiodev->bus_if->drvr)
+- return 0;
+-
+- /* Start the watchdog timer */
+- bus->sdcnt.tickcnt = 0;
+- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+-
+- sdio_claim_host(bus->sdiodev->func[1]);
+-
+- /* Make sure backplane clock is on, needed to generate F2 interrupt */
+- brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+- if (bus->clkstate != CLK_AVAIL)
+- goto exit;
+-
+- /* Force clocks on backplane to be sure F2 interrupt propagates */
+- saveclk = brcmf_sdiod_regrb(bus->sdiodev,
+- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+- if (!err) {
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+- (saveclk | SBSDIO_FORCE_HT), &err);
+- }
+- if (err) {
+- brcmf_err("Failed to force clock for F2: err %d\n", err);
+- goto exit;
+- }
+-
+- /* Enable function 2 (frame transfers) */
+- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+- offsetof(struct sdpcmd_regs, tosbmailboxdata));
+- err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+-
+-
+- brcmf_dbg(INFO, "enable F2: err=%d\n", err);
+-
+- /* If F2 successfully enabled, set core and enable interrupts */
+- if (!err) {
+- /* Set up the interrupt mask and enable interrupts */
+- bus->hostintmask = HOSTINTMASK;
+- w_sdreg32(bus, bus->hostintmask,
+- offsetof(struct sdpcmd_regs, hostintmask));
+-
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
+- } else {
+- /* Disable F2 again */
+- sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+- ret = -ENODEV;
+- }
+-
+- if (brcmf_sdio_sr_capable(bus)) {
+- brcmf_sdio_sr_init(bus);
+- } else {
+- /* Restore previous clock setting */
+- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+- saveclk, &err);
+- }
+-
+- if (ret == 0) {
+- ret = brcmf_sdiod_intr_register(bus->sdiodev);
+- if (ret != 0)
+- brcmf_err("intr register failed:%d\n", ret);
+- }
+-
+- /* If we didn't come up, turn off backplane clock */
+- if (ret != 0)
+- brcmf_sdio_clkctl(bus, CLK_NONE, false);
+-
+-exit:
+- sdio_release_host(bus->sdiodev->func[1]);
+-
+- return ret;
+-}
+-
+ void brcmf_sdio_isr(struct brcmf_sdio *bus)
+ {
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -3714,11 +3576,175 @@
+ datawork);
+
+ while (atomic_read(&bus->dpc_tskcnt)) {
++ atomic_set(&bus->dpc_tskcnt, 0);
+ brcmf_sdio_dpc(bus);
+- atomic_dec(&bus->dpc_tskcnt);
+ }
+ }
+
++static void
++brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
++ struct brcmf_chip *ci, u32 drivestrength)
++{
++ const struct sdiod_drive_str *str_tab = NULL;
++ u32 str_mask;
++ u32 str_shift;
++ u32 base;
++ u32 i;
++ u32 drivestrength_sel = 0;
++ u32 cc_data_temp;
++ u32 addr;
++
++ if (!(ci->cc_caps & CC_CAP_PMU))
++ return;
++
++ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
++ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
++ str_tab = sdiod_drvstr_tab1_1v8;
++ str_mask = 0x00003800;
++ str_shift = 11;
++ break;
++ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
++ str_tab = sdiod_drvstr_tab6_1v8;
++ str_mask = 0x00001800;
++ str_shift = 11;
++ break;
++ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
++ /* note: 43143 does not support tristate */
++ i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
++ if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
++ str_tab = sdiod_drvstr_tab2_3v3;
++ str_mask = 0x00000007;
++ str_shift = 0;
++ } else
++ brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
++ ci->name, drivestrength);
++ break;
++ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
++ str_tab = sdiod_drive_strength_tab5_1v8;
++ str_mask = 0x00003800;
++ str_shift = 11;
++ break;
++ default:
++ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
++ ci->name, ci->chiprev, ci->pmurev);
++ break;
++ }
++
++ if (str_tab != NULL) {
++ for (i = 0; str_tab[i].strength != 0; i++) {
++ if (drivestrength >= str_tab[i].strength) {
++ drivestrength_sel = str_tab[i].sel;
++ break;
++ }
++ }
++ base = brcmf_chip_get_chipcommon(ci)->base;
++ addr = CORE_CC_REG(base, chipcontrol_addr);
++ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
++ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
++ cc_data_temp &= ~str_mask;
++ drivestrength_sel <<= str_shift;
++ cc_data_temp |= drivestrength_sel;
++ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
++
++ brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
++ str_tab[i].strength, drivestrength, cc_data_temp);
++ }
++}
++
++static int brcmf_sdio_buscoreprep(void *ctx)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++ int err = 0;
++ u8 clkval, clkset;
++
++ /* Try forcing SDIO core to do ALPAvail request only */
++ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
++ if (err) {
++ brcmf_err("error writing for HT off\n");
++ return err;
++ }
++
++ /* If register supported, wait for ALPAvail and then force ALP */
++ /* This may take up to 15 milliseconds */
++ clkval = brcmf_sdiod_regrb(sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
++
++ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
++ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
++ clkset, clkval);
++ return -EACCES;
++ }
++
++ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
++ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
++ !SBSDIO_ALPAV(clkval)),
++ PMU_MAX_TRANSITION_DLY);
++ if (!SBSDIO_ALPAV(clkval)) {
++ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
++ clkval);
++ return -EBUSY;
++ }
++
++ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
++ udelay(65);
++
++ /* Also, disable the extra SDIO pull-ups */
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
++
++ return 0;
++}
++
++static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
++ u32 rstvec)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++ struct brcmf_core *core;
++ u32 reg_addr;
++
++ /* clear all interrupts */
++ core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
++ reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
++ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
++
++ if (rstvec)
++ /* Write reset vector to address 0 */
++ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
++ sizeof(rstvec));
++}
++
++static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++ u32 val, rev;
++
++ val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
++ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
++ addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
++ rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
++ if (rev >= 2) {
++ val &= ~CID_ID_MASK;
++ val |= BCM4339_CHIP_ID;
++ }
++ }
++ return val;
++}
++
++static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
++{
++ struct brcmf_sdio_dev *sdiodev = ctx;
++
++ brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
++}
++
++static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
++ .prepare = brcmf_sdio_buscoreprep,
++ .exit_dl = brcmf_sdio_buscore_exitdl,
++ .read32 = brcmf_sdio_buscore_read32,
++ .write32 = brcmf_sdio_buscore_write32,
++};
++
+ static bool
+ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
+ {
+@@ -3734,7 +3760,7 @@
+ brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+
+ /*
+- * Force PLL off until brcmf_sdio_chip_attach()
++ * Force PLL off until brcmf_chip_attach()
+ * programs PLL control regs
+ */
+
+@@ -3755,8 +3781,10 @@
+ */
+ brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
+- brcmf_err("brcmf_sdio_chip_attach failed!\n");
++ bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
++ if (IS_ERR(bus->ci)) {
++ brcmf_err("brcmf_chip_attach failed!\n");
++ bus->ci = NULL;
+ goto fail;
+ }
+
+@@ -3769,7 +3797,7 @@
+ drivestrength = bus->sdiodev->pdata->drive_strength;
+ else
+ drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
+- brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
++ brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+
+ /* Get info on the SOCRAM cores... */
+ bus->ramsize = bus->ci->ramsize;
+@@ -3792,24 +3820,18 @@
+ goto fail;
+
+ /* set PMUControl so a backplane reset does PMU state reload */
+- reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
++ reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
+ pmucontrol);
+- reg_val = brcmf_sdiod_regrl(bus->sdiodev,
+- reg_addr,
+- &err);
++ reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
+ if (err)
+ goto fail;
+
+ reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
+
+- brcmf_sdiod_regwl(bus->sdiodev,
+- reg_addr,
+- reg_val,
+- &err);
++ brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
+ if (err)
+ goto fail;
+
+-
+ sdio_release_host(bus->sdiodev->func[1]);
+
+ brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+@@ -3849,6 +3871,7 @@
+ brcmf_sdio_bus_watchdog(bus);
+ /* Count the tick for reference */
+ bus->sdcnt.tickcnt++;
++ reinit_completion(&bus->watchdog_wait);
+ } else
+ break;
+ }
+@@ -3872,13 +3895,114 @@
+ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
+ .stop = brcmf_sdio_bus_stop,
+ .preinit = brcmf_sdio_bus_preinit,
+- .init = brcmf_sdio_bus_init,
+ .txdata = brcmf_sdio_bus_txdata,
+ .txctl = brcmf_sdio_bus_txctl,
+ .rxctl = brcmf_sdio_bus_rxctl,
+ .gettxq = brcmf_sdio_bus_gettxq,
+ };
+
++static void brcmf_sdio_firmware_callback(struct device *dev,
++ const struct firmware *code,
++ void *nvram, u32 nvram_len)
++{
++ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
++ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ struct brcmf_sdio *bus = sdiodev->bus;
++ int err = 0;
++ u8 saveclk;
++
++ brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
++
++ /* try to download image and nvram to the dongle */
++ if (bus_if->state == BRCMF_BUS_DOWN) {
++ bus->alp_only = true;
++ err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
++ if (err)
++ goto fail;
++ bus->alp_only = false;
++ }
++
++ if (!bus_if->drvr)
++ return;
++
++ /* Start the watchdog timer */
++ bus->sdcnt.tickcnt = 0;
++ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
++
++ sdio_claim_host(sdiodev->func[1]);
++
++ /* Make sure backplane clock is on, needed to generate F2 interrupt */
++ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
++ if (bus->clkstate != CLK_AVAIL)
++ goto release;
++
++ /* Force clocks on backplane to be sure F2 interrupt propagates */
++ saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
++ if (!err) {
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
++ (saveclk | SBSDIO_FORCE_HT), &err);
++ }
++ if (err) {
++ brcmf_err("Failed to force clock for F2: err %d\n", err);
++ goto release;
++ }
++
++ /* Enable function 2 (frame transfers) */
++ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
++ offsetof(struct sdpcmd_regs, tosbmailboxdata));
++ err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
++
++ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
++
++ /* If F2 successfully enabled, set core and enable interrupts */
++ if (!err) {
++ /* Set up the interrupt mask and enable interrupts */
++ bus->hostintmask = HOSTINTMASK;
++ w_sdreg32(bus, bus->hostintmask,
++ offsetof(struct sdpcmd_regs, hostintmask));
++
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
++ } else {
++ /* Disable F2 again */
++ sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
++ goto release;
++ }
++
++ if (brcmf_chip_sr_capable(bus->ci)) {
++ brcmf_sdio_sr_init(bus);
++ } else {
++ /* Restore previous clock setting */
++ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
++ saveclk, &err);
++ }
++
++ if (err == 0) {
++ err = brcmf_sdiod_intr_register(sdiodev);
++ if (err != 0)
++ brcmf_err("intr register failed:%d\n", err);
++ }
++
++ /* If we didn't come up, turn off backplane clock */
++ if (err != 0)
++ brcmf_sdio_clkctl(bus, CLK_NONE, false);
++
++ sdio_release_host(sdiodev->func[1]);
++
++ err = brcmf_bus_start(dev);
++ if (err != 0) {
++ brcmf_err("dongle is not responding\n");
++ goto fail;
++ }
++ return;
++
++release:
++ sdio_release_host(sdiodev->func[1]);
++fail:
++ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
++ device_release_driver(dev);
++}
++
+ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+ {
+ int ret;
+@@ -3925,7 +4049,8 @@
+ }
+
+ spin_lock_init(&bus->rxctl_lock);
+- spin_lock_init(&bus->txqlock);
++ spin_lock_init(&bus->txq_lock);
++ sema_init(&bus->tx_seq_lock, 1);
+ init_waitqueue_head(&bus->ctrl_wait);
+ init_waitqueue_head(&bus->dcmd_resp_wait);
+
+@@ -3961,8 +4086,13 @@
+ goto fail;
+ }
+
++ /* Query the F2 block size, set roundup accordingly */
++ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
++ bus->roundup = min(max_roundup, bus->blocksize);
++
+ /* Allocate buffers */
+ if (bus->sdiodev->bus_if->maxctl) {
++ bus->sdiodev->bus_if->maxctl += bus->roundup;
+ bus->rxblen =
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + bus->head_align;
+@@ -3990,10 +4120,6 @@
+ bus->idletime = BRCMF_IDLE_INTERVAL;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+- /* Query the F2 block size, set roundup accordingly */
+- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+- bus->roundup = min(max_roundup, bus->blocksize);
+-
+ /* SR state */
+ bus->sleeping = false;
+ bus->sr_enabled = false;
+@@ -4001,10 +4127,14 @@
+ brcmf_sdio_debugfs_create(bus);
+ brcmf_dbg(INFO, "completed!!\n");
+
+- /* if firmware path present try to download and bring up bus */
+- ret = brcmf_bus_start(bus->sdiodev->dev);
++ ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
++ brcmf_sdio_get_fwname(bus->ci,
++ BRCMF_FIRMWARE_BIN),
++ brcmf_sdio_get_fwname(bus->ci,
++ BRCMF_FIRMWARE_NVRAM),
++ brcmf_sdio_firmware_callback);
+ if (ret != 0) {
+- brcmf_err("dongle is not responding\n");
++ brcmf_err("async firmware request failed: %d\n", ret);
+ goto fail;
+ }
+
+@@ -4024,14 +4154,12 @@
+ /* De-register interrupt handler */
+ brcmf_sdiod_intr_unregister(bus->sdiodev);
+
++ brcmf_detach(bus->sdiodev->dev);
++
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
+- if (bus->sdiodev->bus_if->drvr) {
+- brcmf_detach(bus->sdiodev->dev);
+- }
+-
+ if (bus->ci) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+@@ -4042,12 +4170,11 @@
+ * all necessary cores.
+ */
+ msleep(20);
+- brcmf_sdio_chip_enter_download(bus->sdiodev,
+- bus->ci);
++ brcmf_chip_enter_download(bus->ci);
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
+ }
+- brcmf_sdio_chip_detach(&bus->ci);
++ brcmf_chip_detach(bus->ci);
+ }
+
+ kfree(bus->rxbuf);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/firmware.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/firmware.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/firmware.c 2015-05-01 14:58:04.019427001 -0500
+@@ -0,0 +1,332 @@
++/*
++ * Copyright (c) 2013 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/firmware.h>
++
++#include "dhd_dbg.h"
++#include "firmware.h"
++
++enum nvram_parser_state {
++ IDLE,
++ KEY,
++ VALUE,
++ COMMENT,
++ END
++};
++
++/**
++ * struct nvram_parser - internal info for parser.
++ *
++ * @state: current parser state.
++ * @fwnv: input buffer being parsed.
++ * @nvram: output buffer with parse result.
++ * @nvram_len: length of parse result.
++ * @line: current line.
++ * @column: current column in line.
++ * @pos: byte offset in input buffer.
++ * @entry: start position of key,value entry.
++ */
++struct nvram_parser {
++ enum nvram_parser_state state;
++ const struct firmware *fwnv;
++ u8 *nvram;
++ u32 nvram_len;
++ u32 line;
++ u32 column;
++ u32 pos;
++ u32 entry;
++};
++
++static bool is_nvram_char(char c)
++{
++ /* comment marker excluded */
++ if (c == '#')
++ return false;
++
++ /* key and value may have any other readable character */
++ return (c > 0x20 && c < 0x7f);
++}
++
++static bool is_whitespace(char c)
++{
++ return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
++}
++
++static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
++{
++ char c;
++
++ c = nvp->fwnv->data[nvp->pos];
++ if (c == '\n')
++ return COMMENT;
++ if (is_whitespace(c))
++ goto proceed;
++ if (c == '#')
++ return COMMENT;
++ if (is_nvram_char(c)) {
++ nvp->entry = nvp->pos;
++ return KEY;
++ }
++ brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
++ nvp->line, nvp->column);
++proceed:
++ nvp->column++;
++ nvp->pos++;
++ return IDLE;
++}
++
++static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
++{
++ enum nvram_parser_state st = nvp->state;
++ char c;
++
++ c = nvp->fwnv->data[nvp->pos];
++ if (c == '=') {
++ st = VALUE;
++ } else if (!is_nvram_char(c)) {
++ brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
++ nvp->line, nvp->column);
++ return COMMENT;
++ }
++
++ nvp->column++;
++ nvp->pos++;
++ return st;
++}
++
++static enum nvram_parser_state
++brcmf_nvram_handle_value(struct nvram_parser *nvp)
++{
++ char c;
++ char *skv;
++ char *ekv;
++ u32 cplen;
++
++ c = nvp->fwnv->data[nvp->pos];
++ if (!is_nvram_char(c)) {
++ /* key,value pair complete */
++ ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
++ skv = (u8 *)&nvp->fwnv->data[nvp->entry];
++ cplen = ekv - skv;
++ /* copy to output buffer */
++ memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
++ nvp->nvram_len += cplen;
++ nvp->nvram[nvp->nvram_len] = '\0';
++ nvp->nvram_len++;
++ return IDLE;
++ }
++ nvp->pos++;
++ nvp->column++;
++ return VALUE;
++}
++
++static enum nvram_parser_state
++brcmf_nvram_handle_comment(struct nvram_parser *nvp)
++{
++ char *eol, *sol;
++
++ sol = (char *)&nvp->fwnv->data[nvp->pos];
++ eol = strchr(sol, '\n');
++ if (eol == NULL)
++ return END;
++
++	/* consume the rest of this line and move to the next one */
++ nvp->line++;
++ nvp->column = 1;
++ nvp->pos += (eol - sol) + 1;
++ return IDLE;
++}
++
++static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
++{
++ /* final state */
++ return END;
++}
++
++static enum nvram_parser_state
++(*nv_parser_states[])(struct nvram_parser *nvp) = {
++ brcmf_nvram_handle_idle,
++ brcmf_nvram_handle_key,
++ brcmf_nvram_handle_value,
++ brcmf_nvram_handle_comment,
++ brcmf_nvram_handle_end
++};
++
++static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
++ const struct firmware *nv)
++{
++ memset(nvp, 0, sizeof(*nvp));
++ nvp->fwnv = nv;
++ /* Alloc for extra 0 byte + roundup by 4 + length field */
++ nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
++ if (!nvp->nvram)
++ return -ENOMEM;
++
++ nvp->line = 1;
++ nvp->column = 1;
++ return 0;
++}
++
++/* brcmf_fw_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
++ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
++ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
++ * End of buffer is completed with token identifying length of buffer.
++ */
++static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
++{
++ struct nvram_parser nvp;
++ u32 pad;
++ u32 token;
++ __le32 token_le;
++
++ if (brcmf_init_nvram_parser(&nvp, nv) < 0)
++ return NULL;
++
++ while (nvp.pos < nv->size) {
++ nvp.state = nv_parser_states[nvp.state](&nvp);
++ if (nvp.state == END)
++ break;
++ }
++ pad = nvp.nvram_len;
++ *new_length = roundup(nvp.nvram_len + 1, 4);
++ while (pad != *new_length) {
++ nvp.nvram[pad] = 0;
++ pad++;
++ }
++
++ token = *new_length / 4;
++ token = (~token << 16) | (token & 0x0000FFFF);
++ token_le = cpu_to_le32(token);
++
++ memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
++ *new_length += sizeof(token_le);
++
++ return nvp.nvram;
++}
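For reference, a minimal user-space sketch (not part of the patch) of the length-token arithmetic applied above; the boardtype/boardrev entries are made-up sample values:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t nvram_end_token(uint32_t padded_len)
	{
		uint32_t words = padded_len / 4;

		/* upper 16 bits: one's complement of the word count, lower 16 bits: the count */
		return (~words << 16) | (words & 0x0000FFFF);
	}

	int main(void)
	{
		/*
		 * "boardtype=0x062b\0" (17 bytes) + "boardrev=0x1203\0" (16 bytes)
		 * gives nvram_len = 33, padded to roundup(33 + 1, 4) = 36 bytes.
		 */
		printf("token = 0x%08x\n", (unsigned int)nvram_end_token(36));
		/* prints "token = 0xfff60009"; the 4-byte token is then appended */
		return 0;
	}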
++
++void brcmf_fw_nvram_free(void *nvram)
++{
++ kfree(nvram);
++}
++
++struct brcmf_fw {
++ struct device *dev;
++ u16 flags;
++ const struct firmware *code;
++ const char *nvram_name;
++ void (*done)(struct device *dev, const struct firmware *fw,
++ void *nvram_image, u32 nvram_len);
++};
++
++static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
++{
++ struct brcmf_fw *fwctx = ctx;
++ u32 nvram_length = 0;
++ void *nvram = NULL;
++
++ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
++ if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
++ goto fail;
++
++ if (fw) {
++ nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
++ release_firmware(fw);
++ if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
++ goto fail;
++ }
++
++ fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
++ kfree(fwctx);
++ return;
++
++fail:
++ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
++ if (fwctx->code)
++ release_firmware(fwctx->code);
++ device_release_driver(fwctx->dev);
++ kfree(fwctx);
++}
++
++static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
++{
++ struct brcmf_fw *fwctx = ctx;
++ int ret;
++
++ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
++ if (!fw)
++ goto fail;
++
++ /* only requested code so done here */
++ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
++ fwctx->done(fwctx->dev, fw, NULL, 0);
++ kfree(fwctx);
++ return;
++ }
++ fwctx->code = fw;
++ ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
++ fwctx->dev, GFP_KERNEL, fwctx,
++ brcmf_fw_request_nvram_done);
++
++ if (!ret)
++ return;
++
++ /* when nvram is optional call .done() callback here */
++ if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
++ fwctx->done(fwctx->dev, fw, NULL, 0);
++ kfree(fwctx);
++ return;
++ }
++
++ /* failed nvram request */
++ release_firmware(fw);
++fail:
++ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
++ device_release_driver(fwctx->dev);
++ kfree(fwctx);
++}
++
++int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len))
++{
++ struct brcmf_fw *fwctx;
++
++ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
++ if (!fw_cb || !code)
++ return -EINVAL;
++
++ if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
++ return -EINVAL;
++
++ fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
++ if (!fwctx)
++ return -ENOMEM;
++
++ fwctx->dev = dev;
++ fwctx->flags = flags;
++ fwctx->done = fw_cb;
++ if (flags & BRCMF_FW_REQUEST_NVRAM)
++ fwctx->nvram_name = nvram;
++
++ return request_firmware_nowait(THIS_MODULE, true, code, dev,
++ GFP_KERNEL, fwctx,
++ brcmf_fw_request_code_done);
++}
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/firmware.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/firmware.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/firmware.h 2015-05-01 14:58:04.019427001 -0500
+@@ -0,0 +1,36 @@
++/*
++ * Copyright (c) 2013 Broadcom Corporation
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#ifndef BRCMFMAC_FIRMWARE_H
++#define BRCMFMAC_FIRMWARE_H
++
++#define BRCMF_FW_REQUEST 0x000F
++#define BRCMF_FW_REQUEST_NVRAM 0x0001
++#define BRCMF_FW_REQ_FLAGS 0x00F0
++#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010
++
++void brcmf_fw_nvram_free(void *nvram);
++/*
++ * Request firmware(s) asynchronously. When the asynchronous request
++ * fails it will not use the callback, but call device_release_driver()
++ * instead which will call the driver .remove() callback.
++ */
++int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len));
++
++#endif /* BRCMFMAC_FIRMWARE_H */
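As a rough usage sketch (not part of the patch): a bus driver is expected to call brcmf_fw_get_firmwares() from its probe path and finish bring-up in the callback, which is what the dhd_sdio.c hunk earlier in this patch now does. The firmware file names, my_fw_callback() and my_download_to_dongle() below are made-up placeholders:

	static void my_fw_callback(struct device *dev, const struct firmware *code,
				   void *nvram, u32 nvram_len)
	{
		int err;

		/* hypothetical helper that pushes both images to the dongle */
		err = my_download_to_dongle(dev, code, nvram, nvram_len);
		release_firmware(code);		/* the callback owns the code image */
		brcmf_fw_nvram_free(nvram);
		if (err)
			dev_err(dev, "firmware download failed: %d\n", err);
	}

	static int my_bus_probe(struct device *dev)
	{
		/* request both images asynchronously; when the request fails the
		 * core calls device_release_driver(dev) instead of the callback */
		return brcmf_fw_get_firmwares(dev, BRCMF_FW_REQUEST_NVRAM,
					      "brcm/example-fw.bin",
					      "brcm/example-nvram.txt",
					      my_fw_callback);
	}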
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwil.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwil.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwil.c 2015-05-01 14:58:04.027427001 -0500
+@@ -54,7 +54,7 @@
+ if (err >= 0)
+ err = 0;
+ else
+- brcmf_err("Failed err=%d\n", err);
++ brcmf_dbg(FIL, "Failed err=%d\n", err);
+
+ return err;
+ }
+@@ -124,7 +124,8 @@
+ }
+
+ static u32
+-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
++brcmf_create_iovar(char *name, const char *data, u32 datalen,
++ char *buf, u32 buflen)
+ {
+ u32 len;
+
+@@ -144,7 +145,7 @@
+
+
+ s32
+-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
++brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+ u32 len)
+ {
+ struct brcmf_pub *drvr = ifp->drvr;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwil.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwil.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwil.h 2015-05-01 14:58:04.039427001 -0500
+@@ -83,7 +83,7 @@
+ s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+ s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+
+-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
++s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+ u32 len);
+ s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h 2015-05-01 14:58:04.039427001 -0500
+@@ -48,6 +48,19 @@
+
+ #define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
+
++/* OBSS Coex Auto/On/Off */
++#define BRCMF_OBSS_COEX_AUTO (-1)
++#define BRCMF_OBSS_COEX_OFF 0
++#define BRCMF_OBSS_COEX_ON 1
++
++/* join preference types for join_pref iovar */
++enum brcmf_join_pref_types {
++ BRCMF_JOIN_PREF_RSSI = 1,
++ BRCMF_JOIN_PREF_WPA,
++ BRCMF_JOIN_PREF_BAND,
++ BRCMF_JOIN_PREF_RSSI_DELTA,
++};
++
+ enum brcmf_fil_p2p_if_types {
+ BRCMF_FIL_P2P_IF_CLIENT,
+ BRCMF_FIL_P2P_IF_GO,
+@@ -87,6 +100,11 @@
+ __le32 enable;
+ };
+
++struct brcmf_fil_bwcap_le {
++ __le32 band;
++ __le32 bw_cap;
++};
++
+ /**
+ * struct tdls_iovar - common structure for tdls iovars.
+ *
+@@ -272,6 +290,22 @@
+ __le16 chanspec_list[1];
+ };
+
++/**
++ * struct brcmf_join_pref_params - parameters for preferred join selection.
++ *
++ * @type: preference type (see enum brcmf_join_pref_types).
++ * @len: length of bytes following (currently always 2).
++ * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
++ * @band: band to which selection preference applies.
++ * This is used if @type is BAND or RSSI_DELTA.
++ */
++struct brcmf_join_pref_params {
++ u8 type;
++ u8 len;
++ u8 rssi_gain;
++ u8 band;
++};
++
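A hedged sketch of how these definitions could be used to program a join preference list through the "join_pref" iovar (using the const-qualified brcmf_fil_iovar_data_set() prototype introduced by this patch); WLC_BAND_5G and the two-entry ordering are illustrative assumptions, not taken from this patch:

	static int example_set_join_pref(struct brcmf_if *ifp)
	{
		struct brcmf_join_pref_params join_pref[2];

		/* prefer the 5 GHz band first ... */
		join_pref[0].type = BRCMF_JOIN_PREF_BAND;
		join_pref[0].len = 2;
		join_pref[0].rssi_gain = 0;
		join_pref[0].band = WLC_BAND_5G;

		/* ... then fall back to plain RSSI ordering */
		join_pref[1].type = BRCMF_JOIN_PREF_RSSI;
		join_pref[1].len = 2;
		join_pref[1].rssi_gain = 0;
		join_pref[1].band = 0;

		return brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref,
						sizeof(join_pref));
	}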
+ /* used for join with or without a specific bssid and channel list */
+ struct brcmf_join_params {
+ struct brcmf_ssid_le ssid_le;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c 2015-05-01 14:58:04.039427001 -0500
+@@ -476,6 +476,7 @@
+ bool bus_flow_blocked;
+ bool creditmap_received;
+ u8 mode;
++ bool avoid_queueing;
+ };
+
+ /*
+@@ -1369,13 +1370,12 @@
+ }
+
+ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
+- struct sk_buff *skb, u32 genbit,
+- u16 seq)
++ struct sk_buff *skb, u8 ifidx,
++ u32 genbit, u16 seq)
+ {
+ struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+ u32 hslot;
+ int ret;
+- u8 ifidx;
+
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+
+@@ -1389,29 +1389,21 @@
+
+ entry->generation = genbit;
+
+- ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
+- if (ret == 0) {
+- brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+- brcmf_skbcb(skb)->htod_seq = seq;
+- if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+- brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+- } else {
+- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+- }
+- ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
+- skb);
++ brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
++ brcmf_skbcb(skb)->htod_seq = seq;
++ if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
++ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
++ brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
++ } else {
++ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+ }
++ ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
+
+ if (ret != 0) {
+- /* suppress q is full or hdrpull failed, drop this packet */
+- brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+- true);
++		/* suppress q is full, drop this packet */
++ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
+ } else {
+- /*
+- * Mark suppressed to avoid a double free during
+- * wlfc cleanup
+- */
++ /* Mark suppressed to avoid a double free during wlfc cleanup */
+ brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
+ }
+
+@@ -1428,6 +1420,7 @@
+ struct sk_buff *skb;
+ struct brcmf_skbuff_cb *skcb;
+ struct brcmf_fws_mac_descriptor *entry = NULL;
++ u8 ifidx;
+
+ brcmf_dbg(DATA, "flags %d\n", flags);
+
+@@ -1476,12 +1469,15 @@
+ }
+ brcmf_fws_macdesc_return_req_credit(skb);
+
++ if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
++ brcmu_pkt_buf_free_skb(skb);
++ return -EINVAL;
++ }
+ if (!remove_from_hanger)
+- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
+- seq);
+-
++ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
++ genbit, seq);
+ if (remove_from_hanger || ret)
+- brcmf_txfinalize(fws->drvr, skb, true);
++ brcmf_txfinalize(fws->drvr, skb, ifidx, true);
+
+ return 0;
+ }
+@@ -1868,7 +1864,7 @@
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ int fifo = BRCMF_FWS_FIFO_BCMC;
+ bool multicast = is_multicast_ether_addr(eh->h_dest);
+- bool pae = eh->h_proto == htons(ETH_P_PAE);
++ int rc = 0;
+
+ brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
+ /* determine the priority */
+@@ -1876,8 +1872,13 @@
+ skb->priority = cfg80211_classify8021d(skb, NULL);
+
+ drvr->tx_multicast += !!multicast;
+- if (pae)
+- atomic_inc(&ifp->pend_8021x_cnt);
++
++ if (fws->avoid_queueing) {
++ rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
++ if (rc < 0)
++ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
++ return rc;
++ }
+
+ /* set control buffer information */
+ skcb->if_flags = 0;
+@@ -1899,15 +1900,12 @@
+ brcmf_fws_schedule_deq(fws);
+ } else {
+ brcmf_err("drop skb: no hanger slot\n");
+- if (pae) {
+- atomic_dec(&ifp->pend_8021x_cnt);
+- if (waitqueue_active(&ifp->pend_8021x_wait))
+- wake_up(&ifp->pend_8021x_wait);
+- }
+- brcmu_pkt_buf_free_skb(skb);
++ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
++ rc = -ENOMEM;
+ }
+ brcmf_fws_unlock(fws);
+- return 0;
++
++ return rc;
+ }
+
+ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
+@@ -1982,7 +1980,8 @@
+ ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
+ brcmf_fws_lock(fws);
+ if (ret < 0)
+- brcmf_txfinalize(drvr, skb, false);
++ brcmf_txfinalize(drvr, skb, ifidx,
++ false);
+ if (fws->bus_flow_blocked)
+ break;
+ }
+@@ -2039,6 +2038,13 @@
+ fws->drvr = drvr;
+ fws->fcmode = fcmode;
+
++ if ((drvr->bus_if->always_use_fws_queue == false) &&
++ (fcmode == BRCMF_FWS_FCMODE_NONE)) {
++ fws->avoid_queueing = true;
++ brcmf_dbg(INFO, "FWS queueing will be avoided\n");
++ return 0;
++ }
++
+ fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
+ if (fws->fws_wq == NULL) {
+ brcmf_err("workqueue creation failed\n");
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/Makefile linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/Makefile 2015-05-01 14:58:04.043427001 -0500
+@@ -24,6 +24,7 @@
+ obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
+ brcmfmac-objs += \
+ wl_cfg80211.o \
++ chip.o \
+ fwil.o \
+ fweh.o \
+ fwsignal.o \
+@@ -32,12 +33,11 @@
+ bcdc.o \
+ dhd_common.o \
+ dhd_linux.o \
+- nvram.o \
++ firmware.o \
+ btcoex.o
+ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+ dhd_sdio.o \
+- bcmsdh.o \
+- sdio_chip.o
++ bcmsdh.o
+ brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
+ usb.o
+ brcmfmac-$(CONFIG_BRCMDBG) += \
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/nvram.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/nvram.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/nvram.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,94 +0,0 @@
+-/*
+- * Copyright (c) 2013 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/slab.h>
+-#include <linux/firmware.h>
+-
+-#include "nvram.h"
+-
+-/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
+- * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+- * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+- * End of buffer is completed with token identifying length of buffer.
+- */
+-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
+-{
+- u8 *nvram;
+- u32 i;
+- u32 len;
+- u32 column;
+- u8 val;
+- bool comment;
+- u32 token;
+- __le32 token_le;
+-
+- /* Alloc for extra 0 byte + roundup by 4 + length field */
+- nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
+- if (!nvram)
+- return NULL;
+-
+- len = 0;
+- column = 0;
+- comment = false;
+- for (i = 0; i < nv->size; i++) {
+- val = nv->data[i];
+- if (val == 0)
+- break;
+- if (val == '\r')
+- continue;
+- if (comment && (val != '\n'))
+- continue;
+- comment = false;
+- if (val == '#') {
+- comment = true;
+- continue;
+- }
+- if (val == '\n') {
+- if (column == 0)
+- continue;
+- nvram[len] = 0;
+- len++;
+- column = 0;
+- continue;
+- }
+- nvram[len] = val;
+- len++;
+- column++;
+- }
+- column = len;
+- *new_length = roundup(len + 1, 4);
+- while (column != *new_length) {
+- nvram[column] = 0;
+- column++;
+- }
+-
+- token = *new_length / 4;
+- token = (~token << 16) | (token & 0x0000FFFF);
+- token_le = cpu_to_le32(token);
+-
+- memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
+- *new_length += sizeof(token_le);
+-
+- return nvram;
+-}
+-
+-void brcmf_nvram_free(void *nvram)
+-{
+- kfree(nvram);
+-}
+-
+-
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/nvram.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/nvram.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/nvram.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,24 +0,0 @@
+-/*
+- * Copyright (c) 2013 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-#ifndef BRCMFMAC_NVRAM_H
+-#define BRCMFMAC_NVRAM_H
+-
+-
+-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
+-void brcmf_nvram_free(void *nvram);
+-
+-
+-#endif /* BRCMFMAC_NVRAM_H */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/p2p.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/p2p.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/p2p.c 2015-05-01 14:58:04.047427001 -0500
+@@ -797,7 +797,8 @@
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+- } else if (dev != NULL && vif->mode == WL_MODE_AP) {
++ } else if (dev != NULL &&
++ vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+ /* If you are already a GO, then do SEARCH only */
+ brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+ search_state = WL_P2P_DISC_ST_SEARCH;
+@@ -2256,7 +2257,6 @@
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ enum brcmf_fil_p2p_if_types iftype;
+- enum wl_mode mode;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+@@ -2267,11 +2267,9 @@
+ switch (type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ iftype = BRCMF_FIL_P2P_IF_CLIENT;
+- mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ iftype = BRCMF_FIL_P2P_IF_GO;
+- mode = WL_MODE_AP;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c 1969-12-31 18:00:00.000000000 -0600
+@@ -1,973 +0,0 @@
+-/*
+- * Copyright (c) 2011 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-/* ***** SDIO interface chip backplane handle functions ***** */
+-
+-#include <linux/types.h>
+-#include <linux/netdevice.h>
+-#include <linux/mmc/card.h>
+-#include <linux/mmc/sdio_func.h>
+-#include <linux/mmc/sdio_ids.h>
+-#include <linux/ssb/ssb_regs.h>
+-#include <linux/bcma/bcma.h>
+-
+-#include <chipcommon.h>
+-#include <brcm_hw_ids.h>
+-#include <brcmu_wifi.h>
+-#include <brcmu_utils.h>
+-#include <soc.h>
+-#include "dhd_dbg.h"
+-#include "sdio_host.h"
+-#include "sdio_chip.h"
+-
+-/* chip core base & ramsize */
+-/* bcm4329 */
+-/* SDIO device core, ID 0x829 */
+-#define BCM4329_CORE_BUS_BASE 0x18011000
+-/* internal memory core, ID 0x80e */
+-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+-/* ARM Cortex M3 core, ID 0x82a */
+-#define BCM4329_CORE_ARM_BASE 0x18002000
+-#define BCM4329_RAMSIZE 0x48000
+-
+-/* bcm43143 */
+-/* SDIO device core */
+-#define BCM43143_CORE_BUS_BASE 0x18002000
+-/* internal memory core */
+-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
+-/* ARM Cortex M3 core, ID 0x82a */
+-#define BCM43143_CORE_ARM_BASE 0x18003000
+-#define BCM43143_RAMSIZE 0x70000
+-
+-/* All D11 cores, ID 0x812 */
+-#define BCM43xx_CORE_D11_BASE 0x18001000
+-
+-#define SBCOREREV(sbidh) \
+- ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+- ((sbidh) & SSB_IDHIGH_RCLO))
+-
+-/* SOC Interconnect types (aka chip types) */
+-#define SOCI_SB 0
+-#define SOCI_AI 1
+-
+-/* EROM CompIdentB */
+-#define CIB_REV_MASK 0xff000000
+-#define CIB_REV_SHIFT 24
+-
+-/* ARM CR4 core specific control flag bits */
+-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+-
+-/* D11 core specific control flag bits */
+-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+-#define D11_BCMA_IOCTL_PHYRESET 0x0008
+-
+-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+-/* SDIO Pad drive strength to select value mappings */
+-struct sdiod_drive_str {
+- u8 strength; /* Pad Drive Strength in mA */
+- u8 sel; /* Chip-specific select value */
+-};
+-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+- {32, 0x6},
+- {26, 0x7},
+- {22, 0x4},
+- {16, 0x5},
+- {12, 0x2},
+- {8, 0x3},
+- {4, 0x0},
+- {0, 0x1}
+-};
+-
+-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+- {6, 0x7},
+- {5, 0x6},
+- {4, 0x5},
+- {3, 0x4},
+- {2, 0x2},
+- {1, 0x1},
+- {0, 0x0}
+-};
+-
+-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+- {3, 0x3},
+- {2, 0x2},
+- {1, 0x1},
+- {0, 0x0} };
+-
+-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+- {16, 0x7},
+- {12, 0x5},
+- {8, 0x3},
+- {4, 0x1}
+-};
+-
+-u8
+-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
+-{
+- u8 idx;
+-
+- for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+- if (coreid == ci->c_inf[idx].id)
+- return idx;
+-
+- return BRCMF_MAX_CORENUM;
+-}
+-
+-static u32
+-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u32 regdata;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbidhigh),
+- NULL);
+- return SBCOREREV(regdata);
+-}
+-
+-static u32
+-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+-
+- return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+-}
+-
+-static bool
+-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u32 regdata;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return false;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+- SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+- return (SSB_TMSLOW_CLOCK == regdata);
+-}
+-
+-static bool
+-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid)
+-{
+- u32 regdata;
+- u8 idx;
+- bool ret;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return false;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+- NULL);
+- ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+- NULL);
+- ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+-
+- return ret;
+-}
+-
+-static void
+-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits)
+-{
+- u32 regdata, base;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- base = ci->c_inf[idx].base;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+- if (regdata & SSB_TMSLOW_RESET)
+- return;
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+- if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+- /*
+- * set target reject and spin until busy is clear
+- * (preserve core-specific bits)
+- */
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatelow), NULL);
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+- regdata | SSB_TMSLOW_REJECT, NULL);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatelow), NULL);
+- udelay(1);
+- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatehigh),
+- NULL) &
+- SSB_TMSHIGH_BUSY), 100000);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatehigh),
+- NULL);
+- if (regdata & SSB_TMSHIGH_BUSY)
+- brcmf_err("core state still busy\n");
+-
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+- NULL);
+- if (regdata & SSB_IDLOW_INITIATOR) {
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL);
+- regdata |= SSB_IMSTATE_REJECT;
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+- regdata, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL);
+- udelay(1);
+- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL) &
+- SSB_IMSTATE_BUSY), 100000);
+- }
+-
+- /* set reset and reject while enabling the clocks */
+- regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+- regdata, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbtmstatelow), NULL);
+- udelay(10);
+-
+- /* clear the initiator reject bit */
+- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+- NULL);
+- if (regdata & SSB_IDLOW_INITIATOR) {
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(base, sbimstate),
+- NULL);
+- regdata &= ~SSB_IMSTATE_REJECT;
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+- regdata, NULL);
+- }
+- }
+-
+- /* leave reset and reject asserted */
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
+- udelay(1);
+-}
+-
+-static void
+-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits)
+-{
+- u8 idx;
+- u32 regdata;
+- u32 wrapbase;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- wrapbase = ci->c_inf[idx].wrapbase;
+-
+- /* if core is already in reset, skip reset */
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
+- if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+- goto post_reset_config;
+-
+- /* configure reset */
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+-
+- /* put in reset */
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
+- BCMA_RESET_CTL_RESET, NULL);
+- usleep_range(10, 20);
+-
+- /* wait till reset is 1 */
+- SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
+- BCMA_RESET_CTL_RESET, 300);
+-
+-post_reset_config:
+- /* post reset configure */
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+-}
+-
+-static void
+-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits, u32 post_resetbits)
+-{
+- u32 regdata;
+- u8 idx;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- /*
+- * Must do the disable sequence first to work for
+- * arbitrary current core state.
+- */
+- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
+- in_resetbits);
+-
+- /*
+- * Now do the initialization sequence.
+- * set reset while enabling the clock and
+- * forcing them on throughout the core
+- */
+- brcmf_sdiod_regwl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+- NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- udelay(1);
+-
+- /* clear any serror */
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+- NULL);
+- if (regdata & SSB_TMSHIGH_SERR)
+- brcmf_sdiod_regwl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+- 0, NULL);
+-
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbimstate),
+- NULL);
+- if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+- brcmf_sdiod_regwl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbimstate),
+- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+- NULL);
+-
+- /* clear reset and allow it to propagate throughout the core */
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- udelay(1);
+-
+- /* leave clock enabled */
+- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- SSB_TMSLOW_CLOCK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+- NULL);
+- udelay(1);
+-}
+-
+-static void
+-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits, u32 post_resetbits)
+-{
+- u8 idx;
+- u32 regdata;
+- u32 wrapbase;
+-
+- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- wrapbase = ci->c_inf[idx].wrapbase;
+-
+- /* must disable first to work for arbitrary current core state */
+- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
+- in_resetbits);
+-
+- while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
+- BCMA_RESET_CTL_RESET) {
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
+- usleep_range(40, 60);
+- }
+-
+- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
+- BCMA_IOCTL_CLK, NULL);
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+-}
+-
+-#ifdef DEBUG
+-/* safety check for chipinfo */
+-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
+-{
+- u8 core_idx;
+-
+- /* check RAM core presence for ARM CM3 core */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
+- if (BRCMF_MAX_CORENUM != core_idx) {
+- core_idx = brcmf_sdio_chip_getinfidx(ci,
+- BCMA_CORE_INTERNAL_MEM);
+- if (BRCMF_MAX_CORENUM == core_idx) {
+- brcmf_err("RAM core not provided with ARM CM3 core\n");
+- return -ENODEV;
+- }
+- }
+-
+- /* check RAM base for ARM CR4 core */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+- if (BRCMF_MAX_CORENUM != core_idx) {
+- if (ci->rambase == 0) {
+- brcmf_err("RAM base not provided with ARM CR4 core\n");
+- return -ENOMEM;
+- }
+- }
+-
+- return 0;
+-}
+-#else /* DEBUG */
+-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
+-{
+- return 0;
+-}
+-#endif
+-
+-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u32 regdata;
+- u32 socitype;
+-
+- /* Get CC core rev
+- * Chipid is assume to be at offset 0 from SI_ENUM_BASE
+- * For different chiptypes or old sdio hosts w/o chipcommon,
+- * other ways of recognition should be added here.
+- */
+- regdata = brcmf_sdiod_regrl(sdiodev,
+- CORE_CC_REG(SI_ENUM_BASE, chipid),
+- NULL);
+- ci->chip = regdata & CID_ID_MASK;
+- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+- ci->chiprev >= 2)
+- ci->chip = BCM4339_CHIP_ID;
+- socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+-
+- brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
+- socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
+-
+- if (socitype == SOCI_SB) {
+- if (ci->chip != BCM4329_CHIP_ID) {
+- brcmf_err("SB chip is not supported\n");
+- return -ENODEV;
+- }
+- ci->iscoreup = brcmf_sdio_sb_iscoreup;
+- ci->corerev = brcmf_sdio_sb_corerev;
+- ci->coredisable = brcmf_sdio_sb_coredisable;
+- ci->resetcore = brcmf_sdio_sb_resetcore;
+-
+- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+- ci->c_inf[0].base = SI_ENUM_BASE;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->ramsize = BCM4329_RAMSIZE;
+- } else if (socitype == SOCI_AI) {
+- ci->iscoreup = brcmf_sdio_ai_iscoreup;
+- ci->corerev = brcmf_sdio_ai_corerev;
+- ci->coredisable = brcmf_sdio_ai_coredisable;
+- ci->resetcore = brcmf_sdio_ai_resetcore;
+-
+- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+- ci->c_inf[0].base = SI_ENUM_BASE;
+-
+- /* Address of cores for new chips should be added here */
+- switch (ci->chip) {
+- case BCM43143_CHIP_ID:
+- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
+- ci->c_inf[0].cib = 0x2b000000;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
+- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
+- ci->c_inf[1].cib = 0x18000000;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
+- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
+- ci->c_inf[2].cib = 0x14000000;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
+- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+- ci->c_inf[3].cib = 0x07000000;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = BCM43143_RAMSIZE;
+- break;
+- case BCM43241_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x2a084411;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x0e004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x14080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x07004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x90000;
+- break;
+- case BCM4330_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x27004211;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x07004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x0d080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x03004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x48000;
+- break;
+- case BCM4334_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x29004211;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x0d004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x13080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x07004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x80000;
+- break;
+- case BCM4335_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x2b084411;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18005000;
+- ci->c_inf[1].wrapbase = 0x18105000;
+- ci->c_inf[1].cib = 0x0f004211;
+- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+- ci->c_inf[2].base = 0x18002000;
+- ci->c_inf[2].wrapbase = 0x18102000;
+- ci->c_inf[2].cib = 0x01084411;
+- ci->c_inf[3].id = BCMA_CORE_80211;
+- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+- ci->ramsize = 0xc0000;
+- ci->rambase = 0x180000;
+- break;
+- case BCM43362_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x27004211;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18002000;
+- ci->c_inf[1].wrapbase = 0x18102000;
+- ci->c_inf[1].cib = 0x0a004211;
+- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+- ci->c_inf[2].base = 0x18004000;
+- ci->c_inf[2].wrapbase = 0x18104000;
+- ci->c_inf[2].cib = 0x08080401;
+- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+- ci->c_inf[3].base = 0x18003000;
+- ci->c_inf[3].wrapbase = 0x18103000;
+- ci->c_inf[3].cib = 0x03004211;
+- ci->c_inf[4].id = BCMA_CORE_80211;
+- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+- ci->ramsize = 0x3C000;
+- break;
+- case BCM4339_CHIP_ID:
+- ci->c_inf[0].wrapbase = 0x18100000;
+- ci->c_inf[0].cib = 0x2e084411;
+- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+- ci->c_inf[1].base = 0x18005000;
+- ci->c_inf[1].wrapbase = 0x18105000;
+- ci->c_inf[1].cib = 0x15004211;
+- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+- ci->c_inf[2].base = 0x18002000;
+- ci->c_inf[2].wrapbase = 0x18102000;
+- ci->c_inf[2].cib = 0x04084411;
+- ci->c_inf[3].id = BCMA_CORE_80211;
+- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+- ci->ramsize = 0xc0000;
+- ci->rambase = 0x180000;
+- break;
+- default:
+- brcmf_err("AXI chip is not supported\n");
+- return -ENODEV;
+- }
+- } else {
+- brcmf_err("chip backplane type %u is not supported\n",
+- socitype);
+- return -ENODEV;
+- }
+-
+- return brcmf_sdio_chip_cichk(ci);
+-}
+-
+-static int
+-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+-{
+- int err = 0;
+- u8 clkval, clkset;
+-
+- /* Try forcing SDIO core to do ALPAvail request only */
+- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+- if (err) {
+- brcmf_err("error writing for HT off\n");
+- return err;
+- }
+-
+- /* If register supported, wait for ALPAvail and then force ALP */
+- /* This may take up to 15 milliseconds */
+- clkval = brcmf_sdiod_regrb(sdiodev,
+- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+-
+- if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+- brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+- clkset, clkval);
+- return -EACCES;
+- }
+-
+- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+- !SBSDIO_ALPAV(clkval)),
+- PMU_MAX_TRANSITION_DLY);
+- if (!SBSDIO_ALPAV(clkval)) {
+- brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+- clkval);
+- return -EBUSY;
+- }
+-
+- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+- udelay(65);
+-
+- /* Also, disable the extra SDIO pull-ups */
+- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+-
+- return 0;
+-}
+-
+-static void
+-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u32 base = ci->c_inf[0].base;
+-
+- /* get chipcommon rev */
+- ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+-
+- /* get chipcommon capabilites */
+- ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
+- CORE_CC_REG(base, capabilities),
+- NULL);
+-
+- /* get pmu caps & rev */
+- if (ci->c_inf[0].caps & CC_CAP_PMU) {
+- ci->pmucaps =
+- brcmf_sdiod_regrl(sdiodev,
+- CORE_CC_REG(base, pmucapabilities),
+- NULL);
+- ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+- }
+-
+- ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+-
+- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+- ci->c_inf[0].rev, ci->pmurev,
+- ci->c_inf[1].rev, ci->c_inf[1].id);
+-
+- /*
+- * Make sure any on-chip ARM is off (in case strapping is wrong),
+- * or downloaded code was already running.
+- */
+- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+-}
+-
+-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip **ci_ptr)
+-{
+- int ret;
+- struct brcmf_chip *ci;
+-
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
+- if (!ci)
+- return -ENOMEM;
+-
+- ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+- if (ret != 0)
+- goto err;
+-
+- ret = brcmf_sdio_chip_recognition(sdiodev, ci);
+- if (ret != 0)
+- goto err;
+-
+- brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+-
+- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+- 0, NULL);
+- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+- 0, NULL);
+-
+- *ci_ptr = ci;
+- return 0;
+-
+-err:
+- kfree(ci);
+- return ret;
+-}
+-
+-void
+-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
+-{
+- brcmf_dbg(TRACE, "Enter\n");
+-
+- kfree(*ci_ptr);
+- *ci_ptr = NULL;
+-}
+-
+-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+-{
+- const char *fmt;
+-
+- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+- snprintf(buf, len, fmt, chipid);
+- return buf;
+-}
+-
+-void
+-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 drivestrength)
+-{
+- const struct sdiod_drive_str *str_tab = NULL;
+- u32 str_mask;
+- u32 str_shift;
+- char chn[8];
+- u32 base = ci->c_inf[0].base;
+- u32 i;
+- u32 drivestrength_sel = 0;
+- u32 cc_data_temp;
+- u32 addr;
+-
+- if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+- return;
+-
+- switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+- case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+- str_tab = sdiod_drvstr_tab1_1v8;
+- str_mask = 0x00003800;
+- str_shift = 11;
+- break;
+- case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+- str_tab = sdiod_drvstr_tab6_1v8;
+- str_mask = 0x00001800;
+- str_shift = 11;
+- break;
+- case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+- /* note: 43143 does not support tristate */
+- i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+- if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+- str_tab = sdiod_drvstr_tab2_3v3;
+- str_mask = 0x00000007;
+- str_shift = 0;
+- } else
+- brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+- brcmf_sdio_chip_name(ci->chip, chn, 8),
+- drivestrength);
+- break;
+- case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+- str_tab = sdiod_drive_strength_tab5_1v8;
+- str_mask = 0x00003800;
+- str_shift = 11;
+- break;
+- default:
+- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+- brcmf_sdio_chip_name(ci->chip, chn, 8),
+- ci->chiprev, ci->pmurev);
+- break;
+- }
+-
+- if (str_tab != NULL) {
+- for (i = 0; str_tab[i].strength != 0; i++) {
+- if (drivestrength >= str_tab[i].strength) {
+- drivestrength_sel = str_tab[i].sel;
+- break;
+- }
+- }
+- addr = CORE_CC_REG(base, chipcontrol_addr);
+- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+- cc_data_temp &= ~str_mask;
+- drivestrength_sel <<= str_shift;
+- cc_data_temp |= drivestrength_sel;
+- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+-
+- brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+- str_tab[i].strength, drivestrength, cc_data_temp);
+- }
+-}
+-
+-static void
+-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
+-}
+-
+-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u8 core_idx;
+- u32 reg_addr;
+-
+- if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
+- brcmf_err("SOCRAM core is down after reset?\n");
+- return false;
+- }
+-
+- /* clear all interrupts */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
+- reg_addr = ci->c_inf[core_idx].base;
+- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
+- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+-
+- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
+-
+- return true;
+-}
+-
+-static inline void
+-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u8 idx;
+- u32 regdata;
+- u32 wrapbase;
+- idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+-
+- if (idx == BRCMF_MAX_CORENUM)
+- return;
+-
+- wrapbase = ci->c_inf[idx].wrapbase;
+- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+- regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
+- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
+- ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
+- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+-}
+-
+-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 rstvec)
+-{
+- u8 core_idx;
+- u32 reg_addr;
+-
+- /* clear all interrupts */
+- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
+- reg_addr = ci->c_inf[core_idx].base;
+- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
+- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+-
+- /* Write reset vector to address 0 */
+- brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+- sizeof(rstvec));
+-
+- /* restore ARM */
+- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
+- 0, 0);
+-
+- return true;
+-}
+-
+-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci)
+-{
+- u8 arm_core_idx;
+-
+- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
+- if (BRCMF_MAX_CORENUM != arm_core_idx) {
+- brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
+- return;
+- }
+-
+- brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
+-}
+-
+-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 rstvec)
+-{
+- u8 arm_core_idx;
+-
+- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
+- if (BRCMF_MAX_CORENUM != arm_core_idx)
+- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
+-
+- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
+-}
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,231 +0,0 @@
+-/*
+- * Copyright (c) 2011 Broadcom Corporation
+- *
+- * Permission to use, copy, modify, and/or distribute this software for any
+- * purpose with or without fee is hereby granted, provided that the above
+- * copyright notice and this permission notice appear in all copies.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+- */
+-
+-#ifndef _BRCMFMAC_SDIO_CHIP_H_
+-#define _BRCMFMAC_SDIO_CHIP_H_
+-
+-/*
+- * Core reg address translation.
+- * Both macro's returns a 32 bits byte address on the backplane bus.
+- */
+-#define CORE_CC_REG(base, field) \
+- (base + offsetof(struct chipcregs, field))
+-#define CORE_BUS_REG(base, field) \
+- (base + offsetof(struct sdpcmd_regs, field))
+-#define CORE_SB(base, field) \
+- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+-
+-/* SDIO function 1 register CHIPCLKCSR */
+-/* Force ALP request to backplane */
+-#define SBSDIO_FORCE_ALP 0x01
+-/* Force HT request to backplane */
+-#define SBSDIO_FORCE_HT 0x02
+-/* Force ILP request to backplane */
+-#define SBSDIO_FORCE_ILP 0x04
+-/* Make ALP ready (power up xtal) */
+-#define SBSDIO_ALP_AVAIL_REQ 0x08
+-/* Make HT ready (power up PLL) */
+-#define SBSDIO_HT_AVAIL_REQ 0x10
+-/* Squelch clock requests from HW */
+-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+-/* Status: ALP is ready */
+-#define SBSDIO_ALP_AVAIL 0x40
+-/* Status: HT is ready */
+-#define SBSDIO_HT_AVAIL 0x80
+-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+-#define SBSDIO_CLKAV(regval, alponly) \
+- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+-
+-#define BRCMF_MAX_CORENUM 6
+-
+-struct brcmf_core {
+- u16 id;
+- u16 rev;
+- u32 base;
+- u32 wrapbase;
+- u32 caps;
+- u32 cib;
+-};
+-
+-struct brcmf_chip {
+- u32 chip;
+- u32 chiprev;
+- /* core info */
+- /* always put chipcommon core at 0, bus core at 1 */
+- struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
+- u32 pmurev;
+- u32 pmucaps;
+- u32 ramsize;
+- u32 rambase;
+- u32 rst_vec; /* reset vertor for ARM CR4 core */
+-
+- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
+- u16 coreid);
+- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
+- u16 coreid);
+- void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits);
+- void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+- u32 in_resetbits, u32 post_resetbits);
+-};
+-
+-struct sbconfig {
+- u32 PAD[2];
+- u32 sbipsflag; /* initiator port ocp slave flag */
+- u32 PAD[3];
+- u32 sbtpsflag; /* target port ocp slave flag */
+- u32 PAD[11];
+- u32 sbtmerrloga; /* (sonics >= 2.3) */
+- u32 PAD;
+- u32 sbtmerrlog; /* (sonics >= 2.3) */
+- u32 PAD[3];
+- u32 sbadmatch3; /* address match3 */
+- u32 PAD;
+- u32 sbadmatch2; /* address match2 */
+- u32 PAD;
+- u32 sbadmatch1; /* address match1 */
+- u32 PAD[7];
+- u32 sbimstate; /* initiator agent state */
+- u32 sbintvec; /* interrupt mask */
+- u32 sbtmstatelow; /* target state */
+- u32 sbtmstatehigh; /* target state */
+- u32 sbbwa0; /* bandwidth allocation table0 */
+- u32 PAD;
+- u32 sbimconfiglow; /* initiator configuration */
+- u32 sbimconfighigh; /* initiator configuration */
+- u32 sbadmatch0; /* address match0 */
+- u32 PAD;
+- u32 sbtmconfiglow; /* target configuration */
+- u32 sbtmconfighigh; /* target configuration */
+- u32 sbbconfig; /* broadcast configuration */
+- u32 PAD;
+- u32 sbbstate; /* broadcast state */
+- u32 PAD[3];
+- u32 sbactcnfg; /* activate configuration */
+- u32 PAD[3];
+- u32 sbflagst; /* current sbflags */
+- u32 PAD[3];
+- u32 sbidlow; /* identification */
+- u32 sbidhigh; /* identification */
+-};
+-
+-/* sdio core registers */
+-struct sdpcmd_regs {
+- u32 corecontrol; /* 0x00, rev8 */
+- u32 corestatus; /* rev8 */
+- u32 PAD[1];
+- u32 biststatus; /* rev8 */
+-
+- /* PCMCIA access */
+- u16 pcmciamesportaladdr; /* 0x010, rev8 */
+- u16 PAD[1];
+- u16 pcmciamesportalmask; /* rev8 */
+- u16 PAD[1];
+- u16 pcmciawrframebc; /* rev8 */
+- u16 PAD[1];
+- u16 pcmciaunderflowtimer; /* rev8 */
+- u16 PAD[1];
+-
+- /* interrupt */
+- u32 intstatus; /* 0x020, rev8 */
+- u32 hostintmask; /* rev8 */
+- u32 intmask; /* rev8 */
+- u32 sbintstatus; /* rev8 */
+- u32 sbintmask; /* rev8 */
+- u32 funcintmask; /* rev4 */
+- u32 PAD[2];
+- u32 tosbmailbox; /* 0x040, rev8 */
+- u32 tohostmailbox; /* rev8 */
+- u32 tosbmailboxdata; /* rev8 */
+- u32 tohostmailboxdata; /* rev8 */
+-
+- /* synchronized access to registers in SDIO clock domain */
+- u32 sdioaccess; /* 0x050, rev8 */
+- u32 PAD[3];
+-
+- /* PCMCIA frame control */
+- u8 pcmciaframectrl; /* 0x060, rev8 */
+- u8 PAD[3];
+- u8 pcmciawatermark; /* rev8 */
+- u8 PAD[155];
+-
+- /* interrupt batching control */
+- u32 intrcvlazy; /* 0x100, rev8 */
+- u32 PAD[3];
+-
+- /* counters */
+- u32 cmd52rd; /* 0x110, rev8 */
+- u32 cmd52wr; /* rev8 */
+- u32 cmd53rd; /* rev8 */
+- u32 cmd53wr; /* rev8 */
+- u32 abort; /* rev8 */
+- u32 datacrcerror; /* rev8 */
+- u32 rdoutofsync; /* rev8 */
+- u32 wroutofsync; /* rev8 */
+- u32 writebusy; /* rev8 */
+- u32 readwait; /* rev8 */
+- u32 readterm; /* rev8 */
+- u32 writeterm; /* rev8 */
+- u32 PAD[40];
+- u32 clockctlstatus; /* rev8 */
+- u32 PAD[7];
+-
+- u32 PAD[128]; /* DMA engines */
+-
+- /* SDIO/PCMCIA CIS region */
+- char cis[512]; /* 0x400-0x5ff, rev6 */
+-
+- /* PCMCIA function control registers */
+- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+- u16 PAD[55];
+-
+- /* PCMCIA backplane access */
+- u16 backplanecsr; /* 0x76E, rev6 */
+- u16 backplaneaddr0; /* rev6 */
+- u16 backplaneaddr1; /* rev6 */
+- u16 backplaneaddr2; /* rev6 */
+- u16 backplaneaddr3; /* rev6 */
+- u16 backplanedata0; /* rev6 */
+- u16 backplanedata1; /* rev6 */
+- u16 backplanedata2; /* rev6 */
+- u16 backplanedata3; /* rev6 */
+- u16 PAD[31];
+-
+- /* sprom "size" & "blank" info */
+- u16 spromstatus; /* 0x7BE, rev2 */
+- u32 PAD[464];
+-
+- u16 PAD[0x80];
+-};
+-
+-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip **ci_ptr);
+-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
+-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci,
+- u32 drivestrength);
+-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
+-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci);
+-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+- struct brcmf_chip *ci, u32 rstvec);
+-
+-#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h 2015-05-01 14:58:04.051427001 -0500
+@@ -180,6 +180,97 @@
+ uint max_request_size;
+ ushort max_segment_count;
+ uint max_segment_size;
++ uint txglomsz;
++ struct sg_table sgtable;
++};
++
++/* sdio core registers */
++struct sdpcmd_regs {
++ u32 corecontrol; /* 0x00, rev8 */
++ u32 corestatus; /* rev8 */
++ u32 PAD[1];
++ u32 biststatus; /* rev8 */
++
++ /* PCMCIA access */
++ u16 pcmciamesportaladdr; /* 0x010, rev8 */
++ u16 PAD[1];
++ u16 pcmciamesportalmask; /* rev8 */
++ u16 PAD[1];
++ u16 pcmciawrframebc; /* rev8 */
++ u16 PAD[1];
++ u16 pcmciaunderflowtimer; /* rev8 */
++ u16 PAD[1];
++
++ /* interrupt */
++ u32 intstatus; /* 0x020, rev8 */
++ u32 hostintmask; /* rev8 */
++ u32 intmask; /* rev8 */
++ u32 sbintstatus; /* rev8 */
++ u32 sbintmask; /* rev8 */
++ u32 funcintmask; /* rev4 */
++ u32 PAD[2];
++ u32 tosbmailbox; /* 0x040, rev8 */
++ u32 tohostmailbox; /* rev8 */
++ u32 tosbmailboxdata; /* rev8 */
++ u32 tohostmailboxdata; /* rev8 */
++
++ /* synchronized access to registers in SDIO clock domain */
++ u32 sdioaccess; /* 0x050, rev8 */
++ u32 PAD[3];
++
++ /* PCMCIA frame control */
++ u8 pcmciaframectrl; /* 0x060, rev8 */
++ u8 PAD[3];
++ u8 pcmciawatermark; /* rev8 */
++ u8 PAD[155];
++
++ /* interrupt batching control */
++ u32 intrcvlazy; /* 0x100, rev8 */
++ u32 PAD[3];
++
++ /* counters */
++ u32 cmd52rd; /* 0x110, rev8 */
++ u32 cmd52wr; /* rev8 */
++ u32 cmd53rd; /* rev8 */
++ u32 cmd53wr; /* rev8 */
++ u32 abort; /* rev8 */
++ u32 datacrcerror; /* rev8 */
++ u32 rdoutofsync; /* rev8 */
++ u32 wroutofsync; /* rev8 */
++ u32 writebusy; /* rev8 */
++ u32 readwait; /* rev8 */
++ u32 readterm; /* rev8 */
++ u32 writeterm; /* rev8 */
++ u32 PAD[40];
++ u32 clockctlstatus; /* rev8 */
++ u32 PAD[7];
++
++ u32 PAD[128]; /* DMA engines */
++
++ /* SDIO/PCMCIA CIS region */
++ char cis[512]; /* 0x400-0x5ff, rev6 */
++
++ /* PCMCIA function control registers */
++ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
++ u16 PAD[55];
++
++ /* PCMCIA backplane access */
++ u16 backplanecsr; /* 0x76E, rev6 */
++ u16 backplaneaddr0; /* rev6 */
++ u16 backplaneaddr1; /* rev6 */
++ u16 backplaneaddr2; /* rev6 */
++ u16 backplaneaddr3; /* rev6 */
++ u16 backplanedata0; /* rev6 */
++ u16 backplanedata1; /* rev6 */
++ u16 backplanedata2; /* rev6 */
++ u16 backplanedata3; /* rev6 */
++ u16 PAD[31];
++
++ /* sprom "size" & "blank" info */
++ u16 spromstatus; /* 0x7BE, rev2 */
++ u32 PAD[464];
++
++ u16 PAD[0x80];
+ };
+
+ /* Register/deregister interrupt handler. */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/usb.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/usb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/usb.c 2015-05-01 14:58:04.051427001 -0500
+@@ -25,6 +25,7 @@
+ #include <dhd_bus.h>
+ #include <dhd_dbg.h>
+
++#include "firmware.h"
+ #include "usb_rdl.h"
+ #include "usb.h"
+
+@@ -61,12 +62,6 @@
+ u8 *image;
+ int image_len;
+ };
+-static struct list_head fw_image_list;
+-
+-struct intr_transfer_buf {
+- u32 notification;
+- u32 reserved;
+-};
+
+ struct brcmf_usbdev_info {
+ struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
+@@ -75,7 +70,7 @@
+ struct list_head rx_postq;
+ struct list_head tx_freeq;
+ struct list_head tx_postq;
+- uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
++ uint rx_pipe, tx_pipe, rx_pipe2;
+
+ int rx_low_watermark;
+ int tx_low_watermark;
+@@ -87,7 +82,7 @@
+ struct brcmf_usbreq *tx_reqs;
+ struct brcmf_usbreq *rx_reqs;
+
+- u8 *image; /* buffer for combine fw and nvram */
++ const u8 *image; /* buffer for combine fw and nvram */
+ int image_len;
+
+ struct usb_device *usbdev;
+@@ -104,10 +99,6 @@
+ ulong ctl_op;
+
+ struct urb *bulk_urb; /* used for FW download */
+- struct urb *intr_urb; /* URB for interrupt endpoint */
+- int intr_size; /* Size of interrupt message */
+- int interval; /* Interrupt polling interval */
+- struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
+ };
+
+ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
+@@ -531,39 +522,6 @@
+ }
+ }
+
+-static void
+-brcmf_usb_intr_complete(struct urb *urb)
+-{
+- struct brcmf_usbdev_info *devinfo =
+- (struct brcmf_usbdev_info *)urb->context;
+- int err;
+-
+- brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
+-
+- if (devinfo == NULL)
+- return;
+-
+- if (unlikely(urb->status)) {
+- if (urb->status == -ENOENT ||
+- urb->status == -ESHUTDOWN ||
+- urb->status == -ENODEV) {
+- brcmf_usb_state_change(devinfo,
+- BRCMFMAC_USB_STATE_DOWN);
+- }
+- }
+-
+- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
+- brcmf_err("intr cb when DBUS down, ignoring\n");
+- return;
+- }
+-
+- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+- err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+- if (err)
+- brcmf_err("usb_submit_urb, err=%d\n", err);
+- }
+-}
+-
+ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
+ {
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+@@ -619,7 +577,6 @@
+ {
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ u16 ifnum;
+- int ret;
+
+ brcmf_dbg(USB, "Enter\n");
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
+@@ -628,23 +585,6 @@
+ /* Success, indicate devinfo is fully up */
+ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
+
+- if (devinfo->intr_urb) {
+- usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
+- devinfo->intr_pipe,
+- &devinfo->intr,
+- devinfo->intr_size,
+- (usb_complete_t)brcmf_usb_intr_complete,
+- devinfo,
+- devinfo->interval);
+-
+- ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+- if (ret) {
+- brcmf_err("USB_SUBMIT_URB failed with status %d\n",
+- ret);
+- return -EINVAL;
+- }
+- }
+-
+ if (devinfo->ctl_urb) {
+ devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
+ devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
+@@ -681,8 +621,6 @@
+ return;
+
+ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
+- if (devinfo->intr_urb)
+- usb_kill_urb(devinfo->intr_urb);
+
+ if (devinfo->ctl_urb)
+ usb_kill_urb(devinfo->ctl_urb);
+@@ -1021,7 +959,7 @@
+ }
+
+ err = brcmf_usb_dlstart(devinfo,
+- devinfo->image, devinfo->image_len);
++ (u8 *)devinfo->image, devinfo->image_len);
+ if (err == 0)
+ err = brcmf_usb_dlrun(devinfo);
+ return err;
+@@ -1036,7 +974,6 @@
+ brcmf_usb_free_q(&devinfo->rx_freeq, false);
+ brcmf_usb_free_q(&devinfo->tx_freeq, false);
+
+- usb_free_urb(devinfo->intr_urb);
+ usb_free_urb(devinfo->ctl_urb);
+ usb_free_urb(devinfo->bulk_urb);
+
+@@ -1080,68 +1017,20 @@
+ return -1;
+ }
+
+-static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
++static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
+ {
+- s8 *fwname;
+- const struct firmware *fw;
+- struct brcmf_usb_image *fw_image;
+- int err;
+-
+- brcmf_dbg(USB, "Enter\n");
+ switch (devinfo->bus_pub.devid) {
+ case 43143:
+- fwname = BRCMF_USB_43143_FW_NAME;
+- break;
++ return BRCMF_USB_43143_FW_NAME;
+ case 43235:
+ case 43236:
+ case 43238:
+- fwname = BRCMF_USB_43236_FW_NAME;
+- break;
++ return BRCMF_USB_43236_FW_NAME;
+ case 43242:
+- fwname = BRCMF_USB_43242_FW_NAME;
+- break;
++ return BRCMF_USB_43242_FW_NAME;
+ default:
+- return -EINVAL;
+- break;
+- }
+- brcmf_dbg(USB, "Loading FW %s\n", fwname);
+- list_for_each_entry(fw_image, &fw_image_list, list) {
+- if (fw_image->fwname == fwname) {
+- devinfo->image = fw_image->image;
+- devinfo->image_len = fw_image->image_len;
+- return 0;
+- }
+- }
+- /* fw image not yet loaded. Load it now and add to list */
+- err = request_firmware(&fw, fwname, devinfo->dev);
+- if (!fw) {
+- brcmf_err("fail to request firmware %s\n", fwname);
+- return err;
+- }
+- if (check_file(fw->data) < 0) {
+- brcmf_err("invalid firmware %s\n", fwname);
+- return -EINVAL;
++ return NULL;
+ }
+-
+- fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
+- if (!fw_image)
+- return -ENOMEM;
+- INIT_LIST_HEAD(&fw_image->list);
+- list_add_tail(&fw_image->list, &fw_image_list);
+- fw_image->fwname = fwname;
+- fw_image->image = vmalloc(fw->size);
+- if (!fw_image->image)
+- return -ENOMEM;
+-
+- memcpy(fw_image->image, fw->data, fw->size);
+- fw_image->image_len = fw->size;
+-
+- release_firmware(fw);
+-
+- devinfo->image = fw_image->image;
+- devinfo->image_len = fw_image->image_len;
+-
+- return 0;
+ }
+
+
+@@ -1186,11 +1075,6 @@
+ goto error;
+ devinfo->tx_freecount = ntxq;
+
+- devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
+- if (!devinfo->intr_urb) {
+- brcmf_err("usb_alloc_urb (intr) failed\n");
+- goto error;
+- }
+ devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!devinfo->ctl_urb) {
+ brcmf_err("usb_alloc_urb (ctl) failed\n");
+@@ -1202,16 +1086,6 @@
+ goto error;
+ }
+
+- if (!brcmf_usb_dlneeded(devinfo))
+- return &devinfo->bus_pub;
+-
+- brcmf_dbg(USB, "Start fw downloading\n");
+- if (brcmf_usb_get_fw(devinfo))
+- goto error;
+-
+- if (brcmf_usb_fw_download(devinfo))
+- goto error;
+-
+ return &devinfo->bus_pub;
+
+ error:
+@@ -1222,18 +1096,77 @@
+
+ static struct brcmf_bus_ops brcmf_usb_bus_ops = {
+ .txdata = brcmf_usb_tx,
+- .init = brcmf_usb_up,
+ .stop = brcmf_usb_down,
+ .txctl = brcmf_usb_tx_ctlpkt,
+ .rxctl = brcmf_usb_rx_ctlpkt,
+ };
+
++static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
++{
++ int ret;
++
++ /* Attach to the common driver interface */
++ ret = brcmf_attach(devinfo->dev);
++ if (ret) {
++ brcmf_err("brcmf_attach failed\n");
++ return ret;
++ }
++
++ ret = brcmf_usb_up(devinfo->dev);
++ if (ret)
++ goto fail;
++
++ ret = brcmf_bus_start(devinfo->dev);
++ if (ret)
++ goto fail;
++
++ return 0;
++fail:
++ brcmf_detach(devinfo->dev);
++ return ret;
++}
++
++static void brcmf_usb_probe_phase2(struct device *dev,
++ const struct firmware *fw,
++ void *nvram, u32 nvlen)
++{
++ struct brcmf_bus *bus = dev_get_drvdata(dev);
++ struct brcmf_usbdev_info *devinfo;
++ int ret;
++
++ brcmf_dbg(USB, "Start fw downloading\n");
++ ret = check_file(fw->data);
++ if (ret < 0) {
++ brcmf_err("invalid firmware\n");
++ release_firmware(fw);
++ goto error;
++ }
++
++ devinfo = bus->bus_priv.usb->devinfo;
++ devinfo->image = fw->data;
++ devinfo->image_len = fw->size;
++
++ ret = brcmf_usb_fw_download(devinfo);
++ release_firmware(fw);
++ if (ret)
++ goto error;
++
++ ret = brcmf_usb_bus_setup(devinfo);
++ if (ret)
++ goto error;
++
++ return;
++error:
++ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
++ device_release_driver(dev);
++}
++
+ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
+ {
+ struct brcmf_bus *bus = NULL;
+ struct brcmf_usbdev *bus_pub = NULL;
+- int ret;
+ struct device *dev = devinfo->dev;
++ int ret;
+
+ brcmf_dbg(USB, "Enter\n");
+ bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
+@@ -1254,22 +1187,18 @@
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
+ bus->proto_type = BRCMF_PROTO_BCDC;
++ bus->always_use_fws_queue = true;
+
+- /* Attach to the common driver interface */
+- ret = brcmf_attach(dev);
+- if (ret) {
+- brcmf_err("brcmf_attach failed\n");
+- goto fail;
+- }
+-
+- ret = brcmf_bus_start(dev);
+- if (ret) {
+- brcmf_err("dongle is not responding\n");
+- brcmf_detach(dev);
+- goto fail;
++ if (!brcmf_usb_dlneeded(devinfo)) {
++ ret = brcmf_usb_bus_setup(devinfo);
++ if (ret)
++ goto fail;
+ }
+-
++ /* request firmware here */
++ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
++ brcmf_usb_probe_phase2);
+ return 0;
++
+ fail:
+ /* Release resources in reverse order */
+ kfree(bus);
+@@ -1357,9 +1286,6 @@
+ goto fail;
+ }
+
+- endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+- devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
+-
+ devinfo->rx_pipe = 0;
+ devinfo->rx_pipe2 = 0;
+ devinfo->tx_pipe = 0;
+@@ -1391,16 +1317,9 @@
+ }
+ }
+
+- /* Allocate interrupt URB and data buffer */
+- /* RNDIS says 8-byte intr, our old drivers used 4-byte */
+- if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
+- devinfo->intr_size = 8;
+- else
+- devinfo->intr_size = 4;
+-
+- devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+-
+- if (usb->speed == USB_SPEED_HIGH)
++ if (usb->speed == USB_SPEED_SUPER)
++ brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
++ else if (usb->speed == USB_SPEED_HIGH)
+ brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
+ else
+ brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
+@@ -1455,23 +1374,18 @@
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+ brcmf_dbg(USB, "Enter\n");
+- if (!brcmf_attach(devinfo->dev))
+- return brcmf_bus_start(&usb->dev);
+-
+- return 0;
++ return brcmf_usb_bus_setup(devinfo);
+ }
+
+ static int brcmf_usb_reset_resume(struct usb_interface *intf)
+ {
+ struct usb_device *usb = interface_to_usbdev(intf);
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+-
+ brcmf_dbg(USB, "Enter\n");
+
+- if (!brcmf_usb_fw_download(devinfo))
+- return brcmf_usb_resume(intf);
+-
+- return -EIO;
++ return brcmf_fw_get_firmwares(&usb->dev, 0,
++ brcmf_usb_get_fwname(devinfo), NULL,
++ brcmf_usb_probe_phase2);
+ }
+
+ #define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
+@@ -1506,16 +1420,6 @@
+ .disable_hub_initiated_lpm = 1,
+ };
+
+-static void brcmf_release_fw(struct list_head *q)
+-{
+- struct brcmf_usb_image *fw_image, *next;
+-
+- list_for_each_entry_safe(fw_image, next, q, list) {
+- vfree(fw_image->image);
+- list_del_init(&fw_image->list);
+- }
+-}
+-
+ static int brcmf_usb_reset_device(struct device *dev, void *notused)
+ {
+ /* device past is the usb interface so we
+@@ -1534,12 +1438,10 @@
+ ret = driver_for_each_device(drv, NULL, NULL,
+ brcmf_usb_reset_device);
+ usb_deregister(&brcmf_usbdrvr);
+- brcmf_release_fw(&fw_image_list);
+ }
+
+ void brcmf_usb_register(void)
+ {
+ brcmf_dbg(USB, "Enter\n");
+- INIT_LIST_HEAD(&fw_image_list);
+ usb_register(&brcmf_usbdrvr);
+ }
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c 2015-05-01 14:58:04.051427001 -0500
+@@ -18,6 +18,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/etherdevice.h>
++#include <linux/module.h>
+ #include <net/cfg80211.h>
+ #include <net/netlink.h>
+
+@@ -190,6 +191,7 @@
+ .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+ .bitrates = wl_g_rates,
+ .n_bitrates = wl_g_rates_size,
++ .ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
+ };
+
+ static struct ieee80211_supported_band __wl_band_5ghz_a = {
+@@ -219,9 +221,9 @@
+ */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+ /* IEEE 802.11a, channel 36..64 */
+- REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
++ REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
+ /* IEEE 802.11a, channel 100..165 */
+- REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
++ REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
+ };
+
+ static const u32 __wl_cipher_suites[] = {
+@@ -251,6 +253,10 @@
+ struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
+ };
+
++static int brcmf_roamoff;
++module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
++MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
++
+ /* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+@@ -335,6 +341,61 @@
+ return qdbm;
+ }
+
++static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
++ struct cfg80211_chan_def *ch)
++{
++ struct brcmu_chan ch_inf;
++ s32 primary_offset;
++
++ brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
++ ch->chan->center_freq, ch->center_freq1, ch->width);
++ ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
++ primary_offset = ch->center_freq1 - ch->chan->center_freq;
++ switch (ch->width) {
++ case NL80211_CHAN_WIDTH_20_NOHT:
++ case NL80211_CHAN_WIDTH_20:
++ ch_inf.bw = BRCMU_CHAN_BW_20;
++ WARN_ON(primary_offset != 0);
++ break;
++ case NL80211_CHAN_WIDTH_40:
++ ch_inf.bw = BRCMU_CHAN_BW_40;
++ if (primary_offset < 0)
++ ch_inf.sb = BRCMU_CHAN_SB_U;
++ else
++ ch_inf.sb = BRCMU_CHAN_SB_L;
++ break;
++ case NL80211_CHAN_WIDTH_80:
++ ch_inf.bw = BRCMU_CHAN_BW_80;
++ if (primary_offset < 0) {
++ if (primary_offset < -CH_10MHZ_APART)
++ ch_inf.sb = BRCMU_CHAN_SB_UU;
++ else
++ ch_inf.sb = BRCMU_CHAN_SB_UL;
++ } else {
++ if (primary_offset > CH_10MHZ_APART)
++ ch_inf.sb = BRCMU_CHAN_SB_LL;
++ else
++ ch_inf.sb = BRCMU_CHAN_SB_LU;
++ }
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++ switch (ch->chan->band) {
++ case IEEE80211_BAND_2GHZ:
++ ch_inf.band = BRCMU_CHAN_BAND_2G;
++ break;
++ case IEEE80211_BAND_5GHZ:
++ ch_inf.band = BRCMU_CHAN_BAND_5G;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++ d11inf->encchspec(&ch_inf);
++
++ return ch_inf.chspec;
++}
++
+ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
+ struct ieee80211_channel *ch)
+ {
+@@ -351,13 +412,11 @@
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
++const struct brcmf_tlv *
++brcmf_parse_tlvs(const void *buf, int buflen, uint key)
+ {
+- struct brcmf_tlv *elt;
+- int totlen;
+-
+- elt = (struct brcmf_tlv *)buf;
+- totlen = buflen;
++ const struct brcmf_tlv *elt = buf;
++ int totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+@@ -378,8 +437,8 @@
+ * not update the tlvs buffer pointer/length.
+ */
+ static bool
+-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+- u8 *oui, u32 oui_len, u8 type)
++brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
++ const u8 *oui, u32 oui_len, u8 type)
+ {
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+@@ -401,12 +460,12 @@
+ }
+
+ static struct brcmf_vs_tlv *
+-brcmf_find_wpaie(u8 *parse, u32 len)
++brcmf_find_wpaie(const u8 *parse, u32 len)
+ {
+- struct brcmf_tlv *ie;
++ const struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
++ if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+@@ -414,9 +473,9 @@
+ }
+
+ static struct brcmf_vs_tlv *
+-brcmf_find_wpsie(u8 *parse, u32 len)
++brcmf_find_wpsie(const u8 *parse, u32 len)
+ {
+- struct brcmf_tlv *ie;
++ const struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+@@ -491,6 +550,19 @@
+ return err;
+ }
+
++static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
++{
++ enum nl80211_iftype iftype;
++
++ iftype = vif->wdev.iftype;
++ return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
++}
++
++static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
++{
++ return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
++}
++
+ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+@@ -569,6 +641,9 @@
+ if (err)
+ brcmf_err("Scan abort failed\n");
+ }
++
++ brcmf_set_mpc(ifp, 1);
++
+ /*
+ * e-scan can be initiated by scheduled scan
+ * which takes precedence.
+@@ -578,12 +653,10 @@
+ cfg->sched_escan = false;
+ if (!aborted)
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+- brcmf_set_mpc(ifp, 1);
+ } else if (scan_request) {
+ brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(scan_request, aborted);
+- brcmf_set_mpc(ifp, 1);
+ }
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+@@ -651,7 +724,6 @@
+ type);
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_ADHOC:
+- vif->mode = WL_MODE_IBSS;
+ infra = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+@@ -667,12 +739,10 @@
+ */
+ return 0;
+ }
+- vif->mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+- vif->mode = WL_MODE_AP;
+ ap = 1;
+ break;
+ default:
+@@ -696,7 +766,7 @@
+ err = -EAGAIN;
+ goto done;
+ }
+- brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
++ brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
+ "Adhoc" : "Infra");
+ }
+ ndev->ieee80211_ptr->iftype = type;
+@@ -1222,8 +1292,8 @@
+ params->chandef.chan->center_freq);
+ if (params->channel_fixed) {
+ /* adding chanspec */
+- chanspec = channel_to_chanspec(&cfg->d11inf,
+- params->chandef.chan);
++ chanspec = chandef_to_chanspec(&cfg->d11inf,
++ &params->chandef);
+ join_params.params_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+@@ -1340,13 +1410,14 @@
+ }
+
+ static s32
+-brcmf_set_set_cipher(struct net_device *ndev,
+- struct cfg80211_connect_params *sme)
++brcmf_set_wsec_mode(struct net_device *ndev,
++ struct cfg80211_connect_params *sme, bool mfp)
+ {
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_cfg80211_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
++ s32 wsec;
+ s32 err = 0;
+
+ if (sme->crypto.n_ciphers_pairwise) {
+@@ -1398,7 +1469,12 @@
+ if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+ sme->privacy)
+ pval = AES_ENABLED;
+- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
++
++ if (mfp)
++ wsec = pval | gval | MFP_CAPABLE;
++ else
++ wsec = pval | gval;
++ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
+ if (err) {
+ brcmf_err("error (%d)\n", err);
+ return err;
+@@ -1562,13 +1638,12 @@
+ struct ieee80211_channel *chan = sme->channel;
+ struct brcmf_join_params join_params;
+ size_t join_params_size;
+- struct brcmf_tlv *rsn_ie;
+- struct brcmf_vs_tlv *wpa_ie;
+- void *ie;
++ const struct brcmf_tlv *rsn_ie;
++ const struct brcmf_vs_tlv *wpa_ie;
++ const void *ie;
+ u32 ie_len;
+ struct brcmf_ext_join_params_le *ext_join_params;
+ u16 chanspec;
+-
+ s32 err = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -1591,7 +1666,8 @@
+ ie_len = wpa_ie->len + TLV_HDR_LEN;
+ } else {
+ /* find the RSN_IE */
+- rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
++ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
++ sme->ie_len,
+ WLAN_EID_RSN);
+ if (rsn_ie) {
+ ie = rsn_ie;
+@@ -1636,7 +1712,7 @@
+ goto done;
+ }
+
+- err = brcmf_set_set_cipher(ndev, sme);
++ err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
+ if (err) {
+ brcmf_err("wl_set_set_cipher failed (%d)\n", err);
+ goto done;
+@@ -1678,22 +1754,9 @@
+ ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+ memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+ profile->ssid.SSID_len);
+- /*increase dwell time to receive probe response or detect Beacon
+- * from target AP at a noisy air only during connect command
+- */
+- ext_join_params->scan_le.active_time =
+- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+- ext_join_params->scan_le.passive_time =
+- cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
++
+ /* Set up join scan parameters */
+ ext_join_params->scan_le.scan_type = -1;
+- /* to sync with presence period of VSDB GO.
+- * Send probe request more frequently. Probe request will be stopped
+- * when it gets probe response from target AP/GO.
+- */
+- ext_join_params->scan_le.nprobes =
+- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+- BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+ if (sme->bssid)
+@@ -1706,6 +1769,25 @@
+
+ ext_join_params->assoc_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
++ /* Increase dwell time to receive probe response or detect
++ * beacon from target AP in a noisy environment only during connect
++ * command.
++ */
++ ext_join_params->scan_le.active_time =
++ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
++ ext_join_params->scan_le.passive_time =
++ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
++ /* To sync with presence period of VSDB GO send probe request
++ * more frequently. Probe request will be stopped when it gets
++ * probe response from target AP/GO.
++ */
++ ext_join_params->scan_le.nprobes =
++ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
++ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
++ } else {
++ ext_join_params->scan_le.active_time = cpu_to_le32(-1);
++ ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
++ ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
+ }
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+@@ -1913,7 +1995,7 @@
+ brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
+ memcpy(key.data, params->key, key.len);
+
+- if ((ifp->vif->mode != WL_MODE_AP) &&
++ if (!brcmf_is_apmode(ifp->vif) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+ brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+@@ -1981,7 +2063,9 @@
+ if (!check_vif_up(ifp->vif))
+ return -EIO;
+
+- if (mac_addr) {
++ if (mac_addr &&
++ (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
++ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
+ brcmf_dbg(TRACE, "Exit");
+ return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
+ }
+@@ -2010,7 +2094,7 @@
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+- if (ifp->vif->mode != WL_MODE_AP) {
++ if (!brcmf_is_apmode(ifp->vif)) {
+ brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+@@ -2164,12 +2248,14 @@
+ s32 err = 0;
+ u8 *bssid = profile->bssid;
+ struct brcmf_sta_info_le sta_info_le;
++ u32 beacon_period;
++ u32 dtim_period;
+
+ brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
+ if (!check_vif_up(ifp->vif))
+ return -EIO;
+
+- if (ifp->vif->mode == WL_MODE_AP) {
++ if (brcmf_is_apmode(ifp->vif)) {
+ memcpy(&sta_info_le, mac, ETH_ALEN);
+ err = brcmf_fil_iovar_data_get(ifp, "sta_info",
+ &sta_info_le,
+@@ -2186,7 +2272,7 @@
+ }
+ brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
+ sinfo->inactive_time, sinfo->connected_time);
+- } else if (ifp->vif->mode == WL_MODE_BSS) {
++ } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
+ if (memcmp(mac, bssid, ETH_ALEN)) {
+ brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
+ mac, bssid);
+@@ -2218,6 +2304,30 @@
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
++ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
++ &beacon_period);
++ if (err) {
++ brcmf_err("Could not get beacon period (%d)\n",
++ err);
++ goto done;
++ } else {
++ sinfo->bss_param.beacon_interval =
++ beacon_period;
++ brcmf_dbg(CONN, "Beacon period %d\n",
++ beacon_period);
++ }
++ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
++ &dtim_period);
++ if (err) {
++ brcmf_err("Could not get DTIM period (%d)\n",
++ err);
++ goto done;
++ } else {
++ sinfo->bss_param.dtim_period = dtim_period;
++ brcmf_dbg(CONN, "DTIM period %d\n",
++ dtim_period);
++ }
++ sinfo->filled |= STATION_INFO_BSS_PARAM;
+ }
+ } else
+ err = -EPERM;
+@@ -2444,18 +2554,13 @@
+ return err;
+ }
+
+-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
+-{
+- return vif->mode == WL_MODE_IBSS;
+-}
+-
+ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
+ {
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
+ struct brcmf_bss_info_le *bi;
+ struct brcmf_ssid *ssid;
+- struct brcmf_tlv *tim;
++ const struct brcmf_tlv *tim;
+ u16 beacon_interval;
+ u8 dtim_period;
+ size_t ie_len;
+@@ -3075,7 +3180,7 @@
+ }
+
+ if (!request->n_ssids || !request->n_match_sets) {
+- brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
++ brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
+ request->n_ssids);
+ return -EINVAL;
+ }
+@@ -3220,8 +3325,9 @@
+ }
+
+ static s32
+-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
+- bool is_rsn_ie)
++brcmf_configure_wpaie(struct net_device *ndev,
++ const struct brcmf_vs_tlv *wpa_ie,
++ bool is_rsn_ie)
+ {
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ u32 auth = 0; /* d11 open authentication */
+@@ -3684,42 +3790,26 @@
+ }
+
+ static s32
+-brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
+- struct brcmf_if *ifp,
+- struct ieee80211_channel *channel)
+-{
+- u16 chanspec;
+- s32 err;
+-
+- brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
+- channel->center_freq);
+-
+- chanspec = channel_to_chanspec(&cfg->d11inf, channel);
+- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+-
+- return err;
+-}
+-
+-static s32
+ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ap_settings *settings)
+ {
+ s32 ie_offset;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+- struct brcmf_tlv *ssid_ie;
++ const struct brcmf_tlv *ssid_ie;
+ struct brcmf_ssid_le ssid_le;
+ s32 err = -EPERM;
+- struct brcmf_tlv *rsn_ie;
+- struct brcmf_vs_tlv *wpa_ie;
++ const struct brcmf_tlv *rsn_ie;
++ const struct brcmf_vs_tlv *wpa_ie;
+ struct brcmf_join_params join_params;
+ enum nl80211_iftype dev_role;
+ struct brcmf_fil_bss_enable_le bss_enable;
++ u16 chanspec;
+
+- brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
+- cfg80211_get_chandef_type(&settings->chandef),
+- settings->beacon_interval,
+- settings->dtim_period);
++ brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
++ settings->chandef.chan->hw_value,
++ settings->chandef.center_freq1, settings->chandef.width,
++ settings->beacon_interval, settings->dtim_period);
+ brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
+ settings->ssid, settings->ssid_len, settings->auth_type,
+ settings->inactivity_timeout);
+@@ -3776,9 +3866,10 @@
+
+ brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
+
+- err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
++ chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
++ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+ if (err < 0) {
+- brcmf_err("Set Channel failed, %d\n", err);
++ brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
+ goto exit;
+ }
+
+@@ -4220,32 +4311,6 @@
+ CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
+ };
+
+-static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
+-{
+- switch (type) {
+- case NL80211_IFTYPE_AP_VLAN:
+- case NL80211_IFTYPE_WDS:
+- case NL80211_IFTYPE_MONITOR:
+- case NL80211_IFTYPE_MESH_POINT:
+- return -ENOTSUPP;
+- case NL80211_IFTYPE_ADHOC:
+- return WL_MODE_IBSS;
+- case NL80211_IFTYPE_STATION:
+- case NL80211_IFTYPE_P2P_CLIENT:
+- return WL_MODE_BSS;
+- case NL80211_IFTYPE_AP:
+- case NL80211_IFTYPE_P2P_GO:
+- return WL_MODE_AP;
+- case NL80211_IFTYPE_P2P_DEVICE:
+- return WL_MODE_P2P;
+- case NL80211_IFTYPE_UNSPECIFIED:
+- default:
+- break;
+- }
+-
+- return -EINVAL;
+-}
+-
+ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
+ {
+ /* scheduled scan settings */
+@@ -4340,6 +4405,8 @@
+ WIPHY_FLAG_OFFCHAN_TX |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_SUPPORTS_TDLS;
++ if (!brcmf_roamoff)
++ wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+ wiphy->mgmt_stypes = brcmf_txrx_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
+ brcmf_wiphy_pno_params(wiphy);
+@@ -4370,7 +4437,6 @@
+ vif->wdev.wiphy = cfg->wiphy;
+ vif->wdev.iftype = type;
+
+- vif->mode = brcmf_nl80211_iftype_to_mode(type);
+ vif->pm_block = pm_block;
+ vif->roam_off = -1;
+
+@@ -4416,7 +4482,9 @@
+ u32 event = e->event_code;
+ u16 flags = e->flags;
+
+- if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
++ if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
++ (event == BRCMF_E_DISASSOC_IND) ||
++ ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
+ brcmf_dbg(CONN, "Processing link down\n");
+ return true;
+ }
+@@ -4658,16 +4726,18 @@
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct net_device *ndev = ifp->ndev;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
++ struct ieee80211_channel *chan;
+ s32 err = 0;
+
+- if (ifp->vif->mode == WL_MODE_AP) {
++ if (brcmf_is_apmode(ifp->vif)) {
+ err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
+ } else if (brcmf_is_linkup(e)) {
+ brcmf_dbg(CONN, "Linkup\n");
+ if (brcmf_is_ibssmode(ifp->vif)) {
++ chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
+ memcpy(profile->bssid, e->addr, ETH_ALEN);
+ wl_inform_ibss(cfg, ndev, e->addr);
+- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
++ cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+@@ -4678,10 +4748,6 @@
+ brcmf_dbg(CONN, "Linkdown\n");
+ if (!brcmf_is_ibssmode(ifp->vif)) {
+ brcmf_bss_connect_done(cfg, ndev, e, false);
+- if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+- &ifp->vif->sme_state))
+- cfg80211_disconnected(ndev, 0, NULL, 0,
+- GFP_KERNEL);
+ }
+ brcmf_link_down(ifp->vif);
+ brcmf_init_prof(ndev_to_prof(ndev));
+@@ -4875,11 +4941,8 @@
+
+ cfg->scan_request = NULL;
+ cfg->pwr_save = true;
+- cfg->roam_on = true; /* roam on & off switch.
+- we enable roam per default */
+- cfg->active_scan = true; /* we do active scan for
+- specific scan per default */
+- cfg->dongle_up = false; /* dongle is not up yet */
++ cfg->active_scan = true; /* we do active scan per default */
++ cfg->dongle_up = false; /* dongle is not up yet */
+ err = brcmf_init_priv_mem(cfg);
+ if (err)
+ return err;
+@@ -4904,6 +4967,30 @@
+ mutex_init(&event->vif_event_lock);
+ }
+
++static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
++{
++ struct brcmf_fil_bwcap_le band_bwcap;
++ u32 val;
++ int err;
++
++ /* verify support for bw_cap command */
++ val = WLC_BAND_5G;
++ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
++
++ if (!err) {
++ /* only set 2G bandwidth using bw_cap command */
++ band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
++ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
++ err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
++ sizeof(band_bwcap));
++ } else {
++ brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
++ val = WLC_N_BW_40ALL;
++ err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
++ }
++ return err;
++}
++
+ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+ struct device *busdev)
+ {
+@@ -4961,6 +5048,17 @@
+ goto cfg80211_p2p_attach_out;
+ }
+
++ /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
++ * setup 40MHz in 2GHz band and enable OBSS scanning.
++ */
++ if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
++ IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
++ err = brcmf_enable_bw40_2g(ifp);
++ if (!err)
++ err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
++ BRCMF_OBSS_COEX_AUTO);
++ }
++
+ err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+ if (err) {
+ brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+@@ -4999,7 +5097,7 @@
+ }
+
+ static s32
+-brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
++brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
+ {
+ s32 err = 0;
+ __le32 roamtrigger[2];
+@@ -5009,7 +5107,7 @@
+ * Setup timeout if Beacons are lost and roam is
+ * off to report link down
+ */
+- if (roamvar) {
++ if (brcmf_roamoff) {
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
+ if (err) {
+ brcmf_err("bcn_timeout error (%d)\n", err);
+@@ -5021,8 +5119,9 @@
+ * Enable/Disable built-in roaming to allow supplicant
+ * to take care of roaming
+ */
+- brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
+- err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
++ brcmf_dbg(INFO, "Internal Roaming = %s\n",
++ brcmf_roamoff ? "Off" : "On");
++ err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
+ if (err) {
+ brcmf_err("roam_off error (%d)\n", err);
+ goto dongle_rom_out;
+@@ -5148,6 +5247,9 @@
+ if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_40)
+ continue;
++ if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
++ ch.bw == BRCMU_CHAN_BW_80)
++ continue;
+ update = false;
+ for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+ if (band_chan_arr[j].hw_value == ch.chnum) {
+@@ -5164,13 +5266,13 @@
+ ieee80211_channel_to_frequency(ch.chnum, band);
+ band_chan_arr[index].hw_value = ch.chnum;
+
+- brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
+- ch.chnum, band_chan_arr[index].center_freq,
+- ch.bw, ch.sb);
+- if (ch.bw == BRCMU_CHAN_BW_40) {
+- /* assuming the order is HT20, HT40 Upper,
+- * HT40 lower from chanspecs
+- */
++ /* assuming the chanspecs order is HT20,
++ * HT40 upper, HT40 lower, and VHT80.
++ */
++ if (ch.bw == BRCMU_CHAN_BW_80) {
++ band_chan_arr[index].flags &=
++ ~IEEE80211_CHAN_NO_80MHZ;
++ } else if (ch.bw == BRCMU_CHAN_BW_40) {
+ ht40_flag = band_chan_arr[index].flags &
+ IEEE80211_CHAN_NO_HT40;
+ if (ch.sb == BRCMU_CHAN_SB_U) {
+@@ -5191,8 +5293,13 @@
+ IEEE80211_CHAN_NO_HT40MINUS;
+ }
+ } else {
++ /* disable other bandwidths for now as the mentioned
++ * order assures they are enabled for subsequent
++ * chanspecs.
++ */
+ band_chan_arr[index].flags =
+- IEEE80211_CHAN_NO_HT40;
++ IEEE80211_CHAN_NO_HT40 |
++ IEEE80211_CHAN_NO_80MHZ;
+ ch.bw = BRCMU_CHAN_BW_20;
+ cfg->d11inf.encchspec(&ch);
+ channel = ch.chspec;
+@@ -5259,14 +5366,66 @@
+ }
+ }
+
++static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
++ u32 bw_cap[2], u32 nchain)
++{
++ band->ht_cap.ht_supported = true;
++ if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
++ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
++ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
++ }
++ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
++ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
++ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
++ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
++ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
++ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
++}
++
++static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
++{
++ u16 mcs_map;
++ int i;
++
++ for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
++ mcs_map = (mcs_map << 2) | supp;
++
++ return cpu_to_le16(mcs_map);
++}
++
++static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
++ u32 bw_cap[2], u32 nchain)
++{
++ __le16 mcs_map;
++
++ /* not allowed in 2.4G band */
++ if (band->band == IEEE80211_BAND_2GHZ)
++ return;
++
++ band->vht_cap.vht_supported = true;
++ /* 80MHz is mandatory */
++ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
++ if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
++ band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
++ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
++ }
++ /* all support 256-QAM */
++ mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
++ band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
++ band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
++}
++
+ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
+ {
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct wiphy *wiphy;
+ s32 phy_list;
+ u32 band_list[3];
+- u32 nmode;
++ u32 nmode = 0;
++ u32 vhtmode = 0;
+ u32 bw_cap[2] = { 0, 0 };
++ u32 rxchain;
++ u32 nchain;
+ s8 phy;
+ s32 err;
+ u32 nband;
+@@ -5294,14 +5453,26 @@
+ brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
+ band_list[0], band_list[1], band_list[2]);
+
++ (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
+ err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
+ if (err) {
+ brcmf_err("nmode error (%d)\n", err);
+ } else {
+ brcmf_get_bwcap(ifp, bw_cap);
+ }
+- brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
+- bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
++ brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
++ nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
++ bw_cap[IEEE80211_BAND_5GHZ]);
++
++ err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
++ if (err) {
++ brcmf_err("rxchain error (%d)\n", err);
++ nchain = 1;
++ } else {
++ for (nchain = 0; rxchain; nchain++)
++ rxchain = rxchain & (rxchain - 1);
++ }
++ brcmf_dbg(INFO, "nchain=%d\n", nchain);
+
+ err = brcmf_construct_reginfo(cfg, bw_cap);
+ if (err) {
+@@ -5322,20 +5493,10 @@
+ else
+ continue;
+
+- if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+- band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+- }
+- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+- band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+- band->ht_cap.ht_supported = true;
+- band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+- band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+- /* An HT shall support all EQM rates for one spatial
+- * stream
+- */
+- band->ht_cap.mcs.rx_mask[0] = 0xff;
+- band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
++ if (nmode)
++ brcmf_update_ht_cap(band, bw_cap, nchain);
++ if (vhtmode)
++ brcmf_update_vht_cap(band, bw_cap, nchain);
+ bands[band->band] = band;
+ }
+
+@@ -5381,7 +5542,7 @@
+ brcmf_dbg(INFO, "power save set to %s\n",
+ (power_mode ? "enabled" : "disabled"));
+
+- err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
++ err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
+ if (err)
+ goto default_conf_out;
+ err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h 2015-05-01 14:58:04.055427001 -0500
+@@ -89,21 +89,6 @@
+ BRCMF_SCAN_STATUS_SUPPRESS,
+ };
+
+-/**
+- * enum wl_mode - driver mode of virtual interface.
+- *
+- * @WL_MODE_BSS: connects to BSS.
+- * @WL_MODE_IBSS: operate as ad-hoc.
+- * @WL_MODE_AP: operate as access-point.
+- * @WL_MODE_P2P: provide P2P discovery.
+- */
+-enum wl_mode {
+- WL_MODE_BSS,
+- WL_MODE_IBSS,
+- WL_MODE_AP,
+- WL_MODE_P2P
+-};
+-
+ /* dongle configuration */
+ struct brcmf_cfg80211_conf {
+ u32 frag_threshold;
+@@ -193,7 +178,6 @@
+ * @ifp: lower layer interface pointer
+ * @wdev: wireless device.
+ * @profile: profile information.
+- * @mode: operating mode.
+ * @roam_off: roaming state.
+ * @sme_state: SME state using enum brcmf_vif_status bits.
+ * @pm_block: power-management blocked.
+@@ -204,7 +188,6 @@
+ struct brcmf_if *ifp;
+ struct wireless_dev wdev;
+ struct brcmf_cfg80211_profile profile;
+- s32 mode;
+ s32 roam_off;
+ unsigned long sme_state;
+ bool pm_block;
+@@ -402,7 +385,6 @@
+ bool ibss_starter;
+ bool pwr_save;
+ bool dongle_up;
+- bool roam_on;
+ bool scan_tried;
+ u8 *dcmd_buf;
+ u8 *extra_buf;
+@@ -491,7 +473,8 @@
+ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+ const u8 *vndr_ie_buf, u32 vndr_ie_len);
+ s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
++const struct brcmf_tlv *
++brcmf_parse_tlvs(const void *buf, int buflen, uint key);
+ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
+ struct ieee80211_channel *ch);
+ u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c 2015-05-01 14:58:04.055427001 -0500
+@@ -897,7 +897,8 @@
+ return result;
+ }
+
+-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct brcms_info *wl = hw->priv;
+ int ret;
+@@ -1092,12 +1093,6 @@
+ * Attach to the WL device identified by vendor and device parameters.
+ * regs is a host accessible memory address pointing to WL device registers.
+ *
+- * brcms_attach is not defined as static because in the case where no bus
+- * is defined, wl_attach will never be called, and thus, gcc will issue
+- * a warning that this function is defined but not used if we declare
+- * it as static.
+- *
+- *
+ * is called in brcms_bcma_probe() context, therefore no locking required.
+ */
+ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmsmac/main.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmsmac/main.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmsmac/main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmsmac/main.c 2015-05-01 14:58:04.079427001 -0500
+@@ -4870,14 +4870,11 @@
+ /*
+ * low level detach
+ */
+-static int brcms_b_detach(struct brcms_c_info *wlc)
++static void brcms_b_detach(struct brcms_c_info *wlc)
+ {
+ uint i;
+ struct brcms_hw_band *band;
+ struct brcms_hardware *wlc_hw = wlc->hw;
+- int callbacks;
+-
+- callbacks = 0;
+
+ brcms_b_detach_dmapio(wlc_hw);
+
+@@ -4900,9 +4897,6 @@
+ ai_detach(wlc_hw->sih);
+ wlc_hw->sih = NULL;
+ }
+-
+- return callbacks;
+-
+ }
+
+ /*
+@@ -4917,14 +4911,15 @@
+ */
+ uint brcms_c_detach(struct brcms_c_info *wlc)
+ {
+- uint callbacks = 0;
++ uint callbacks;
+
+ if (wlc == NULL)
+ return 0;
+
+- callbacks += brcms_b_detach(wlc);
++ brcms_b_detach(wlc);
+
+ /* delete software timers */
++ callbacks = 0;
+ if (!brcms_c_radio_monitor_stop(wlc))
+ callbacks++;
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmutil/d11.c linux-3.14.40/drivers/net/wireless/brcm80211/brcmutil/d11.c
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/brcmutil/d11.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/brcmutil/d11.c 2015-05-01 14:58:04.083427001 -0500
+@@ -21,19 +21,46 @@
+ #include <brcmu_wifi.h>
+ #include <brcmu_d11.h>
+
+-static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
++static u16 d11n_sb(enum brcmu_chan_sb sb)
+ {
+- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
++ switch (sb) {
++ case BRCMU_CHAN_SB_NONE:
++ return BRCMU_CHSPEC_D11N_SB_N;
++ case BRCMU_CHAN_SB_L:
++ return BRCMU_CHSPEC_D11N_SB_L;
++ case BRCMU_CHAN_SB_U:
++ return BRCMU_CHSPEC_D11N_SB_U;
++ default:
++ WARN_ON(1);
++ }
++ return 0;
++}
+
+- switch (ch->bw) {
++static u16 d11n_bw(enum brcmu_chan_bw bw)
++{
++ switch (bw) {
+ case BRCMU_CHAN_BW_20:
+- ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N;
+- break;
++ return BRCMU_CHSPEC_D11N_BW_20;
+ case BRCMU_CHAN_BW_40:
++ return BRCMU_CHSPEC_D11N_BW_40;
+ default:
+- WARN_ON_ONCE(1);
+- break;
++ WARN_ON(1);
+ }
++ return 0;
++}
++
++static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
++{
++ if (ch->bw == BRCMU_CHAN_BW_20)
++ ch->sb = BRCMU_CHAN_SB_NONE;
++
++ ch->chspec = 0;
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
++ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
++ 0, d11n_sb(ch->sb));
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
++ 0, d11n_bw(ch->bw));
+
+ if (ch->chnum <= CH_MAX_2G_CHANNEL)
+ ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
+@@ -41,23 +68,34 @@
+ ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
+ }
+
+-static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
++static u16 d11ac_bw(enum brcmu_chan_bw bw)
+ {
+- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
+-
+- switch (ch->bw) {
++ switch (bw) {
+ case BRCMU_CHAN_BW_20:
+- ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20;
+- break;
++ return BRCMU_CHSPEC_D11AC_BW_20;
+ case BRCMU_CHAN_BW_40:
++ return BRCMU_CHSPEC_D11AC_BW_40;
+ case BRCMU_CHAN_BW_80:
+- case BRCMU_CHAN_BW_80P80:
+- case BRCMU_CHAN_BW_160:
++ return BRCMU_CHSPEC_D11AC_BW_80;
+ default:
+- WARN_ON_ONCE(1);
+- break;
++ WARN_ON(1);
+ }
++ return 0;
++}
+
++static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
++{
++ if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
++ ch->sb = BRCMU_CHAN_SB_L;
++
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
++ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
++ BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
++ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
++ 0, d11ac_bw(ch->bw));
++
++ ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
+ if (ch->chnum <= CH_MAX_2G_CHANNEL)
+ ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
+ else
+@@ -73,6 +111,7 @@
+ switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
+ case BRCMU_CHSPEC_D11N_BW_20:
+ ch->bw = BRCMU_CHAN_BW_20;
++ ch->sb = BRCMU_CHAN_SB_NONE;
+ break;
+ case BRCMU_CHSPEC_D11N_BW_40:
+ ch->bw = BRCMU_CHAN_BW_40;
+@@ -112,6 +151,7 @@
+ switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
+ case BRCMU_CHSPEC_D11AC_BW_20:
+ ch->bw = BRCMU_CHAN_BW_20;
++ ch->sb = BRCMU_CHAN_SB_NONE;
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_40:
+ ch->bw = BRCMU_CHAN_BW_40;
+@@ -128,6 +168,25 @@
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_80:
+ ch->bw = BRCMU_CHAN_BW_80;
++ ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
++ BRCMU_CHSPEC_D11AC_SB_SHIFT);
++ switch (ch->sb) {
++ case BRCMU_CHAN_SB_LL:
++ ch->chnum -= CH_30MHZ_APART;
++ break;
++ case BRCMU_CHAN_SB_LU:
++ ch->chnum -= CH_10MHZ_APART;
++ break;
++ case BRCMU_CHAN_SB_UL:
++ ch->chnum += CH_10MHZ_APART;
++ break;
++ case BRCMU_CHAN_SB_UU:
++ ch->chnum += CH_30MHZ_APART;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ break;
++ }
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_8080:
+ case BRCMU_CHSPEC_D11AC_BW_160:
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h linux-3.14.40/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h 2015-05-01 14:58:04.083427001 -0500
+@@ -43,5 +43,6 @@
+ #define BCM4335_CHIP_ID 0x4335
+ #define BCM43362_CHIP_ID 43362
+ #define BCM4339_CHIP_ID 0x4339
++#define BCM4354_CHIP_ID 0x4354
+
+ #endif /* _BRCM_HW_IDS_H_ */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/include/brcmu_d11.h linux-3.14.40/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/include/brcmu_d11.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/include/brcmu_d11.h 2015-05-01 14:58:04.087427001 -0500
+@@ -108,13 +108,7 @@
+ };
+
+ enum brcmu_chan_sb {
+- BRCMU_CHAN_SB_NONE = 0,
+- BRCMU_CHAN_SB_L,
+- BRCMU_CHAN_SB_U,
+- BRCMU_CHAN_SB_LL,
+- BRCMU_CHAN_SB_LU,
+- BRCMU_CHAN_SB_UL,
+- BRCMU_CHAN_SB_UU,
++ BRCMU_CHAN_SB_NONE = -1,
+ BRCMU_CHAN_SB_LLL,
+ BRCMU_CHAN_SB_LLU,
+ BRCMU_CHAN_SB_LUL,
+@@ -123,6 +117,12 @@
+ BRCMU_CHAN_SB_ULU,
+ BRCMU_CHAN_SB_UUL,
+ BRCMU_CHAN_SB_UUU,
++ BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
++ BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
++ BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
++ BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
++ BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
++ BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
+ };
+
+ struct brcmu_chan {
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/brcm80211/include/brcmu_wifi.h linux-3.14.40/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+--- linux-3.14.40.orig/drivers/net/wireless/brcm80211/include/brcmu_wifi.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/brcm80211/include/brcmu_wifi.h 2015-05-01 14:58:04.087427001 -0500
+@@ -29,6 +29,7 @@
+ #define CH_UPPER_SB 0x01
+ #define CH_LOWER_SB 0x02
+ #define CH_EWA_VALID 0x04
++#define CH_30MHZ_APART 6
+ #define CH_20MHZ_APART 4
+ #define CH_10MHZ_APART 2
+ #define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
+@@ -217,6 +218,9 @@
+ #define WSEC_SWFLAG 0x0008
+ /* to go into transition mode without setting wep */
+ #define SES_OW_ENABLED 0x0040
++/* MFP */
++#define MFP_CAPABLE 0x0200
++#define MFP_REQUIRED 0x0400
+
+ /* WPA authentication mode bitvec */
+ #define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/cw1200/sta.c linux-3.14.40/drivers/net/wireless/cw1200/sta.c
+--- linux-3.14.40.orig/drivers/net/wireless/cw1200/sta.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/cw1200/sta.c 2015-05-01 14:58:04.091427001 -0500
+@@ -936,7 +936,8 @@
+ return ret;
+ }
+
+-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct cw1200_common *priv = hw->priv;
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/cw1200/sta.h linux-3.14.40/drivers/net/wireless/cw1200/sta.h
+--- linux-3.14.40.orig/drivers/net/wireless/cw1200/sta.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/cw1200/sta.h 2015-05-01 14:58:04.099427001 -0500
+@@ -40,7 +40,8 @@
+
+ int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+
+-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
++void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+
+ u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/iwlegacy/common.c linux-3.14.40/drivers/net/wireless/iwlegacy/common.c
+--- linux-3.14.40.orig/drivers/net/wireless/iwlegacy/common.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/iwlegacy/common.c 2015-05-01 14:58:04.131427001 -0500
+@@ -4701,7 +4701,8 @@
+ }
+ EXPORT_SYMBOL(il_mac_change_interface);
+
+-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct il_priv *il = hw->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/iwlegacy/common.h linux-3.14.40/drivers/net/wireless/iwlegacy/common.h
+--- linux-3.14.40.orig/drivers/net/wireless/iwlegacy/common.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/iwlegacy/common.h 2015-05-01 14:58:04.135427001 -0500
+@@ -1722,7 +1722,8 @@
+ struct ieee80211_vif *vif);
+ int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
++void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+ int il_alloc_txq_mem(struct il_priv *il);
+ void il_free_txq_mem(struct il_priv *il);
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/iwlwifi/dvm/mac80211.c linux-3.14.40/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+--- linux-3.14.40.orig/drivers/net/wireless/iwlwifi/dvm/mac80211.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/iwlwifi/dvm/mac80211.c 2015-05-01 14:58:04.139427001 -0500
+@@ -1091,7 +1091,8 @@
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+ }
+
+-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/libertas/cfg.c linux-3.14.40/drivers/net/wireless/libertas/cfg.c
+--- linux-3.14.40.orig/drivers/net/wireless/libertas/cfg.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/libertas/cfg.c 2015-05-01 14:58:04.163427001 -0500
+@@ -1766,7 +1766,8 @@
+ memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
+ priv->wdev->ssid_len = params->ssid_len;
+
+- cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
++ GFP_KERNEL);
+
+ /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
+ priv->connect_status = LBS_CONNECTED;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/mac80211_hwsim.c linux-3.14.40/drivers/net/wireless/mac80211_hwsim.c
+--- linux-3.14.40.orig/drivers/net/wireless/mac80211_hwsim.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/mac80211_hwsim.c 2015-05-01 14:58:04.163427001 -0500
+@@ -1671,7 +1671,9 @@
+ return 0;
+ }
+
+-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ /* Not implemented, queues only on kernel side */
+ }
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/mwifiex/cfg80211.c linux-3.14.40/drivers/net/wireless/mwifiex/cfg80211.c
+--- linux-3.14.40.orig/drivers/net/wireless/mwifiex/cfg80211.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/mwifiex/cfg80211.c 2015-05-01 14:58:04.167427001 -0500
+@@ -1881,7 +1881,8 @@
+ params->privacy);
+ done:
+ if (!ret) {
+- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
++ params->chandef.chan, GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: joined/created adhoc network with bssid"
+ " %pM successfully\n", priv->cfg_bssid);
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/mwifiex/main.h linux-3.14.40/drivers/net/wireless/mwifiex/main.h
+--- linux-3.14.40.orig/drivers/net/wireless/mwifiex/main.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/mwifiex/main.h 2015-05-01 14:58:04.183427001 -0500
+@@ -1078,7 +1078,7 @@
+ const u8 *key, int key_len, u8 key_index,
+ const u8 *mac_addr, int disable);
+
+-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
++int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
+
+ int mwifiex_get_ver_ext(struct mwifiex_private *priv);
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/mwifiex/sta_ioctl.c linux-3.14.40/drivers/net/wireless/mwifiex/sta_ioctl.c
+--- linux-3.14.40.orig/drivers/net/wireless/mwifiex/sta_ioctl.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/mwifiex/sta_ioctl.c 2015-05-01 14:58:04.183427001 -0500
+@@ -1391,7 +1391,7 @@
+ * with requisite parameters and calls the IOCTL handler.
+ */
+ int
+-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
++mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
+ {
+ struct mwifiex_ds_misc_gen_ie gen_ie;
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/p54/main.c linux-3.14.40/drivers/net/wireless/p54/main.c
+--- linux-3.14.40.orig/drivers/net/wireless/p54/main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/p54/main.c 2015-05-01 14:58:04.191427001 -0500
+@@ -669,7 +669,8 @@
+ return total;
+ }
+
+-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
++static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct p54_common *priv = dev->priv;
+ unsigned int total, i;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/rndis_wlan.c linux-3.14.40/drivers/net/wireless/rndis_wlan.c
+--- linux-3.14.40.orig/drivers/net/wireless/rndis_wlan.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/rndis_wlan.c 2015-05-01 14:58:04.199427001 -0500
+@@ -2835,7 +2835,9 @@
+ bssid, req_ie, req_ie_len,
+ resp_ie, resp_ie_len, GFP_KERNEL);
+ } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
+- cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(usbdev->net, bssid,
++ get_current_channel(usbdev, NULL),
++ GFP_KERNEL);
+
+ kfree(info);
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/rt2x00/rt2x00.h linux-3.14.40/drivers/net/wireless/rt2x00/rt2x00.h
+--- linux-3.14.40.orig/drivers/net/wireless/rt2x00/rt2x00.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/rt2x00/rt2x00.h 2015-05-01 14:58:04.207427001 -0500
+@@ -1449,7 +1449,8 @@
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
+ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
+-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
++void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+ int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
+ int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+ void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/rt2x00/rt2x00mac.c linux-3.14.40/drivers/net/wireless/rt2x00/rt2x00mac.c
+--- linux-3.14.40.orig/drivers/net/wireless/rt2x00/rt2x00mac.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/rt2x00/rt2x00mac.c 2015-05-01 14:58:04.215427001 -0500
+@@ -751,7 +751,8 @@
+ }
+ EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
+
+-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/rtl818x/rtl8187/dev.c linux-3.14.40/drivers/net/wireless/rtl818x/rtl8187/dev.c
+--- linux-3.14.40.orig/drivers/net/wireless/rtl818x/rtl8187/dev.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/rtl818x/rtl8187/dev.c 2015-05-01 14:58:04.227427001 -0500
+@@ -1636,10 +1636,10 @@
+
+ err_free_dmabuf:
+ kfree(priv->io_dmabuf);
+- err_free_dev:
+- ieee80211_free_hw(dev);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
++ err_free_dev:
++ ieee80211_free_hw(dev);
+ return err;
+ }
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/rtlwifi/core.c linux-3.14.40/drivers/net/wireless/rtlwifi/core.c
+--- linux-3.14.40.orig/drivers/net/wireless/rtlwifi/core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/rtlwifi/core.c 2015-05-01 14:58:04.235427001 -0500
+@@ -1309,7 +1309,8 @@
+ * before switch channel or power save, or tx buffer packet
+ * maybe send after offchannel or rf sleep, this may cause
+ * dis-association by AP */
+-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+diff -Nur linux-3.14.40.orig/drivers/net/wireless/ti/wlcore/main.c linux-3.14.40/drivers/net/wireless/ti/wlcore/main.c
+--- linux-3.14.40.orig/drivers/net/wireless/ti/wlcore/main.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/net/wireless/ti/wlcore/main.c 2015-05-01 14:58:04.255427001 -0500
+@@ -5156,7 +5156,8 @@
+ mutex_unlock(&wl->mutex);
+ }
+
+-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct wl1271 *wl = hw->priv;
+
+diff -Nur linux-3.14.40.orig/drivers/pci/host/Kconfig linux-3.14.40/drivers/pci/host/Kconfig
+--- linux-3.14.40.orig/drivers/pci/host/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pci/host/Kconfig 2015-05-01 14:58:04.271427001 -0500
+@@ -21,6 +21,23 @@
+ select PCIEPORTBUS
+ select PCIE_DW
+
++config EP_MODE_IN_EP_RC_SYS
++ bool "PCI Express EP mode in the IMX6 RC/EP interconnection system"
++ depends on PCI_IMX6
++
++config EP_SELF_IO_TEST
++ bool "PCI Express EP_SELF_IO_TEST in EP mode"
++ depends on EP_MODE_IN_EP_RC_SYS
++
++config RC_MODE_IN_EP_RC_SYS
++ bool "PCI Express RC mode in the IMX6 RC/EP interconnection system"
++ depends on PCI_IMX6
++
++config PCI_IMX_EP_DRV
++ bool "i.MX6 PCI Express EP skeleton driver"
++ depends on RC_MODE_IN_EP_RC_SYS
++ default y
++
+ config PCI_TEGRA
+ bool "NVIDIA Tegra PCIe controller"
+ depends on ARCH_TEGRA
+diff -Nur linux-3.14.40.orig/drivers/pci/host/Makefile linux-3.14.40/drivers/pci/host/Makefile
+--- linux-3.14.40.orig/drivers/pci/host/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pci/host/Makefile 2015-05-01 14:58:04.283427001 -0500
+@@ -1,6 +1,7 @@
+ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
++obj-$(CONFIG_PCI_IMX_EP_DRV) += pci-imx6-ep-driver.o
+ obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+diff -Nur linux-3.14.40.orig/drivers/pci/host/pcie-designware.c linux-3.14.40/drivers/pci/host/pcie-designware.c
+--- linux-3.14.40.orig/drivers/pci/host/pcie-designware.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pci/host/pcie-designware.c 2015-05-01 14:58:04.283427001 -0500
+@@ -23,48 +23,6 @@
+
+ #include "pcie-designware.h"
+
+-/* Synopsis specific PCIE configuration registers */
+-#define PCIE_PORT_LINK_CONTROL 0x710
+-#define PORT_LINK_MODE_MASK (0x3f << 16)
+-#define PORT_LINK_MODE_1_LANES (0x1 << 16)
+-#define PORT_LINK_MODE_2_LANES (0x3 << 16)
+-#define PORT_LINK_MODE_4_LANES (0x7 << 16)
+-
+-#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+-#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+-#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
+-#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
+-#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
+-#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
+-
+-#define PCIE_MSI_ADDR_LO 0x820
+-#define PCIE_MSI_ADDR_HI 0x824
+-#define PCIE_MSI_INTR0_ENABLE 0x828
+-#define PCIE_MSI_INTR0_MASK 0x82C
+-#define PCIE_MSI_INTR0_STATUS 0x830
+-
+-#define PCIE_ATU_VIEWPORT 0x900
+-#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
+-#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+-#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
+-#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+-#define PCIE_ATU_CR1 0x904
+-#define PCIE_ATU_TYPE_MEM (0x0 << 0)
+-#define PCIE_ATU_TYPE_IO (0x2 << 0)
+-#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
+-#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+-#define PCIE_ATU_CR2 0x908
+-#define PCIE_ATU_ENABLE (0x1 << 31)
+-#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
+-#define PCIE_ATU_LOWER_BASE 0x90C
+-#define PCIE_ATU_UPPER_BASE 0x910
+-#define PCIE_ATU_LIMIT 0x914
+-#define PCIE_ATU_LOWER_TARGET 0x918
+-#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
+-#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
+-#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+-#define PCIE_ATU_UPPER_TARGET 0x91C
+-
+ static struct hw_pci dw_pci;
+
+ static unsigned long global_io_offset;
+@@ -332,23 +290,28 @@
+ return -EINVAL;
+ }
+
+- pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
+- &msg_ctr);
+- msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
+- if (msgvec == 0)
+- msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+- if (msgvec > 5)
+- msgvec = 0;
+-
+- irq = assign_irq((1 << msgvec), desc, &pos);
+- if (irq < 0)
+- return irq;
+-
+- /*
+- * write_msi_msg() will update PCI_MSI_FLAGS so there is
+- * no need to explicitly call pci_write_config_word().
+- */
+- desc->msi_attrib.multiple = msgvec;
++ if (pp->quirks & DW_PCIE_QUIRK_NO_MSI_VEC) {
++ irq = assign_irq(1, desc, &pos);
++ set_irq_flags(irq, IRQF_VALID);
++ } else {
++ pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
++ &msg_ctr);
++ msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
++ if (msgvec == 0)
++ msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
++ if (msgvec > 5)
++ msgvec = 0;
++
++ irq = assign_irq((1 << msgvec), desc, &pos);
++ if (irq < 0)
++ return irq;
++
++ msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
++ msg_ctr |= msgvec << 4;
++ pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
++ msg_ctr);
++ desc->msi_attrib.multiple = msgvec;
++ }
+
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+@@ -363,9 +326,30 @@
+ clear_irq(irq);
+ }
+
++static int dw_msi_check_device(struct msi_chip *chip, struct pci_dev *pdev,
++ int nvec, int type)
++{
++ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
++ u32 val;
++
++ if (pp->quirks & DW_PCIE_QUIRK_MSI_SELF_EN) {
++ if ((type == PCI_CAP_ID_MSI) || (type == PCI_CAP_ID_MSIX)) {
++ /* Set MSI enable of RC here */
++ val = readl(pp->dbi_base + 0x50);
++ if ((val & (PCI_MSI_FLAGS_ENABLE << 16)) == 0) {
++ val |= PCI_MSI_FLAGS_ENABLE << 16;
++ writel(val, pp->dbi_base + 0x50);
++ }
++ }
++ }
++
++ return 0;
++}
++
+ static struct msi_chip dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
++ .check_device = dw_msi_check_device,
+ };
+
+ int dw_pcie_link_up(struct pcie_port *pp)
+@@ -531,38 +515,6 @@
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ }
+
+-static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+-{
+- /* Program viewport 0 : OUTBOUND : MEM */
+- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+- PCIE_ATU_VIEWPORT);
+- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
+- dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
+- dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+- PCIE_ATU_LIMIT);
+- dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+- dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+- PCIE_ATU_UPPER_TARGET);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+-}
+-
+-static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+-{
+- /* Program viewport 1 : OUTBOUND : IO */
+- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+- PCIE_ATU_VIEWPORT);
+- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+- dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
+- dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
+- dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+- PCIE_ATU_LIMIT);
+- dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
+- dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+- PCIE_ATU_UPPER_TARGET);
+- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+-}
+-
+ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+ {
+@@ -577,12 +529,10 @@
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+@@ -602,12 +552,10 @@
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
+ val);
+- dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+@@ -739,7 +687,13 @@
+ {
+ struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
+
+- return pp->irq;
++ switch (pin) {
++ case 1: return pp->irq;
++ case 2: return pp->irq - 1;
++ case 3: return pp->irq - 2;
++ case 4: return pp->irq - 3;
++ default: return -1;
++ }
+ }
+
+ static void dw_pcie_add_bus(struct pci_bus *bus)
+diff -Nur linux-3.14.40.orig/drivers/pci/host/pcie-designware.h linux-3.14.40/drivers/pci/host/pcie-designware.h
+--- linux-3.14.40.orig/drivers/pci/host/pcie-designware.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pci/host/pcie-designware.h 2015-05-01 14:58:04.283427001 -0500
+@@ -14,6 +14,48 @@
+ #ifndef _PCIE_DESIGNWARE_H
+ #define _PCIE_DESIGNWARE_H
+
++/* Synopsis specific PCIE configuration registers */
++#define PCIE_PORT_LINK_CONTROL 0x710
++#define PORT_LINK_MODE_MASK (0x3f << 16)
++#define PORT_LINK_MODE_1_LANES (0x1 << 16)
++#define PORT_LINK_MODE_2_LANES (0x3 << 16)
++#define PORT_LINK_MODE_4_LANES (0x7 << 16)
++
++#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
++#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
++#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
++#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
++#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
++#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
++
++#define PCIE_MSI_ADDR_LO 0x820
++#define PCIE_MSI_ADDR_HI 0x824
++#define PCIE_MSI_INTR0_ENABLE 0x828
++#define PCIE_MSI_INTR0_MASK 0x82C
++#define PCIE_MSI_INTR0_STATUS 0x830
++
++#define PCIE_ATU_VIEWPORT 0x900
++#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
++#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
++#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
++#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
++#define PCIE_ATU_CR1 0x904
++#define PCIE_ATU_TYPE_MEM (0x0 << 0)
++#define PCIE_ATU_TYPE_IO (0x2 << 0)
++#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
++#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
++#define PCIE_ATU_CR2 0x908
++#define PCIE_ATU_ENABLE (0x1 << 31)
++#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
++#define PCIE_ATU_LOWER_BASE 0x90C
++#define PCIE_ATU_UPPER_BASE 0x910
++#define PCIE_ATU_LIMIT 0x914
++#define PCIE_ATU_LOWER_TARGET 0x918
++#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
++#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
++#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
++#define PCIE_ATU_UPPER_TARGET 0x91C
++
+ struct pcie_port_info {
+ u32 cfg0_size;
+ u32 cfg1_size;
+@@ -49,6 +91,11 @@
+ int irq;
+ u32 lanes;
+ struct pcie_host_ops *ops;
++ u32 quirks; /* Deviations from spec. */
++/* Controller doesn't support MSI VEC */
++#define DW_PCIE_QUIRK_NO_MSI_VEC (1<<0)
++/* MSI EN of Controller should be configured when MSI is enabled */
++#define DW_PCIE_QUIRK_MSI_SELF_EN (1<<1)
+ int msi_irq;
+ struct irq_domain *irq_domain;
+ unsigned long msi_data;
+diff -Nur linux-3.14.40.orig/drivers/pci/host/pci-imx6.c linux-3.14.40/drivers/pci/host/pci-imx6.c
+--- linux-3.14.40.orig/drivers/pci/host/pci-imx6.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pci/host/pci-imx6.c 2015-05-01 14:58:04.283427001 -0500
+@@ -1,6 +1,7 @@
+ /*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2013 Kosagi
+ * http://www.kosagi.com
+ *
+@@ -14,6 +15,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
+ #include <linux/kernel.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+@@ -25,11 +27,22 @@
+ #include <linux/resource.h>
+ #include <linux/signal.h>
+ #include <linux/types.h>
++#include <linux/busfreq-imx6.h>
+
++#include "../pci.h"
+ #include "pcie-designware.h"
+
+ #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+
++/*
++ * The default value of the reserved ddr memory
++ * used to verify EP/RC memory space access operations.
++ * Note: the 1G ddr on SD boards spans
++ * 0x1000_0000 ~ 0x4FFF_FFFF
++ */
++static u32 ddr_test_region = 0x40000000;
++static u32 test_region_size = SZ_2M;
++
+ struct imx6_pcie {
+ int reset_gpio;
+ int power_on_gpio;
+@@ -52,6 +65,9 @@
+
+ /* PCIe Port Logic registers (memory-mapped) */
+ #define PL_OFFSET 0x700
++#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
++#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
++#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
+ #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+ #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+ #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
+@@ -216,14 +232,14 @@
+
+ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+ {
+- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
++ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+- return 0;
++ return 0;
+ }
+
+ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+@@ -234,10 +250,7 @@
+ if (gpio_is_valid(imx6_pcie->power_on_gpio))
+ gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
++ request_bus_freq(BUS_FREQ_HIGH);
+
+ ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+ if (ret) {
+@@ -251,10 +264,13 @@
+ goto err_pcie_ref;
+ }
+
+- ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+- if (ret) {
+- dev_err(pp->dev, "unable to enable lvds_gate\n");
+- goto err_lvds_gate;
++ if (!IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)
++ && !IS_ENABLED(CONFIG_RC_MODE_IN_EP_RC_SYS)) {
++ ret = clk_prepare_enable(imx6_pcie->lvds_gate);
++ if (ret) {
++ dev_err(pp->dev, "unable to enable lvds_gate\n");
++ goto err_lvds_gate;
++ }
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+@@ -266,6 +282,12 @@
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+
++ /* power up core phy and enable ref clock */
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
++ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
++
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+@@ -281,6 +303,7 @@
+ err_pcie_ref:
+ clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+ err_sata_ref:
++ release_bus_freq(BUS_FREQ_HIGH);
+ return ret;
+
+ }
+@@ -288,13 +311,44 @@
+ static void imx6_pcie_init_phy(struct pcie_port *pp)
+ {
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
++ u32 val, gpr1, gpr12;
++
++ /*
++ * If the bootloader already enabled the link we need some special
++ * handling to get the core back into a state where it is safe to
++ * touch it for configuration. As there is no dedicated reset signal
++ * wired up for MX6QDL, we need to manually force LTSSM into "detect"
++ * state before completely disabling LTSSM, which is a prerequisite
++ * for core configuration.
++ * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
++ * indication that the bootloader activated the link.
++ */
++ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
++ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
++
++ if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
++ (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
++ val = readl(pp->dbi_base + PCIE_PL_PFLR);
++ val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
++ val |= PCIE_PL_PFLR_FORCE_LINK;
++ writel(val, pp->dbi_base + PCIE_PL_PFLR);
++
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
++ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+- IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS))
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_DEVICE_TYPE,
++ PCI_EXP_TYPE_ENDPOINT << 12);
++ else
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_DEVICE_TYPE,
++ PCI_EXP_TYPE_ROOT_PORT << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+@@ -326,6 +380,12 @@
+ return -EINVAL;
+ }
+
++ if (IS_ENABLED(CONFIG_PCI_MSI)) {
++ pp->quirks |= DW_PCIE_QUIRK_NO_MSI_VEC;
++ pp->quirks |= DW_PCIE_QUIRK_MSI_SELF_EN;
++ dw_pcie_msi_init(pp);
++ }
++
+ return 0;
+ }
+
+@@ -392,6 +452,15 @@
+ return ret;
+ }
+
++static irqreturn_t imx_pcie_msi_irq_handler(int irq, void *arg)
++{
++ struct pcie_port *pp = arg;
++
++ dw_handle_msi_irq(pp);
++
++ return IRQ_HANDLED;
++}
++
+ static void imx6_pcie_host_init(struct pcie_port *pp)
+ {
+ imx6_pcie_assert_core_reset(pp);
+@@ -498,6 +567,22 @@
+ return -ENODEV;
+ }
+
++ if (IS_ENABLED(CONFIG_PCI_MSI)) {
++ pp->msi_irq = pp->irq - 3;
++ if (!pp->msi_irq) {
++ dev_err(&pdev->dev, "failed to get msi irq\n");
++ return -ENODEV;
++ }
++
++ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
++ imx_pcie_msi_irq_handler,
++ IRQF_SHARED, "imx6q-pcie", pp);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request msi irq\n");
++ return ret;
++ }
++ }
++
+ pp->root_bus_nr = -1;
+ pp->ops = &imx6_pcie_host_ops;
+
+@@ -511,29 +596,188 @@
+ return 0;
+ }
+
++static ssize_t imx_pcie_bar0_addr_info(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
++ struct pcie_port *pp = &imx6_pcie->pp;
++
++ return sprintf(buf, "imx-pcie-bar0-addr-info start 0x%08x\n",
++ readl(pp->dbi_base + PCI_BASE_ADDRESS_0));
++}
++
++static ssize_t imx_pcie_bar0_addr_start(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ u32 bar_start;
++ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
++ struct pcie_port *pp = &imx6_pcie->pp;
++
++ sscanf(buf, "%x\n", &bar_start);
++ writel(bar_start, pp->dbi_base + PCI_BASE_ADDRESS_0);
++
++ return count;
++}
++
++static void imx_pcie_regions_setup(struct device *dev)
++{
++ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
++ struct pcie_port *pp = &imx6_pcie->pp;
++
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
++ /*
++ * region2 outbound used to access rc mem
++ * in imx6 pcie ep/rc validation system
++ */
++ writel(0, pp->dbi_base + PCIE_ATU_VIEWPORT);
++ writel(0x01000000, pp->dbi_base + PCIE_ATU_LOWER_BASE);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_BASE);
++ writel(0x01000000 + test_region_size,
++ pp->dbi_base + PCIE_ATU_LIMIT);
++
++ writel(ddr_test_region,
++ pp->dbi_base + PCIE_ATU_LOWER_TARGET);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_TARGET);
++ writel(PCIE_ATU_TYPE_MEM, pp->dbi_base + PCIE_ATU_CR1);
++ writel(PCIE_ATU_ENABLE, pp->dbi_base + PCIE_ATU_CR2);
++ }
++
++ if (IS_ENABLED(CONFIG_RC_MODE_IN_EP_RC_SYS)) {
++ /*
++ * region2 outbound used to access ep mem
++ * in imx6 pcie ep/rc validation system
++ */
++ writel(2, pp->dbi_base + PCIE_ATU_VIEWPORT);
++ writel(0x01000000, pp->dbi_base + PCIE_ATU_LOWER_BASE);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_BASE);
++ writel(0x01000000 + test_region_size,
++ pp->dbi_base + PCIE_ATU_LIMIT);
++
++ writel(ddr_test_region,
++ pp->dbi_base + PCIE_ATU_LOWER_TARGET);
++ writel(0, pp->dbi_base + PCIE_ATU_UPPER_TARGET);
++ writel(PCIE_ATU_TYPE_MEM, pp->dbi_base + PCIE_ATU_CR1);
++ writel(PCIE_ATU_ENABLE, pp->dbi_base + PCIE_ATU_CR2);
++ }
++}
++
++static ssize_t imx_pcie_memw_info(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ return sprintf(buf, "imx-pcie-rc-memw-info start 0x%08x, size 0x%08x\n",
++ ddr_test_region, test_region_size);
++}
++
++static ssize_t
++imx_pcie_memw_start(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ u32 memw_start;
++
++ sscanf(buf, "%x\n", &memw_start);
++
++ if (memw_start < 0x10000000) {
++ dev_err(dev, "Invalid memory start address.\n");
++ dev_info(dev, "For example: echo 0x41000000 > /sys/...");
++ return -1;
++ }
++
++ if (ddr_test_region != memw_start) {
++ ddr_test_region = memw_start;
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(dev);
++ }
++
++ return count;
++}
++
++static ssize_t
++imx_pcie_memw_size(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ u32 memw_size;
++
++ sscanf(buf, "%x\n", &memw_size);
++
++ if ((memw_size > (SZ_16M - SZ_1M)) || (memw_size < SZ_64K)) {
++		dev_err(dev, "Invalid, should be [SZ_64K, SZ_16M - SZ_1M].\n");
++ dev_info(dev, "For example: echo 0x800000 > /sys/...");
++ return -1;
++ }
++
++ if (test_region_size != memw_size) {
++ test_region_size = memw_size;
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(dev);
++ }
++
++ return count;
++}
++
++static DEVICE_ATTR(memw_info, S_IRUGO, imx_pcie_memw_info, NULL);
++static DEVICE_ATTR(memw_start_set, S_IWUGO, NULL, imx_pcie_memw_start);
++static DEVICE_ATTR(memw_size_set, S_IWUGO, NULL, imx_pcie_memw_size);
++static DEVICE_ATTR(ep_bar0_addr, S_IRWXUGO, imx_pcie_bar0_addr_info,
++ imx_pcie_bar0_addr_start);
++
++static struct attribute *imx_pcie_attrs[] = {
++ /*
++	 * The start address and size limit (64KB ~ (16MB - 1MB))
++	 * of the ddr mem window reserved by the RC for the EP to access.
++	 * Note: these attrs are only configured on the EP side.
++ */
++ &dev_attr_memw_info.attr,
++ &dev_attr_memw_start_set.attr,
++ &dev_attr_memw_size_set.attr,
++ &dev_attr_ep_bar0_addr.attr,
++ NULL
++};
++
++static struct attribute_group imx_pcie_attrgroup = {
++ .attrs = imx_pcie_attrs,
++};
++
+ static int __init imx6_pcie_probe(struct platform_device *pdev)
+ {
+ struct imx6_pcie *imx6_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+- int ret;
++ int ret = 0;
++ int i;
++ void *test_reg1, *test_reg2;
++ void __iomem *pcie_arb_base_addr;
++ struct timeval tv1, tv2, tv3;
++ u32 tv_count1, tv_count2;
+
+ imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+- if (!imx6_pcie)
+- return -ENOMEM;
++ if (!imx6_pcie) {
++ ret = -ENOMEM;
++ goto err;
++ }
+
+ pp = &imx6_pcie->pp;
+ pp->dev = &pdev->dev;
+
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
++ /* add attributes for device */
++ ret = sysfs_create_group(&pdev->dev.kobj, &imx_pcie_attrgroup);
++ if (ret) {
++ ret = -EINVAL;
++ goto err;
++ }
++ }
++
+ /* Added for PCI abort handling */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+- if (IS_ERR(pp->dbi_base))
+- return PTR_ERR(pp->dbi_base);
++ if (IS_ERR(pp->dbi_base)) {
++ ret = PTR_ERR(pp->dbi_base);
++ goto err;
++ }
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+@@ -542,7 +786,7 @@
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -554,7 +798,7 @@
+ "PCIe power enable");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get power-on gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -566,7 +810,7 @@
+ "PCIe wake up");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -578,7 +822,7 @@
+ "PCIe disable endpoint");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+- return ret;
++ goto err;
+ }
+ }
+
+@@ -587,28 +831,32 @@
+ if (IS_ERR(imx6_pcie->lvds_gate)) {
+ dev_err(&pdev->dev,
+ "lvds_gate clock select missing or invalid\n");
+- return PTR_ERR(imx6_pcie->lvds_gate);
++ ret = PTR_ERR(imx6_pcie->lvds_gate);
++ goto err;
+ }
+
+ imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+ if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+ dev_err(&pdev->dev,
+ "sata_ref_100m clock source missing or invalid\n");
+- return PTR_ERR(imx6_pcie->sata_ref_100m);
++ ret = PTR_ERR(imx6_pcie->sata_ref_100m);
++ goto err;
+ }
+
+ imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+ if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+ dev_err(&pdev->dev,
+ "pcie_ref_125m clock source missing or invalid\n");
+- return PTR_ERR(imx6_pcie->pcie_ref_125m);
++ ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
++ goto err;
+ }
+
+ imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+ if (IS_ERR(imx6_pcie->pcie_axi)) {
+ dev_err(&pdev->dev,
+ "pcie_axi clock source missing or invalid\n");
+- return PTR_ERR(imx6_pcie->pcie_axi);
++ ret = PTR_ERR(imx6_pcie->pcie_axi);
++ goto err;
+ }
+
+ /* Grab GPR config register range */
+@@ -616,15 +864,178 @@
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+- return PTR_ERR(imx6_pcie->iomuxc_gpr);
++ ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
++ goto err;
+ }
+
+- ret = imx6_add_pcie_port(pp, pdev);
+- if (ret < 0)
+- return ret;
++ if (of_find_property(np, "no-msi", NULL))
++ pci_no_msi();
+
+- platform_set_drvdata(pdev, imx6_pcie);
+- return 0;
++ if (IS_ENABLED(CONFIG_EP_MODE_IN_EP_RC_SYS)) {
++ if (IS_ENABLED(CONFIG_EP_SELF_IO_TEST)) {
++ /* Prepare the test regions and data */
++ test_reg1 = devm_kzalloc(&pdev->dev,
++ test_region_size, GFP_KERNEL);
++ if (!test_reg1) {
++ pr_err("pcie ep: can't alloc the test reg1.\n");
++				ret = -ENOMEM;
++ goto err;
++ }
++
++ test_reg2 = devm_kzalloc(&pdev->dev,
++ test_region_size, GFP_KERNEL);
++ if (!test_reg2) {
++ pr_err("pcie ep: can't alloc the test reg2.\n");
++				ret = -ENOMEM;
++ goto err;
++ }
++
++ pcie_arb_base_addr = ioremap_cache(0x01000000,
++ test_region_size);
++
++ if (!pcie_arb_base_addr) {
++ pr_err("error with ioremap in ep selftest\n");
++				ret = -ENOMEM;
++ goto err;
++ }
++
++ for (i = 0; i < test_region_size; i = i + 4) {
++ writel(0xE6600D00 + i, test_reg1 + i);
++ writel(0xDEADBEAF, test_reg2 + i);
++ }
++ }
++
++ imx6_pcie_init_phy(pp);
++
++ imx6_pcie_deassert_core_reset(pp);
++
++ /* assert LTSSM enable */
++ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
++ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
++
++
++ dev_info(&pdev->dev, "PCIe EP: waiting for link up...\n");
++
++ platform_set_drvdata(pdev, imx6_pcie);
++ /* link is indicated by the bit4 of DB_R1 register */
++ do {
++ usleep_range(10, 20);
++ } while ((readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & 0x10) == 0);
++
++ /* CMD reg:I/O space, MEM space, and Bus Master Enable */
++ writel(readl(pp->dbi_base + PCI_COMMAND)
++ | PCI_COMMAND_IO
++ | PCI_COMMAND_MEMORY
++ | PCI_COMMAND_MASTER,
++ pp->dbi_base + PCI_COMMAND);
++
++ /*
++		 * configure the class_rev (emulate one memory ram ep device),
++ * bar0 and bar1 of ep
++ */
++ writel(0xdeadbeaf, pp->dbi_base + PCI_VENDOR_ID);
++ writel(readl(pp->dbi_base + PCI_CLASS_REVISION)
++ | (PCI_CLASS_MEMORY_RAM << 16),
++ pp->dbi_base + PCI_CLASS_REVISION);
++ writel(0xdeadbeaf, pp->dbi_base
++ + PCI_SUBSYSTEM_VENDOR_ID);
++
++		/* 32bit non-prefetchable 8M bytes memory on bar0 */
++ writel(0x0, pp->dbi_base + PCI_BASE_ADDRESS_0);
++ writel(SZ_8M - 1, pp->dbi_base + (1 << 12)
++ + PCI_BASE_ADDRESS_0);
++
++		/* Unused bar1 */
++ writel(0x0, pp->dbi_base + PCI_BASE_ADDRESS_1);
++ writel(0, pp->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_1);
++
++ /* 4K bytes IO on bar2 */
++ writel(0x1, pp->dbi_base + PCI_BASE_ADDRESS_2);
++ writel(SZ_4K - 1, pp->dbi_base + (1 << 12) +
++ PCI_BASE_ADDRESS_2);
++
++ /*
++ * 32bit prefetchable 1M bytes memory on bar3
++		 * FIXME BAR MASK3 is not changeable, the size
++ * is fixed to 256 bytes.
++ */
++ writel(0x8, pp->dbi_base + PCI_BASE_ADDRESS_3);
++ writel(SZ_1M - 1, pp->dbi_base + (1 << 12)
++ + PCI_BASE_ADDRESS_3);
++
++ /*
++ * 64bit prefetchable 1M bytes memory on bar4-5.
++ * FIXME BAR4,5 are not enabled yet
++ */
++ writel(0xc, pp->dbi_base + PCI_BASE_ADDRESS_4);
++ writel(SZ_1M - 1, pp->dbi_base + (1 << 12)
++ + PCI_BASE_ADDRESS_4);
++ writel(0, pp->dbi_base + (1 << 12) + PCI_BASE_ADDRESS_5);
++
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(&pdev->dev);
++
++ if (IS_ENABLED(CONFIG_EP_SELF_IO_TEST)) {
++ /* PCIe EP start the data transfer after link up */
++ pr_info("pcie ep: Starting data transfer...\n");
++ do_gettimeofday(&tv1);
++
++ memcpy((unsigned long *)pcie_arb_base_addr,
++ (unsigned long *)test_reg1,
++ test_region_size);
++
++ do_gettimeofday(&tv2);
++
++ memcpy((unsigned long *)test_reg2,
++ (unsigned long *)pcie_arb_base_addr,
++ test_region_size);
++
++ do_gettimeofday(&tv3);
++
++ if (memcmp(test_reg2, test_reg1, test_region_size) == 0) {
++ tv_count1 = (tv2.tv_sec - tv1.tv_sec)
++ * USEC_PER_SEC
++ + tv2.tv_usec - tv1.tv_usec;
++ tv_count2 = (tv3.tv_sec - tv2.tv_sec)
++ * USEC_PER_SEC
++ + tv3.tv_usec - tv2.tv_usec;
++
++ pr_info("pcie ep: Data transfer is successful."
++ " tv_count1 %dus,"
++ " tv_count2 %dus.\n",
++ tv_count1, tv_count2);
++ pr_info("pcie ep: Data write speed:%ldMB/s.\n",
++ ((test_region_size/1024)
++ * MSEC_PER_SEC)
++ /(tv_count1));
++ pr_info("pcie ep: Data read speed:%ldMB/s.\n",
++ ((test_region_size/1024)
++ * MSEC_PER_SEC)
++ /(tv_count2));
++ } else {
++				pr_info("pcie ep: Data transfer failed.\n");
++ }
++ }
++ } else {
++ ret = imx6_add_pcie_port(pp, pdev);
++ if (ret < 0)
++ goto err;
++ platform_set_drvdata(pdev, imx6_pcie);
++
++ /* Re-setup the iATU */
++ imx_pcie_regions_setup(&pdev->dev);
++ }
++
++err:
++ return ret;
++}
++
++static void imx6_pcie_shutdown(struct platform_device *pdev)
++{
++ struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
++
++ /* bring down link, so bootloader gets clean state in case of reboot */
++ imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+ }
+
+ static const struct of_device_id imx6_pcie_of_match[] = {
+@@ -639,6 +1050,7 @@
+ .owner = THIS_MODULE,
+ .of_match_table = imx6_pcie_of_match,
+ },
++ .shutdown = imx6_pcie_shutdown,
+ };
+
+ /* Freescale PCIe driver does not allow module unload */
+diff -Nur linux-3.14.40.orig/drivers/pci/host/pci-imx6-ep-driver.c linux-3.14.40/drivers/pci/host/pci-imx6-ep-driver.c
+--- linux-3.14.40.orig/drivers/pci/host/pci-imx6-ep-driver.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/pci/host/pci-imx6-ep-driver.c 2015-05-01 14:58:04.283427001 -0500
+@@ -0,0 +1,159 @@
++/*
++ * PCIe endpoint skeleton driver for IMX6 SOCs
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/pci-aspm.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++
++#define DRV_DESCRIPTION "i.MX PCIE endpoint device driver"
++#define DRV_VERSION "version 0.1"
++#define DRV_NAME "imx_pcie_ep"
++
++struct imx_pcie_ep_priv {
++ struct pci_dev *pci_dev;
++ void __iomem *hw_base;
++};
++
++/**
++ * imx_pcie_ep_probe - Device Initialization Routine
++ * @pdev: PCI device information struct
++ * @id: entry in id_tbl
++ *
++ * Returns 0 on success, negative on failure
++ **/
++static int imx_pcie_ep_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ int ret = 0;
++ struct device *dev = &pdev->dev;
++ struct imx_pcie_ep_priv *priv;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ dev_err(dev, "can't alloc imx pcie priv\n");
++ return -ENOMEM;
++ }
++
++ priv->pci_dev = pdev;
++
++ if (pci_enable_device(pdev)) {
++ ret = -ENODEV;
++ goto out;
++ }
++ pci_set_master(pdev);
++
++ pci_set_drvdata(pdev, priv);
++
++ priv->hw_base = pci_iomap(pdev, 0, 0);
++ if (!priv->hw_base) {
++ ret = -ENODEV;
++ goto out;
++ }
++
++ pr_info("pci_resource_len = 0x%08llx\n",
++ (unsigned long long) pci_resource_len(pdev, 0));
++ pr_info("pci_resource_base = %p\n", priv->hw_base);
++
++ ret = pci_enable_msi(priv->pci_dev);
++ if (ret < 0) {
++ dev_err(dev, "can't enable msi\n");
++ return ret;
++ }
++
++ /*
++	 * Force the MSI address to 0x01FF8000
++	 * for the MSI demo
++ */
++ pci_bus_write_config_dword(pdev->bus, 0, 0x54, 0x01FF8000);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x820, 0x01FF8000);
++
++ /* configure rc's msi cap */
++ pci_bus_read_config_dword(pdev->bus->parent, 0, 0x50, &ret);
++ ret |= (PCI_MSI_FLAGS_ENABLE << 16);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x50, ret);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x828, 0x1);
++ pci_bus_write_config_dword(pdev->bus->parent, 0, 0x82C, 0xFFFFFFFE);
++
++ return 0;
++
++out:
++ return ret;
++}
++
++static void imx_pcie_ep_remove(struct pci_dev *pdev)
++{
++ struct imx_pcie_ep_priv *priv = pci_get_drvdata(pdev);
++
++ if (!priv)
++ return;
++ pr_info("***imx pcie ep driver unload***\n");
++}
++
++static struct pci_device_id imx_pcie_ep_ids[] = {
++ {
++ .class = PCI_CLASS_MEMORY_RAM << 8,
++ .class_mask = ~0,
++ .vendor = 0xbeaf,
++ .device = 0xdead,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ { } /* terminate list */
++};
++MODULE_DEVICE_TABLE(pci, imx_pcie_ep_ids);
++
++static struct pci_driver imx_pcie_ep_driver = {
++ .name = DRV_NAME,
++ .id_table = imx_pcie_ep_ids,
++ .probe = imx_pcie_ep_probe,
++ .remove = imx_pcie_ep_remove,
++};
++
++static int __init imx_pcie_ep_init(void)
++{
++ int ret;
++ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
++
++ ret = pci_register_driver(&imx_pcie_ep_driver);
++ if (ret)
++ pr_err("Unable to initialize PCI module\n");
++
++ return ret;
++}
++
++static void __exit imx_pcie_ep_exit(void)
++{
++ pci_unregister_driver(&imx_pcie_ep_driver);
++}
++
++module_exit(imx_pcie_ep_exit);
++module_init(imx_pcie_ep_init);
++
++MODULE_DESCRIPTION(DRV_DESCRIPTION);
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("imx_pcie_ep");
+diff -Nur linux-3.14.40.orig/drivers/pinctrl/devicetree.c linux-3.14.40/drivers/pinctrl/devicetree.c
+--- linux-3.14.40.orig/drivers/pinctrl/devicetree.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pinctrl/devicetree.c 2015-05-01 14:58:04.299427001 -0500
+@@ -18,6 +18,7 @@
+
+ #include <linux/device.h>
+ #include <linux/of.h>
++#include <linux/of_gpio.h>
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/slab.h>
+
+@@ -172,6 +173,43 @@
+ return dt_remember_or_free_map(p, statename, NULL, map, 1);
+ }
+
++static int dt_gpio_assert_pinctrl(struct pinctrl *p)
++{
++ struct device_node *np = p->dev->of_node;
++ enum of_gpio_flags flags;
++ int gpio;
++ int index = 0;
++ int ret;
++
++ if (!of_find_property(np, "pinctrl-assert-gpios", NULL))
++ return 0; /* Missing the property, so nothing to be done */
++
++ for (;; index++) {
++ gpio = of_get_named_gpio_flags(np, "pinctrl-assert-gpios",
++ index, &flags);
++ if (gpio < 0)
++ break; /* End of the phandle list */
++
++ if (!gpio_is_valid(gpio))
++ return -EINVAL;
++
++ ret = devm_gpio_request_one(p->dev, gpio, GPIOF_OUT_INIT_LOW,
++ NULL);
++ if (ret < 0)
++ return ret;
++
++ if (flags & OF_GPIO_ACTIVE_LOW)
++ continue;
++
++ if (gpio_cansleep(gpio))
++ gpio_set_value_cansleep(gpio, 1);
++ else
++ gpio_set_value(gpio, 1);
++ }
++
++ return 0;
++}
++
+ int pinctrl_dt_to_map(struct pinctrl *p)
+ {
+ struct device_node *np = p->dev->of_node;
+@@ -190,6 +228,12 @@
+ return 0;
+ }
+
++ ret = dt_gpio_assert_pinctrl(p);
++ if (ret) {
++ dev_dbg(p->dev, "failed to assert pinctrl setting: %d\n", ret);
++ return ret;
++ }
++
+ /* We may store pointers to property names within the node */
+ of_node_get(np);
+
+diff -Nur linux-3.14.40.orig/drivers/pinctrl/pinctrl-imx6sl.c linux-3.14.40/drivers/pinctrl/pinctrl-imx6sl.c
+--- linux-3.14.40.orig/drivers/pinctrl/pinctrl-imx6sl.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pinctrl/pinctrl-imx6sl.c 2015-05-01 14:58:04.307427001 -0500
+@@ -384,6 +384,10 @@
+ },
+ .probe = imx6sl_pinctrl_probe,
+ .remove = imx_pinctrl_remove,
++#ifdef CONFIG_PM
++ .suspend = imx_pinctrl_suspend,
++ .resume = imx_pinctrl_resume,
++#endif
+ };
+
+ static int __init imx6sl_pinctrl_init(void)
+diff -Nur linux-3.14.40.orig/drivers/pinctrl/pinctrl-imx.c linux-3.14.40/drivers/pinctrl/pinctrl-imx.c
+--- linux-3.14.40.orig/drivers/pinctrl/pinctrl-imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pinctrl/pinctrl-imx.c 2015-05-01 14:58:04.359427001 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * Core driver for the imx pin controller
+ *
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+@@ -628,3 +628,25 @@
+
+ return 0;
+ }
++
++#ifdef CONFIG_PM
++int imx_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
++
++ if (!ipctl)
++ return -EINVAL;
++
++ return pinctrl_force_sleep(ipctl->pctl);
++}
++
++int imx_pinctrl_resume(struct platform_device *pdev)
++{
++ struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
++
++ if (!ipctl)
++ return -EINVAL;
++
++ return pinctrl_force_default(ipctl->pctl);
++}
++#endif
+diff -Nur linux-3.14.40.orig/drivers/pinctrl/pinctrl-imx.h linux-3.14.40/drivers/pinctrl/pinctrl-imx.h
+--- linux-3.14.40.orig/drivers/pinctrl/pinctrl-imx.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pinctrl/pinctrl-imx.h 2015-05-01 14:58:04.367427001 -0500
+@@ -1,7 +1,7 @@
+ /*
+ * IMX pinmux core definitions
+ *
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+@@ -98,4 +98,8 @@
+ int imx_pinctrl_probe(struct platform_device *pdev,
+ struct imx_pinctrl_soc_info *info);
+ int imx_pinctrl_remove(struct platform_device *pdev);
++#ifdef CONFIG_PM
++int imx_pinctrl_suspend(struct platform_device *pdev, pm_message_t state);
++int imx_pinctrl_resume(struct platform_device *pdev);
++#endif
+ #endif /* __DRIVERS_PINCTRL_IMX_H */
+diff -Nur linux-3.14.40.orig/drivers/power/imx6_usb_charger.c linux-3.14.40/drivers/power/imx6_usb_charger.c
+--- linux-3.14.40.orig/drivers/power/imx6_usb_charger.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/power/imx6_usb_charger.c 2015-05-01 14:58:04.367427001 -0500
+@@ -0,0 +1,294 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/power/imx6_usb_charger.h>
++#include <linux/regmap.h>
++
++#define HW_ANADIG_REG_3P0_SET (0x00000124)
++#define HW_ANADIG_REG_3P0_CLR (0x00000128)
++#define BM_ANADIG_REG_3P0_ENABLE_ILIMIT 0x00000004
++#define BM_ANADIG_REG_3P0_ENABLE_LINREG 0x00000001
++
++#define HW_ANADIG_USB1_CHRG_DETECT_SET (0x000001b4)
++#define HW_ANADIG_USB1_CHRG_DETECT_CLR (0x000001b8)
++
++#define BM_ANADIG_USB1_CHRG_DETECT_EN_B 0x00100000
++#define BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B 0x00080000
++#define BM_ANADIG_USB1_CHRG_DETECT_CHK_CONTACT 0x00040000
++
++#define HW_ANADIG_USB1_VBUS_DET_STAT (0x000001c0)
++
++#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID 0x00000008
++
++#define HW_ANADIG_USB1_CHRG_DET_STAT (0x000001d0)
++
++#define BM_ANADIG_USB1_CHRG_DET_STAT_DM_STATE 0x00000004
++#define BM_ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED 0x00000002
++#define BM_ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT 0x00000001
++
++static char *imx6_usb_charger_supplied_to[] = {
++ "imx6_usb_charger",
++};
++
++static enum power_supply_property imx6_usb_charger_power_props[] = {
++ POWER_SUPPLY_PROP_PRESENT, /* Charger detected */
++ POWER_SUPPLY_PROP_ONLINE, /* VBUS online */
++ POWER_SUPPLY_PROP_CURRENT_MAX, /* Maximum current in mA */
++};
++
++static int imx6_usb_charger_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct usb_charger *charger =
++ container_of(psy, struct usb_charger, psy);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = charger->present;
++ break;
++ case POWER_SUPPLY_PROP_ONLINE:
++ val->intval = charger->online;
++ break;
++ case POWER_SUPPLY_PROP_CURRENT_MAX:
++ val->intval = charger->max_current;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void disable_charger_detector(struct regmap *regmap)
++{
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_SET,
++ BM_ANADIG_USB1_CHRG_DETECT_EN_B |
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
++}
++
++static void disable_current_limiter(struct regmap *regmap)
++{
++ /* Disable the vdd3p0 current limiter */
++ regmap_write(regmap, HW_ANADIG_REG_3P0_CLR,
++ BM_ANADIG_REG_3P0_ENABLE_ILIMIT);
++}
++
++/* Detect the attached port type; returns 0 on success */
++static int imx6_usb_charger_detect(struct usb_charger *charger)
++{
++ struct regmap *regmap = charger->anatop;
++ u32 val;
++ int i, data_pin_contact_count = 0;
++
++ /* Enable the vdd3p0 current limiter */
++ regmap_write(regmap, HW_ANADIG_REG_3P0_SET,
++ BM_ANADIG_REG_3P0_ENABLE_LINREG |
++ BM_ANADIG_REG_3P0_ENABLE_ILIMIT);
++
++ /* check if vbus is valid */
++ regmap_read(regmap, HW_ANADIG_USB1_VBUS_DET_STAT, &val);
++ if (!(val & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)) {
++ dev_err(charger->dev, "vbus is not valid\n");
++ disable_current_limiter(regmap);
++ return -EINVAL;
++ }
++
++ /* Enable charger detector */
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_CLR,
++ BM_ANADIG_USB1_CHRG_DETECT_EN_B);
++ /*
++ * - Do not check whether a charger is connected to the USB port
++ * - Check whether the USB plug's data pins have made contact
++ */
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_SET,
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
++
++ /* Check if plug is connected */
++ for (i = 0; i < 100; i = i + 1) {
++ regmap_read(regmap, HW_ANADIG_USB1_CHRG_DET_STAT, &val);
++ if (val & BM_ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT) {
++ if (data_pin_contact_count++ > 5)
++ /* Data pin makes contact */
++ break;
++ } else {
++ msleep(20);
++ }
++ }
++
++ if (i == 100) {
++ dev_err(charger->dev,
++ "VBUS is coming from a dedicated power supply.\n");
++ disable_current_limiter(regmap);
++ disable_charger_detector(regmap);
++ return -ENXIO;
++ }
++
++ /*
++ * - Do check whether a charger is connected to the USB port
++ * - Do not check whether the USB plug's data pins have made
++ * contact
++ */
++ regmap_write(regmap, HW_ANADIG_USB1_CHRG_DETECT_CLR,
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
++ BM_ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
++ msleep(45);
++
++ /* Check if it is a charger */
++ regmap_read(regmap, HW_ANADIG_USB1_CHRG_DET_STAT, &val);
++ if (!(val & BM_ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
++ dev_dbg(charger->dev, "It is a standard downstream port\n");
++ charger->psy.type = POWER_SUPPLY_TYPE_USB;
++ charger->max_current = 500;
++ disable_charger_detector(regmap);
++ } else {
++ /* It is a charger */
++ disable_charger_detector(regmap);
++ msleep(45);
++ }
++
++ disable_current_limiter(regmap);
++
++ return 0;
++}
++
++/*
++ * imx6_usb_vbus_connect - inform about VBUS connection
++ * @charger: the usb charger
++ *
++ * Inform the charger that VBUS is connected; the VBUS detect supplier
++ * should call this. Also, the USB device controller is expected to keep
++ * the data line pull-ups disabled.
++ */
++int imx6_usb_vbus_connect(struct usb_charger *charger)
++{
++ int ret;
++
++ charger->online = 1;
++
++ mutex_lock(&charger->lock);
++
++ /* Start the 1st period charger detection. */
++ ret = imx6_usb_charger_detect(charger);
++ if (ret)
++ dev_err(charger->dev,
++ "Error occurs during detection: %d\n",
++ ret);
++ else
++ charger->present = 1;
++
++ mutex_unlock(&charger->lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(imx6_usb_vbus_connect);
++
++/*
++ * It must be called after D+ is pulled up (by the USB controller driver);
++ * that is used to differentiate DCP from CDP.
++ */
++int imx6_usb_charger_detect_post(struct usb_charger *charger)
++{
++ struct regmap *regmap = charger->anatop;
++ int val;
++
++ mutex_lock(&charger->lock);
++
++ msleep(40);
++
++ regmap_read(regmap, HW_ANADIG_USB1_CHRG_DET_STAT, &val);
++ if (val & BM_ANADIG_USB1_CHRG_DET_STAT_DM_STATE) {
++ dev_dbg(charger->dev, "It is a dedicated charging port\n");
++ charger->psy.type = POWER_SUPPLY_TYPE_USB_DCP;
++ charger->max_current = 1500;
++ } else {
++ dev_dbg(charger->dev, "It is a charging downstream port\n");
++ charger->psy.type = POWER_SUPPLY_TYPE_USB_CDP;
++ charger->max_current = 900;
++ }
++
++ power_supply_changed(&charger->psy);
++
++ mutex_unlock(&charger->lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(imx6_usb_charger_detect_post);
++
++/*
++ * imx6_usb_vbus_disconnect - inform about VBUS disconnection
++ * @charger: the usb charger
++ *
++ * Inform the charger that VBUS is disconnected. The charging will be
++ * stopped and the charger properties cleared.
++ */
++int imx6_usb_vbus_disconnect(struct usb_charger *charger)
++{
++ charger->online = 0;
++ charger->present = 0;
++ charger->max_current = 0;
++ charger->psy.type = POWER_SUPPLY_TYPE_MAINS;
++
++ power_supply_changed(&charger->psy);
++
++ return 0;
++}
++EXPORT_SYMBOL(imx6_usb_vbus_disconnect);
++
++/*
++ * imx6_usb_create_charger - create a USB charger
++ * @charger: the charger to be initialized
++ * @name: name for the power supply
++ *
++ * Registers a power supply for the charger. The USB Controller
++ * driver will call this after filling struct usb_charger.
++ */
++int imx6_usb_create_charger(struct usb_charger *charger,
++ const char *name)
++{
++ struct power_supply *psy = &charger->psy;
++
++ if (!charger->dev)
++ return -EINVAL;
++
++ if (name)
++ psy->name = name;
++ else
++ psy->name = "imx6_usb_charger";
++
++ charger->bc = BATTERY_CHARGING_SPEC_1_2;
++ mutex_init(&charger->lock);
++
++ psy->type = POWER_SUPPLY_TYPE_MAINS;
++ psy->properties = imx6_usb_charger_power_props;
++ psy->num_properties = ARRAY_SIZE(imx6_usb_charger_power_props);
++ psy->get_property = imx6_usb_charger_get_property;
++ psy->supplied_to = imx6_usb_charger_supplied_to;
++ psy->num_supplicants = sizeof(imx6_usb_charger_supplied_to)
++ / sizeof(char *);
++
++ return power_supply_register(charger->dev, psy);
++}
++EXPORT_SYMBOL(imx6_usb_create_charger);
++
++/*
++ * imx6_usb_remove_charger - remove a USB charger
++ * @charger: the charger to be removed
++ *
++ * Unregister the charger's power supply.
++ */
++void imx6_usb_remove_charger(struct usb_charger *charger)
++{
++ power_supply_unregister(&charger->psy);
++}
++EXPORT_SYMBOL(imx6_usb_remove_charger);
+diff -Nur linux-3.14.40.orig/drivers/power/Kconfig linux-3.14.40/drivers/power/Kconfig
+--- linux-3.14.40.orig/drivers/power/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/power/Kconfig 2015-05-01 14:58:04.379427001 -0500
+@@ -389,6 +389,12 @@
+ Say Y to enable support for the battery and AC power in the
+ Goldfish emulator.
+
++config IMX6_USB_CHARGER
++ bool "Freescale imx6 USB Charger"
++ depends on SOC_IMX6Q || SOC_IMX6SL
++ help
++ Say Y to enable Freescale imx6 USB charger detection.
++
+ source "drivers/power/reset/Kconfig"
+
+ endif # POWER_SUPPLY
+diff -Nur linux-3.14.40.orig/drivers/power/Makefile linux-3.14.40/drivers/power/Makefile
+--- linux-3.14.40.orig/drivers/power/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/power/Makefile 2015-05-01 14:58:04.391427001 -0500
+@@ -58,3 +58,4 @@
+ obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
+ obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
+ obj-$(CONFIG_POWER_RESET) += reset/
++obj-$(CONFIG_IMX6_USB_CHARGER) += imx6_usb_charger.o
+diff -Nur linux-3.14.40.orig/drivers/ptp/ptp_chardev.c linux-3.14.40/drivers/ptp/ptp_chardev.c
+--- linux-3.14.40.orig/drivers/ptp/ptp_chardev.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ptp/ptp_chardev.c 2015-05-01 14:58:04.407427001 -0500
+@@ -25,6 +25,96 @@
+
+ #include "ptp_private.h"
+
++static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
++ enum ptp_pin_function func, unsigned int chan)
++{
++ struct ptp_clock_request rq;
++ int err = 0;
++
++ memset(&rq, 0, sizeof(rq));
++
++ switch (func) {
++ case PTP_PF_NONE:
++ break;
++ case PTP_PF_EXTTS:
++ rq.type = PTP_CLK_REQ_EXTTS;
++ rq.extts.index = chan;
++ err = ops->enable(ops, &rq, 0);
++ break;
++ case PTP_PF_PEROUT:
++ rq.type = PTP_CLK_REQ_PEROUT;
++ rq.perout.index = chan;
++ err = ops->enable(ops, &rq, 0);
++ break;
++ case PTP_PF_PHYSYNC:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return err;
++}
++
++int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
++ enum ptp_pin_function func, unsigned int chan)
++{
++ struct ptp_clock_info *info = ptp->info;
++ struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
++ unsigned int i;
++
++ /* Check to see if any other pin previously had this function. */
++ for (i = 0; i < info->n_pins; i++) {
++ if (info->pin_config[i].func == func &&
++ info->pin_config[i].chan == chan) {
++ pin1 = &info->pin_config[i];
++ break;
++ }
++ }
++ if (pin1 && i == pin)
++ return 0;
++
++ /* Check the desired function and channel. */
++ switch (func) {
++ case PTP_PF_NONE:
++ break;
++ case PTP_PF_EXTTS:
++ if (chan >= info->n_ext_ts)
++ return -EINVAL;
++ break;
++ case PTP_PF_PEROUT:
++ if (chan >= info->n_per_out)
++ return -EINVAL;
++ break;
++ case PTP_PF_PHYSYNC:
++ pr_err("sorry, cannot reassign the calibration pin\n");
++ return -EINVAL;
++ default:
++ return -EINVAL;
++ }
++
++ if (pin2->func == PTP_PF_PHYSYNC) {
++ pr_err("sorry, cannot reprogram the calibration pin\n");
++ return -EINVAL;
++ }
++
++ if (info->verify(info, pin, func, chan)) {
++ pr_err("driver cannot use function %u on pin %u\n", func, chan);
++ return -EOPNOTSUPP;
++ }
++
++ /* Disable whatever function was previously assigned. */
++ if (pin1) {
++ ptp_disable_pinfunc(info, func, chan);
++ pin1->func = PTP_PF_NONE;
++ pin1->chan = 0;
++ }
++ ptp_disable_pinfunc(info, pin2->func, pin2->chan);
++ pin2->func = func;
++ pin2->chan = chan;
++
++ return 0;
++}
++
+ int ptp_open(struct posix_clock *pc, fmode_t fmode)
+ {
+ return 0;
+@@ -35,12 +125,13 @@
+ struct ptp_clock_caps caps;
+ struct ptp_clock_request req;
+ struct ptp_sys_offset *sysoff = NULL;
++ struct ptp_pin_desc pd;
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock_info *ops = ptp->info;
+ struct ptp_clock_time *pct;
+ struct timespec ts;
+ int enable, err = 0;
+- unsigned int i;
++ unsigned int i, pin_index;
+
+ switch (cmd) {
+
+@@ -51,6 +142,7 @@
+ caps.n_ext_ts = ptp->info->n_ext_ts;
+ caps.n_per_out = ptp->info->n_per_out;
+ caps.pps = ptp->info->pps;
++ caps.n_pins = ptp->info->n_pins;
+ if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
+ err = -EFAULT;
+ break;
+@@ -126,6 +218,40 @@
+ err = -EFAULT;
+ break;
+
++ case PTP_PIN_GETFUNC:
++ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
++ err = -EFAULT;
++ break;
++ }
++ pin_index = pd.index;
++ if (pin_index >= ops->n_pins) {
++ err = -EINVAL;
++ break;
++ }
++ if (mutex_lock_interruptible(&ptp->pincfg_mux))
++ return -ERESTARTSYS;
++ pd = ops->pin_config[pin_index];
++ mutex_unlock(&ptp->pincfg_mux);
++ if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
++ err = -EFAULT;
++ break;
++
++ case PTP_PIN_SETFUNC:
++ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
++ err = -EFAULT;
++ break;
++ }
++ pin_index = pd.index;
++ if (pin_index >= ops->n_pins) {
++ err = -EINVAL;
++ break;
++ }
++ if (mutex_lock_interruptible(&ptp->pincfg_mux))
++ return -ERESTARTSYS;
++ err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
++ mutex_unlock(&ptp->pincfg_mux);
++ break;
++
+ default:
+ err = -ENOTTY;
+ break;
+diff -Nur linux-3.14.40.orig/drivers/ptp/ptp_clock.c linux-3.14.40/drivers/ptp/ptp_clock.c
+--- linux-3.14.40.orig/drivers/ptp/ptp_clock.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ptp/ptp_clock.c 2015-05-01 14:58:04.407427001 -0500
+@@ -169,6 +169,7 @@
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+
+ mutex_destroy(&ptp->tsevq_mux);
++ mutex_destroy(&ptp->pincfg_mux);
+ ida_simple_remove(&ptp_clocks_map, ptp->index);
+ kfree(ptp);
+ }
+@@ -203,6 +204,7 @@
+ ptp->index = index;
+ spin_lock_init(&ptp->tsevq.lock);
+ mutex_init(&ptp->tsevq_mux);
++ mutex_init(&ptp->pincfg_mux);
+ init_waitqueue_head(&ptp->tsev_wq);
+
+ /* Create a new device in our class. */
+@@ -249,6 +251,7 @@
+ device_destroy(ptp_class, ptp->devid);
+ no_device:
+ mutex_destroy(&ptp->tsevq_mux);
++ mutex_destroy(&ptp->pincfg_mux);
+ no_slot:
+ kfree(ptp);
+ no_memory:
+@@ -305,6 +308,26 @@
+ }
+ EXPORT_SYMBOL(ptp_clock_index);
+
++int ptp_find_pin(struct ptp_clock *ptp,
++ enum ptp_pin_function func, unsigned int chan)
++{
++ struct ptp_pin_desc *pin = NULL;
++ int i;
++
++ mutex_lock(&ptp->pincfg_mux);
++ for (i = 0; i < ptp->info->n_pins; i++) {
++ if (ptp->info->pin_config[i].func == func &&
++ ptp->info->pin_config[i].chan == chan) {
++ pin = &ptp->info->pin_config[i];
++ break;
++ }
++ }
++ mutex_unlock(&ptp->pincfg_mux);
++
++ return pin ? i : -1;
++}
++EXPORT_SYMBOL(ptp_find_pin);
++
+ /* module operations */
+
+ static void __exit ptp_exit(void)
+diff -Nur linux-3.14.40.orig/drivers/ptp/ptp_ixp46x.c linux-3.14.40/drivers/ptp/ptp_ixp46x.c
+--- linux-3.14.40.orig/drivers/ptp/ptp_ixp46x.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ptp/ptp_ixp46x.c 2015-05-01 14:58:04.407427001 -0500
+@@ -244,6 +244,7 @@
+ .name = "IXP46X timer",
+ .max_adj = 66666655,
+ .n_ext_ts = N_EXT_TS,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_ixp_adjfreq,
+ .adjtime = ptp_ixp_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/ptp/ptp_pch.c linux-3.14.40/drivers/ptp/ptp_pch.c
+--- linux-3.14.40.orig/drivers/ptp/ptp_pch.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ptp/ptp_pch.c 2015-05-01 14:58:04.407427001 -0500
+@@ -514,6 +514,7 @@
+ .name = "PCH timer",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
++ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = ptp_pch_adjfreq,
+ .adjtime = ptp_pch_adjtime,
+diff -Nur linux-3.14.40.orig/drivers/ptp/ptp_private.h linux-3.14.40/drivers/ptp/ptp_private.h
+--- linux-3.14.40.orig/drivers/ptp/ptp_private.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/ptp/ptp_private.h 2015-05-01 14:58:04.419427001 -0500
+@@ -48,6 +48,7 @@
+ long dialed_frequency; /* remembers the frequency adjustment */
+ struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
+ struct mutex tsevq_mux; /* one process at a time reading the fifo */
++ struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
+ wait_queue_head_t tsev_wq;
+ int defunct; /* tells readers to go away when clock is being removed */
+ };
+@@ -69,6 +70,10 @@
+ * see ptp_chardev.c
+ */
+
++/* caller must hold pincfg_mux */
++int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
++ enum ptp_pin_function func, unsigned int chan);
++
+ long ptp_ioctl(struct posix_clock *pc,
+ unsigned int cmd, unsigned long arg);
+
+diff -Nur linux-3.14.40.orig/drivers/pwm/pwm-imx.c linux-3.14.40/drivers/pwm/pwm-imx.c
+--- linux-3.14.40.orig/drivers/pwm/pwm-imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/pwm/pwm-imx.c 2015-05-01 14:58:04.431427001 -0500
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * simple driver for PWM (Pulse Width Modulator) controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -293,11 +294,34 @@
+ return pwmchip_remove(&imx->chip);
+ }
+
++#ifdef CONFIG_PM
++static int imx_pwm_suspend(struct device *dev)
++{
++ pinctrl_pm_select_sleep_state(dev);
++
++ return 0;
++}
++
++static int imx_pwm_resume(struct device *dev)
++{
++ pinctrl_pm_select_default_state(dev);
++
++ return 0;
++}
++
++static const struct dev_pm_ops imx_pwm_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(imx_pwm_suspend, imx_pwm_resume)
++};
++#endif
++
+ static struct platform_driver imx_pwm_driver = {
+ .driver = {
+ .name = "imx-pwm",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_pwm_dt_ids,
++#ifdef CONFIG_PM
++ .pm = &imx_pwm_pm_ops,
++#endif
+ },
+ .probe = imx_pwm_probe,
+ .remove = imx_pwm_remove,
+diff -Nur linux-3.14.40.orig/drivers/regulator/anatop-regulator.c linux-3.14.40/drivers/regulator/anatop-regulator.c
+--- linux-3.14.40.orig/drivers/regulator/anatop-regulator.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/regulator/anatop-regulator.c 2015-05-01 14:58:04.439427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright (C) 2011, 2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+ /*
+@@ -34,6 +34,22 @@
+ #define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
+ #define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
+
++#define REG_SET 0x4
++#define REG_CLR 0x8
++#define SOC_PU_FIELD_OFFSET 0x9
++
++/*
++ * For the CORE, SOC and PU regulators, the register field is
++ * defined as follows: 00001 -- target core voltage = 0.725V,
++ * which means the lowest setting in this field is 0.725V once
++ * the regulator is enabled. So when one of these regulators is
++ * turned on from the off state, we also have to account for the
++ * voltage ramp from 0V to 0.7V, which needs additional delay;
++ * the number of additional steps is therefore
++ * 700mV / 25mV = 28.
++ */
++#define CORE_REG_ENABLE_STEP_ADD 28
++
+ struct anatop_regulator {
+ const char *name;
+ u32 control_reg;
+@@ -97,12 +113,86 @@
+ return regulator_get_voltage_sel_regmap(reg);
+ }
+
++/*
++ * Currently, among the anatop regulators only the PU regulator
++ * supports the enable/disable operations, and its voltage must be
++ * equal to the SOC voltage, so we read the SOC voltage and write
++ * it into the PU regulator. The other regulators are always on by
++ * hardware design, so the enable/disable/is_enabled/enable_time
++ * callbacks are only used by the PU regulator.
++ */
++static int anatop_regmap_enable(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++ u32 val;
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
++ val &= ((1 << anatop_reg->vol_bit_width) - 1) <<
++ (anatop_reg->vol_bit_shift + SOC_PU_FIELD_OFFSET);
++ regmap_write(anatop_reg->anatop, anatop_reg->control_reg +
++ REG_SET, val >> SOC_PU_FIELD_OFFSET);
++
++ return 0;
++}
++
++static int anatop_regmap_disable(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_write(anatop_reg->anatop, anatop_reg->control_reg +
++ REG_CLR, ((1 << anatop_reg->vol_bit_width) - 1) <<
++ anatop_reg->vol_bit_shift);
++
++ return 0;
++}
++
++static int anatop_regmap_is_enabled(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++ u32 val;
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
++
++ return (val >> anatop_reg->vol_bit_shift) &
++ ((1 << anatop_reg->vol_bit_width) - 1) ? 1 : 0;
++}
++
++static int anatop_regmap_enable_time(struct regulator_dev *reg)
++{
++ struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
++ u32 val, soc_val;
++
++ if (!anatop_reg->control_reg)
++ return -ENOTSUPP;
++
++ regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
++ soc_val = (val >> (anatop_reg->vol_bit_shift +
++ SOC_PU_FIELD_OFFSET)) &
++ ((1 << anatop_reg->vol_bit_width) - 1);
++
++ return anatop_regmap_set_voltage_time_sel(reg, 0,
++ soc_val + CORE_REG_ENABLE_STEP_ADD);
++}
++
+ static struct regulator_ops anatop_rops = {
+ .set_voltage_sel = anatop_regmap_set_voltage_sel,
+ .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
+ .get_voltage_sel = anatop_regmap_get_voltage_sel,
++ .enable = anatop_regmap_enable,
++ .disable = anatop_regmap_disable,
++ .is_enabled = anatop_regmap_is_enabled,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
++ .enable_time = anatop_regmap_enable_time,
+ };
+
+ static int anatop_regulator_probe(struct platform_device *pdev)
+@@ -196,6 +286,7 @@
+ config.driver_data = sreg;
+ config.of_node = pdev->dev.of_node;
+ config.regmap = sreg->anatop;
++ config.ena_gpio = -EINVAL;
+
+ /* register regulator */
+ rdev = devm_regulator_register(dev, rdesc, &config);
+diff -Nur linux-3.14.40.orig/drivers/regulator/core.c linux-3.14.40/drivers/regulator/core.c
+--- linux-3.14.40.orig/drivers/regulator/core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/regulator/core.c 2015-05-01 14:58:04.443427001 -0500
+@@ -3,6 +3,7 @@
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright 2008 SlimLogic Ltd.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+@@ -24,6 +25,7 @@
+ #include <linux/suspend.h>
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/of.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/of_regulator.h>
+@@ -77,7 +79,7 @@
+ */
+ struct regulator_enable_gpio {
+ struct list_head list;
+- int gpio;
++ struct gpio_desc *gpiod;
+ u32 enable_count; /* a number of enabled shared GPIO */
+ u32 request_count; /* a number of requested shared GPIO */
+ unsigned int ena_gpio_invert:1;
+@@ -1655,10 +1657,13 @@
+ const struct regulator_config *config)
+ {
+ struct regulator_enable_gpio *pin;
++ struct gpio_desc *gpiod;
+ int ret;
+
++ gpiod = gpio_to_desc(config->ena_gpio);
++
+ list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
+- if (pin->gpio == config->ena_gpio) {
++ if (pin->gpiod == gpiod) {
+ rdev_dbg(rdev, "GPIO %d is already used\n",
+ config->ena_gpio);
+ goto update_ena_gpio_to_rdev;
+@@ -1677,7 +1682,7 @@
+ return -ENOMEM;
+ }
+
+- pin->gpio = config->ena_gpio;
++ pin->gpiod = gpiod;
+ pin->ena_gpio_invert = config->ena_gpio_invert;
+ list_add(&pin->list, &regulator_ena_gpio_list);
+
+@@ -1696,10 +1701,10 @@
+
+ /* Free the GPIO only in case of no use */
+ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
+- if (pin->gpio == rdev->ena_pin->gpio) {
++ if (pin->gpiod == rdev->ena_pin->gpiod) {
+ if (pin->request_count <= 1) {
+ pin->request_count = 0;
+- gpio_free(pin->gpio);
++ gpiod_put(pin->gpiod);
+ list_del(&pin->list);
+ kfree(pin);
+ } else {
+@@ -1727,8 +1732,8 @@
+ if (enable) {
+ /* Enable GPIO at initial use */
+ if (pin->enable_count == 0)
+- gpio_set_value_cansleep(pin->gpio,
+- !pin->ena_gpio_invert);
++ gpiod_set_value_cansleep(pin->gpiod,
++ !pin->ena_gpio_invert);
+
+ pin->enable_count++;
+ } else {
+@@ -1739,8 +1744,8 @@
+
+ /* Disable GPIO if not used */
+ if (pin->enable_count <= 1) {
+- gpio_set_value_cansleep(pin->gpio,
+- pin->ena_gpio_invert);
++ gpiod_set_value_cansleep(pin->gpiod,
++ pin->ena_gpio_invert);
+ pin->enable_count = 0;
+ }
+ }
+@@ -1819,6 +1824,7 @@
+ }
+
+ trace_regulator_enable_complete(rdev_get_name(rdev));
++ _notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE, NULL);
+
+ return 0;
+ }
+@@ -1896,6 +1902,7 @@
+ {
+ int ret;
+
++ _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_DISABLE, NULL);
+ trace_regulator_disable(rdev_get_name(rdev));
+
+ if (rdev->ena_pin) {
+@@ -2144,7 +2151,7 @@
+ * @regulator: regulator source
+ *
+ * Returns positive if the regulator driver backing the source/client
+- * can change its voltage, false otherwise. Usefull for detecting fixed
++ * can change its voltage, false otherwise. Useful for detecting fixed
+ * or dummy regulators and disabling voltage change logic in the client
+ * driver.
+ */
+@@ -3451,7 +3458,7 @@
+
+ dev_set_drvdata(&rdev->dev, rdev);
+
+- if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
++ if (gpio_is_valid(config->ena_gpio)) {
+ ret = regulator_ena_gpio_request(rdev, config);
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
+diff -Nur linux-3.14.40.orig/drivers/regulator/dummy.c linux-3.14.40/drivers/regulator/dummy.c
+--- linux-3.14.40.orig/drivers/regulator/dummy.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/regulator/dummy.c 2015-05-01 14:58:04.455427001 -0500
+@@ -44,6 +44,7 @@
+
+ config.dev = &pdev->dev;
+ config.init_data = &dummy_initdata;
++ config.ena_gpio = -EINVAL;
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, &config);
+ if (IS_ERR(dummy_regulator_rdev)) {
+diff -Nur linux-3.14.40.orig/drivers/regulator/fixed.c linux-3.14.40/drivers/regulator/fixed.c
+--- linux-3.14.40.orig/drivers/regulator/fixed.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/regulator/fixed.c 2015-05-01 14:58:04.471427001 -0500
+@@ -163,9 +163,7 @@
+ drvdata->desc.n_voltages = 1;
+
+ drvdata->desc.fixed_uV = config->microvolts;
+-
+- if (config->gpio >= 0)
+- cfg.ena_gpio = config->gpio;
++ cfg.ena_gpio = config->gpio;
+ cfg.ena_gpio_invert = !config->enable_high;
+ if (config->enabled_at_boot) {
+ if (config->enable_high)
+diff -Nur linux-3.14.40.orig/drivers/reset/gpio-reset.c linux-3.14.40/drivers/reset/gpio-reset.c
+--- linux-3.14.40.orig/drivers/reset/gpio-reset.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/reset/gpio-reset.c 2015-05-01 14:58:04.471427001 -0500
+@@ -0,0 +1,187 @@
++/*
++ * GPIO Reset Controller driver
++ *
++ * Copyright 2013 Philipp Zabel, Pengutronix
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <linux/module.h>
++#include <linux/of_gpio.h>
++#include <linux/platform_device.h>
++#include <linux/reset-controller.h>
++
++struct gpio_reset_data {
++ struct reset_controller_dev rcdev;
++ unsigned int gpio;
++ bool active_low;
++ s32 delay_us;
++};
++
++static void gpio_reset_set(struct reset_controller_dev *rcdev, int asserted)
++{
++ struct gpio_reset_data *drvdata = container_of(rcdev,
++ struct gpio_reset_data, rcdev);
++ int value = asserted;
++
++ if (drvdata->active_low)
++ value = !value;
++
++ if (gpio_cansleep(drvdata->gpio))
++ gpio_set_value_cansleep(drvdata->gpio, value);
++ else
++ gpio_set_value(drvdata->gpio, value);
++}
++
++static int gpio_reset(struct reset_controller_dev *rcdev, unsigned long id)
++{
++ struct gpio_reset_data *drvdata = container_of(rcdev,
++ struct gpio_reset_data, rcdev);
++
++ if (drvdata->delay_us < 0)
++ return -ENOSYS;
++
++ gpio_reset_set(rcdev, 1);
++ udelay(drvdata->delay_us);
++ gpio_reset_set(rcdev, 0);
++
++ return 0;
++}
++
++static int gpio_reset_assert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ gpio_reset_set(rcdev, 1);
++
++ return 0;
++}
++
++static int gpio_reset_deassert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ gpio_reset_set(rcdev, 0);
++
++ return 0;
++}
++
++static struct reset_control_ops gpio_reset_ops = {
++ .reset = gpio_reset,
++ .assert = gpio_reset_assert,
++ .deassert = gpio_reset_deassert,
++};
++
++static int of_gpio_reset_xlate(struct reset_controller_dev *rcdev,
++ const struct of_phandle_args *reset_spec)
++{
++ if (WARN_ON(reset_spec->args_count != 0))
++ return -EINVAL;
++
++ return 0;
++}
++
++static int gpio_reset_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct gpio_reset_data *drvdata;
++ enum of_gpio_flags flags;
++ unsigned long gpio_flags;
++ bool initially_in_reset;
++ int ret;
++
++ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
++ if (drvdata == NULL)
++ return -ENOMEM;
++
++ if (of_gpio_named_count(np, "reset-gpios") != 1) {
++ dev_err(&pdev->dev,
++ "reset-gpios property missing, or not a single gpio\n");
++ return -EINVAL;
++ }
++
++ drvdata->gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
++ if (drvdata->gpio == -EPROBE_DEFER) {
++ return drvdata->gpio;
++ } else if (!gpio_is_valid(drvdata->gpio)) {
++ dev_err(&pdev->dev, "invalid reset gpio: %d\n", drvdata->gpio);
++ return drvdata->gpio;
++ }
++
++ drvdata->active_low = flags & OF_GPIO_ACTIVE_LOW;
++
++ ret = of_property_read_u32(np, "reset-delay-us", &drvdata->delay_us);
++ if (ret < 0)
++ drvdata->delay_us = -1;
++ else if (drvdata->delay_us < 0)
++ dev_warn(&pdev->dev, "reset delay too high\n");
++
++ initially_in_reset = of_property_read_bool(np, "initially-in-reset");
++ if (drvdata->active_low ^ initially_in_reset)
++ gpio_flags = GPIOF_OUT_INIT_HIGH;
++ else
++ gpio_flags = GPIOF_OUT_INIT_LOW;
++
++ ret = devm_gpio_request_one(&pdev->dev, drvdata->gpio, gpio_flags, NULL);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to request gpio %d: %d\n",
++ drvdata->gpio, ret);
++ return ret;
++ }
++
++ platform_set_drvdata(pdev, drvdata);
++
++ drvdata->rcdev.of_node = np;
++ drvdata->rcdev.owner = THIS_MODULE;
++ drvdata->rcdev.nr_resets = 1;
++ drvdata->rcdev.ops = &gpio_reset_ops;
++ drvdata->rcdev.of_xlate = of_gpio_reset_xlate;
++ reset_controller_register(&drvdata->rcdev);
++
++ return 0;
++}
++
++static int gpio_reset_remove(struct platform_device *pdev)
++{
++ struct gpio_reset_data *drvdata = platform_get_drvdata(pdev);
++
++ reset_controller_unregister(&drvdata->rcdev);
++
++ return 0;
++}
++
++static struct of_device_id gpio_reset_dt_ids[] = {
++ { .compatible = "gpio-reset" },
++ { }
++};
++
++static struct platform_driver gpio_reset_driver = {
++ .probe = gpio_reset_probe,
++ .remove = gpio_reset_remove,
++ .driver = {
++ .name = "gpio-reset",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(gpio_reset_dt_ids),
++ },
++};
++
++static int __init gpio_reset_init(void)
++{
++ return platform_driver_register(&gpio_reset_driver);
++}
++arch_initcall(gpio_reset_init);
++
++static void __exit gpio_reset_exit(void)
++{
++ platform_driver_unregister(&gpio_reset_driver);
++}
++module_exit(gpio_reset_exit);
++
++MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
++MODULE_DESCRIPTION("gpio reset controller");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:gpio-reset");
++MODULE_DEVICE_TABLE(of, gpio_reset_dt_ids);
+diff -Nur linux-3.14.40.orig/drivers/reset/Kconfig linux-3.14.40/drivers/reset/Kconfig
+--- linux-3.14.40.orig/drivers/reset/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/reset/Kconfig 2015-05-01 14:58:04.479427001 -0500
+@@ -11,3 +11,15 @@
+ via GPIOs or SoC-internal reset controller modules.
+
+ If unsure, say no.
++
++if RESET_CONTROLLER
++
++config RESET_GPIO
++ tristate "GPIO reset controller support"
++ default y
++ depends on GPIOLIB && OF
++ help
++ This driver provides support for reset lines that are controlled
++ directly by GPIOs.
++
++endif
+diff -Nur linux-3.14.40.orig/drivers/reset/Makefile linux-3.14.40/drivers/reset/Makefile
+--- linux-3.14.40.orig/drivers/reset/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/reset/Makefile 2015-05-01 14:58:04.479427001 -0500
+@@ -1,2 +1,3 @@
+ obj-$(CONFIG_RESET_CONTROLLER) += core.o
++obj-$(CONFIG_RESET_GPIO) += gpio-reset.o
+ obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
+diff -Nur linux-3.14.40.orig/drivers/rtc/rtc-pcf8523.c linux-3.14.40/drivers/rtc/rtc-pcf8523.c
+--- linux-3.14.40.orig/drivers/rtc/rtc-pcf8523.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/rtc/rtc-pcf8523.c 2015-05-01 14:58:04.495427001 -0500
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/bcd.h>
++#include <linux/delay.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+@@ -82,24 +83,85 @@
+ return 0;
+ }
+
+-static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
++static int pcf8523_rtc_check_oscillator(struct i2c_client *client)
+ {
+ u8 value;
+ int err;
+
+- err = pcf8523_read(client, REG_CONTROL1, &value);
++ err = pcf8523_read(client, REG_SECONDS, &value);
+ if (err < 0)
+ return err;
+
+- if (!high)
+- value &= ~REG_CONTROL1_CAP_SEL;
+- else
+- value |= REG_CONTROL1_CAP_SEL;
++ if (value & REG_SECONDS_OS) {
++ /*
++ * If the oscillator was stopped, try to clear the flag. Upon
++ * power-up the flag is always set, but if we cannot clear it
++ * the oscillator isn't running properly for some reason. The
++ * sensible thing therefore is to return an error, signalling
++ * that the clock cannot be assumed to be correct.
++ */
++
++ value &= ~REG_SECONDS_OS;
++
++ err = pcf8523_write(client, REG_SECONDS, value);
++ if (err < 0)
++ return err;
++
++ err = pcf8523_read(client, REG_SECONDS, &value);
++ if (err < 0)
++ return err;
++
++ if (value & REG_SECONDS_OS)
++ return -EAGAIN;
++ }
++
++ return 0;
++}
++
++static int pcf8523_switch_capacitance(struct i2c_client *client)
++{
++ u8 value;
++ int err;
++
++ err = pcf8523_read(client, REG_CONTROL1, &value);
++ if (err < 0)
++ goto out;
++
++ value ^= REG_CONTROL1_CAP_SEL;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
++
++out:
++ return err;
++}
++
++static int pcf8523_enable_oscillator(struct i2c_client *client)
++{
++ int err, loop;
++
++ loop = 0;
++ while (loop < 200) {
++ err = pcf8523_rtc_check_oscillator(client);
++ if (!err)
++ return 0;
++ loop++;
++ msleep(10);
++ }
++
++ err = pcf8523_switch_capacitance(client);
+ if (err < 0)
+- return err;
++ goto out;
++
++ loop = 0;
++ while (loop < 200) {
++ err = pcf8523_rtc_check_oscillator(client);
++ if (!err)
++ return 0;
++ loop++;
++ msleep(10);
++ }
+
++out:
+ return err;
+ }
+
+@@ -290,6 +352,7 @@
+ const struct i2c_device_id *id)
+ {
+ struct pcf8523 *pcf;
++ u8 value;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+@@ -299,10 +362,20 @@
+ if (!pcf)
+ return -ENOMEM;
+
+- err = pcf8523_select_capacitance(client, true);
++ /* Check whether the RTC reports battery low */
++ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
++ if (value & REG_CONTROL3_BLF)
++ dev_warn(&client->dev, "RTC reports battery is low\n");
++
++ err = pcf8523_enable_oscillator(client);
++ if (err < 0) {
++ dev_warn(&client->dev, "RTC reports oscillator is not running\n");
++ return err;
++ }
++
+ err = pcf8523_set_pm(client, 0);
+ if (err < 0)
+ return err;
+diff -Nur linux-3.14.40.orig/drivers/rtc/rtc-snvs.c linux-3.14.40/drivers/rtc/rtc-snvs.c
+--- linux-3.14.40.orig/drivers/rtc/rtc-snvs.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/rtc/rtc-snvs.c 2015-05-01 14:58:04.499427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -41,6 +41,8 @@
+ spinlock_t lock;
+ };
+
++static void __iomem *snvs_base;
++
+ static u32 rtc_read_lp_counter(void __iomem *ioaddr)
+ {
+ u64 read1, read2;
+@@ -241,6 +243,15 @@
+ return events ? IRQ_HANDLED : IRQ_NONE;
+ }
+
++static void snvs_poweroff(void)
++{
++ u32 value;
++
++ value = readl(snvs_base + SNVS_LPCR);
++ /* set TOP and DP_EN bit */
++ writel(value | 0x60, snvs_base + SNVS_LPCR);
++}
++
+ static int snvs_rtc_probe(struct platform_device *pdev)
+ {
+ struct snvs_rtc_data *data;
+@@ -270,13 +281,15 @@
+ /* Clear interrupt status */
+ writel(0xffffffff, data->ioaddr + SNVS_LPSR);
+
++ snvs_base = data->ioaddr;
+ /* Enable RTC */
+ snvs_rtc_enable(data, true);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ ret = devm_request_irq(&pdev->dev, data->irq, snvs_rtc_irq_handler,
+- IRQF_SHARED, "rtc alarm", &pdev->dev);
++ IRQF_SHARED | IRQF_NO_SUSPEND,
++ "rtc alarm", &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq %d: %d\n",
+ data->irq, ret);
+@@ -290,6 +303,12 @@
+ dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
+ return ret;
+ }
++ /*
++ * If no board-specific power-off function is registered, power off
++ * the system through SNVS.
++ */
++ if (!pm_power_off)
++ pm_power_off = snvs_poweroff;
+
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/drivers/scsi/scsi_transport_iscsi.c linux-3.14.40/drivers/scsi/scsi_transport_iscsi.c
+--- linux-3.14.40.orig/drivers/scsi/scsi_transport_iscsi.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/scsi/scsi_transport_iscsi.c 2015-05-01 14:58:04.527427001 -0500
+@@ -1225,7 +1225,7 @@
+ * Adds a sysfs entry for the flashnode session attributes
+ *
+ * Returns:
+- * pointer to allocated flashnode sess on sucess
++ * pointer to allocated flashnode sess on success
+ * %NULL on failure
+ */
+ struct iscsi_bus_flash_session *
+@@ -1423,7 +1423,7 @@
+ }
+
+ /**
+- * iscsi_destroy_flashnode_sess - destory flashnode session entry
++ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
+ * @fnode_sess: pointer to flashnode session entry to be destroyed
+ *
+ * Deletes the flashnode session entry and all children flashnode connection
+@@ -1453,7 +1453,7 @@
+ }
+
+ /**
+- * iscsi_destroy_all_flashnode - destory all flashnode session entries
++ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
+ * @shost: pointer to host data
+ *
+ * Destroys all the flashnode session entries and all corresponding children
+diff -Nur linux-3.14.40.orig/drivers/staging/bcm/Typedefs.h linux-3.14.40/drivers/staging/bcm/Typedefs.h
+--- linux-3.14.40.orig/drivers/staging/bcm/Typedefs.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/staging/bcm/Typedefs.h 2015-05-01 14:58:04.539427001 -0500
+@@ -25,16 +25,16 @@
+ typedef unsigned long ULONG;
+ typedef unsigned long DWORD;
+
+-typedef char* PCHAR;
+-typedef short* PSHORT;
+-typedef int* PINT;
+-typedef long* PLONG;
+-typedef void* PVOID;
++typedef char *PCHAR;
++typedef short *PSHORT;
++typedef int *PINT;
++typedef long *PLONG;
++typedef void *PVOID;
+
+-typedef unsigned char* PUCHAR;
+-typedef unsigned short* PUSHORT;
+-typedef unsigned int* PUINT;
+-typedef unsigned long* PULONG;
++typedef unsigned char *PUCHAR;
++typedef unsigned short *PUSHORT;
++typedef unsigned int *PUINT;
++typedef unsigned long *PULONG;
+ typedef unsigned long long ULONG64;
+ typedef unsigned long long LARGE_INTEGER;
+ typedef unsigned int UINT32;
+diff -Nur linux-3.14.40.orig/drivers/staging/media/lirc/Kconfig linux-3.14.40/drivers/staging/media/lirc/Kconfig
+--- linux-3.14.40.orig/drivers/staging/media/lirc/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/staging/media/lirc/Kconfig 2015-05-01 14:58:04.547427001 -0500
+@@ -38,6 +38,12 @@
+ help
+ Driver for Homebrew Parallel Port Receivers
+
++config LIRC_GPIO
++ tristate "Homebrew GPIO Port Receiver/Transmitter"
++ depends on LIRC
++ help
++ Driver for Homebrew GPIO Port Receiver/Transmitter
++
+ config LIRC_SASEM
+ tristate "Sasem USB IR Remote"
+ depends on LIRC && USB
+diff -Nur linux-3.14.40.orig/drivers/staging/media/lirc/lirc_gpio.c linux-3.14.40/drivers/staging/media/lirc/lirc_gpio.c
+--- linux-3.14.40.orig/drivers/staging/media/lirc/lirc_gpio.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/staging/media/lirc/lirc_gpio.c 2015-05-01 14:58:04.547427001 -0500
+@@ -0,0 +1,782 @@
++/*
++ * lirc_gpio.c
++ *
++ * lirc_gpio - Device driver that records pulse- and pause-lengths
++ * (space-lengths) (just like the lirc_serial driver does)
++ * between GPIO interrupt events on GPIO capable devices.
++ * Lots of code has been taken from the lirc_serial and the
++ * lirc_rpi modules so I would like say thanks to the authors.
++ *
++ * Copyright (C) 2014 CurlyMo <curlymoo1@gmail.com>
++ * Aron Robert Szabo <aron@reon.hu>,
++ * Michael Bishop <cleverca22@gmail.com>
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++/*
++ lirc_gpio {
++ compatible = "lirc_gpio";
++ gpios = <&gpio3 6 1 &gpio3 7 2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hummingboard_gpio3_6>;
++ pinctrl-1 = <&pinctrl_hummingboard_gpio3_7>;
++ linux,sense = <-1>;
++ linux,softcarrier = <1>;
++ linux,validgpios = <1 73 72 71 70 194 195 67>;
++ };
++ */
++
++
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/time.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/platform_device.h>
++#include <linux/irq.h>
++#include <linux/spinlock.h>
++#include <media/lirc.h>
++#include <media/lirc_dev.h>
++#include <linux/gpio.h>
++#include <linux/of.h>
++#include <linux/of_gpio.h>
++
++#define LIRC_DRIVER_NAME "lirc_gpio"
++#define RBUF_LEN 256
++#define LIRC_TRANSMITTER_LATENCY 256
++
++#ifndef MAX_UDELAY_MS
++#define MAX_UDELAY_US 5000
++#else
++#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
++#endif
++
++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, loff_t *ppos);
++static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
++static int set_use_inc(void *data);
++static void set_use_dec(void *data);
++static int lirc_gpio_probe(struct platform_device *pdev);
++static int lirc_gpio_remove(struct platform_device *pdev);
++
++struct lirc_gpio_platform_data {
++ int gpio_rx_nr;
++ int gpio_tx_nr;
++ bool active_rx_low;
++ bool active_tx_low;
++ u64 allowed_rx_protos;
++ u64 allowed_tx_protos;
++ int sense;
++ int softcarrier;
++ int validgpios[255];
++};
++
++struct lirc_gpio_dev {
++ int gpio_rx_nr;
++ int gpio_tx_nr;
++ int sense;
++ int softcarrier;
++ int validgpios[255];
++};
++
++struct lirc_gpio_dev *gpio_dev;
++
++static const struct file_operations lirc_fops = {
++ .owner = THIS_MODULE,
++ .write = lirc_write,
++ .unlocked_ioctl = lirc_ioctl,
++ .read = lirc_dev_fop_read,
++ .poll = lirc_dev_fop_poll,
++ .open = lirc_dev_fop_open,
++ .release = lirc_dev_fop_close,
++ .llseek = no_llseek,
++};
++
++struct irq_chip *irqchip;
++struct irq_data *irqdata;
++
++static struct timeval lasttv = { 0, 0 };
++static struct lirc_buffer rbuf;
++static spinlock_t lock;
++
++/* set the default GPIO input pin */
++static int gpio_in_pin = -1;
++/* set the default GPIO output pin */
++static int gpio_out_pin = -1;
++/* -1 = auto, 0 = active high, 1 = active low */
++static int sense = -2;
++/* use softcarrier by default */
++static int softcarrier = -1;
++
++/* initialized/set in init_timing_params() */
++static unsigned int freq = 38000;
++static unsigned int duty_cycle = 50;
++static unsigned long period;
++static unsigned long pulse_width;
++static unsigned long space_width;
++
++static struct lirc_driver driver = {
++ .name = LIRC_DRIVER_NAME,
++ .minor = -1,
++ .code_length = 1,
++ .sample_rate = 0,
++ .data = NULL,
++ .add_to_buf = NULL,
++ .rbuf = &rbuf,
++ .set_use_inc = set_use_inc,
++ .set_use_dec = set_use_dec,
++ .fops = &lirc_fops,
++ .dev = NULL,
++ .owner = THIS_MODULE,
++};
++
++static struct of_device_id lirc_gpio_of_match[] = {
++ { .compatible = "lirc_gpio", },
++ {}
++};
++
++static struct platform_driver lirc_gpio_driver = {
++ .probe = lirc_gpio_probe,
++ .remove = lirc_gpio_remove,
++ .driver = {
++ .name = LIRC_DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = lirc_gpio_of_match,
++ },
++};
++
++static void safe_udelay(unsigned long usecs) {
++ while (usecs > MAX_UDELAY_US) {
++ udelay(MAX_UDELAY_US);
++ usecs -= MAX_UDELAY_US;
++ }
++ udelay(usecs);
++}
++
++static int init_timing_params(unsigned int new_duty_cycle, unsigned int new_freq) {
++ /*
++ * period, pulse/space width are kept with 8 binary places -
++ * IE multiplied by 256.
++ */
++ if(256 * 1000000L / new_freq * new_duty_cycle / 100 <=
++ LIRC_TRANSMITTER_LATENCY)
++ return -EINVAL;
++ if(256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <=
++ LIRC_TRANSMITTER_LATENCY)
++ return -EINVAL;
++ duty_cycle = new_duty_cycle;
++ freq = new_freq;
++ period = 256 * 1000000L / freq;
++ pulse_width = period * duty_cycle / 100;
++ space_width = period - pulse_width;
++ return 0;
++}
++
++
++static long send_pulse_softcarrier(unsigned long length) {
++ int flag;
++ unsigned long actual, target, d;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ length <<= 8;
++
++ actual = 0; target = 0; flag = 0;
++ while(actual < length) {
++ if(flag) {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++ target += space_width;
++ } else {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 1);
++ target += pulse_width;
++ }
++ d = (target - actual - LIRC_TRANSMITTER_LATENCY + 128) >> 8;
++ /*
++ * Note - we've checked in ioctl that the pulse/space
++ * widths are big enough so that d is > 0
++ */
++ udelay(d);
++ actual += (d << 8) + LIRC_TRANSMITTER_LATENCY;
++ flag = !flag;
++ }
++ return (actual-length) >> 8;
++ }
++ return 0;
++}
++
++static long send_pulse(unsigned long length) {
++ if(length <= 0)
++ return 0;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ if(gpio_dev->softcarrier) {
++ return send_pulse_softcarrier(length);
++ } else {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 1);
++ safe_udelay(length);
++ return 0;
++ }
++ }
++ return 0;
++}
++
++static void send_space(long length) {
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++ if(length <= 0)
++ return;
++ safe_udelay(length);
++ }
++}
++
++static void rbwrite(int l) {
++ if (lirc_buffer_full(&rbuf)) {
++ /* no new signals will be accepted */
++ return;
++ }
++ lirc_buffer_write(&rbuf, (void *)&l);
++}
++
++static void frbwrite(int l) {
++ /* simple noise filter */
++ static int pulse, space;
++ static unsigned int ptr;
++
++ if(ptr > 0 && (l & PULSE_BIT)) {
++ pulse += l & PULSE_MASK;
++ if(pulse > 250) {
++ rbwrite(space);
++ rbwrite(pulse | PULSE_BIT);
++ ptr = 0;
++ pulse = 0;
++ }
++ return;
++ }
++ if(!(l & PULSE_BIT)) {
++ if(ptr == 0) {
++ if (l > 20000) {
++ space = l;
++ ptr++;
++ return;
++ }
++ } else {
++ if(l > 20000) {
++ space += pulse;
++ if (space > PULSE_MASK)
++ space = PULSE_MASK;
++ space += l;
++ if (space > PULSE_MASK)
++ space = PULSE_MASK;
++ pulse = 0;
++ return;
++ }
++ rbwrite(space);
++ rbwrite(pulse | PULSE_BIT);
++ ptr = 0;
++ pulse = 0;
++ }
++ }
++ rbwrite(l);
++}
++
++static irqreturn_t irq_handler(int i, void *blah, struct pt_regs *regs) {
++ struct timeval tv;
++ long deltv;
++ int data;
++ int signal;
++
++ /* use the GPIO signal level */
++ signal = gpio_get_value(gpio_dev->gpio_rx_nr);
++
++ /* unmask the irq */
++ irqchip->irq_unmask(irqdata);
++
++ if(gpio_dev->sense != -1) {
++ /* The HB GPIO input acts like it is an analogue input.
++ Therefore a high signal is 256 and a low signal is 1.
++ For Lirc to properly interpret the spaces and pulses,
++ we need to transform these to ones and zeros. To be
++ on the safe side, every signal higher than 128 will
++ be interpreted as a high and vice versa. */
++ if (signal > 128) {
++ signal = 1;
++ } else {
++ signal = 0;
++ }
++ /* get current time */
++ do_gettimeofday(&tv);
++
++ /* calc time since last interrupt in microseconds */
++ deltv = tv.tv_sec-lasttv.tv_sec;
++ if(tv.tv_sec < lasttv.tv_sec ||
++ (tv.tv_sec == lasttv.tv_sec &&
++ tv.tv_usec < lasttv.tv_usec)) {
++ printk(KERN_WARNING LIRC_DRIVER_NAME
++ ": AIEEEE: your clock just jumped backwards\n");
++ printk(KERN_WARNING LIRC_DRIVER_NAME
++ ": %d %d %lx %lx %lx %lx\n", signal, gpio_dev->sense,
++ tv.tv_sec, lasttv.tv_sec,
++ tv.tv_usec, lasttv.tv_usec);
++ data = PULSE_MASK;
++ } else if (deltv > 15) {
++ data = PULSE_MASK; /* really long time */
++ if(!(signal^gpio_dev->sense)) {
++ /* sanity check */
++ printk(KERN_WARNING LIRC_DRIVER_NAME
++ ": AIEEEE: %d %d %lx %lx %lx %lx\n",
++ signal, gpio_dev->sense, tv.tv_sec, lasttv.tv_sec,
++ tv.tv_usec, lasttv.tv_usec);
++ /*
++ * detecting pulse while this
++ * MUST be a space!
++ */
++ gpio_dev->sense = gpio_dev->sense ? 0 : 1;
++ }
++ } else {
++ data = (int) (deltv*1000000 +
++ (tv.tv_usec - lasttv.tv_usec));
++ }
++ frbwrite(signal^gpio_dev->sense ? data : (data|PULSE_BIT));
++ lasttv = tv;
++ wake_up_interruptible(&rbuf.wait_poll);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/* called when the character device is opened */
++static int set_use_inc(void *data) {
++ int result;
++ unsigned long flags;
++
++ /* initialize timestamp */
++ do_gettimeofday(&lasttv);
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ result = request_irq(gpio_to_irq(gpio_dev->gpio_rx_nr),
++ (irq_handler_t) irq_handler, 0,
++ LIRC_DRIVER_NAME, (void*) 0);
++
++ switch (result) {
++ case -EBUSY:
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": IRQ %d is busy\n",
++ gpio_to_irq(gpio_dev->gpio_rx_nr));
++ return -EBUSY;
++ case -EINVAL:
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": Bad irq number or handler\n");
++ return -EINVAL;
++ default:
++ break;
++ };
++
++ /* initialize pulse/space widths */
++ init_timing_params(duty_cycle, freq);
++
++ spin_lock_irqsave(&lock, flags);
++
++ /* GPIO Pin Falling/Rising Edge Detect Enable */
++ irqchip->irq_set_type(irqdata,
++ IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING);
++
++ /* unmask the irq */
++ irqchip->irq_unmask(irqdata);
++
++ spin_unlock_irqrestore(&lock, flags);
++ }
++
++ return 0;
++}
++
++static void set_use_dec(void *data) {
++ unsigned long flags;
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ spin_lock_irqsave(&lock, flags);
++
++ /* GPIO Pin Falling/Rising Edge Detect Disable */
++ irqchip->irq_set_type(irqdata, 0);
++ irqchip->irq_mask(irqdata);
++
++ spin_unlock_irqrestore(&lock, flags);
++
++ free_irq(gpio_to_irq(gpio_dev->gpio_rx_nr), (void *) 0);
++ }
++}
++
++static ssize_t lirc_write(struct file *file, const char *buf, size_t n, loff_t *ppos) {
++ int i, count;
++ unsigned long flags;
++ long delta = 0;
++ int *wbuf;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ count = n / sizeof(int);
++ if(n % sizeof(int) || count % 2 == 0)
++ return -EINVAL;
++ wbuf = memdup_user(buf, n);
++ if(IS_ERR(wbuf))
++ return PTR_ERR(wbuf);
++ spin_lock_irqsave(&lock, flags);
++
++ for(i = 0; i < count; i++) {
++ if(i%2)
++ send_space(wbuf[i] - delta);
++ else
++ delta = send_pulse(wbuf[i]);
++ }
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++
++ spin_unlock_irqrestore(&lock, flags);
++ kfree(wbuf);
++ return n;
++ }
++ return 0;
++}
++
++
++static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) {
++ int result;
++ __u32 value;
++
++ switch(cmd) {
++ case LIRC_GET_SEND_MODE:
++ return -ENOIOCTLCMD;
++ break;
++
++ case LIRC_SET_SEND_MODE:
++ result = get_user(value, (__u32 *) arg);
++ if(result)
++ return result;
++ /* only LIRC_MODE_PULSE supported */
++ if(value != LIRC_MODE_PULSE)
++ return -ENOSYS;
++ break;
++
++ case LIRC_GET_LENGTH:
++ return -ENOSYS;
++ break;
++
++ case LIRC_SET_SEND_DUTY_CYCLE:
++ result = get_user(value, (__u32 *) arg);
++ if (result)
++ return result;
++ if (value <= 0 || value > 100)
++ return -EINVAL;
++ return init_timing_params(value, freq);
++ break;
++
++ case LIRC_SET_SEND_CARRIER:
++ result = get_user(value, (__u32 *) arg);
++ if(result)
++ return result;
++ if(value > 500000 || value < 20000)
++ return -EINVAL;
++ return init_timing_params(duty_cycle, value);
++ break;
++
++ default:
++ return lirc_dev_fop_ioctl(filep, cmd, arg);
++ }
++ return 0;
++}
++
++static int lirc_gpio_get_devtree_pdata(struct device *dev, struct lirc_gpio_platform_data *pdata) {
++ struct device_node *np = dev->of_node;
++ enum of_gpio_flags flags;
++ struct property *prop;
++ const __be32 *cur;
++ int gpio = -1;
++ int ret = 0;
++ int i = 0;
++
++ if(np) {
++ gpio = of_get_gpio_flags(np, 0, &flags);
++ if(gpio < 0) {
++ if(gpio != -EPROBE_DEFER)
++ dev_err(dev, "RX gpio not defined (%d)\n", gpio);
++
++ pdata->gpio_rx_nr = -1;
++ pdata->active_rx_low = 0;
++ pdata->allowed_rx_protos = 0;
++ } else {
++ pdata->gpio_rx_nr = gpio;
++ pdata->active_rx_low = (flags & OF_GPIO_ACTIVE_LOW);
++ pdata->allowed_rx_protos = 0;
++ }
++
++ gpio = of_get_gpio_flags(np, 1, &flags);
++ if(gpio < 0) {
++ if(gpio != -EPROBE_DEFER)
++ dev_err(dev, "TX gpio not defined (%d)\n", gpio);
++
++ pdata->gpio_tx_nr = -1;
++ pdata->active_tx_low = 0;
++ pdata->allowed_tx_protos = 0;
++ } else {
++ pdata->gpio_tx_nr = gpio;
++ pdata->active_tx_low = (flags & OF_GPIO_ACTIVE_LOW);
++ pdata->allowed_tx_protos = 0;
++ }
++ ret = of_property_read_u32(np, "linux,sense", &pdata->sense);
++ if(ret) {
++ pdata->sense = -1;
++ }
++ ret = of_property_read_u32(np, "linux,softcarrier", &pdata->softcarrier);
++ if(ret) {
++ pdata->softcarrier = 1;
++ }
++ i = 0;
++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": valid gpios");
++ of_property_for_each_u32(np, "linux,validgpios", prop, cur, gpio) {
++ printk(" %d", gpio);
++ pdata->validgpios[i++] = gpio;
++ }
++ printk("\n");
++ pdata->validgpios[i] = -1;
++ }
++
++ return 0;
++}
++
++static int init_port(void) {
++ int i, nlow, nhigh, ret, irq;
++
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ if(gpio_request(gpio_dev->gpio_tx_nr, LIRC_DRIVER_NAME " ir/out")) {
++ printk(KERN_ALERT LIRC_DRIVER_NAME ": can't claim gpio pin %d\n", gpio_dev->gpio_tx_nr);
++ ret = -ENODEV;
++ goto exit_init_port;
++ }
++ }
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ if(gpio_request(gpio_dev->gpio_rx_nr, LIRC_DRIVER_NAME " ir/in")) {
++ printk(KERN_ALERT LIRC_DRIVER_NAME ": can't claim gpio pin %d\n", gpio_dev->gpio_rx_nr);
++ ret = -ENODEV;
++ goto exit_gpio_free_out_pin;
++ }
++ }
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ gpio_direction_input(gpio_dev->gpio_rx_nr);
++ }
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ gpio_direction_output(gpio_dev->gpio_tx_nr, 1);
++ gpio_set_value(gpio_dev->gpio_tx_nr, 0);
++ }
++
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ irq = gpio_to_irq(gpio_dev->gpio_rx_nr);
++ irqdata = irq_get_irq_data(irq);
++
++ if(irqdata && irqdata->chip) {
++ irqchip = irqdata->chip;
++ } else {
++ ret = -ENODEV;
++ goto exit_gpio_free_in_pin;
++ }
++
++ /* if pin is high, then this must be an active low receiver. */
++ if(gpio_dev->sense == -1) {
++ /* wait 1/2 sec for the power supply */
++ msleep(500);
++
++ /*
++ * probe 9 times every 0.04s, collect "votes" for
++ * active high/low
++ */
++ nlow = 0;
++ nhigh = 0;
++ for(i = 0; i < 9; i++) {
++ if(gpio_get_value(gpio_dev->gpio_rx_nr))
++ nlow++;
++ else
++ nhigh++;
++ msleep(40);
++ }
++ gpio_dev->sense = (nlow >= nhigh ? 1 : 0);
++ printk(KERN_INFO LIRC_DRIVER_NAME ": auto-detected active %s receiver on GPIO pin %d\n",
++ gpio_dev->sense ? "low" : "high", gpio_dev->gpio_rx_nr);
++ } else {
++ printk(KERN_INFO LIRC_DRIVER_NAME ": manually using active %s receiver on GPIO pin %d\n",
++ gpio_dev->sense ? "low" : "high", gpio_dev->gpio_rx_nr);
++ }
++ }
++
++ return 0;
++
++exit_gpio_free_in_pin:
++ gpio_free(gpio_dev->gpio_rx_nr);
++
++exit_gpio_free_out_pin:
++ gpio_free(gpio_dev->gpio_tx_nr);
++
++exit_init_port:
++ return ret;
++}
++
++static void lirc_gpio_exit(void) {
++ if(gpio_dev->gpio_tx_nr >= 0) {
++ gpio_free(gpio_dev->gpio_tx_nr);
++ }
++ if(gpio_dev->gpio_rx_nr >= 0) {
++ gpio_free(gpio_dev->gpio_rx_nr);
++ }
++
++ lirc_unregister_driver(driver.minor);
++ lirc_buffer_free(&rbuf);
++}
++
++static int lirc_gpio_probe(struct platform_device *pdev) {
++ const struct lirc_gpio_platform_data *pdata =
++ pdev->dev.platform_data;
++ int rc;
++ int result = 0;
++ int match = 0;
++ int i = 0;
++
++ if(pdev->dev.of_node) {
++ struct lirc_gpio_platform_data *dtpdata = devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
++ if(!dtpdata)
++ return -ENOMEM;
++ rc = lirc_gpio_get_devtree_pdata(&pdev->dev, dtpdata);
++ if(rc)
++ return rc;
++ pdata = dtpdata;
++ }
++
++ if(!pdata)
++ return -EINVAL;
++
++ gpio_dev = kzalloc(sizeof(struct lirc_gpio_dev), GFP_KERNEL);
++ if(!gpio_dev)
++ return -ENOMEM;
++
++ gpio_dev->gpio_rx_nr = pdata->gpio_rx_nr;
++ gpio_dev->gpio_tx_nr = pdata->gpio_tx_nr;
++ gpio_dev->sense = pdata->sense;
++ gpio_dev->softcarrier = pdata->softcarrier;
++ memcpy(gpio_dev->validgpios, pdata->validgpios, 255);
++
++ if(gpio_in_pin != gpio_out_pin) {
++ match = 0;
++ for(i = 0; (i < ARRAY_SIZE(gpio_dev->validgpios)) && (!match) && (gpio_dev->validgpios[i] != -1); i++) {
++ if(gpio_in_pin == gpio_dev->validgpios[i]) {
++ match = 1;
++ break;
++ }
++ }
++ if(gpio_in_pin > -1) {
++ if(!match) {
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": invalid RX GPIO pin specified!\n");
++ return -EINVAL;
++ } else {
++ gpio_dev->gpio_rx_nr = gpio_in_pin;
++ }
++ }
++ match = 0;
++ for(i = 0; (i < ARRAY_SIZE(gpio_dev->validgpios)) && (!match) && (gpio_dev->validgpios[i] != -1); i++) {
++ if(gpio_out_pin == gpio_dev->validgpios[i]) {
++ match = 1;
++ break;
++ }
++ }
++ if(gpio_out_pin > -1) {
++ if(!match) {
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": invalid TX GPIO pin specified!\n");
++ return -EINVAL;
++ } else {
++ gpio_dev->gpio_tx_nr = gpio_out_pin;
++ }
++ }
++ }
++ if(sense > -2) {
++ gpio_dev->sense = sense;
++ }
++ if(softcarrier >= 0) {
++ gpio_dev->softcarrier = softcarrier;
++ }
++
++ printk(KERN_DEBUG LIRC_DRIVER_NAME ": rx %d, tx %d, sense %d, softcarrier %d\n",
++ gpio_dev->gpio_rx_nr, gpio_dev->gpio_tx_nr, gpio_dev->sense, gpio_dev->softcarrier);
++
++ platform_set_drvdata(pdev, gpio_dev);
++
++ result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
++ if(result < 0)
++ return -ENOMEM;
++
++ driver.features = LIRC_CAN_SET_SEND_DUTY_CYCLE |
++ LIRC_CAN_SET_SEND_CARRIER |
++ LIRC_CAN_SEND_PULSE |
++ LIRC_CAN_REC_MODE2;
++
++ driver.dev = &pdev->dev;
++ driver.minor = lirc_register_driver(&driver);
++
++ if(driver.minor < 0) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": device registration failed with %d\n", result);
++ result = -EIO;
++ goto exit_gpio;
++ }
++
++ result = init_port();
++ if(result < 0)
++ goto exit_gpio;
++
++ return 0;
++
++exit_gpio:
++ lirc_gpio_exit();
++
++ return result;
++}
++
++static int lirc_gpio_remove(struct platform_device *pdev) {
++ struct lirc_gpio_dev *gpio_dev = platform_get_drvdata(pdev);
++
++ lirc_gpio_exit();
++
++ kfree(gpio_dev);
++
++ return 0;
++}
++
++MODULE_DEVICE_TABLE(of, lirc_gpio_of_match);
++module_platform_driver(lirc_gpio_driver);
++
++MODULE_DESCRIPTION("Infra-red GPIO receiver and blaster driver.");
++MODULE_AUTHOR("CurlyMo <development@xbian.org>");
++MODULE_AUTHOR("Aron Robert Szabo <aron@reon.hu>");
++MODULE_AUTHOR("Michael Bishop <cleverca22@gmail.com>");
++MODULE_LICENSE("GPL");
++
++module_param(gpio_out_pin, int, S_IRUGO);
++MODULE_PARM_DESC(gpio_out_pin, "GPIO output/transmitter pin number");
++
++module_param(gpio_in_pin, int, S_IRUGO);
++MODULE_PARM_DESC(gpio_in_pin, "GPIO input/receiver pin number.");
++
++module_param(sense, int, S_IRUGO);
++MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
++ " (0 = active high, 1 = active low )");
++
++module_param(softcarrier, int, S_IRUGO);
++MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
++
+diff -Nur linux-3.14.40.orig/drivers/staging/octeon/ethernet-rgmii.c linux-3.14.40/drivers/staging/octeon/ethernet-rgmii.c
+--- linux-3.14.40.orig/drivers/staging/octeon/ethernet-rgmii.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/staging/octeon/ethernet-rgmii.c 2015-05-01 14:58:04.551427001 -0500
+@@ -166,9 +166,8 @@
+
+ if (use_global_register_lock)
+ spin_unlock_irqrestore(&global_register_lock, flags);
+- else {
++ else
+ mutex_unlock(&priv->phydev->bus->mdio_lock);
+- }
+
+ if (priv->phydev == NULL) {
+ /* Tell core. */
+diff -Nur linux-3.14.40.orig/drivers/staging/rtl8821ae/core.c linux-3.14.40/drivers/staging/rtl8821ae/core.c
+--- linux-3.14.40.orig/drivers/staging/rtl8821ae/core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/staging/rtl8821ae/core.c 2015-05-01 14:58:04.559427001 -0500
+@@ -1414,23 +1414,15 @@
+ * before switch channle or power save, or tx buffer packet
+ * maybe send after offchannel or rf sleep, this may cause
+ * dis-association by AP */
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
++static void rtl_op_flush(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 queues, bool drop)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->intf_ops->flush)
+ rtlpriv->intf_ops->flush(hw, queues, drop);
+ }
+-#else
+-static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
+-{
+- struct rtl_priv *rtlpriv = rtl_priv(hw);
+-
+- if (rtlpriv->intf_ops->flush)
+- rtlpriv->intf_ops->flush(hw, drop);
+-}
+-#endif
+
+ const struct ieee80211_ops rtl_ops = {
+ .start = rtl_op_start,
+diff -Nur linux-3.14.40.orig/drivers/thermal/device_cooling.c linux-3.14.40/drivers/thermal/device_cooling.c
+--- linux-3.14.40.orig/drivers/thermal/device_cooling.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/thermal/device_cooling.c 2015-05-01 14:58:04.559427001 -0500
+@@ -0,0 +1,157 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/thermal.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++
++struct devfreq_cooling_device {
++ int id;
++ struct thermal_cooling_device *cool_dev;
++ unsigned int devfreq_state;
++ unsigned int max_state;
++};
++
++static DEFINE_IDR(devfreq_idr);
++static DEFINE_MUTEX(devfreq_cooling_lock);
++
++static BLOCKING_NOTIFIER_HEAD(devfreq_cooling_chain_head);
++
++int register_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return blocking_notifier_chain_register(
++ &devfreq_cooling_chain_head, nb);
++}
++EXPORT_SYMBOL_GPL(register_devfreq_cooling_notifier);
++
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return blocking_notifier_chain_unregister(
++ &devfreq_cooling_chain_head, nb);
++}
++EXPORT_SYMBOL_GPL(unregister_devfreq_cooling_notifier);
++
++static int devfreq_cooling_notifier_call_chain(unsigned long val)
++{
++ return (blocking_notifier_call_chain(
++ &devfreq_cooling_chain_head, val, NULL)
++ == NOTIFY_BAD) ? -EINVAL : 0;
++}
++
++static int devfreq_set_cur_state(struct thermal_cooling_device *cdev,
++ unsigned long state)
++{
++ struct devfreq_cooling_device *devfreq_device = cdev->devdata;
++ int ret;
++ unsigned long notify_state;
++
++ if (state >= devfreq_device->max_state)
++ notify_state = 5;
++ else
++ notify_state = state;
++ ret = devfreq_cooling_notifier_call_chain(notify_state);
++ if (ret)
++ return -EINVAL;
++ devfreq_device->devfreq_state = state;
++
++ return 0;
++}
++
++static int devfreq_get_max_state(struct thermal_cooling_device *cdev,
++ unsigned long *state)
++{
++ struct devfreq_cooling_device *devfreq_device = cdev->devdata;
++ *state = devfreq_device->max_state;
++
++ return 0;
++}
++
++static int devfreq_get_cur_state(struct thermal_cooling_device *cdev,
++ unsigned long *state)
++{
++ struct devfreq_cooling_device *devfreq_device = cdev->devdata;
++
++ *state = devfreq_device->devfreq_state;
++
++ return 0;
++}
++
++static struct thermal_cooling_device_ops const devfreq_cooling_ops = {
++ .get_max_state = devfreq_get_max_state,
++ .get_cur_state = devfreq_get_cur_state,
++ .set_cur_state = devfreq_set_cur_state,
++};
++
++static int get_idr(struct idr *idr, int *id)
++{
++ int ret;
++
++ mutex_lock(&devfreq_cooling_lock);
++ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
++ mutex_unlock(&devfreq_cooling_lock);
++ if (unlikely(ret < 0))
++ return ret;
++ *id = ret;
++
++ return 0;
++}
++
++static void release_idr(struct idr *idr, int id)
++{
++ mutex_lock(&devfreq_cooling_lock);
++ idr_remove(idr, id);
++ mutex_unlock(&devfreq_cooling_lock);
++}
++
++struct thermal_cooling_device *devfreq_cooling_register(unsigned long max_state)
++{
++ struct thermal_cooling_device *cool_dev;
++ struct devfreq_cooling_device *devfreq_dev = NULL;
++ char dev_name[THERMAL_NAME_LENGTH];
++ int ret = 0;
++
++ devfreq_dev = kzalloc(sizeof(struct devfreq_cooling_device),
++ GFP_KERNEL);
++ if (!devfreq_dev)
++ return ERR_PTR(-ENOMEM);
++
++ ret = get_idr(&devfreq_idr, &devfreq_dev->id);
++ if (ret) {
++ kfree(devfreq_dev);
++ return ERR_PTR(-EINVAL);
++ }
++
++ snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d",
++ devfreq_dev->id);
++
++ cool_dev = thermal_cooling_device_register(dev_name, devfreq_dev,
++ &devfreq_cooling_ops);
++ if (!cool_dev) {
++ release_idr(&devfreq_idr, devfreq_dev->id);
++ kfree(devfreq_dev);
++ return ERR_PTR(-EINVAL);
++ }
++ devfreq_dev->cool_dev = cool_dev;
++ devfreq_dev->devfreq_state = 0;
++ devfreq_dev->max_state = max_state;
++
++ return cool_dev;
++}
++EXPORT_SYMBOL_GPL(devfreq_cooling_register);
++
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
++{
++ struct devfreq_cooling_device *devfreq_dev = cdev->devdata;
++
++ thermal_cooling_device_unregister(devfreq_dev->cool_dev);
++ release_idr(&devfreq_idr, devfreq_dev->id);
++ kfree(devfreq_dev);
++}
++EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
+diff -Nur linux-3.14.40.orig/drivers/thermal/fair_share.c linux-3.14.40/drivers/thermal/fair_share.c
+--- linux-3.14.40.orig/drivers/thermal/fair_share.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/fair_share.c 2015-05-01 14:58:04.567427001 -0500
+@@ -23,6 +23,7 @@
+ */
+
+ #include <linux/thermal.h>
++#include <trace/events/thermal.h>
+
+ #include "thermal_core.h"
+
+@@ -34,6 +35,7 @@
+ {
+ int count = 0;
+ unsigned long trip_temp;
++ enum thermal_trip_type trip_type;
+
+ if (tz->trips == 0 || !tz->ops->get_trip_temp)
+ return 0;
+@@ -43,6 +45,16 @@
+ if (tz->temperature < trip_temp)
+ break;
+ }
++
++ /*
++ * count > 0 only if temperature is greater than first trip
++ * point, in which case, trip_point = count - 1
++ */
++ if (count > 0) {
++ tz->ops->get_trip_type(tz, count - 1, &trip_type);
++ trace_thermal_zone_trip(tz, count - 1, trip_type);
++ }
++
+ return count;
+ }
+
+diff -Nur linux-3.14.40.orig/drivers/thermal/imx_thermal.c linux-3.14.40/drivers/thermal/imx_thermal.c
+--- linux-3.14.40.orig/drivers/thermal/imx_thermal.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/imx_thermal.c 2015-05-01 14:58:04.579427001 -0500
+@@ -12,6 +12,8 @@
+ #include <linux/cpufreq.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
++#include <linux/device_cooling.h>
++#include <linux/fsl_otp.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -46,30 +48,39 @@
+
+ #define OCOTP_ANA1 0x04e0
+
+-/* The driver supports 1 passive trip point and 1 critical trip point */
+-enum imx_thermal_trip {
+- IMX_TRIP_PASSIVE,
+- IMX_TRIP_CRITICAL,
+- IMX_TRIP_NUM,
+-};
++#define OCOTP_TEMP_GRADE 0x480
++#define OCOTP_TEMP_GRADE_SHIFT 5
++#define OCOTP_TEMP_GRADE_AUT 0x3
++#define OCOTP_TEMP_GRADE_IND 0x2
++#define OCOTP_TEMP_GRADE_EXT 0x1
++#define OCOTP_TEMP_GRADE_COM 0x0
+
+ /*
+ * It defines the temperature in millicelsius for passive trip point
+ * that will trigger cooling action when crossed.
+ */
+-#define IMX_TEMP_PASSIVE 85000
++#define IMX_TEMP_MAX_PASSIVE 85000
++#define IMX_TEMP_MIN_TRIP_DELTA 6000
++
++#define IMX_POLLING_DELAY 3000 /* millisecond */
++#define IMX_PASSIVE_DELAY 2000
++
++#define FACTOR0 10000000
++#define FACTOR1 15976
++#define FACTOR2 4297157
+
+-#define IMX_POLLING_DELAY 2000 /* millisecond */
+-#define IMX_PASSIVE_DELAY 1000
++#define IMX_TRIP_PASSIVE 0
+
+ struct imx_thermal_data {
+ struct thermal_zone_device *tz;
+- struct thermal_cooling_device *cdev;
++ struct thermal_cooling_device *cdev[2];
+ enum thermal_device_mode mode;
+ struct regmap *tempmon;
+- int c1, c2; /* See formula in imx_get_sensor_data() */
++ u32 c1, c2; /* See formula in imx_get_sensor_data() */
+ unsigned long temp_passive;
+ unsigned long temp_critical;
++ unsigned long num_passive_trips;
++ unsigned long temp_zone_delta;
+ unsigned long alarm_temp;
+ unsigned long last_temp;
+ bool irq_enabled;
+@@ -84,7 +95,7 @@
+ int alarm_value;
+
+ data->alarm_temp = alarm_temp;
+- alarm_value = (alarm_temp - data->c2) / data->c1;
++ alarm_value = (data->c2 - alarm_temp) / data->c1;
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
+ regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
+ TEMPSENSE0_ALARM_VALUE_SHIFT);
+@@ -95,6 +106,7 @@
+ struct imx_thermal_data *data = tz->devdata;
+ struct regmap *map = data->tempmon;
+ unsigned int n_meas;
++ unsigned long cur_state;
+ bool wait;
+ u32 val;
+
+@@ -136,12 +148,23 @@
+ n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
+
+ /* See imx_get_sensor_data() for formula derivation */
+- *temp = data->c2 + data->c1 * n_meas;
++ *temp = data->c2 - n_meas * data->c1;
++
++ data->cdev[0]->ops->get_cur_state(data->cdev[0], &cur_state);
+
+ /* Update alarm value to next higher trip point */
+- if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
++ if ((data->temp_passive < data->alarm_temp) &&
++ (data->alarm_temp < data->temp_critical) &&
++ (cur_state < data->num_passive_trips)) {
++ imx_set_alarm_temp(data, data->temp_passive + ((cur_state + 1) * data->temp_zone_delta));
++ dev_dbg(&tz->device, "thermal alarm on: T < %lu\n",
++ data->alarm_temp / 1000);
++ }
++
++ if (data->alarm_temp < data->temp_critical && *temp >= data->temp_passive + (data->num_passive_trips * data->temp_zone_delta))
+ imx_set_alarm_temp(data, data->temp_critical);
+- if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) {
++
++ if (data->alarm_temp > data->temp_passive && *temp < data->temp_passive) {
+ imx_set_alarm_temp(data, data->temp_passive);
+ dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
+ data->alarm_temp / 1000);
+@@ -210,7 +233,8 @@
+ static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
+ enum thermal_trip_type *type)
+ {
+- *type = (trip == IMX_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE :
++ struct imx_thermal_data *data = tz->devdata;
++ *type = (trip < data->num_passive_trips) ? THERMAL_TRIP_PASSIVE :
+ THERMAL_TRIP_CRITICAL;
+ return 0;
+ }
+@@ -229,8 +253,9 @@
+ {
+ struct imx_thermal_data *data = tz->devdata;
+
+- *temp = (trip == IMX_TRIP_PASSIVE) ? data->temp_passive :
+- data->temp_critical;
++ *temp = (trip < data->num_passive_trips) ?
++ data->temp_passive + (trip * data->temp_zone_delta) :
++ data->temp_critical;
+ return 0;
+ }
+
+@@ -239,13 +264,14 @@
+ {
+ struct imx_thermal_data *data = tz->devdata;
+
+- if (trip == IMX_TRIP_CRITICAL)
++ if (trip > IMX_TRIP_PASSIVE)
+ return -EPERM;
+
+- if (temp > IMX_TEMP_PASSIVE)
++ if (trip == IMX_TRIP_PASSIVE && temp > IMX_TEMP_MAX_PASSIVE)
+ return -EINVAL;
+
+ data->temp_passive = temp;
++ data->temp_zone_delta = (data->temp_critical - data->temp_passive) / data->num_passive_trips;
+
+ imx_set_alarm_temp(data, temp);
+
+@@ -286,6 +312,37 @@
+ return 0;
+ }
+
++ int imx_get_trend(struct thermal_zone_device *tz,
++ int trip, enum thermal_trend *trend)
++{
++ struct imx_thermal_data *data = tz->devdata;
++ int ret;
++ unsigned long trip_temp, cur_state;
++
++ ret = imx_get_trip_temp(tz, trip, &trip_temp);
++ if (ret < 0)
++ return ret;
++
++ data->cdev[0]->ops->get_cur_state(data->cdev[0], &cur_state);
++
++ if (tz->temperature > tz->last_temperature &&
++ tz->temperature > (data->temp_passive + (cur_state * data->temp_zone_delta))) {
++ *trend = THERMAL_TREND_RAISING;
++ } else if (tz->temperature < tz->last_temperature && cur_state) {
++ if (tz->temperature <= (data->temp_passive - data->temp_zone_delta))
++ *trend = THERMAL_TREND_DROP_FULL;
++ else if (tz->temperature <= (data->temp_passive +
++ ((cur_state - 1) * data->temp_zone_delta)))
++ *trend = THERMAL_TREND_DROPPING;
++ else
++ *trend = THERMAL_TREND_STABLE;
++ } else {
++ *trend = THERMAL_TREND_STABLE;
++ }
++
++ return 0;
++}
++
+ static struct thermal_zone_device_ops imx_tz_ops = {
+ .bind = imx_bind,
+ .unbind = imx_unbind,
+@@ -295,6 +352,7 @@
+ .get_trip_type = imx_get_trip_type,
+ .get_trip_temp = imx_get_trip_temp,
+ .get_crit_temp = imx_get_crit_temp,
++ .get_trend = imx_get_trend,
+ .set_trip_temp = imx_set_trip_temp,
+ };
+
+@@ -302,9 +360,10 @@
+ {
+ struct imx_thermal_data *data = platform_get_drvdata(pdev);
+ struct regmap *map;
+- int t1, t2, n1, n2;
++ int t1, n1;
+ int ret;
+ u32 val;
++ u64 temp64;
+
+ map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,tempmon-data");
+@@ -328,43 +387,83 @@
+ /*
+ * Sensor data layout:
+ * [31:20] - sensor value @ 25C
+- * [19:8] - sensor value of hot
+- * [7:0] - hot temperature value
++ * Use universal formula now and only need sensor value @ 25C
++ * slope = 0.4297157 - (0.0015976 * 25C fuse)
+ */
+ n1 = val >> 20;
+- n2 = (val & 0xfff00) >> 8;
+- t2 = val & 0xff;
+ t1 = 25; /* t1 always 25C */
+
+ /*
+- * Derived from linear interpolation,
+- * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
++ * Derived from linear interpolation:
++ * slope = 0.4297157 - (0.0015976 * 25C fuse)
++ * slope = (FACTOR2 - FACTOR1 * n1) / FACTOR0
++ * (Nmeas - n1) / (Tmeas - t1) = slope
+ * We want to reduce this down to the minimum computation necessary
+ * for each temperature read. Also, we want Tmeas in millicelsius
+ * and we don't want to lose precision from integer division. So...
+- * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+- * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
+- * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
+- * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
+- * Let constant c2 = (1000 * T2) - (c1 * N2)
+- * milli_Tmeas = c2 + (c1 * Nmeas)
++ * Tmeas = (Nmeas - n1) / slope + t1
++ * milli_Tmeas = 1000 * (Nmeas - n1) / slope + 1000 * t1
++ * milli_Tmeas = -1000 * (n1 - Nmeas) / slope + 1000 * t1
++ * Let constant c1 = (-1000 / slope)
++ * milli_Tmeas = (n1 - Nmeas) * c1 + 1000 * t1
++ * Let constant c2 = n1 *c1 + 1000 * t1
++ * milli_Tmeas = c2 - Nmeas * c1
+ */
+- data->c1 = 1000 * (t1 - t2) / (n1 - n2);
+- data->c2 = 1000 * t2 - data->c1 * n2;
++ temp64 = FACTOR0;
++ temp64 *= 1000;
++ do_div(temp64, FACTOR1 * n1 - FACTOR2);
++ data->c1 = temp64;
++ data->c2 = n1 * data->c1 + 1000 * t1;
+
+- /*
+- * Set the default passive cooling trip point to 20 °C below the
+- * maximum die temperature. Can be changed from userspace.
+- */
+- data->temp_passive = 1000 * (t2 - 20);
++ return 0;
++}
+
+- /*
+- * The maximum die temperature is t2, let's give 5 °C cushion
+- * for noise and possible temperature rise between measurements.
+- */
+- data->temp_critical = 1000 * (t2 - 5);
++static void imx_set_thermal_defaults(struct imx_thermal_data *data)
++{
++ int ret;
++ u32 val;
+
+- return 0;
++ ret = fsl_otp_readl(OCOTP_TEMP_GRADE, &val);
++
++ if (ret) {
++ /*
++ * Set the default passive cooling trip point,
++ * can be changed from userspace.
++ */
++ data->temp_passive = IMX_TEMP_MAX_PASSIVE;
++
++ /*
++ * The maximum die temperature set to 20 C higher than
++ * IMX_TEMP_MAX_PASSIVE.
++ */
++ data->temp_critical = 1000 * 20 + data->temp_passive;
++ data->temp_zone_delta = (data->temp_critical - data->temp_passive) / data->num_passive_trips;
++ } else {
++ val >>= OCOTP_TEMP_GRADE_SHIFT;
++ val &= 0x3;
++
++ switch (val) {
++ case OCOTP_TEMP_GRADE_AUT:
++ data->temp_critical = 125000;
++ break;
++ case OCOTP_TEMP_GRADE_IND:
++ case OCOTP_TEMP_GRADE_EXT:
++ data->temp_critical = 105000;
++ break;
++ case OCOTP_TEMP_GRADE_COM:
++ default:
++ data->temp_critical = 95000;
++ break;
++ }
++ data->temp_passive = data->temp_critical - (IMX_TEMP_MIN_TRIP_DELTA * data->num_passive_trips);
++ data->temp_zone_delta = IMX_TEMP_MIN_TRIP_DELTA;
++ }
++
++ pr_debug("THERMAL DEFAULTS: passive: %lu \
++ critical %lu trip_points: %lu \
++ zone_delta: %lu\n",
++ data->temp_passive, data->temp_critical,
++ data->num_passive_trips, data->temp_zone_delta);
+ }
+
+ static irqreturn_t imx_thermal_alarm_irq(int irq, void *dev)
+@@ -397,6 +496,10 @@
+ int measure_freq;
+ int ret;
+
++ if (!cpufreq_get_current_driver()) {
++ dev_dbg(&pdev->dev, "no cpufreq driver!");
++ return -EPROBE_DEFER;
++ }
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+@@ -421,6 +524,24 @@
+ return ret;
+ }
+
++ data->irq_enabled = true;
++
++ data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(data->thermal_clk)) {
++ dev_warn(&pdev->dev, "failed to get thermal clk!\n");
++ } else {
++ /*
++ * Thermal sensor needs clk on to get correct value, normally
++ * we should enable its clk before taking measurement and disable
++ * clk after measurement is done, but if alarm function is enabled,
++ * hardware will auto measure the temperature periodically, so we
++ * need to keep the clk always on for alarm function.
++ */
++ ret = clk_prepare_enable(data->thermal_clk);
++ if (ret)
++ dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
++ }
++
+ platform_set_drvdata(pdev, data);
+
+ ret = imx_get_sensor_data(pdev);
+@@ -437,16 +558,28 @@
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+ cpumask_set_cpu(0, &clip_cpus);
+- data->cdev = cpufreq_cooling_register(&clip_cpus);
+- if (IS_ERR(data->cdev)) {
+- ret = PTR_ERR(data->cdev);
++ data->cdev[0] = cpufreq_cooling_register(&clip_cpus);
++ if (IS_ERR(data->cdev[0])) {
++ ret = PTR_ERR(data->cdev[0]);
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n", ret);
+ return ret;
+ }
+
++ data->cdev[0]->ops->get_max_state(data->cdev[0], &data->num_passive_trips);
++
++ data->cdev[1] = devfreq_cooling_register(data->num_passive_trips + 1);
++ if (IS_ERR(data->cdev[1])) {
++ ret = PTR_ERR(data->cdev[1]);
++ dev_err(&pdev->dev,
++ "failed to register devfreq cooling device: %d\n", ret);
++ return ret;
++ }
++
++ imx_set_thermal_defaults(data);
++
+ data->tz = thermal_zone_device_register("imx_thermal_zone",
+- IMX_TRIP_NUM,
++ data->num_passive_trips + 1,
+ BIT(IMX_TRIP_PASSIVE), data,
+ &imx_tz_ops, NULL,
+ IMX_PASSIVE_DELAY,
+@@ -455,26 +588,11 @@
+ ret = PTR_ERR(data->tz);
+ dev_err(&pdev->dev,
+ "failed to register thermal zone device %d\n", ret);
+- cpufreq_cooling_unregister(data->cdev);
++ cpufreq_cooling_unregister(data->cdev[0]);
++ devfreq_cooling_unregister(data->cdev[1]);
+ return ret;
+ }
+
+- data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(data->thermal_clk)) {
+- dev_warn(&pdev->dev, "failed to get thermal clk!\n");
+- } else {
+- /*
+- * Thermal sensor needs clk on to get correct value, normally
+- * we should enable its clk before taking measurement and disable
+- * clk after measurement is done, but if alarm function is enabled,
+- * hardware will auto measure the temperature periodically, so we
+- * need to keep the clk always on for alarm function.
+- */
+- ret = clk_prepare_enable(data->thermal_clk);
+- if (ret)
+- dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+- }
+-
+ /* Enable measurements at ~ 10 Hz */
+ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
+ measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
+@@ -483,7 +601,6 @@
+ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+- data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+@@ -500,7 +617,8 @@
+ clk_disable_unprepare(data->thermal_clk);
+
+ thermal_zone_device_unregister(data->tz);
+- cpufreq_cooling_unregister(data->cdev);
++ cpufreq_cooling_unregister(data->cdev[0]);
++ devfreq_cooling_unregister(data->cdev[1]);
+
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/drivers/thermal/Kconfig linux-3.14.40/drivers/thermal/Kconfig
+--- linux-3.14.40.orig/drivers/thermal/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/Kconfig 2015-05-01 14:58:04.607427001 -0500
+@@ -125,6 +125,13 @@
+ cpufreq is used as the cooling device to throttle CPUs when the
+ passive trip is crossed.
+
++config DEVICE_THERMAL
++ tristate "generic device cooling support"
++ help
++ Support for device cooling.
++ It supports notification of crossing passive trip for devices,
++ devices need to do their own actions to cool down the SOC.
++
+ config SPEAR_THERMAL
+ bool "SPEAr thermal sensor driver"
+ depends on PLAT_SPEAR
+diff -Nur linux-3.14.40.orig/drivers/thermal/Makefile linux-3.14.40/drivers/thermal/Makefile
+--- linux-3.14.40.orig/drivers/thermal/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/Makefile 2015-05-01 14:58:04.607427001 -0500
+@@ -26,6 +26,7 @@
+ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
+ obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
+ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
++obj-$(CONFIG_DEVICE_THERMAL) += device_cooling.o
+ obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
+ obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+ obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
+diff -Nur linux-3.14.40.orig/drivers/thermal/of-thermal.c linux-3.14.40/drivers/thermal/of-thermal.c
+--- linux-3.14.40.orig/drivers/thermal/of-thermal.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/of-thermal.c 2015-05-01 14:58:04.619427001 -0500
+@@ -156,8 +156,8 @@
+
+ ret = thermal_zone_bind_cooling_device(thermal,
+ tbp->trip_id, cdev,
+- tbp->min,
+- tbp->max);
++ tbp->max,
++ tbp->min);
+ if (ret)
+ return ret;
+ }
+@@ -712,11 +712,12 @@
+ }
+
+ i = 0;
+- for_each_child_of_node(child, gchild)
++ for_each_child_of_node(child, gchild) {
+ ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
+ tz->trips, tz->ntrips);
+ if (ret)
+ goto free_tbps;
++ }
+
+ finish:
+ of_node_put(child);
+diff -Nur linux-3.14.40.orig/drivers/thermal/step_wise.c linux-3.14.40/drivers/thermal/step_wise.c
+--- linux-3.14.40.orig/drivers/thermal/step_wise.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/step_wise.c 2015-05-01 14:58:04.651427001 -0500
+@@ -23,6 +23,7 @@
+ */
+
+ #include <linux/thermal.h>
++#include <trace/events/thermal.h>
+
+ #include "thermal_core.h"
+
+@@ -70,10 +71,12 @@
+ if (next_target < instance->lower)
+ next_target = instance->lower;
+ }
++ dev_dbg(&cdev->device, "THERMAL_TREND_RAISING: next_target=%ld\n", next_target);
+ break;
+ case THERMAL_TREND_RAISE_FULL:
+ if (throttle)
+ next_target = instance->upper;
++ dev_dbg(&cdev->device, "THERMAL_TREND_RAISE_FULL: next_target=%ld\n", next_target);
+ break;
+ case THERMAL_TREND_DROPPING:
+ if (cur_state == instance->lower) {
+@@ -84,6 +87,7 @@
+ if (next_target > instance->upper)
+ next_target = instance->upper;
+ }
++ dev_dbg(&cdev->device, "THERMAL_TREND_DROPPING: next_target=%ld\n", next_target);
+ break;
+ case THERMAL_TREND_DROP_FULL:
+ if (cur_state == instance->lower) {
+@@ -91,6 +95,7 @@
+ next_target = THERMAL_NO_TARGET;
+ } else
+ next_target = instance->lower;
++ dev_dbg(&cdev->device, "THERMAL_TREND_DROP_FULL: next_target=%ld\n", next_target);
+ break;
+ default:
+ break;
+@@ -117,7 +122,7 @@
+ enum thermal_trend trend;
+ struct thermal_instance *instance;
+ bool throttle = false;
+- int old_target;
++ unsigned long old_target;
+
+ if (trip == THERMAL_TRIPS_NONE) {
+ trip_temp = tz->forced_passive;
+@@ -129,8 +134,10 @@
+
+ trend = get_tz_trend(tz, trip);
+
+- if (tz->temperature >= trip_temp)
++ if (tz->temperature >= trip_temp) {
+ throttle = true;
++ trace_thermal_zone_trip(tz, trip, trip_type);
++ }
+
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
+ trip, trip_type, trip_temp, trend, throttle);
+@@ -143,8 +150,8 @@
+
+ old_target = instance->target;
+ instance->target = get_target_state(instance, trend, throttle);
+- dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+- old_target, (int)instance->target);
++ dev_dbg(&instance->cdev->device, "old_target=%ld, target=%ld\n",
++ old_target, instance->target);
+
+ if (old_target == instance->target)
+ continue;
+diff -Nur linux-3.14.40.orig/drivers/thermal/thermal_core.c linux-3.14.40/drivers/thermal/thermal_core.c
+--- linux-3.14.40.orig/drivers/thermal/thermal_core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/thermal/thermal_core.c 2015-05-01 14:58:04.663427001 -0500
+@@ -38,6 +38,9 @@
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/thermal.h>
++
+ #include "thermal_core.h"
+ #include "thermal_hwmon.h"
+
+@@ -368,6 +371,8 @@
+ if (tz->temperature < trip_temp)
+ return;
+
++ trace_thermal_zone_trip(tz, trip, trip_type);
++
+ if (tz->ops->notify)
+ tz->ops->notify(tz, trip, trip_type);
+
+@@ -463,6 +468,7 @@
+ tz->temperature = temp;
+ mutex_unlock(&tz->lock);
+
++ trace_thermal_temperature(tz);
+ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+ tz->last_temperature, tz->temperature);
+ }
+@@ -1287,6 +1293,7 @@
+ mutex_unlock(&cdev->lock);
+ cdev->ops->set_cur_state(cdev, target);
+ cdev->updated = true;
++ trace_cdev_update(cdev, target);
+ dev_dbg(&cdev->device, "set to state %lu\n", target);
+ }
+ EXPORT_SYMBOL(thermal_cdev_update);
+@@ -1568,8 +1575,7 @@
+
+ thermal_zone_device_update(tz);
+
+- if (!result)
+- return tz;
++ return tz;
+
+ unregister:
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+diff -Nur linux-3.14.40.orig/drivers/tty/serial/earlycon.c linux-3.14.40/drivers/tty/serial/earlycon.c
+--- linux-3.14.40.orig/drivers/tty/serial/earlycon.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/tty/serial/earlycon.c 2015-05-01 14:58:04.663427001 -0500
+@@ -0,0 +1,152 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd.
++ * Author: Rob Herring <robh@kernel.org>
++ *
++ * Based on 8250 earlycon:
++ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
++ * Bjorn Helgaas <bjorn.helgaas@hp.com>
++ *
++ * This program is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/serial_core.h>
++
++#ifdef CONFIG_FIX_EARLYCON_MEM
++#include <asm/fixmap.h>
++#endif
++
++#include <asm/serial.h>
++
++static struct console early_con = {
++ .name = "earlycon",
++ .flags = CON_PRINTBUFFER | CON_BOOT,
++ .index = -1,
++};
++
++static struct earlycon_device early_console_dev = {
++ .con = &early_con,
++};
++
++static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
++{
++ void __iomem *base;
++#ifdef CONFIG_FIX_EARLYCON_MEM
++ set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);
++ base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
++ base += paddr & ~PAGE_MASK;
++#else
++ base = ioremap(paddr, size);
++#endif
++ if (!base)
++ pr_err("%s: Couldn't map 0x%llx\n", __func__,
++ (unsigned long long)paddr);
++
++ return base;
++}
++
++static int __init parse_options(struct earlycon_device *device,
++ char *options)
++{
++ struct uart_port *port = &device->port;
++ int mmio, mmio32, length, ret;
++ unsigned long addr;
++
++ if (!options)
++ return -ENODEV;
++
++ mmio = !strncmp(options, "mmio,", 5);
++ mmio32 = !strncmp(options, "mmio32,", 7);
++ if (mmio || mmio32) {
++ port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
++ options += mmio ? 5 : 7;
++ ret = kstrtoul(options, 0, &addr);
++ if (ret)
++ return ret;
++ port->mapbase = addr;
++ if (mmio32)
++ port->regshift = 2;
++ } else if (!strncmp(options, "io,", 3)) {
++ port->iotype = UPIO_PORT;
++ options += 3;
++ ret = kstrtoul(options, 0, &addr);
++ if (ret)
++ return ret;
++ port->iobase = addr;
++ mmio = 0;
++ } else if (!strncmp(options, "0x", 2)) {
++ port->iotype = UPIO_MEM;
++ ret = kstrtoul(options, 0, &addr);
++ if (ret)
++ return ret;
++ port->mapbase = addr;
++ } else {
++ return -EINVAL;
++ }
++
++ port->uartclk = BASE_BAUD * 16;
++
++ options = strchr(options, ',');
++ if (options) {
++ options++;
++ ret = kstrtouint(options, 0, &device->baud);
++ if (ret)
++ return ret;
++ length = min(strcspn(options, " ") + 1,
++ (size_t)(sizeof(device->options)));
++ strlcpy(device->options, options, length);
++ }
++
++ if (mmio || mmio32)
++ pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
++ mmio32 ? "32" : "",
++ (unsigned long long)port->mapbase,
++ device->options);
++ else
++ pr_info("Early serial console at I/O port 0x%lx (options '%s')\n",
++ port->iobase,
++ device->options);
++
++ return 0;
++}
++
++int __init setup_earlycon(char *buf, const char *match,
++ int (*setup)(struct earlycon_device *, const char *))
++{
++ int err;
++ size_t len;
++ struct uart_port *port = &early_console_dev.port;
++
++ if (!buf || !match || !setup)
++ return 0;
++
++ len = strlen(match);
++ if (strncmp(buf, match, len))
++ return 0;
++ if (buf[len] && (buf[len] != ','))
++ return 0;
++
++ buf += len + 1;
++
++ err = parse_options(&early_console_dev, buf);
++ /* On parsing error, pass the options buf to the setup function */
++ if (!err)
++ buf = NULL;
++
++ if (port->mapbase)
++ port->membase = earlycon_map(port->mapbase, 64);
++
++ early_console_dev.con->data = &early_console_dev;
++ err = setup(&early_console_dev, buf);
++ if (err < 0)
++ return err;
++ if (!early_console_dev.con->write)
++ return -ENODEV;
++
++ register_console(early_console_dev.con);
++ return 0;
++}
+diff -Nur linux-3.14.40.orig/drivers/tty/serial/Kconfig linux-3.14.40/drivers/tty/serial/Kconfig
+--- linux-3.14.40.orig/drivers/tty/serial/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/tty/serial/Kconfig 2015-05-01 14:58:04.671427001 -0500
+@@ -7,6 +7,13 @@
+ menu "Serial drivers"
+ depends on HAS_IOMEM
+
++config SERIAL_EARLYCON
++ bool
++ help
++ Support for early consoles with the earlycon parameter. This enables
++ the console before standard serial driver is probed. The console is
++ enabled when early_param is processed.
++
+ source "drivers/tty/serial/8250/Kconfig"
+
+ comment "Non-8250 serial port support"
+diff -Nur linux-3.14.40.orig/drivers/tty/serial/Makefile linux-3.14.40/drivers/tty/serial/Makefile
+--- linux-3.14.40.orig/drivers/tty/serial/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/tty/serial/Makefile 2015-05-01 14:58:04.679427001 -0500
+@@ -5,6 +5,8 @@
+ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
+ obj-$(CONFIG_SERIAL_21285) += 21285.o
+
++obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
++
+ # These Sparc drivers have to appear before others such as 8250
+ # which share ttySx minor node space. Otherwise console device
+ # names change and other unplesantries.
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/ci.h linux-3.14.40/drivers/usb/chipidea/ci.h
+--- linux-3.14.40.orig/drivers/usb/chipidea/ci.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/ci.h 2015-05-01 14:58:04.683427001 -0500
+@@ -139,8 +139,8 @@
+ * @roles: array of supported roles for this controller
+ * @role: current role
+ * @is_otg: if the device is otg-capable
+- * @work: work for role changing
+- * @wq: workqueue thread
++ * @otg_task: the thread for handling otg task
++ * @otg_wait: the otg event waitqueue head
+ * @qh_pool: allocation pool for queue heads
+ * @td_pool: allocation pool for transfer descriptors
+ * @gadget: device side representation for peripheral controller
+@@ -165,6 +165,10 @@
+ * @b_sess_valid_event: indicates there is a vbus event, and handled
+ * at ci_otg_work
+ * @imx28_write_fix: Freescale imx28 needs swp instruction for writing
++ * @supports_runtime_pm: if runtime pm is supported
++ * @in_lpm: if the core in low power mode
++ * @wakeup_int: if wakeup interrupt occur
++ * @timer: timer to delay clock closing
+ */
+ struct ci_hdrc {
+ struct device *dev;
+@@ -174,8 +178,8 @@
+ struct ci_role_driver *roles[CI_ROLE_END];
+ enum ci_role role;
+ bool is_otg;
+- struct work_struct work;
+- struct workqueue_struct *wq;
++ struct task_struct *otg_task;
++ wait_queue_head_t otg_wait;
+
+ struct dma_pool *qh_pool;
+ struct dma_pool *td_pool;
+@@ -204,6 +208,10 @@
+ bool id_event;
+ bool b_sess_valid_event;
+ bool imx28_write_fix;
++ bool supports_runtime_pm;
++ bool in_lpm;
++ bool wakeup_int;
++ struct timer_list timer;
+ };
+
+ static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/ci_hdrc_imx.c linux-3.14.40/drivers/usb/chipidea/ci_hdrc_imx.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/ci_hdrc_imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/ci_hdrc_imx.c 2015-05-01 14:58:04.691427001 -0500
+@@ -19,11 +19,14 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/usb/chipidea.h>
+ #include <linux/clk.h>
++#include <linux/busfreq-imx6.h>
+
+ #include "ci.h"
+ #include "ci_hdrc_imx.h"
+
+-#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0)
++#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0)
++#define CI_HDRC_IMX_SUPPORT_RUNTIME_PM BIT(1)
++#define CI_HDRC_IMX_HOST_QUIRK BIT(2)
+
+ struct ci_hdrc_imx_platform_flag {
+ unsigned int flags;
+@@ -32,12 +35,30 @@
+ static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx23_usb_data = {
++ .flags = CI_HDRC_IMX_HOST_QUIRK,
++};
++
+ static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
+- .flags = CI_HDRC_IMX_IMX28_WRITE_FIX,
++ .flags = CI_HDRC_IMX_IMX28_WRITE_FIX |
++ CI_HDRC_IMX_HOST_QUIRK,
++};
++
++static const struct ci_hdrc_imx_platform_flag imx6q_usb_data = {
++ .flags = CI_HDRC_IMX_SUPPORT_RUNTIME_PM |
++ CI_HDRC_IMX_HOST_QUIRK,
++};
++
++static const struct ci_hdrc_imx_platform_flag imx6sl_usb_data = {
++ .flags = CI_HDRC_IMX_SUPPORT_RUNTIME_PM |
++ CI_HDRC_IMX_HOST_QUIRK,
+ };
+
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
++ { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
++ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
+ { /* sentinel */ }
+ };
+@@ -48,6 +69,8 @@
+ struct platform_device *ci_pdev;
+ struct clk *clk;
+ struct imx_usbmisc_data *usbmisc_data;
++ bool supports_runtime_pm;
++ bool in_lpm;
+ };
+
+ /* Common functions shared by usbmisc drivers */
+@@ -123,8 +146,10 @@
+ return PTR_ERR(data->clk);
+ }
+
++ request_bus_freq(BUS_FREQ_HIGH);
+ ret = clk_prepare_enable(data->clk);
+ if (ret) {
++ release_bus_freq(BUS_FREQ_HIGH);
+ dev_err(&pdev->dev,
+ "Failed to prepare or enable clock, err=%d\n", ret);
+ return ret;
+@@ -145,6 +170,14 @@
+ if (ret)
+ goto err_clk;
+
++ if (imx_platform_flag->flags & CI_HDRC_IMX_SUPPORT_RUNTIME_PM) {
++ pdata.flags |= CI_HDRC_SUPPORTS_RUNTIME_PM;
++ data->supports_runtime_pm = true;
++ }
++
++ if (imx_platform_flag->flags & CI_HDRC_IMX_HOST_QUIRK)
++ pdata.flags |= CI_HDRC_IMX_EHCI_QUIRK;
++
+ if (data->usbmisc_data) {
+ ret = imx_usbmisc_init(data->usbmisc_data);
+ if (ret) {
+@@ -165,6 +198,11 @@
+ goto err_clk;
+ }
+
++ /* usbmisc needs to know dr mode to choose wakeup setting */
++ if (data->usbmisc_data)
++ data->usbmisc_data->available_role =
++ ci_hdrc_query_available_role(data->ci_pdev);
++
+ if (data->usbmisc_data) {
+ ret = imx_usbmisc_init_post(data->usbmisc_data);
+ if (ret) {
+@@ -174,10 +212,23 @@
+ }
+ }
+
++ if (data->usbmisc_data) {
++ ret = imx_usbmisc_set_wakeup(data->usbmisc_data, false);
++ if (ret) {
++ dev_err(&pdev->dev, "usbmisc set_wakeup failed, ret=%d\n",
++ ret);
++ goto disable_device;
++ }
++ }
++
+ platform_set_drvdata(pdev, data);
+
+- pm_runtime_no_callbacks(&pdev->dev);
+- pm_runtime_enable(&pdev->dev);
++ device_set_wakeup_capable(&pdev->dev, true);
++
++ if (data->supports_runtime_pm) {
++ pm_runtime_set_active(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ }
+
+ return 0;
+
+@@ -185,6 +236,7 @@
+ ci_hdrc_remove_device(data->ci_pdev);
+ err_clk:
+ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
+ return ret;
+ }
+
+@@ -195,10 +247,119 @@
+ pm_runtime_disable(&pdev->dev);
+ ci_hdrc_remove_device(data->ci_pdev);
+ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
+
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++static int imx_controller_suspend(struct device *dev)
++{
++ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++ int ret;
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (data->in_lpm)
++ return 0;
++
++ if (data->usbmisc_data) {
++ ret = imx_usbmisc_set_wakeup(data->usbmisc_data, true);
++ if (ret) {
++ dev_err(dev,
++ "usbmisc set_wakeup failed, ret=%d\n",
++ ret);
++ return ret;
++ }
++ }
++
++ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++ data->in_lpm = true;
++
++ return 0;
++}
++
++static int imx_controller_resume(struct device *dev)
++{
++ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++ int ret = 0;
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (!data->in_lpm)
++ return 0;
++
++ request_bus_freq(BUS_FREQ_HIGH);
++ ret = clk_prepare_enable(data->clk);
++ if (ret) {
++ release_bus_freq(BUS_FREQ_HIGH);
++ return ret;
++ }
++
++ data->in_lpm = false;
++
++ if (data->usbmisc_data) {
++ ret = imx_usbmisc_set_wakeup(data->usbmisc_data, false);
++ if (ret) {
++ dev_err(dev,
++ "usbmisc set_wakeup failed, ret=%d\n",
++ ret);
++ ret = -EINVAL;
++ goto clk_disable;
++ }
++ }
++
++ return 0;
++
++clk_disable:
++ clk_disable_unprepare(data->clk);
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ return ret;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int ci_hdrc_imx_suspend(struct device *dev)
++{
++ return imx_controller_suspend(dev);
++}
++
++static int ci_hdrc_imx_resume(struct device *dev)
++{
++ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++ int ret;
++
++ ret = imx_controller_resume(dev);
++ if (!ret && data->supports_runtime_pm) {
++ pm_runtime_disable(dev);
++ pm_runtime_set_active(dev);
++ pm_runtime_enable(dev);
++ }
++
++ return ret;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++#ifdef CONFIG_PM_RUNTIME
++static int ci_hdrc_imx_runtime_suspend(struct device *dev)
++{
++ return imx_controller_suspend(dev);
++}
++
++static int ci_hdrc_imx_runtime_resume(struct device *dev)
++{
++ return imx_controller_resume(dev);
++}
++#endif /* CONFIG_PM_RUNTIME */
++
++#endif /* CONFIG_PM */
++static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
++ SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
++ ci_hdrc_imx_runtime_resume, NULL)
++};
++
+ static struct platform_driver ci_hdrc_imx_driver = {
+ .probe = ci_hdrc_imx_probe,
+ .remove = ci_hdrc_imx_remove,
+@@ -206,6 +367,7 @@
+ .name = "imx_usb",
+ .owner = THIS_MODULE,
+ .of_match_table = ci_hdrc_imx_dt_ids,
++ .pm = &ci_hdrc_imx_pm_ops,
+ },
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/ci_hdrc_imx.h linux-3.14.40/drivers/usb/chipidea/ci_hdrc_imx.h
+--- linux-3.14.40.orig/drivers/usb/chipidea/ci_hdrc_imx.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/ci_hdrc_imx.h 2015-05-01 14:58:04.691427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -12,14 +12,18 @@
+ #ifndef __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
+ #define __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
+
++#include <linux/usb/otg.h>
++
+ struct imx_usbmisc_data {
+ int index;
+
+ unsigned int disable_oc:1; /* over current detect disabled */
+ unsigned int evdo:1; /* set external vbus divider option */
++ enum usb_dr_mode available_role;
+ };
+
+ int imx_usbmisc_init(struct imx_usbmisc_data *);
+ int imx_usbmisc_init_post(struct imx_usbmisc_data *);
++int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *, bool);
+
+ #endif /* __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H */
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/ci_hdrc_msm.c linux-3.14.40/drivers/usb/chipidea/ci_hdrc_msm.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/ci_hdrc_msm.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/ci_hdrc_msm.c 2015-05-01 14:58:04.699427001 -0500
+@@ -17,7 +17,7 @@
+
+ #define MSM_USB_BASE (ci->hw_bank.abs)
+
+-static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
++static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
+ {
+ struct device *dev = ci->gadget.dev.parent;
+
+@@ -40,6 +40,8 @@
+ dev_dbg(dev, "unknown ci_hdrc event\n");
+ break;
+ }
++
++ return 0;
+ }
+
+ static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/core.c linux-3.14.40/drivers/usb/chipidea/core.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/core.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/core.c 2015-05-01 14:58:04.707427001 -0500
+@@ -165,25 +165,30 @@
+ return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
+ }
+
++static void hw_wait_phy_stable(void)
++{
++ /* The controller needs at least 1ms to reflect PHY's status */
++ usleep_range(2000, 2500);
++}
++
++static void delay_runtime_pm_put_timer(unsigned long arg)
++{
++ struct ci_hdrc *ci = (struct ci_hdrc *)arg;
++
++ pm_runtime_put(ci->dev);
++}
++
+ /* The PHY enters/leaves low power mode */
+ static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+ {
+ enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
+ bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
+
+- if (enable && !lpm) {
++ if (enable && !lpm)
+ hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
+ PORTSC_PHCD(ci->hw_bank.lpm));
+- } else if (!enable && lpm) {
+- hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm),
+- 0);
+- /*
+- * The controller needs at least 1ms to reflect
+- * PHY's status, the PHY also needs some time (less
+- * than 1ms) to leave low power mode.
+- */
+- usleep_range(1500, 2000);
+- }
++ else if (!enable && lpm)
++ hw_write(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm), 0);
+ }
+
+ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
+@@ -351,6 +356,13 @@
+ irqreturn_t ret = IRQ_NONE;
+ u32 otgsc = 0;
+
++ if (ci->in_lpm) {
++ disable_irq_nosync(irq);
++ ci->wakeup_int = true;
++ pm_runtime_get(ci->dev);
++ return IRQ_HANDLED;
++ }
++
+ if (ci->is_otg)
+ otgsc = hw_read(ci, OP_OTGSC, ~0);
+
+@@ -362,7 +374,7 @@
+ ci->id_event = true;
+ ci_clear_otg_interrupt(ci, OTGSC_IDIS);
+ disable_irq_nosync(ci->irq);
+- queue_work(ci->wq, &ci->work);
++ wake_up(&ci->otg_wait);
+ return IRQ_HANDLED;
+ }
+
+@@ -374,7 +386,7 @@
+ ci->b_sess_valid_event = true;
+ ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
+ disable_irq_nosync(ci->irq);
+- queue_work(ci->wq, &ci->work);
++ wake_up(&ci->otg_wait);
+ return IRQ_HANDLED;
+ }
+
+@@ -473,6 +485,33 @@
+ }
+ EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
+
++/**
++ * ci_hdrc_query_available_role: get runtime available operation mode
++ *
++ * The glue layer can get current operation mode (host/peripheral/otg)
++ * This function should be called after ci core device has created.
++ *
++ * @pdev: the platform device of ci core.
++ *
++ * Return USB_DR_MODE_XXX.
++ */
++enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev)
++{
++ struct ci_hdrc *ci = platform_get_drvdata(pdev);
++
++ if (!ci)
++ return USB_DR_MODE_UNKNOWN;
++ if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET])
++ return USB_DR_MODE_OTG;
++ else if (ci->roles[CI_ROLE_HOST])
++ return USB_DR_MODE_HOST;
++ else if (ci->roles[CI_ROLE_GADGET])
++ return USB_DR_MODE_PERIPHERAL;
++ else
++ return USB_DR_MODE_UNKNOWN;
++}
++EXPORT_SYMBOL_GPL(ci_hdrc_query_available_role);
++
+ static inline void ci_role_destroy(struct ci_hdrc *ci)
+ {
+ ci_hdrc_gadget_destroy(ci);
+@@ -498,9 +537,14 @@
+
+ static int ci_usb_phy_init(struct ci_hdrc *ci)
+ {
++ int ret;
++
+ if (ci->platdata->phy) {
+ ci->transceiver = ci->platdata->phy;
+- return usb_phy_init(ci->transceiver);
++ ret = usb_phy_init(ci->transceiver);
++ if (!ret)
++ hw_wait_phy_stable();
++ return ret;
+ } else {
+ ci->global_phy = true;
+ ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+@@ -559,8 +603,6 @@
+ return -ENODEV;
+ }
+
+- hw_phymode_configure(ci);
+-
+ ret = ci_usb_phy_init(ci);
+ if (ret) {
+ dev_err(dev, "unable to init phy: %d\n", ret);
+@@ -578,7 +620,13 @@
+
+ ci_get_otg_capable(ci);
+
++ hw_phymode_configure(ci);
++
+ dr_mode = ci->platdata->dr_mode;
++
++ ci->supports_runtime_pm = !!(ci->platdata->flags &
++ CI_HDRC_SUPPORTS_RUNTIME_PM);
++
+ /* initialize role(s) before the interrupt is requested */
+ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
+ ret = ci_hdrc_host_init(ci);
+@@ -619,11 +667,6 @@
+
+ if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) {
+ if (ci->is_otg) {
+- /*
+- * ID pin needs 1ms debouce time,
+- * we delay 2ms for safe.
+- */
+- mdelay(2);
+ ci->role = ci_otg_role(ci);
+ ci_enable_otg_interrupt(ci, OTGSC_IDIE);
+ } else {
+@@ -656,6 +699,15 @@
+ if (ret)
+ goto stop;
+
++ device_set_wakeup_capable(&pdev->dev, true);
++
++ if (ci->supports_runtime_pm) {
++ pm_runtime_set_active(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ }
++
++ setup_timer(&ci->timer, delay_runtime_pm_put_timer,
++ (unsigned long)ci);
+ ret = dbg_create_files(ci);
+ if (!ret)
+ return 0;
+@@ -673,6 +725,11 @@
+ {
+ struct ci_hdrc *ci = platform_get_drvdata(pdev);
+
++ if (ci->supports_runtime_pm) {
++ pm_runtime_get_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
++ }
+ dbg_remove_files(ci);
+ free_irq(ci->irq, ci);
+ ci_role_destroy(ci);
+@@ -682,11 +739,120 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++static int ci_controller_suspend(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (ci->in_lpm)
++ return 0;
++
++ disable_irq(ci->irq);
++
++ if (ci->transceiver)
++ usb_phy_set_wakeup(ci->transceiver, true);
++
++ ci_hdrc_enter_lpm(ci, true);
++
++ if (ci->transceiver)
++ usb_phy_set_suspend(ci->transceiver, 1);
++
++ ci->in_lpm = true;
++
++ enable_irq(ci->irq);
++
++ return 0;
++}
++
++static int ci_controller_resume(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++
++ dev_dbg(dev, "at %s\n", __func__);
++
++ if (!ci->in_lpm)
++ return 0;
++
++ ci_hdrc_enter_lpm(ci, false);
++
++ if (ci->transceiver) {
++ usb_phy_set_suspend(ci->transceiver, 0);
++ usb_phy_set_wakeup(ci->transceiver, false);
++ hw_wait_phy_stable();
++ }
++
++ ci->in_lpm = false;
++
++ if (ci->wakeup_int) {
++ ci->wakeup_int = false;
++ enable_irq(ci->irq);
++ mod_timer(&ci->timer, jiffies + msecs_to_jiffies(2000));
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int ci_suspend(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++ int ret;
++
++ ret = ci_controller_suspend(dev);
++ if (ret)
++ return ret;
++
++ if (device_may_wakeup(dev))
++ enable_irq_wake(ci->irq);
++
++ return ret;
++}
++
++static int ci_resume(struct device *dev)
++{
++ struct ci_hdrc *ci = dev_get_drvdata(dev);
++ int ret;
++
++ if (device_may_wakeup(dev))
++ disable_irq_wake(ci->irq);
++
++ ret = ci_controller_resume(dev);
++ if (!ret && ci->supports_runtime_pm) {
++ pm_runtime_disable(dev);
++ pm_runtime_set_active(dev);
++ pm_runtime_enable(dev);
++ }
++
++ return ret;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++#ifdef CONFIG_PM_RUNTIME
++static int ci_runtime_suspend(struct device *dev)
++{
++ return ci_controller_suspend(dev);
++}
++
++static int ci_runtime_resume(struct device *dev)
++{
++ return ci_controller_resume(dev);
++}
++#endif /* CONFIG_PM_RUNTIME */
++
++#endif /* CONFIG_PM */
++static const struct dev_pm_ops ci_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(ci_suspend, ci_resume)
++ SET_RUNTIME_PM_OPS(ci_runtime_suspend, ci_runtime_resume, NULL)
++};
++
+ static struct platform_driver ci_hdrc_driver = {
+ .probe = ci_hdrc_probe,
+ .remove = ci_hdrc_remove,
+ .driver = {
+ .name = "ci_hdrc",
++ .pm = &ci_pm_ops,
+ },
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/host.c linux-3.14.40/drivers/usb/chipidea/host.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/host.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/host.c 2015-05-01 14:58:04.707427001 -0500
+@@ -33,6 +33,176 @@
+ #include "host.h"
+
+ static struct hc_driver __read_mostly ci_ehci_hc_driver;
++static int (*orig_bus_suspend)(struct usb_hcd *hcd);
++static int (*orig_bus_resume)(struct usb_hcd *hcd);
++static int (*orig_hub_control)(struct usb_hcd *hcd,
++ u16 typeReq, u16 wValue, u16 wIndex,
++ char *buf, u16 wLength);
++
++static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ int port;
++ u32 tmp;
++
++ int ret = orig_bus_suspend(hcd);
++
++ if (ret)
++ return ret;
++
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *reg = &ehci->regs->port_status[port];
++ u32 portsc = ehci_readl(ehci, reg);
++
++ if (portsc & PORT_CONNECT) {
++ /*
++ * For chipidea, the resume signal will be ended
++ * automatically, so for remote wakeup case, the
++ * usbcmd.rs may not be set before the resume has
++ * ended if other resume path consumes too much
++ * time (~23ms-24ms), in that case, the SOF will not
++ * send out within 3ms after resume ends, then the
++ * device will enter suspend again.
++ */
++ if (hcd->self.root_hub->do_remote_wakeup) {
++ ehci_dbg(ehci,
++ "Remote wakeup is enabled, "
++ "and device is on the port\n");
++
++ tmp = ehci_readl(ehci, &ehci->regs->command);
++ tmp |= CMD_RUN;
++ ehci_writel(ehci, tmp, &ehci->regs->command);
++ /*
++ * It needs a short delay between set RUNSTOP
++ * and set PHCD.
++ */
++ udelay(125);
++ }
++ if (hcd->phy && test_bit(port, &ehci->bus_suspended)
++ && (ehci_port_speed(ehci, portsc) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /*
++ * notify the USB PHY, it is for global
++ * suspend case.
++ */
++ usb_phy_notify_suspend(hcd->phy,
++ USB_SPEED_HIGH);
++ }
++ }
++
++ return 0;
++}
++
++static int ci_imx_ehci_bus_resume(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ int port;
++
++ int ret = orig_bus_resume(hcd);
++
++ if (ret)
++ return ret;
++
++ port = HCS_N_PORTS(ehci->hcs_params);
++ while (port--) {
++ u32 __iomem *reg = &ehci->regs->port_status[port];
++ u32 portsc = ehci_readl(ehci, reg);
++ /*
++ * Notify PHY after resume signal has finished, it is
++ * for global suspend case.
++ */
++ if (hcd->phy
++ && test_bit(port, &ehci->bus_suspended)
++ && (portsc & PORT_CONNECT)
++ && (ehci_port_speed(ehci, portsc) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /* notify the USB PHY */
++ usb_phy_notify_resume(hcd->phy, USB_SPEED_HIGH);
++ }
++
++ return 0;
++}
++
++/* The below code is based on tegra ehci driver */
++static int ci_imx_ehci_hub_control(
++ struct usb_hcd *hcd,
++ u16 typeReq,
++ u16 wValue,
++ u16 wIndex,
++ char *buf,
++ u16 wLength
++)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ u32 __iomem *status_reg;
++ u32 temp;
++ unsigned long flags;
++ int retval = 0;
++
++ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
++
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
++ temp = ehci_readl(ehci, status_reg);
++ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
++ retval = -EPIPE;
++ goto done;
++ }
++
++ temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
++ temp |= PORT_WKDISC_E | PORT_WKOC_E;
++ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
++
++ /*
++ * If a transaction is in progress, there may be a delay in
++ * suspending the port. Poll until the port is suspended.
++ */
++ if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
++ PORT_SUSPEND, 5000))
++ ehci_err(ehci, "timeout waiting for SUSPEND\n");
++
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ if (ehci_port_speed(ehci, temp) ==
++ USB_PORT_STAT_HIGH_SPEED && hcd->phy) {
++ /* notify the USB PHY */
++ usb_phy_notify_suspend(hcd->phy, USB_SPEED_HIGH);
++ }
++ spin_lock_irqsave(&ehci->lock, flags);
++
++ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
++ goto done;
++ }
++
++ /*
++ * After resume has finished, it needs do some post resume
++ * operation for some SoCs.
++ */
++ else if (typeReq == ClearPortFeature &&
++ wValue == USB_PORT_FEAT_C_SUSPEND) {
++
++		/* Make sure the resume has finished; it should have by now */
++ if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 25000))
++ ehci_err(ehci, "timeout waiting for resume\n");
++
++ temp = ehci_readl(ehci, status_reg);
++
++ if (ehci_port_speed(ehci, temp) ==
++ USB_PORT_STAT_HIGH_SPEED && hcd->phy) {
++ /* notify the USB PHY */
++ usb_phy_notify_resume(hcd->phy, USB_SPEED_HIGH);
++ }
++ }
++
++ spin_unlock_irqrestore(&ehci->lock, flags);
++
++ /* Handle the hub control events here */
++ return orig_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
++done:
++ spin_unlock_irqrestore(&ehci->lock, flags);
++ return retval;
++}
+
+ static irqreturn_t host_irq(struct ci_hdrc *ci)
+ {
+@@ -64,7 +234,6 @@
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = ci->hw_bank.cap;
+ ehci->has_hostpc = ci->hw_bank.lpm;
+- ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
+ ehci->imx28_write_fix = ci->imx28_write_fix;
+
+ if (ci->platdata->reg_vbus) {
+@@ -136,5 +305,15 @@
+
+ ehci_init_driver(&ci_ehci_hc_driver, NULL);
+
++ orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
++ orig_bus_resume = ci_ehci_hc_driver.bus_resume;
++ orig_hub_control = ci_ehci_hc_driver.hub_control;
++
++ ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
++ if (ci->platdata->flags & CI_HDRC_IMX_EHCI_QUIRK) {
++ ci_ehci_hc_driver.bus_resume = ci_imx_ehci_bus_resume;
++ ci_ehci_hc_driver.hub_control = ci_imx_ehci_hub_control;
++ }
++
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/otg.c linux-3.14.40/drivers/usb/chipidea/otg.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/otg.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/otg.c 2015-05-01 14:58:04.707427001 -0500
+@@ -18,6 +18,8 @@
+ #include <linux/usb/otg.h>
+ #include <linux/usb/gadget.h>
+ #include <linux/usb/chipidea.h>
++#include <linux/kthread.h>
++#include <linux/freezer.h>
+
+ #include "ci.h"
+ #include "bits.h"
+@@ -68,26 +70,53 @@
+ ci_role_start(ci, role);
+ }
+ }
++
++/* If there is pending otg event */
++static inline bool ci_otg_event_is_pending(struct ci_hdrc *ci)
++{
++ return ci->id_event || ci->b_sess_valid_event;
++}
++
+ /**
+- * ci_otg_work - perform otg (vbus/id) event handle
+- * @work: work struct
++ * ci_otg_event - perform otg (vbus/id) event handle
++ * @ci: ci_hdrc struct
+ */
+-static void ci_otg_work(struct work_struct *work)
++static void ci_otg_event(struct ci_hdrc *ci)
+ {
+- struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
+-
+ if (ci->id_event) {
+ ci->id_event = false;
++ /* Keep controller active during id switch */
++ pm_runtime_get_sync(ci->dev);
+ ci_handle_id_switch(ci);
++ pm_runtime_put_sync(ci->dev);
+ } else if (ci->b_sess_valid_event) {
+ ci->b_sess_valid_event = false;
++ pm_runtime_get_sync(ci->dev);
+ ci_handle_vbus_change(ci);
++ pm_runtime_put_sync(ci->dev);
+ } else
+- dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
++ dev_dbg(ci->dev, "it should be quit event\n");
+
+ enable_irq(ci->irq);
+ }
+
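++/*
++ * Freezable kernel thread that replaces the former otg workqueue: it sleeps
++ * until an ID or VBUS event is pending (or a stop is requested) and then
++ * lets ci_otg_event() handle it.
++ */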
++static int ci_otg_thread(void *ptr)
++{
++ struct ci_hdrc *ci = ptr;
++
++ set_freezable();
++
++ do {
++ wait_event_freezable(ci->otg_wait,
++ ci_otg_event_is_pending(ci) ||
++ kthread_should_stop());
++ ci_otg_event(ci);
++ } while (!kthread_should_stop());
++
++ dev_warn(ci->dev, "ci_otg_thread quits\n");
++
++ return 0;
++}
+
+ /**
+ * ci_hdrc_otg_init - initialize otg struct
+@@ -95,11 +124,11 @@
+ */
+ int ci_hdrc_otg_init(struct ci_hdrc *ci)
+ {
+- INIT_WORK(&ci->work, ci_otg_work);
+- ci->wq = create_singlethread_workqueue("ci_otg");
+- if (!ci->wq) {
+- dev_err(ci->dev, "can't create workqueue\n");
+- return -ENODEV;
++ init_waitqueue_head(&ci->otg_wait);
++ ci->otg_task = kthread_run(ci_otg_thread, ci, "ci otg thread");
++ if (IS_ERR(ci->otg_task)) {
++ dev_err(ci->dev, "error to create otg thread\n");
++ return PTR_ERR(ci->otg_task);
+ }
+
+ return 0;
+@@ -111,10 +140,7 @@
+ */
+ void ci_hdrc_otg_destroy(struct ci_hdrc *ci)
+ {
+- if (ci->wq) {
+- flush_workqueue(ci->wq);
+- destroy_workqueue(ci->wq);
+- }
++ kthread_stop(ci->otg_task);
+ ci_disable_otg_interrupt(ci, OTGSC_INT_EN_BITS);
+ ci_clear_otg_interrupt(ci, OTGSC_INT_STATUS_BITS);
+ }
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/udc.c linux-3.14.40/drivers/usb/chipidea/udc.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/udc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/udc.c 2015-05-01 14:58:04.711427001 -0500
+@@ -681,12 +681,6 @@
+ struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+- spin_lock_irqsave(&ci->lock, flags);
+- ci->gadget.speed = USB_SPEED_UNKNOWN;
+- ci->remote_wakeup = 0;
+- ci->suspended = 0;
+- spin_unlock_irqrestore(&ci->lock, flags);
+-
+ /* flush all endpoints */
+ gadget_for_each_ep(ep, gadget) {
+ usb_ep_fifo_flush(ep);
+@@ -704,6 +698,12 @@
+ ci->status = NULL;
+ }
+
++ spin_lock_irqsave(&ci->lock, flags);
++ ci->gadget.speed = USB_SPEED_UNKNOWN;
++ ci->remote_wakeup = 0;
++ ci->suspended = 0;
++ spin_unlock_irqrestore(&ci->lock, flags);
++
+ return 0;
+ }
+
+@@ -1222,6 +1222,10 @@
+ return -EBUSY;
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+
+ /* only internal SW should disable ctrl endpts */
+
+@@ -1311,6 +1315,10 @@
+ return -EINVAL;
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+ retval = _ep_queue(ep, req, gfp_flags);
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return retval;
+@@ -1334,8 +1342,8 @@
+ return -EINVAL;
+
+ spin_lock_irqsave(hwep->lock, flags);
+-
+- hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
++ if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
++ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
+ list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+ dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+@@ -1379,6 +1387,10 @@
+
+ spin_lock_irqsave(hwep->lock, flags);
+
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+ #ifndef STALL_IN
+ /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+ if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
+@@ -1440,6 +1452,10 @@
+ }
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return;
++ }
+
+ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
+@@ -1506,6 +1522,10 @@
+ int ret = 0;
+
+ spin_lock_irqsave(&ci->lock, flags);
++ if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(&ci->lock, flags);
++ return 0;
++ }
+ if (!ci->remote_wakeup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+diff -Nur linux-3.14.40.orig/drivers/usb/chipidea/usbmisc_imx.c linux-3.14.40/drivers/usb/chipidea/usbmisc_imx.c
+--- linux-3.14.40.orig/drivers/usb/chipidea/usbmisc_imx.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/chipidea/usbmisc_imx.c 2015-05-01 14:58:04.711427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -11,10 +11,10 @@
+
+ #include <linux/module.h>
+ #include <linux/of_platform.h>
+-#include <linux/clk.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
++#include <linux/regulator/consumer.h>
+
+ #include "ci_hdrc_imx.h"
+
+@@ -33,22 +33,28 @@
+ #define MX53_BM_OVER_CUR_DIS_UHx BIT(30)
+
+ #define MX6_BM_OVER_CUR_DIS BIT(7)
++#define MX6_BM_WAKEUP_ENABLE BIT(10)
++#define MX6_BM_ID_WAKEUP BIT(16)
++#define MX6_BM_VBUS_WAKEUP BIT(17)
++#define MX6_BM_WAKEUP_INTR BIT(31)
+
+ struct usbmisc_ops {
+ /* It's called once when probe a usb device */
+ int (*init)(struct imx_usbmisc_data *data);
+ /* It's called once after adding a usb device */
+ int (*post)(struct imx_usbmisc_data *data);
++ /* It's called when we need to enable usb wakeup */
++ int (*set_wakeup)(struct imx_usbmisc_data *data, bool enabled);
+ };
+
+ struct imx_usbmisc {
+ void __iomem *base;
+ spinlock_t lock;
+- struct clk *clk;
+ const struct usbmisc_ops *ops;
+ };
+
+ static struct imx_usbmisc *usbmisc;
++static struct regulator *vbus_wakeup_reg;
+
+ static int usbmisc_imx25_post(struct imx_usbmisc_data *data)
+ {
+@@ -158,6 +164,47 @@
+ return 0;
+ }
+
++static u32 imx6q_finalize_wakeup_setting(struct imx_usbmisc_data *data)
++{
++ if (data->available_role == USB_DR_MODE_PERIPHERAL)
++ return MX6_BM_VBUS_WAKEUP;
++ else if (data->available_role == USB_DR_MODE_OTG)
++ return MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP;
++
++ return 0;
++}
++
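++/*
++ * Enable or disable wakeup for one controller instance by setting or
++ * clearing the wakeup enable/source bits in its per-port usbmisc register
++ * (at offset index * 4); the optional "vbus-wakeup" regulator is switched
++ * accordingly.
++ */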
++static int usbmisc_imx6q_set_wakeup
++ (struct imx_usbmisc_data *data, bool enabled)
++{
++ unsigned long flags;
++ u32 reg, val = MX6_BM_WAKEUP_ENABLE;
++ int ret = 0;
++
++ if (data->index > 3)
++ return -EINVAL;
++
++ spin_lock_irqsave(&usbmisc->lock, flags);
++ reg = readl(usbmisc->base + data->index * 4);
++ if (enabled) {
++ val |= imx6q_finalize_wakeup_setting(data);
++ writel(reg | val, usbmisc->base + data->index * 4);
++ if (vbus_wakeup_reg)
++ ret = regulator_enable(vbus_wakeup_reg);
++ } else {
++ if (reg & MX6_BM_WAKEUP_INTR)
++ pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
++ val = MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP
++ | MX6_BM_ID_WAKEUP;
++ writel(reg & ~val, usbmisc->base + data->index * 4);
++ if (vbus_wakeup_reg && regulator_is_enabled(vbus_wakeup_reg))
++ regulator_disable(vbus_wakeup_reg);
++ }
++ spin_unlock_irqrestore(&usbmisc->lock, flags);
++
++ return ret;
++}
++
+ static const struct usbmisc_ops imx25_usbmisc_ops = {
+ .post = usbmisc_imx25_post,
+ };
+@@ -172,6 +219,7 @@
+
+ static const struct usbmisc_ops imx6q_usbmisc_ops = {
+ .init = usbmisc_imx6q_init,
++ .set_wakeup = usbmisc_imx6q_set_wakeup,
+ };
+
+ int imx_usbmisc_init(struct imx_usbmisc_data *data)
+@@ -194,6 +242,16 @@
+ }
+ EXPORT_SYMBOL_GPL(imx_usbmisc_init_post);
+
++int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *data, bool enabled)
++{
++ if (!usbmisc)
++ return -ENODEV;
++ if (!usbmisc->ops->set_wakeup)
++ return 0;
++ return usbmisc->ops->set_wakeup(data, enabled);
++}
++EXPORT_SYMBOL_GPL(imx_usbmisc_set_wakeup);
++
+ static const struct of_device_id usbmisc_imx_dt_ids[] = {
+ {
+ .compatible = "fsl,imx25-usbmisc",
+@@ -223,7 +281,6 @@
+ {
+ struct resource *res;
+ struct imx_usbmisc *data;
+- int ret;
+ struct of_device_id *tmp_dev;
+
+ if (usbmisc)
+@@ -240,31 +297,28 @@
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+- data->clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(data->clk)) {
+- dev_err(&pdev->dev,
+- "failed to get clock, err=%ld\n", PTR_ERR(data->clk));
+- return PTR_ERR(data->clk);
+- }
+-
+- ret = clk_prepare_enable(data->clk);
+- if (ret) {
+- dev_err(&pdev->dev,
+- "clk_prepare_enable failed, err=%d\n", ret);
+- return ret;
+- }
+-
+ tmp_dev = (struct of_device_id *)
+ of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
+ data->ops = (const struct usbmisc_ops *)tmp_dev->data;
+ usbmisc = data;
+
++ vbus_wakeup_reg = devm_regulator_get(&pdev->dev, "vbus-wakeup");
++ if (PTR_ERR(vbus_wakeup_reg) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++ else if (PTR_ERR(vbus_wakeup_reg) == -ENODEV)
++		/* no vbus regulator is needed */
++ vbus_wakeup_reg = NULL;
++ else if (IS_ERR(vbus_wakeup_reg)) {
++ dev_err(&pdev->dev, "Getting regulator error: %ld\n",
++ PTR_ERR(vbus_wakeup_reg));
++ return PTR_ERR(vbus_wakeup_reg);
++ }
++
+ return 0;
+ }
+
+ static int usbmisc_imx_remove(struct platform_device *pdev)
+ {
+- clk_disable_unprepare(usbmisc->clk);
+ usbmisc = NULL;
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/drivers/usb/core/hub.c linux-3.14.40/drivers/usb/core/hub.c
+--- linux-3.14.40.orig/drivers/usb/core/hub.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/core/hub.c 2015-05-01 14:58:04.711427001 -0500
+@@ -3916,6 +3916,12 @@
+ void usb_enable_ltm(struct usb_device *udev) { }
+ EXPORT_SYMBOL_GPL(usb_enable_ltm);
+
++static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
++ u16 portstatus, u16 portchange)
++{
++ return 0;
++}
++
+ #endif /* CONFIG_PM */
+
+
+@@ -4512,8 +4518,7 @@
+
+ /* Disconnect any existing devices under this port */
+ if (udev) {
+- if (hcd->phy && !hdev->parent &&
+- !(portstatus & USB_PORT_STAT_CONNECTION))
++ if (hcd->phy && !hdev->parent)
+ usb_phy_notify_disconnect(hcd->phy, udev->speed);
+ usb_disconnect(&hub->ports[port1 - 1]->child);
+ }
+diff -Nur linux-3.14.40.orig/drivers/usb/core/message.c linux-3.14.40/drivers/usb/core/message.c
+--- linux-3.14.40.orig/drivers/usb/core/message.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/core/message.c 2015-05-01 14:58:04.715427001 -0500
+@@ -178,7 +178,7 @@
+ *
+ * Return:
+ * If successful, 0. Otherwise a negative error number. The number of actual
+- * bytes transferred will be stored in the @actual_length paramater.
++ * bytes transferred will be stored in the @actual_length parameter.
+ */
+ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout)
+diff -Nur linux-3.14.40.orig/drivers/usb/core/urb.c linux-3.14.40/drivers/usb/core/urb.c
+--- linux-3.14.40.orig/drivers/usb/core/urb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/core/urb.c 2015-05-01 14:58:04.739427001 -0500
+@@ -831,7 +831,7 @@
+ *
+ * this allows all outstanding URBs to be unlinked starting
+ * from the back of the queue. This function is asynchronous.
+- * The unlinking is just tiggered. It may happen after this
++ * The unlinking is just triggered. It may happen after this
+ * function has returned.
+ *
+ * This routine should not be called by a driver after its disconnect
+diff -Nur linux-3.14.40.orig/drivers/usb/gadget/f_mass_storage.c linux-3.14.40/drivers/usb/gadget/f_mass_storage.c
+--- linux-3.14.40.orig/drivers/usb/gadget/f_mass_storage.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/gadget/f_mass_storage.c 2015-05-01 14:58:04.743427001 -0500
+@@ -336,8 +336,15 @@
+
+ struct usb_ep *bulk_in;
+ struct usb_ep *bulk_out;
++#ifdef CONFIG_FSL_UTP
++ void *utp;
++#endif
+ };
+
++#ifdef CONFIG_FSL_UTP
++#include "fsl_updater.h"
++#endif
++
+ static inline int __fsg_is_set(struct fsg_common *common,
+ const char *func, unsigned line)
+ {
+@@ -1131,6 +1138,13 @@
+ }
+ #endif
+
++#ifdef CONFIG_FSL_UTP
++ if (utp_get_sense(common->fsg) == 0) { /* got the sense from the UTP */
++ sd = UTP_CTX(common->fsg)->sd;
++ sdinfo = UTP_CTX(common->fsg)->sdinfo;
++ valid = 0;
++ } else
++#endif
+ if (!curlun) { /* Unsupported LUNs are okay */
+ common->bad_lun_okay = 1;
+ sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+@@ -1152,6 +1166,9 @@
+ buf[7] = 18 - 8; /* Additional sense length */
+ buf[12] = ASC(sd);
+ buf[13] = ASCQ(sd);
++#ifdef CONFIG_FSL_UTP
++ put_unaligned_be32(UTP_CTX(common->fsg)->sdinfo_h, &buf[8]);
++#endif
+ return 18;
+ }
+
+@@ -1645,7 +1662,18 @@
+ sd = SS_INVALID_COMMAND;
+ } else if (sd != SS_NO_SENSE) {
+ DBG(common, "sending command-failure status\n");
++#ifdef CONFIG_FSL_UTP
++/*
++ * The mfgtool host frequently resets the bus during transfers:
++ * - the CSW response to REQUEST SENSE would be 1 because UTP changes
++ *   some storage information
++ * - the host resets the bus if the response to REQUEST SENSE is 1
++ * - so report 0 instead when CONFIG_FSL_UTP is defined
++ */
++ status = US_BULK_STAT_OK;
++#else
+ status = US_BULK_STAT_FAIL;
++#endif
+ VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+ " info x%x\n",
+ SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+@@ -1836,6 +1864,13 @@
+ common->phase_error = 0;
+ common->short_packet_received = 0;
+
++#ifdef CONFIG_FSL_UTP
++ reply = utp_handle_message(common->fsg, common->cmnd, reply);
++
++ if (reply != -EINVAL)
++ return reply;
++#endif
++
+ down_read(&common->filesem); /* We're using the backing file */
+ switch (common->cmnd[0]) {
+
+@@ -2502,12 +2537,14 @@
+ /* Allow the thread to be frozen */
+ set_freezable();
+
++#ifndef CONFIG_FSL_UTP
+ /*
+ * Arrange for userspace references to be interpreted as kernel
+ * pointers. That way we can pass a kernel pointer to a routine
+ * that expects a __user pointer and it will work okay.
+ */
+ set_fs(get_ds());
++#endif
+
+ /* The main loop */
+ while (common->state != FSG_STATE_TERMINATED) {
+@@ -3096,6 +3133,10 @@
+
+ /*-------------------------------------------------------------------------*/
+
++#ifdef CONFIG_FSL_UTP
++#include "fsl_updater.c"
++#endif
++
+ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct fsg_dev *fsg = fsg_from_func(f);
+@@ -3127,6 +3168,10 @@
+ fsg_intf_desc.bInterfaceNumber = i;
+ fsg->interface_number = i;
+
++#ifdef CONFIG_FSL_UTP
++ utp_init(fsg);
++#endif
++
+ /* Find all the endpoints we will use */
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+ if (!ep)
+@@ -3185,6 +3230,10 @@
+ }
+
+ usb_free_all_descriptors(&fsg->function);
++
++#ifdef CONFIG_FSL_UTP
++ utp_exit(fsg);
++#endif
+ }
+
+ static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
+diff -Nur linux-3.14.40.orig/drivers/usb/gadget/fsl_updater.c linux-3.14.40/drivers/usb/gadget/fsl_updater.c
+--- linux-3.14.40.orig/drivers/usb/gadget/fsl_updater.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/usb/gadget/fsl_updater.c 2015-05-01 14:58:04.743427001 -0500
+@@ -0,0 +1,594 @@
++/*
++ * Freescale UUT driver
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc.
++ * Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++static u64 get_be64(u8 *buf)
++{
++ return ((u64)get_unaligned_be32(buf) << 32) |
++ get_unaligned_be32(buf + 4);
++}
++
++static int utp_init(struct fsg_dev *fsg)
++{
++ init_waitqueue_head(&utp_context.wq);
++ init_waitqueue_head(&utp_context.list_full_wq);
++
++ INIT_LIST_HEAD(&utp_context.read);
++ INIT_LIST_HEAD(&utp_context.write);
++ mutex_init(&utp_context.lock);
++
++ /* the max message is 64KB */
++ utp_context.buffer = vmalloc(0x10000);
++ if (!utp_context.buffer)
++ return -EIO;
++ utp_context.utp_version = 0x1ull;
++ fsg->utp = &utp_context;
++ return misc_register(&utp_dev);
++}
++
++static void utp_exit(struct fsg_dev *fsg)
++{
++ vfree(utp_context.buffer);
++ misc_deregister(&utp_dev);
++}
++
++static struct utp_user_data *utp_user_data_alloc(size_t size)
++{
++ struct utp_user_data *uud;
++
++ uud = vmalloc(size + sizeof(*uud));
++ if (!uud)
++ return uud;
++ memset(uud, 0, size + sizeof(*uud));
++ uud->data.size = size + sizeof(uud->data);
++ INIT_LIST_HEAD(&uud->link);
++ return uud;
++}
++
++static void utp_user_data_free(struct utp_user_data *uud)
++{
++ mutex_lock(&utp_context.lock);
++ list_del(&uud->link);
++ mutex_unlock(&utp_context.lock);
++ vfree(uud);
++}
++
++/* Get the number of elements in the list */
++static u32 count_list(struct list_head *l)
++{
++ u32 count = 0;
++ struct list_head *tmp;
++
++ mutex_lock(&utp_context.lock);
++ list_for_each(tmp, l) {
++ count++;
++ }
++ mutex_unlock(&utp_context.lock);
++
++ return count;
++}
++/* The routine will not go on if utp_context.queue is empty */
++#define WAIT_ACTIVITY(queue) \
++ wait_event_interruptible(utp_context.wq, !list_empty(&utp_context.queue))
++
++/* Called by userspace program (uuc) */
++static ssize_t utp_file_read(struct file *file,
++ char __user *buf,
++ size_t size,
++ loff_t *off)
++{
++ struct utp_user_data *uud;
++ size_t size_to_put;
++ int free = 0;
++
++ WAIT_ACTIVITY(read);
++
++ mutex_lock(&utp_context.lock);
++ uud = list_first_entry(&utp_context.read, struct utp_user_data, link);
++ mutex_unlock(&utp_context.lock);
++ size_to_put = uud->data.size;
++
++ if (size >= size_to_put)
++ free = !0;
++ if (copy_to_user(buf, &uud->data, size_to_put)) {
++ printk(KERN_INFO "[ %s ] copy error\n", __func__);
++ return -EACCES;
++ }
++ if (free)
++ utp_user_data_free(uud);
++ else {
++ pr_info("sizeof = %d, size = %d\n",
++ sizeof(uud->data),
++ uud->data.size);
++
++ pr_err("Will not free utp_user_data, because buffer size = %d,"
++ "need to put %d\n", size, size_to_put);
++ }
++
++ /*
++	 * The user program has finished processing the data,
++	 * go on getting data from the host
++ */
++ wake_up(&utp_context.list_full_wq);
++
++ return size_to_put;
++}
++
++static ssize_t utp_file_write(struct file *file, const char __user *buf,
++ size_t size, loff_t *off)
++{
++ struct utp_user_data *uud;
++
++ if (size < sizeof(uud->data))
++ return -EINVAL;
++ uud = utp_user_data_alloc(size);
++ if (uud == NULL)
++ return -ENOMEM;
++ if (copy_from_user(&uud->data, buf, size)) {
++ printk(KERN_INFO "[ %s ] copy error!\n", __func__);
++ vfree(uud);
++ return -EACCES;
++ }
++ mutex_lock(&utp_context.lock);
++ list_add_tail(&uud->link, &utp_context.write);
++ /* Go on EXEC routine process */
++ wake_up(&utp_context.wq);
++ mutex_unlock(&utp_context.lock);
++ return size;
++}
++
++/*
++ * uuc should switch to the soc bus infrastructure and read the SoC
++ * information from /sys/devices/soc0/soc_id; this function can then
++ * be removed.
++ */
++static long
++utp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int cpu_id = 0;
++
++ switch (cmd) {
++ case UTP_GET_CPU_ID:
++ return put_user(cpu_id, (int __user *)arg);
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++/* Will be called when the host wants to get the sense data */
++static int utp_get_sense(struct fsg_dev *fsg)
++{
++ if (UTP_CTX(fsg)->processed == 0)
++ return -1;
++
++ UTP_CTX(fsg)->processed = 0;
++ return 0;
++}
++
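++/*
++ * Send "size" bytes from "data" to the host over the bulk-in endpoint,
++ * reusing the mass-storage buffer heads; returns the number of bytes
++ * queued for transfer.
++ */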
++static int utp_do_read(struct fsg_dev *fsg, void *data, size_t size)
++{
++ struct fsg_buffhd *bh;
++ int rc;
++ u32 amount_left;
++ unsigned int amount;
++
++ /* Get the starting Logical Block Address and check that it's
++ * not too big */
++
++ amount_left = size;
++ if (unlikely(amount_left == 0))
++ return -EIO; /* No default reply*/
++
++ pr_debug("%s: sending %d\n", __func__, size);
++ for (;;) {
++ /* Figure out how much we need to read:
++ * Try to read the remaining amount.
++ * But don't read more than the buffer size.
++ * And don't try to read past the end of the file.
++ * Finally, if we're not at a page boundary, don't read past
++ * the next page.
++ * If this means reading 0 then we were asked to read past
++ * the end of file. */
++ amount = min((unsigned int) amount_left, FSG_BUFLEN);
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->common->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(fsg->common);
++ if (rc)
++ return rc;
++ }
++
++ /* If we were asked to read past the end of file,
++ * end with an empty buffer. */
++ if (amount == 0) {
++ bh->inreq->length = 0;
++ bh->state = BUF_STATE_FULL;
++ break;
++ }
++
++ /* Perform the read */
++ pr_info("Copied to %p, %d bytes started from %d\n",
++ bh->buf, amount, size - amount_left);
++		/* from the utp buffer to the file_storage buffer */
++ memcpy(bh->buf, data + size - amount_left, amount);
++ amount_left -= amount;
++ fsg->common->residue -= amount;
++
++ bh->inreq->length = amount;
++ bh->state = BUF_STATE_FULL;
++
++ /* Send this buffer and go read some more */
++ bh->inreq->zero = 0;
++
++ /* USB Physical transfer: Data from device to host */
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++
++ fsg->common->next_buffhd_to_fill = bh->next;
++
++ if (amount_left <= 0)
++ break;
++ }
++
++ return size - amount_left;
++}
++
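++/*
++ * Receive up to "size" bytes from the host over the bulk-out endpoint and
++ * copy them into "data"; the loop mirrors the mass-storage write path but
++ * stores into memory instead of a backing file.
++ */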
++static int utp_do_write(struct fsg_dev *fsg, void *data, size_t size)
++{
++ struct fsg_buffhd *bh;
++ int get_some_more;
++ u32 amount_left_to_req, amount_left_to_write;
++ unsigned int amount;
++ int rc;
++ loff_t offset;
++
++ /* Carry out the file writes */
++ get_some_more = 1;
++ amount_left_to_req = amount_left_to_write = size;
++
++ if (unlikely(amount_left_to_write == 0))
++ return -EIO;
++
++ offset = 0;
++ while (amount_left_to_write > 0) {
++
++ /* Queue a request for more data from the host */
++ bh = fsg->common->next_buffhd_to_fill;
++ if (bh->state == BUF_STATE_EMPTY && get_some_more) {
++
++ /* Figure out how much we want to get:
++ * Try to get the remaining amount.
++ * But don't get more than the buffer size.
++ * And don't try to go past the end of the file.
++ * If we're not at a page boundary,
++ * don't go past the next page.
++ * If this means getting 0, then we were asked
++ * to write past the end of file.
++ * Finally, round down to a block boundary. */
++ amount = min(amount_left_to_req, FSG_BUFLEN);
++
++ if (amount == 0) {
++ get_some_more = 0;
++ /* cry now */
++ continue;
++ }
++
++ /* Get the next buffer */
++ amount_left_to_req -= amount;
++ if (amount_left_to_req == 0)
++ get_some_more = 0;
++
++ /* amount is always divisible by 512, hence by
++ * the bulk-out maxpacket size */
++ bh->outreq->length = bh->bulk_out_intended_length =
++ amount;
++ bh->outreq->short_not_ok = 1;
++ start_transfer(fsg, fsg->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++ fsg->common->next_buffhd_to_fill = bh->next;
++ continue;
++ }
++
++ /* Write the received data to the backing file */
++ bh = fsg->common->next_buffhd_to_drain;
++ if (bh->state == BUF_STATE_EMPTY && !get_some_more)
++ break; /* We stopped early */
++ if (bh->state == BUF_STATE_FULL) {
++ smp_rmb();
++ fsg->common->next_buffhd_to_drain = bh->next;
++ bh->state = BUF_STATE_EMPTY;
++
++ /* Did something go wrong with the transfer? */
++ if (bh->outreq->status != 0)
++ /* cry again, COMMUNICATION_FAILURE */
++ break;
++
++ amount = bh->outreq->actual;
++
++ /* Perform the write */
++ memcpy(data + offset, bh->buf, amount);
++
++ offset += amount;
++ if (signal_pending(current))
++ return -EINTR; /* Interrupted!*/
++ amount_left_to_write -= amount;
++ fsg->common->residue -= amount;
++
++ /* Did the host decide to stop early? */
++ if (bh->outreq->actual != bh->outreq->length) {
++ fsg->common->short_packet_received = 1;
++ break;
++ }
++ continue;
++ }
++
++ /* Wait for something to happen */
++ rc = sleep_thread(fsg->common);
++ if (rc)
++ return rc;
++ }
++
++ return -EIO;
++}
++
++static inline void utp_set_sense(struct fsg_dev *fsg, u16 code, u64 reply)
++{
++ UTP_CTX(fsg)->processed = true;
++ UTP_CTX(fsg)->sdinfo = reply & 0xFFFFFFFF;
++ UTP_CTX(fsg)->sdinfo_h = (reply >> 32) & 0xFFFFFFFF;
++ UTP_CTX(fsg)->sd = (UTP_SENSE_KEY << 16) | code;
++}
++
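++/*
++ * Answer a UTP POLL from the host: report EXIT/BUSY/PASS based on the reply
++ * queued by uuc on the write list, or BUSY/PASS depending on how much of
++ * the read list is still outstanding.
++ */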
++static void utp_poll(struct fsg_dev *fsg)
++{
++ struct utp_context *ctx = UTP_CTX(fsg);
++ struct utp_user_data *uud = NULL;
++
++ mutex_lock(&ctx->lock);
++ if (!list_empty(&ctx->write))
++ uud = list_first_entry(&ctx->write, struct utp_user_data, link);
++ mutex_unlock(&ctx->lock);
++
++ if (uud) {
++ if (uud->data.flags & UTP_FLAG_STATUS) {
++ printk(KERN_WARNING "%s: exit with status %d\n",
++ __func__, uud->data.status);
++ UTP_SS_EXIT(fsg, uud->data.status);
++ } else if (uud->data.flags & UTP_FLAG_REPORT_BUSY) {
++ UTP_SS_BUSY(fsg, --ctx->counter);
++ } else {
++ printk("%s: pass returned.\n", __func__);
++ UTP_SS_PASS(fsg);
++ }
++ utp_user_data_free(uud);
++ } else {
++ if (utp_context.cur_state & UTP_FLAG_DATA) {
++ if (count_list(&ctx->read) < 7) {
++ pr_debug("%s: pass returned in POLL stage. \n", __func__);
++ UTP_SS_PASS(fsg);
++ utp_context.cur_state = 0;
++ return;
++ }
++ }
++ UTP_SS_BUSY(fsg, --ctx->counter);
++ }
++}
++
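++/*
++ * Queue an EXEC command for the user-space uuc tool on the read list, then
++ * (unless the command starts with '!') wait for uuc's reply on the write
++ * list and translate it into the matching UTP sense response.
++ */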
++static int utp_exec(struct fsg_dev *fsg,
++ char *command,
++ int cmdsize,
++ unsigned long long payload)
++{
++ struct utp_user_data *uud = NULL, *uud2r;
++ struct utp_context *ctx = UTP_CTX(fsg);
++
++ ctx->counter = 0xFFFF;
++ uud2r = utp_user_data_alloc(cmdsize + 1);
++ if (!uud2r)
++ return -ENOMEM;
++ uud2r->data.flags = UTP_FLAG_COMMAND;
++ uud2r->data.payload = payload;
++ strncpy(uud2r->data.command, command, cmdsize);
++
++ mutex_lock(&ctx->lock);
++ list_add_tail(&uud2r->link, &ctx->read);
++ mutex_unlock(&ctx->lock);
++ /* wake up the read routine */
++ wake_up(&ctx->wq);
++
++ if (command[0] == '!') /* there will be no response */
++ return 0;
++
++ /*
++	 * the user program (uuc) will return a utp_message
++	 * and add it to the write list
++ */
++ WAIT_ACTIVITY(write);
++
++ mutex_lock(&ctx->lock);
++ if (!list_empty(&ctx->write)) {
++ uud = list_first_entry(&ctx->write, struct utp_user_data, link);
++#ifdef DEBUG
++ pr_info("UUD:\n\tFlags = %02X\n", uud->data.flags);
++ if (uud->data.flags & UTP_FLAG_DATA) {
++ pr_info("\tbufsize = %d\n", uud->data.bufsize);
++ print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_NONE,
++ 16, 2, uud->data.data, uud->data.bufsize, true);
++ }
++ if (uud->data.flags & UTP_FLAG_REPORT_BUSY)
++ pr_info("\tBUSY\n");
++#endif
++ }
++ mutex_unlock(&ctx->lock);
++
++ if (uud->data.flags & UTP_FLAG_DATA) {
++ memcpy(ctx->buffer, uud->data.data, uud->data.bufsize);
++ UTP_SS_SIZE(fsg, uud->data.bufsize);
++ } else if (uud->data.flags & UTP_FLAG_REPORT_BUSY) {
++ UTP_SS_BUSY(fsg, ctx->counter);
++ } else if (uud->data.flags & UTP_FLAG_STATUS) {
++ printk(KERN_WARNING "%s: exit with status %d\n", __func__,
++ uud->data.status);
++ UTP_SS_EXIT(fsg, uud->data.status);
++ } else {
++ pr_debug("%s: pass returned in EXEC stage. \n", __func__);
++ UTP_SS_PASS(fsg);
++ }
++ utp_user_data_free(uud);
++ return 0;
++}
++
++static int utp_send_status(struct fsg_dev *fsg)
++{
++ struct fsg_buffhd *bh;
++ u8 status = US_BULK_STAT_OK;
++ struct bulk_cs_wrap *csw;
++ int rc;
++
++ /* Wait for the next buffer to become available */
++ bh = fsg->common->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(fsg->common);
++ if (rc)
++ return rc;
++ }
++
++ if (fsg->common->phase_error) {
++ DBG(fsg, "sending phase-error status\n");
++ status = US_BULK_STAT_PHASE;
++
++ } else if ((UTP_CTX(fsg)->sd & 0xFFFF) != UTP_REPLY_PASS) {
++ status = US_BULK_STAT_FAIL;
++ }
++
++ csw = bh->buf;
++
++ /* Store and send the Bulk-only CSW */
++ csw->Signature = __constant_cpu_to_le32(US_BULK_CS_SIGN);
++ csw->Tag = fsg->common->tag;
++ csw->Residue = cpu_to_le32(fsg->common->residue);
++ csw->Status = status;
++
++ bh->inreq->length = US_BULK_CS_WRAP_LEN;
++ bh->inreq->zero = 0;
++ start_transfer(fsg, fsg->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ fsg->common->next_buffhd_to_fill = bh->next;
++ return 0;
++}
++
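++/*
++ * Entry point from the mass-storage command handler: CDBs whose first byte
++ * is 0xF0 are treated as UTP messages and dispatched to the POLL, EXEC, GET
++ * or PUT handlers; anything else falls back to the default SCSI reply.
++ */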
++static int utp_handle_message(struct fsg_dev *fsg,
++ char *cdb_data,
++ int default_reply)
++{
++ struct utp_msg *m = (struct utp_msg *)cdb_data;
++ void *data = NULL;
++ int r;
++ struct utp_user_data *uud2r;
++ unsigned long long param;
++ unsigned long tag;
++
++ if (m->f0 != 0xF0)
++ return default_reply;
++
++ tag = get_unaligned_be32((void *)&m->utp_msg_tag);
++ param = get_be64((void *)&m->param);
++ pr_debug("Type 0x%x, tag 0x%08lx, param %llx\n",
++ m->utp_msg_type, tag, param);
++
++ switch ((enum utp_msg_type)m->utp_msg_type) {
++
++ case UTP_POLL:
++ if (get_be64((void *)&m->param) == 1) {
++ pr_debug("%s: version request\n", __func__);
++ UTP_SS_EXIT(fsg, UTP_CTX(fsg)->utp_version);
++ break;
++ }
++ utp_poll(fsg);
++ break;
++ case UTP_EXEC:
++ pr_debug("%s: EXEC\n", __func__);
++ data = vmalloc(fsg->common->data_size);
++ memset(data, 0, fsg->common->data_size);
++ /* copy data from usb buffer to utp buffer */
++ utp_do_write(fsg, data, fsg->common->data_size);
++ utp_exec(fsg, data, fsg->common->data_size, param);
++ vfree(data);
++ break;
++ case UTP_GET: /* data from device to host */
++ pr_debug("%s: GET, %d bytes\n", __func__,
++ fsg->common->data_size);
++ r = utp_do_read(fsg, UTP_CTX(fsg)->buffer,
++ fsg->common->data_size);
++ UTP_SS_PASS(fsg);
++ break;
++ case UTP_PUT:
++ utp_context.cur_state = UTP_FLAG_DATA;
++ pr_debug("%s: PUT, Received %d bytes\n", __func__, fsg->common->data_size);/* data from host to device */
++ uud2r = utp_user_data_alloc(fsg->common->data_size);
++ if (!uud2r)
++ return -ENOMEM;
++ uud2r->data.bufsize = fsg->common->data_size;
++ uud2r->data.flags = UTP_FLAG_DATA;
++ utp_do_write(fsg, uud2r->data.data, fsg->common->data_size);
++ /* don't know what will be written */
++ mutex_lock(&UTP_CTX(fsg)->lock);
++ list_add_tail(&uud2r->link, &UTP_CTX(fsg)->read);
++ mutex_unlock(&UTP_CTX(fsg)->lock);
++ wake_up(&UTP_CTX(fsg)->wq);
++ /*
++ * Return PASS or FAIL according to uuc's status
++		 * Enable the block below if uuc's status needs to be
++		 * checked; this requires a different uuc version.
++ */
++#if 0
++ struct utp_user_data *uud = NULL;
++ struct utp_context *ctx;
++ WAIT_ACTIVITY(write);
++ ctx = UTP_CTX(fsg);
++ mutex_lock(&ctx->lock);
++
++ if (!list_empty(&ctx->write))
++ uud = list_first_entry(&ctx->write,
++ struct utp_user_data, link);
++
++ mutex_unlock(&ctx->lock);
++ if (uud) {
++ if (uud->data.flags & UTP_FLAG_STATUS) {
++ printk(KERN_WARNING "%s: exit with status %d\n",
++ __func__, uud->data.status);
++ UTP_SS_EXIT(fsg, uud->data.status);
++ } else {
++ pr_debug("%s: pass\n", __func__);
++ UTP_SS_PASS(fsg);
++ }
++ utp_user_data_free(uud);
++ } else{
++ UTP_SS_PASS(fsg);
++ }
++#endif
++ if (count_list(&UTP_CTX(fsg)->read) < 7) {
++ utp_context.cur_state = 0;
++ UTP_SS_PASS(fsg);
++ } else
++ UTP_SS_BUSY(fsg, UTP_CTX(fsg)->counter);
++
++ break;
++ }
++
++ utp_send_status(fsg);
++ return -1;
++}
+diff -Nur linux-3.14.40.orig/drivers/usb/gadget/fsl_updater.h linux-3.14.40/drivers/usb/gadget/fsl_updater.h
+--- linux-3.14.40.orig/drivers/usb/gadget/fsl_updater.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/usb/gadget/fsl_updater.h 2015-05-01 14:58:04.743427001 -0500
+@@ -0,0 +1,150 @@
++/*
++ * Freescale UUT driver
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc.
++ * Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __FSL_UPDATER_H
++#define __FSL_UPDATER_H
++
++#include <linux/miscdevice.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <linux/ioctl.h>
++/* #include <mach/hardware.h> */
++
++static int utp_init(struct fsg_dev *fsg);
++static void utp_exit(struct fsg_dev *fsg);
++static ssize_t utp_file_read(struct file *file,
++ char __user *buf,
++ size_t size,
++ loff_t *off);
++
++static ssize_t utp_file_write(struct file *file,
++ const char __user *buf,
++ size_t size,
++ loff_t *off);
++
++static long utp_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg);
++static struct utp_user_data *utp_user_data_alloc(size_t size);
++static void utp_user_data_free(struct utp_user_data *uud);
++static int utp_get_sense(struct fsg_dev *fsg);
++static int utp_do_read(struct fsg_dev *fsg, void *data, size_t size);
++static int utp_do_write(struct fsg_dev *fsg, void *data, size_t size);
++static inline void utp_set_sense(struct fsg_dev *fsg, u16 code, u64 reply);
++static int utp_handle_message(struct fsg_dev *fsg,
++ char *cdb_data,
++ int default_reply);
++
++#define UTP_REPLY_PASS 0
++#define UTP_REPLY_EXIT 0x8001
++#define UTP_REPLY_BUSY 0x8002
++#define UTP_REPLY_SIZE 0x8003
++#define UTP_SENSE_KEY 9
++
++#define UTP_MINOR 222
++/* MISC_DYNAMIC_MINOR would be better, but... */
++
++#define UTP_COMMAND_SIZE 80
++
++#define UTP_SS_EXIT(fsg, r) utp_set_sense(fsg, UTP_REPLY_EXIT, (u64)r)
++#define UTP_SS_PASS(fsg) utp_set_sense(fsg, UTP_REPLY_PASS, 0)
++#define UTP_SS_BUSY(fsg, r) utp_set_sense(fsg, UTP_REPLY_BUSY, (u64)r)
++#define UTP_SS_SIZE(fsg, r) utp_set_sense(fsg, UTP_REPLY_SIZE, (u64)r)
++
++#define UTP_IOCTL_BASE 'U'
++#define UTP_GET_CPU_ID _IOR(UTP_IOCTL_BASE, 0, int)
++/* the structure of utp message which is mapped to 16-byte SCSI CBW's CDB */
++#pragma pack(1)
++struct utp_msg {
++ u8 f0;
++ u8 utp_msg_type;
++ u32 utp_msg_tag;
++ union {
++ struct {
++ u32 param_lsb;
++ u32 param_msb;
++ };
++ u64 param;
++ };
++};
++
++enum utp_msg_type {
++ UTP_POLL = 0,
++ UTP_EXEC,
++ UTP_GET,
++ UTP_PUT,
++};
++
++static struct utp_context {
++ wait_queue_head_t wq;
++ wait_queue_head_t list_full_wq;
++ struct mutex lock;
++ struct list_head read;
++ struct list_head write;
++ u32 sd, sdinfo, sdinfo_h; /* sense data */
++ int processed;
++ u8 *buffer;
++ u32 counter;
++ u64 utp_version;
++ u32 cur_state;
++} utp_context;
++
++static const struct file_operations utp_fops = {
++ .open = nonseekable_open,
++ .read = utp_file_read,
++ .write = utp_file_write,
++ /* .ioctl = utp_ioctl, */
++ .unlocked_ioctl = utp_ioctl,
++};
++
++static struct miscdevice utp_dev = {
++ .minor = UTP_MINOR,
++ .name = "utp",
++ .fops = &utp_fops,
++};
++
++#define UTP_FLAG_COMMAND 0x00000001
++#define UTP_FLAG_DATA 0x00000002
++#define UTP_FLAG_STATUS 0x00000004
++#define UTP_FLAG_REPORT_BUSY 0x10000000
++struct utp_message {
++ u32 flags;
++ size_t size;
++ union {
++ struct {
++ u64 payload;
++ char command[1];
++ };
++ struct {
++ size_t bufsize;
++ u8 data[1];
++ };
++ u32 status;
++ };
++};
++
++struct utp_user_data {
++ struct list_head link;
++ struct utp_message data;
++};
++#pragma pack()
++
++static inline struct utp_context *UTP_CTX(struct fsg_dev *fsg)
++{
++ return (struct utp_context *)fsg->utp;
++}
++
++#endif /* __FSL_UPDATER_H */
++
+diff -Nur linux-3.14.40.orig/drivers/usb/gadget/Kconfig linux-3.14.40/drivers/usb/gadget/Kconfig
+--- linux-3.14.40.orig/drivers/usb/gadget/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/gadget/Kconfig 2015-05-01 14:58:04.763427001 -0500
+@@ -953,6 +953,12 @@
+ Say "y" to link the driver statically, or "m" to build
+ a dynamically linked module called "g_mass_storage".
+
++config FSL_UTP
++ bool "UTP over Storage Gadget"
++ depends on USB_MASS_STORAGE
++ help
++	  Freescale's extension to the MSC protocol.
++
+ config USB_GADGET_TARGET
+ tristate "USB Gadget Target Fabric Module"
+ depends on TARGET_CORE
+diff -Nur linux-3.14.40.orig/drivers/usb/gadget/mass_storage.c linux-3.14.40/drivers/usb/gadget/mass_storage.c
+--- linux-3.14.40.orig/drivers/usb/gadget/mass_storage.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/gadget/mass_storage.c 2015-05-01 14:58:04.783427001 -0500
+@@ -266,7 +266,7 @@
+ {
+ return usb_composite_probe(&msg_driver);
+ }
+-module_init(msg_init);
++late_initcall(msg_init);
+
+ static void msg_cleanup(void)
+ {
+diff -Nur linux-3.14.40.orig/drivers/usb/host/ehci-h20ahb.c linux-3.14.40/drivers/usb/host/ehci-h20ahb.c
+--- linux-3.14.40.orig/drivers/usb/host/ehci-h20ahb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/usb/host/ehci-h20ahb.c 2015-05-01 14:58:04.783427001 -0500
+@@ -0,0 +1,341 @@
++/*
++ * Copyright (C) 2007-2013 Texas Instruments, Inc.
++ * Author: Vikram Pandita <vikram.pandita@ti.com>
++ * Author: Anand Gadiyar <gadiyar@ti.com>
++ * Author: Keshava Munegowda <keshava_mgowda@ti.com>
++ * Author: Roger Quadros <rogerq@ti.com>
++ *
++ * Copyright (C) 2009 Nokia Corporation
++ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * Based on ehci-omap.c - driver for USBHOST on OMAP3/4 processors
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/usb/ulpi.h>
++#include <linux/pm_runtime.h>
++#include <linux/gpio.h>
++#include <linux/clk.h>
++#include <linux/usb.h>
++#include <linux/usb/hcd.h>
++#include <linux/of.h>
++#include <linux/dma-mapping.h>
++
++#include "ehci.h"
++
++#define H20AHB_HS_USB_PORTS 1
++
++/* EHCI Synopsys-specific Register Set */
++#define EHCI_INSNREG04 (0xA0)
++#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
++#define EHCI_INSNREG05_ULPI (0xA4)
++#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
++#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
++#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
++#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
++#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
++#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
++
++#define DRIVER_DESC "H20AHB-EHCI Host Controller driver"
++
++static const char hcd_name[] = "ehci-h20ahb";
++
++/*-------------------------------------------------------------------------*/
++
++struct h20ahb_hcd {
++ struct usb_phy *phy[H20AHB_HS_USB_PORTS]; /* one PHY for each port */
++ int nports;
++};
++
++static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
++{
++ __raw_writel(val, base + reg);
++}
++
++static inline u32 ehci_read(void __iomem *base, u32 reg)
++{
++ return __raw_readl(base + reg);
++}
++
++/* configure so an HC device and id are always provided */
++/* always called with process context; sleeping is OK */
++
++static struct hc_driver __read_mostly ehci_h20ahb_hc_driver;
++
++static const struct ehci_driver_overrides ehci_h20ahb_overrides __initdata = {
++ .extra_priv_size = sizeof(struct h20ahb_hcd),
++};
++
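++/*
++ * ULPI PHY register access through the Synopsys EHCI_INSNREG05 viewport:
++ * a read or write request is programmed into the register and the code
++ * busy-waits until the CONTROL bit clears before returning the data.
++ */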
++static int ehci_h20ahb_phy_read(struct usb_phy *x, u32 reg)
++{
++ u32 val = (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT) |
++ (1 << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT) |
++ (3 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT) |
++ (reg << EHCI_INSNREG05_ULPI_REGADD_SHIFT);
++ ehci_write(x->io_priv, 0, val);
++ while ((val = ehci_read(x->io_priv, 0)) &
++ (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT));
++ return val & 0xff;
++}
++
++static int ehci_h20ahb_phy_write(struct usb_phy *x, u32 val, u32 reg)
++{
++ u32 v = (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT) |
++ (1 << EHCI_INSNREG05_ULPI_PORTSEL_SHIFT) |
++ (2 << EHCI_INSNREG05_ULPI_OPSEL_SHIFT) |
++ (reg << EHCI_INSNREG05_ULPI_REGADD_SHIFT) |
++ (val & 0xff);
++ ehci_write(x->io_priv, 0, v);
++ while ((v = ehci_read(x->io_priv, 0)) &
++ (1 << EHCI_INSNREG05_ULPI_CONTROL_SHIFT));
++ return 0;
++}
++
++static struct usb_phy_io_ops ehci_h20ahb_phy_io_ops = {
++ .read = ehci_h20ahb_phy_read,
++ .write = ehci_h20ahb_phy_write,
++};
++
++
++/**
++ * ehci_hcd_h20ahb_probe - initialize Synopsys-based HCDs
++ *
++ * Allocates basic resources for this USB host controller, and
++ * then invokes the start() method for the HCD associated with it
++ * through the hotplug entry's driver_data.
++ */
++static int ehci_hcd_h20ahb_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct resource *res;
++ struct usb_hcd *hcd;
++ void __iomem *regs;
++ int ret;
++ int irq;
++ int i;
++ struct h20ahb_hcd *h20ahb;
++
++ if (usb_disabled())
++ return -ENODEV;
++
++ /* if (!dev->parent) {
++ dev_err(dev, "Missing parent device\n");
++ return -ENODEV;
++ }*/
++
++ /* For DT boot, get platform data from parent. i.e. usbhshost */
++ /*if (dev->of_node) {
++ pdata = dev_get_platdata(dev->parent);
++ dev->platform_data = pdata;
++ }
++
++ if (!pdata) {
++ dev_err(dev, "Missing platform data\n");
++ return -ENODEV;
++ }*/
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(dev, "EHCI irq failed\n");
++ return -ENODEV;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(regs))
++ return PTR_ERR(regs);
++
++ /*
++ * Right now device-tree probed devices don't get dma_mask set.
++ * Since shared usb code relies on it, set it here for now.
++ * Once we have dma capability bindings this can go away.
++ */
++ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ ret = -ENODEV;
++ hcd = usb_create_hcd(&ehci_h20ahb_hc_driver, dev,
++ dev_name(dev));
++ if (!hcd) {
++ dev_err(dev, "Failed to create HCD\n");
++ return -ENOMEM;
++ }
++
++ hcd->rsrc_start = res->start;
++ hcd->rsrc_len = resource_size(res);
++ hcd->regs = regs;
++ hcd_to_ehci(hcd)->caps = regs;
++
++ h20ahb = (struct h20ahb_hcd *)hcd_to_ehci(hcd)->priv;
++ h20ahb->nports = 1;
++
++ platform_set_drvdata(pdev, hcd);
++
++ /* get the PHY devices if needed */
++ for (i = 0 ; i < h20ahb->nports ; i++) {
++ struct usb_phy *phy;
++
++ /* get the PHY device */
++#if 0
++ if (dev->of_node)
++ phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
++ else
++ phy = devm_usb_get_phy_dev(dev, i);
++#endif
++ phy = otg_ulpi_create(&ehci_h20ahb_phy_io_ops, 0);
++ if (IS_ERR(phy)) {
++ ret = PTR_ERR(phy);
++ dev_err(dev, "Can't get PHY device for port %d: %d\n",
++ i, ret);
++ goto err_phy;
++ }
++ phy->dev = dev;
++ usb_add_phy_dev(phy);
++
++ h20ahb->phy[i] = phy;
++ phy->io_priv = hcd->regs + EHCI_INSNREG05_ULPI;
++
++#if 0
++ usb_phy_init(h20ahb->phy[i]);
++ /* bring PHY out of suspend */
++ usb_phy_set_suspend(h20ahb->phy[i], 0);
++#endif
++ }
++
++ /* make the first port's phy the one used by hcd as well */
++ hcd->phy = h20ahb->phy[0];
++
++ pm_runtime_enable(dev);
++ pm_runtime_get_sync(dev);
++
++ /*
++	 * An undocumented "feature" in the H20AHB EHCI controller
++ * causes suspended ports to be taken out of suspend when
++ * the USBCMD.Run/Stop bit is cleared (for example when
++ * we do ehci_bus_suspend).
++ * This breaks suspend-resume if the root-hub is allowed
++ * to suspend. Writing 1 to this undocumented register bit
++ * disables this feature and restores normal behavior.
++ */
++ ehci_write(regs, EHCI_INSNREG04,
++ EHCI_INSNREG04_DISABLE_UNSUSPEND);
++
++ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
++ if (ret) {
++ dev_err(dev, "failed to add hcd with err %d\n", ret);
++ goto err_pm_runtime;
++ }
++ device_wakeup_enable(hcd->self.controller);
++
++ /*
++ * Bring PHYs out of reset for non PHY modes.
++ * Even though HSIC mode is a PHY-less mode, the reset
++ * line exists between the chips and can be modelled
++ * as a PHY device for reset control.
++ */
++ for (i = 0; i < h20ahb->nports; i++) {
++ usb_phy_init(h20ahb->phy[i]);
++ /* bring PHY out of suspend */
++ usb_phy_set_suspend(h20ahb->phy[i], 0);
++ }
++
++ return 0;
++
++err_pm_runtime:
++ pm_runtime_put_sync(dev);
++
++err_phy:
++ for (i = 0; i < h20ahb->nports; i++) {
++ if (h20ahb->phy[i])
++ usb_phy_shutdown(h20ahb->phy[i]);
++ }
++
++ usb_put_hcd(hcd);
++
++ return ret;
++}
++
++
++/**
++ * ehci_hcd_h20ahb_remove - shutdown processing for EHCI HCDs
++ * @pdev: USB Host Controller being removed
++ *
++ * Reverses the effect of usb_ehci_hcd_h20ahb_probe(), first invoking
++ * the HCD's stop() method. It is always called from a thread
++ * context, normally "rmmod", "apmd", or something similar.
++ */
++static int ehci_hcd_h20ahb_remove(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct usb_hcd *hcd = dev_get_drvdata(dev);
++ struct h20ahb_hcd *h20ahb = (struct h20ahb_hcd *)hcd_to_ehci(hcd)->priv;
++ int i;
++
++ usb_remove_hcd(hcd);
++
++ for (i = 0; i < h20ahb->nports; i++) {
++ if (h20ahb->phy[i])
++ usb_phy_shutdown(h20ahb->phy[i]);
++ }
++
++ usb_put_hcd(hcd);
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++
++ return 0;
++}
++
++static const struct of_device_id h20ahb_ehci_dt_ids[] = {
++ { .compatible = "snps,ehci-h20ahb" },
++ { }
++};
++
++MODULE_DEVICE_TABLE(of, h20ahb_ehci_dt_ids);
++
++static struct platform_driver ehci_hcd_h20ahb_driver = {
++ .probe = ehci_hcd_h20ahb_probe,
++ .remove = ehci_hcd_h20ahb_remove,
++ .shutdown = usb_hcd_platform_shutdown,
++ /*.suspend = ehci_hcd_h20ahb_suspend, */
++ /*.resume = ehci_hcd_h20ahb_resume, */
++ .driver = {
++ .name = hcd_name,
++ .of_match_table = h20ahb_ehci_dt_ids,
++ }
++};
++
++/*-------------------------------------------------------------------------*/
++
++static int __init ehci_h20ahb_init(void)
++{
++ if (usb_disabled())
++ return -ENODEV;
++
++ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
++
++ ehci_init_driver(&ehci_h20ahb_hc_driver, &ehci_h20ahb_overrides);
++ return platform_driver_register(&ehci_hcd_h20ahb_driver);
++}
++module_init(ehci_h20ahb_init);
++
++static void __exit ehci_h20ahb_cleanup(void)
++{
++ platform_driver_unregister(&ehci_hcd_h20ahb_driver);
++}
++module_exit(ehci_h20ahb_cleanup);
++
++MODULE_ALIAS("platform:ehci-h20ahb");
++MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/usb/host/ehci-hcd.c linux-3.14.40/drivers/usb/host/ehci-hcd.c
+--- linux-3.14.40.orig/drivers/usb/host/ehci-hcd.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/host/ehci-hcd.c 2015-05-01 14:58:04.795427001 -0500
+@@ -590,11 +590,16 @@
+ */
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+ if (HCC_64BIT_ADDR(hcc_params)) {
+- ehci_writel(ehci, 0, &ehci->regs->segment);
+-#if 0
+-// this is deeply broken on almost all architectures
++#ifdef CONFIG_ARM64
++ ehci_writel(ehci, ehci->periodic_dma >> 32, &ehci->regs->segment);
++ /*
++ * this is deeply broken on almost all architectures
++ * but arm64 can use it so enable it
++ */
+ if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
+ ehci_info(ehci, "enabled 64bit DMA\n");
++#else
++ ehci_writel(ehci, 0, &ehci->regs->segment);
+ #endif
+ }
+
+diff -Nur linux-3.14.40.orig/drivers/usb/host/ehci-hub.c linux-3.14.40/drivers/usb/host/ehci-hub.c
+--- linux-3.14.40.orig/drivers/usb/host/ehci-hub.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/host/ehci-hub.c 2015-05-01 14:58:04.811427001 -0500
+@@ -313,6 +313,15 @@
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
+ ehci_writel(ehci, t2, reg);
++ if ((t2 & PORT_WKDISC_E)
++ && (ehci_port_speed(ehci, t2) ==
++ USB_PORT_STAT_HIGH_SPEED))
++ /*
++			 * If the high-speed device has not switched
++			 * to full-speed idle before WKDISC_E takes
++			 * effect, there will be a WKDISC event.
++ */
++ mdelay(4);
+ changed = 1;
+ }
+ }
+diff -Nur linux-3.14.40.orig/drivers/usb/host/Kconfig linux-3.14.40/drivers/usb/host/Kconfig
+--- linux-3.14.40.orig/drivers/usb/host/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/host/Kconfig 2015-05-01 14:58:04.823427001 -0500
+@@ -158,6 +158,13 @@
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
+
++config USB_EHCI_HCD_SYNOPSYS
++ tristate "Support for Synopsys Host-AHB USB 2.0 controller"
++ depends on USB_EHCI_HCD && USB_PHY
++ ---help---
++	  Enable support for on-chip USB controllers based on DesignWare USB 2.0
++ Host-AHB Controller IP from Synopsys.
++
+ config USB_EHCI_HCD_AT91
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+diff -Nur linux-3.14.40.orig/drivers/usb/host/Makefile linux-3.14.40/drivers/usb/host/Makefile
+--- linux-3.14.40.orig/drivers/usb/host/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/host/Makefile 2015-05-01 14:58:04.831427001 -0500
+@@ -33,6 +33,8 @@
+ obj-$(CONFIG_USB_EHCI_HCD_ORION) += ehci-orion.o
+ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
+ obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
++obj-$(CONFIG_USB_EHCI_S5P) += ehci-s5p.o
++obj-$(CONFIG_USB_EHCI_HCD_SYNOPSYS) += ehci-h20ahb.o
+ obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
+ obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
+ obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+diff -Nur linux-3.14.40.orig/drivers/usb/phy/Kconfig linux-3.14.40/drivers/usb/phy/Kconfig
+--- linux-3.14.40.orig/drivers/usb/phy/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/phy/Kconfig 2015-05-01 14:58:04.835427001 -0500
+@@ -253,7 +253,7 @@
+
+ config USB_ULPI
+ bool "Generic ULPI Transceiver Driver"
+- depends on ARM
++ depends on ARM || ARM64
+ help
+ Enable this to support ULPI connected USB OTG transceivers which
+ are likely found on embedded boards.
+diff -Nur linux-3.14.40.orig/drivers/usb/phy/phy-mxs-usb.c linux-3.14.40/drivers/usb/phy/phy-mxs-usb.c
+--- linux-3.14.40.orig/drivers/usb/phy/phy-mxs-usb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/phy/phy-mxs-usb.c 2015-05-01 14:58:04.851427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012 Freescale Semiconductor, Inc.
++ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ * on behalf of DENX Software Engineering GmbH
+ *
+@@ -20,6 +20,9 @@
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
++#include <linux/of_device.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
+
+ #define DRIVER_NAME "mxs_phy"
+
+@@ -28,18 +31,137 @@
+ #define HW_USBPHY_CTRL_SET 0x34
+ #define HW_USBPHY_CTRL_CLR 0x38
+
++#define HW_USBPHY_DEBUG_SET 0x54
++#define HW_USBPHY_DEBUG_CLR 0x58
++
++#define HW_USBPHY_IP 0x90
++#define HW_USBPHY_IP_SET 0x94
++#define HW_USBPHY_IP_CLR 0x98
++
+ #define BM_USBPHY_CTRL_SFTRST BIT(31)
+ #define BM_USBPHY_CTRL_CLKGATE BIT(30)
++#define BM_USBPHY_CTRL_ENAUTOSET_USBCLKS BIT(26)
++#define BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE BIT(25)
++#define BM_USBPHY_CTRL_ENVBUSCHG_WKUP BIT(23)
++#define BM_USBPHY_CTRL_ENIDCHG_WKUP BIT(22)
++#define BM_USBPHY_CTRL_ENDPDMCHG_WKUP BIT(21)
++#define BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD BIT(20)
++#define BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE BIT(19)
++#define BM_USBPHY_CTRL_ENAUTO_PWRON_PLL BIT(18)
+ #define BM_USBPHY_CTRL_ENUTMILEVEL3 BIT(15)
+ #define BM_USBPHY_CTRL_ENUTMILEVEL2 BIT(14)
+ #define BM_USBPHY_CTRL_ENHOSTDISCONDETECT BIT(1)
+
++#define BM_USBPHY_IP_FIX (BIT(17) | BIT(18))
++
++#define BM_USBPHY_DEBUG_CLKGATE BIT(30)
++
++/* Anatop Registers */
++#define ANADIG_ANA_MISC0 0x150
++#define ANADIG_ANA_MISC0_SET 0x154
++#define ANADIG_ANA_MISC0_CLR 0x158
++
++#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
++#define ANADIG_USB2_VBUS_DET_STAT 0x220
++
++#define ANADIG_USB1_LOOPBACK_SET 0x1e4
++#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
++#define ANADIG_USB2_LOOPBACK_SET 0x244
++#define ANADIG_USB2_LOOPBACK_CLR 0x248
++
++#define ANADIG_USB1_MISC 0x1f0
++#define ANADIG_USB2_MISC 0x250
++
++#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG BIT(12)
++#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL BIT(11)
++
++#define BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
++#define BM_ANADIG_USB2_VBUS_DET_STAT_VBUS_VALID BIT(3)
++
++#define BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 BIT(2)
++#define BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN BIT(5)
++#define BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 BIT(2)
++#define BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN BIT(5)
++
++#define BM_ANADIG_USB1_MISC_RX_VPIN_FS BIT(29)
++#define BM_ANADIG_USB1_MISC_RX_VMIN_FS BIT(28)
++#define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29)
++#define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28)
++
++#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
++
++/* Do disconnection between PHY and controller without vbus */
++#define MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS BIT(0)
++
++/*
++ * The PHY will be in a messy state if a wakeup arrives after the bus has
++ * been suspended (portsc.suspendM set) but before the PHY has been put
++ * into low power mode (portsc.phcd set).
++ */
++#define MXS_PHY_ABNORAML_IN_SUSPEND BIT(1)
++
++/*
++ * The SOF is sent too soon after resuming, which can cause a disconnection
++ * between the host and a high-speed device.
++ */
++#define MXS_PHY_SENDING_SOF_TOO_FAST BIT(2)
++
++/* SoCs that have an anatop module */
++#define MXS_PHY_HAS_ANATOP BIT(3)
++
++struct mxs_phy_data {
++ unsigned int flags;
++};
++
++static const struct mxs_phy_data imx23_phy_data = {
++ .flags = MXS_PHY_ABNORAML_IN_SUSPEND | MXS_PHY_SENDING_SOF_TOO_FAST,
++};
++
++static const struct mxs_phy_data imx6q_phy_data = {
++ .flags = MXS_PHY_SENDING_SOF_TOO_FAST |
++ MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
++ MXS_PHY_HAS_ANATOP,
++};
++
++static const struct mxs_phy_data imx6sl_phy_data = {
++ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
++ MXS_PHY_HAS_ANATOP,
++};
++
++static const struct of_device_id mxs_phy_dt_ids[] = {
++ { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
++ { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
++ { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
++
+ struct mxs_phy {
+ struct usb_phy phy;
+ struct clk *clk;
++ const struct mxs_phy_data *data;
++ struct regmap *regmap_anatop;
++ int port_id;
+ };
+
+-#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
++static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
++{
++ return mxs_phy->data == &imx6q_phy_data;
++}
++
++static inline bool is_imx6sl_phy(struct mxs_phy *mxs_phy)
++{
++ return mxs_phy->data == &imx6sl_phy_data;
++}
++
++/*
++ * The PHY needs some 32 kHz clock cycles to switch from the 32 kHz clock
++ * to the bus clock (AHB/AXI, etc.).
++ */
++static void mxs_phy_clock_switch(void)
++{
++ usleep_range(300, 400);
++}
+
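A quick sanity check of the delay chosen above, assuming the usual 32.768 kHz slow clock:

    1 / 32768 Hz ≈ 30.5 us per cycle
    ~10 cycles   ≈ 305 us  ->  covered by usleep_range(300, 400), with a little headroom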
+ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
+ {
+@@ -53,19 +175,122 @@
+ /* Power up the PHY */
+ writel(0, base + HW_USBPHY_PWD);
+
+- /* enable FS/LS device */
+- writel(BM_USBPHY_CTRL_ENUTMILEVEL2 |
+- BM_USBPHY_CTRL_ENUTMILEVEL3,
++ /*
++ * USB PHY Ctrl Setting
++ * - Auto clock/power on
++ * - Enable full/low speed support
++ */
++ writel(BM_USBPHY_CTRL_ENAUTOSET_USBCLKS |
++ BM_USBPHY_CTRL_ENAUTOCLR_USBCLKGATE |
++ BM_USBPHY_CTRL_ENAUTOCLR_PHY_PWD |
++ BM_USBPHY_CTRL_ENAUTOCLR_CLKGATE |
++ BM_USBPHY_CTRL_ENAUTO_PWRON_PLL |
++ BM_USBPHY_CTRL_ENUTMILEVEL2 |
++ BM_USBPHY_CTRL_ENUTMILEVEL3,
+ base + HW_USBPHY_CTRL_SET);
+
++ /* Enable IC solution */
++ if (is_imx6q_phy(mxs_phy) || is_imx6sl_phy(mxs_phy))
++ writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
++
+ return 0;
+ }
+
++/* Return true if the vbus is there */
++static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
++{
++ unsigned int vbus_value;
++
++ if (mxs_phy->port_id == 0)
++ regmap_read(mxs_phy->regmap_anatop,
++ ANADIG_USB1_VBUS_DET_STAT,
++ &vbus_value);
++ else if (mxs_phy->port_id == 1)
++ regmap_read(mxs_phy->regmap_anatop,
++ ANADIG_USB2_VBUS_DET_STAT,
++ &vbus_value);
++
++ if (vbus_value & BM_ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)
++ return true;
++ else
++ return false;
++}
++
++static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
++{
++ void __iomem *base = mxs_phy->phy.io_priv;
++ u32 reg;
++
++ if (disconnect)
++ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
++ base + HW_USBPHY_DEBUG_CLR);
++
++ if (mxs_phy->port_id == 0) {
++ reg = disconnect ? ANADIG_USB1_LOOPBACK_SET
++ : ANADIG_USB1_LOOPBACK_CLR;
++ regmap_write(mxs_phy->regmap_anatop, reg,
++ BM_ANADIG_USB1_LOOPBACK_UTMI_DIG_TST1 |
++ BM_ANADIG_USB1_LOOPBACK_TSTI_TX_EN);
++ } else if (mxs_phy->port_id == 1) {
++ reg = disconnect ? ANADIG_USB2_LOOPBACK_SET
++ : ANADIG_USB2_LOOPBACK_CLR;
++ regmap_write(mxs_phy->regmap_anatop, reg,
++ BM_ANADIG_USB2_LOOPBACK_UTMI_DIG_TST1 |
++ BM_ANADIG_USB2_LOOPBACK_TSTI_TX_EN);
++ }
++
++ if (!disconnect)
++ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
++ base + HW_USBPHY_DEBUG_SET);
++
++	/* Delay a while so the controller sees the line state as SE0 */
++ if (disconnect)
++ usleep_range(500, 1000);
++}
++
++static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
++{
++ bool vbus_is_on = false;
++
++ /* If the SoCs don't need to disconnect line without vbus, quit */
++ if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS))
++ return;
++
++ /* If the SoCs don't have anatop, quit */
++ if (!mxs_phy->regmap_anatop)
++ return;
++
++ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
++
++ if (on && !vbus_is_on)
++ __mxs_phy_disconnect_line(mxs_phy, true);
++ else
++ __mxs_phy_disconnect_line(mxs_phy, false);
++
++}
++
++static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
++{
++ unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
++
++ /* If the SoCs don't have anatop, quit */
++ if (!mxs_phy->regmap_anatop)
++ return;
++
++ if (is_imx6q_phy(mxs_phy))
++ regmap_write(mxs_phy->regmap_anatop, reg,
++ BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
++ else if (is_imx6sl_phy(mxs_phy))
++ regmap_write(mxs_phy->regmap_anatop,
++ reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
++}
++
+ static int mxs_phy_init(struct usb_phy *phy)
+ {
+ int ret;
+ struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+
++ mxs_phy_clock_switch();
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+@@ -83,17 +308,62 @@
+ clk_disable_unprepare(mxs_phy->clk);
+ }
+
++static bool mxs_phy_is_low_speed_connection(struct mxs_phy *mxs_phy)
++{
++ unsigned int line_state;
++ /* bit definition is the same for all controllers */
++ unsigned int dp_bit = BM_ANADIG_USB1_MISC_RX_VPIN_FS,
++ dm_bit = BM_ANADIG_USB1_MISC_RX_VMIN_FS;
++ unsigned int reg = ANADIG_USB1_MISC;
++
++ /* If the SoCs don't have anatop, quit */
++ if (!mxs_phy->regmap_anatop)
++ return false;
++
++ if (mxs_phy->port_id == 0)
++ reg = ANADIG_USB1_MISC;
++ else if (mxs_phy->port_id == 1)
++ reg = ANADIG_USB2_MISC;
++
++ regmap_read(mxs_phy->regmap_anatop, reg, &line_state);
++
++ if ((line_state & (dp_bit | dm_bit)) == dm_bit)
++ return true;
++ else
++ return false;
++}
++
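The check just above reads back the single-ended receiver status bits, which appear to correspond to D+ (..._RX_VPIN_FS) and D- (..._RX_VMIN_FS). In the USB idle (J) state a full-speed device pulls D+ high and a low-speed device pulls D- high, so the combinations decode roughly as:

    D+  D-   interpretation
     1   0   full-speed idle          -> returns false
     0   1   low-speed idle           -> returns true
     0   0   SE0 (no device / reset)  -> returns false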
+ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
+ {
+ int ret;
+ struct mxs_phy *mxs_phy = to_mxs_phy(x);
++ bool low_speed_connection, vbus_is_on;
++
++ low_speed_connection = mxs_phy_is_low_speed_connection(mxs_phy);
++ vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
+
+ if (suspend) {
+ writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
++		/*
++		 * FIXME: Do not power down the RXPWD1PT1 bit for a low speed
++		 * connection. Low speed connections can have problems in very
++		 * rare cases during the USB suspend and resume process.
++		 */
++ if (low_speed_connection & vbus_is_on) {
++			/*
++			 * If the value written to the PWD register is not
++			 * 0xffffffff, several 32KHz cycles are needed.
++			 */
++ mxs_phy_clock_switch();
++ writel(0xffbfffff, x->io_priv + HW_USBPHY_PWD);
++ } else {
++ writel(0xffffffff, x->io_priv + HW_USBPHY_PWD);
++ }
+ writel(BM_USBPHY_CTRL_CLKGATE,
+ x->io_priv + HW_USBPHY_CTRL_SET);
+ clk_disable_unprepare(mxs_phy->clk);
+ } else {
++ mxs_phy_clock_switch();
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+@@ -105,11 +375,28 @@
+ return 0;
+ }
+
++static int mxs_phy_set_wakeup(struct usb_phy *x, bool enabled)
++{
++ struct mxs_phy *mxs_phy = to_mxs_phy(x);
++ u32 value = BM_USBPHY_CTRL_ENVBUSCHG_WKUP |
++ BM_USBPHY_CTRL_ENDPDMCHG_WKUP |
++ BM_USBPHY_CTRL_ENIDCHG_WKUP;
++ if (enabled) {
++ mxs_phy_disconnect_line(mxs_phy, true);
++ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_SET);
++ } else {
++ writel_relaxed(value, x->io_priv + HW_USBPHY_CTRL_CLR);
++ mxs_phy_disconnect_line(mxs_phy, false);
++ }
++
++ return 0;
++}
++
+ static int mxs_phy_on_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+ {
+- dev_dbg(phy->dev, "%s speed device has connected\n",
+- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
++ dev_dbg(phy->dev, "%s device has connected\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
+
+ if (speed == USB_SPEED_HIGH)
+ writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
+@@ -121,8 +408,8 @@
+ static int mxs_phy_on_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+ {
+- dev_dbg(phy->dev, "%s speed device has disconnected\n",
+- (speed == USB_SPEED_HIGH) ? "high" : "non-high");
++ dev_dbg(phy->dev, "%s device has disconnected\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
+
+ if (speed == USB_SPEED_HIGH)
+ writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
+@@ -131,6 +418,48 @@
+ return 0;
+ }
+
++static int mxs_phy_on_suspend(struct usb_phy *phy,
++ enum usb_device_speed speed)
++{
++ struct mxs_phy *mxs_phy = to_mxs_phy(phy);
++
++ dev_dbg(phy->dev, "%s device has suspended\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
++
++	/* delay 4ms to wait for the bus to enter idle */
++ usleep_range(4000, 5000);
++
++ if (mxs_phy->data->flags & MXS_PHY_ABNORAML_IN_SUSPEND) {
++ writel_relaxed(0xffffffff, phy->io_priv + HW_USBPHY_PWD);
++ writel_relaxed(0, phy->io_priv + HW_USBPHY_PWD);
++ }
++
++ if (speed == USB_SPEED_HIGH)
++ writel_relaxed(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
++ phy->io_priv + HW_USBPHY_CTRL_CLR);
++
++ return 0;
++}
++
++/*
++ * The resume signal must be finished here.
++ */
++static int mxs_phy_on_resume(struct usb_phy *phy,
++ enum usb_device_speed speed)
++{
++ dev_dbg(phy->dev, "%s device has resumed\n",
++ (speed == USB_SPEED_HIGH) ? "HS" : "FS/LS");
++
++ if (speed == USB_SPEED_HIGH) {
++ /* Make sure the device has switched to High-Speed mode */
++ udelay(500);
++ writel_relaxed(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
++ phy->io_priv + HW_USBPHY_CTRL_SET);
++ }
++
++ return 0;
++}
++
+ static int mxs_phy_probe(struct platform_device *pdev)
+ {
+ struct resource *res;
+@@ -138,6 +467,9 @@
+ struct clk *clk;
+ struct mxs_phy *mxs_phy;
+ int ret;
++ const struct of_device_id *of_id =
++ of_match_device(mxs_phy_dt_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+@@ -157,6 +489,13 @@
+ return -ENOMEM;
+ }
+
++ ret = of_alias_get_id(np, "usbphy");
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
++ return ret;
++ }
++ mxs_phy->port_id = ret;
++
+ mxs_phy->phy.io_priv = base;
+ mxs_phy->phy.dev = &pdev->dev;
+ mxs_phy->phy.label = DRIVER_NAME;
+@@ -166,11 +505,30 @@
+ mxs_phy->phy.notify_connect = mxs_phy_on_connect;
+ mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
+ mxs_phy->phy.type = USB_PHY_TYPE_USB2;
++ mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
+
+ mxs_phy->clk = clk;
++ mxs_phy->data = of_id->data;
++
++ if (mxs_phy->data->flags & MXS_PHY_SENDING_SOF_TOO_FAST) {
++ mxs_phy->phy.notify_suspend = mxs_phy_on_suspend;
++ mxs_phy->phy.notify_resume = mxs_phy_on_resume;
++ }
+
+ platform_set_drvdata(pdev, mxs_phy);
+
++ if (mxs_phy->data->flags & MXS_PHY_HAS_ANATOP) {
++ mxs_phy->regmap_anatop = syscon_regmap_lookup_by_phandle
++ (np, "fsl,anatop");
++ if (IS_ERR(mxs_phy->regmap_anatop)) {
++ dev_dbg(&pdev->dev,
++ "failed to find regmap for anatop\n");
++ return PTR_ERR(mxs_phy->regmap_anatop);
++ }
++ }
++
++ device_set_wakeup_capable(&pdev->dev, true);
++
+ ret = usb_add_phy_dev(&mxs_phy->phy);
+ if (ret)
+ return ret;
+@@ -187,11 +545,27 @@
+ return 0;
+ }
+
+-static const struct of_device_id mxs_phy_dt_ids[] = {
+- { .compatible = "fsl,imx23-usbphy", },
+- { /* sentinel */ }
+-};
+-MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
++static int mxs_phy_system_suspend(struct device *dev)
++{
++ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
++
++ if (device_may_wakeup(dev))
++ mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
++
++ return 0;
++}
++
++static int mxs_phy_system_resume(struct device *dev)
++{
++ struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
++
++ if (device_may_wakeup(dev))
++ mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
++
++ return 0;
++}
++
++SIMPLE_DEV_PM_OPS(mxs_phy_pm, mxs_phy_system_suspend, mxs_phy_system_resume);
+
+ static struct platform_driver mxs_phy_driver = {
+ .probe = mxs_phy_probe,
+@@ -200,6 +574,7 @@
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mxs_phy_dt_ids,
++ .pm = &mxs_phy_pm,
+ },
+ };
+
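The per-SoC handling in the driver above follows a common pattern: the of_device_id match entry carries a pointer to a flags structure, and later code paths simply test bits in it. The standalone program below is only a sketch of that pattern with invented names (it is not part of the driver), but it compiles and runs as plain C:

    #include <stdio.h>

    #define QUIRK_DISCONNECT_WITHOUT_VBUS  (1u << 0)   /* BIT(0)-style quirk flags */
    #define QUIRK_HAS_ANATOP               (1u << 3)

    struct soc_data {
    	unsigned int flags;                        /* per-SoC quirk bits */
    };

    static const struct soc_data imx6q_like = {
    	.flags = QUIRK_DISCONNECT_WITHOUT_VBUS | QUIRK_HAS_ANATOP,
    };

    static const struct soc_data imx23_like = { .flags = 0 };

    static void probe_one(const char *name, const struct soc_data *d)
    {
    	/* same shape as "if (mxs_phy->data->flags & MXS_PHY_HAS_ANATOP)" */
    	printf("%s: anatop lookup %s\n", name,
    	       (d->flags & QUIRK_HAS_ANATOP) ? "needed" : "skipped");
    }

    int main(void)
    {
    	probe_one("imx6q-like", &imx6q_like);
    	probe_one("imx23-like", &imx23_like);
    	return 0;
    }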
+diff -Nur linux-3.14.40.orig/drivers/usb/phy/phy-ulpi.c linux-3.14.40/drivers/usb/phy/phy-ulpi.c
+--- linux-3.14.40.orig/drivers/usb/phy/phy-ulpi.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/usb/phy/phy-ulpi.c 2015-05-01 14:58:04.851427001 -0500
+@@ -48,6 +48,7 @@
+ ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
++ ULPI_INFO(ULPI_ID(0x0424, 0x0009), "SMSC USB334x"),
+ ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
+ };
+
+diff -Nur linux-3.14.40.orig/drivers/video/amba-clcd.c linux-3.14.40/drivers/video/amba-clcd.c
+--- linux-3.14.40.orig/drivers/video/amba-clcd.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/video/amba-clcd.c 2015-05-01 14:58:05.679427001 -0500
+@@ -17,7 +17,10 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/memblock.h>
+ #include <linux/mm.h>
++#include <linux/of.h>
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+@@ -31,8 +34,20 @@
+
+ #define to_clcd(info) container_of(info, struct clcd_fb, fb)
+
++#ifdef CONFIG_ARM
++#define clcdfb_dma_alloc dma_alloc_writecombine
++#define clcdfb_dma_free dma_free_writecombine
++#define clcdfb_dma_mmap dma_mmap_writecombine
++#else
++#define clcdfb_dma_alloc dma_alloc_coherent
++#define clcdfb_dma_free dma_free_coherent
++#define clcdfb_dma_mmap dma_mmap_coherent
++#endif
++
+ /* This is limited to 16 characters when displayed by X startup */
+ static const char *clcd_name = "CLCD FB";
++static char *def_mode;
++module_param_named(mode, def_mode, charp, 0);
+
+ /*
+ * Unfortunately, the enable/disable functions may be called either from
+@@ -234,6 +249,17 @@
+ bgr = caps & CLCD_CAP_BGR && var->blue.offset == 0;
+ rgb = caps & CLCD_CAP_RGB && var->red.offset == 0;
+
++	/*
++	 * It seems that for 32-bit mode there is confusion about the RGB
++	 * ordering somewhere between userspace, the kernel and the hardware.
++	 * The following hack seems to get things working, at least on
++	 * vexpress hardware and models...
++	 */
++ if (var->bits_per_pixel == 32) {
++ bgr = false;
++ rgb = true;
++ }
++
+ if (!bgr && !rgb)
+ /*
+ * The requested format was not possible, try just
+@@ -393,6 +419,44 @@
+ return 0;
+ }
+
++int clcdfb_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
++{
++ return clcdfb_dma_mmap(&fb->dev->dev, vma,
++ fb->fb.screen_base,
++ fb->fb.fix.smem_start,
++ fb->fb.fix.smem_len);
++}
++
++int clcdfb_mmap_io(struct clcd_fb *fb, struct vm_area_struct *vma)
++{
++ unsigned long user_count, count, pfn, off;
++
++ user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ count = PAGE_ALIGN(fb->fb.fix.smem_len) >> PAGE_SHIFT;
++ pfn = fb->fb.fix.smem_start >> PAGE_SHIFT;
++ off = vma->vm_pgoff;
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ if (off < count && user_count <= (count - off))
++ return remap_pfn_range(vma, vma->vm_start, pfn + off,
++ user_count << PAGE_SHIFT,
++ vma->vm_page_prot);
++
++ return -ENXIO;
++}
++
++void clcdfb_remove_dma(struct clcd_fb *fb)
++{
++ clcdfb_dma_free(&fb->dev->dev, fb->fb.fix.smem_len,
++ fb->fb.screen_base, fb->fb.fix.smem_start);
++}
++
++void clcdfb_remove_io(struct clcd_fb *fb)
++{
++ iounmap(fb->fb.screen_base);
++}
++
+ static int clcdfb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+ {
+@@ -543,14 +607,247 @@
+ return ret;
+ }
+
++struct string_lookup {
++ const char *string;
++ const u32 val;
++};
++
++static struct string_lookup vmode_lookups[] = {
++ { "FB_VMODE_NONINTERLACED", FB_VMODE_NONINTERLACED},
++ { "FB_VMODE_INTERLACED", FB_VMODE_INTERLACED},
++ { "FB_VMODE_DOUBLE", FB_VMODE_DOUBLE},
++ { "FB_VMODE_ODD_FLD_FIRST", FB_VMODE_ODD_FLD_FIRST},
++ { NULL, 0 },
++};
++
++static struct string_lookup tim2_lookups[] = {
++ { "TIM2_CLKSEL", TIM2_CLKSEL},
++ { "TIM2_IVS", TIM2_IVS},
++ { "TIM2_IHS", TIM2_IHS},
++ { "TIM2_IPC", TIM2_IPC},
++ { "TIM2_IOE", TIM2_IOE},
++ { "TIM2_BCD", TIM2_BCD},
++ { NULL, 0},
++};
++static struct string_lookup cntl_lookups[] = {
++ {"CNTL_LCDEN", CNTL_LCDEN},
++ {"CNTL_LCDBPP1", CNTL_LCDBPP1},
++ {"CNTL_LCDBPP2", CNTL_LCDBPP2},
++ {"CNTL_LCDBPP4", CNTL_LCDBPP4},
++ {"CNTL_LCDBPP8", CNTL_LCDBPP8},
++ {"CNTL_LCDBPP16", CNTL_LCDBPP16},
++ {"CNTL_LCDBPP16_565", CNTL_LCDBPP16_565},
++ {"CNTL_LCDBPP16_444", CNTL_LCDBPP16_444},
++ {"CNTL_LCDBPP24", CNTL_LCDBPP24},
++ {"CNTL_LCDBW", CNTL_LCDBW},
++ {"CNTL_LCDTFT", CNTL_LCDTFT},
++ {"CNTL_LCDMONO8", CNTL_LCDMONO8},
++ {"CNTL_LCDDUAL", CNTL_LCDDUAL},
++ {"CNTL_BGR", CNTL_BGR},
++ {"CNTL_BEBO", CNTL_BEBO},
++ {"CNTL_BEPO", CNTL_BEPO},
++ {"CNTL_LCDPWR", CNTL_LCDPWR},
++ {"CNTL_LCDVCOMP(1)", CNTL_LCDVCOMP(1)},
++ {"CNTL_LCDVCOMP(2)", CNTL_LCDVCOMP(2)},
++ {"CNTL_LCDVCOMP(3)", CNTL_LCDVCOMP(3)},
++ {"CNTL_LCDVCOMP(4)", CNTL_LCDVCOMP(4)},
++ {"CNTL_LCDVCOMP(5)", CNTL_LCDVCOMP(5)},
++ {"CNTL_LCDVCOMP(6)", CNTL_LCDVCOMP(6)},
++ {"CNTL_LCDVCOMP(7)", CNTL_LCDVCOMP(7)},
++ {"CNTL_LDMAFIFOTIME", CNTL_LDMAFIFOTIME},
++ {"CNTL_WATERMARK", CNTL_WATERMARK},
++ { NULL, 0},
++};
++static struct string_lookup caps_lookups[] = {
++ {"CLCD_CAP_RGB444", CLCD_CAP_RGB444},
++ {"CLCD_CAP_RGB5551", CLCD_CAP_RGB5551},
++ {"CLCD_CAP_RGB565", CLCD_CAP_RGB565},
++ {"CLCD_CAP_RGB888", CLCD_CAP_RGB888},
++ {"CLCD_CAP_BGR444", CLCD_CAP_BGR444},
++ {"CLCD_CAP_BGR5551", CLCD_CAP_BGR5551},
++ {"CLCD_CAP_BGR565", CLCD_CAP_BGR565},
++ {"CLCD_CAP_BGR888", CLCD_CAP_BGR888},
++ {"CLCD_CAP_444", CLCD_CAP_444},
++ {"CLCD_CAP_5551", CLCD_CAP_5551},
++ {"CLCD_CAP_565", CLCD_CAP_565},
++ {"CLCD_CAP_888", CLCD_CAP_888},
++ {"CLCD_CAP_RGB", CLCD_CAP_RGB},
++ {"CLCD_CAP_BGR", CLCD_CAP_BGR},
++ {"CLCD_CAP_ALL", CLCD_CAP_ALL},
++ { NULL, 0},
++};
++
++u32 parse_setting(struct string_lookup *lookup, const char *name)
++{
++ int i = 0;
++ while (lookup[i].string != NULL) {
++ if (strcmp(lookup[i].string, name) == 0)
++ return lookup[i].val;
++ ++i;
++ }
++ return -EINVAL;
++}
++
++u32 get_string_lookup(struct device_node *node, const char *name,
++ struct string_lookup *lookup)
++{
++ const char *string;
++ int count, i, ret = 0;
++
++ count = of_property_count_strings(node, name);
++ if (count >= 0)
++ for (i = 0; i < count; i++)
++ if (of_property_read_string_index(node, name, i,
++ &string) == 0)
++ ret |= parse_setting(lookup, string);
++ return ret;
++}
++
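As a worked example of the two helpers above: a panel node carrying caps = "CLCD_CAP_565", "CLCD_CAP_888"; makes get_string_lookup() return CLCD_CAP_565 | CLCD_CAP_888. Note that an unrecognised string makes parse_setting() return -EINVAL, which is ORed into the accumulated value as 0xffffffea, so a misspelt property string effectively sets almost every flag rather than being rejected.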
++int get_val(struct device_node *node, const char *string)
++{
++ u32 ret = 0;
++
++ if (of_property_read_u32(node, string, &ret))
++ ret = -1;
++ return ret;
++}
++
++struct clcd_panel *getPanel(struct device_node *node)
++{
++ static struct clcd_panel panel;
++
++ panel.mode.refresh = get_val(node, "refresh");
++ panel.mode.xres = get_val(node, "xres");
++ panel.mode.yres = get_val(node, "yres");
++ panel.mode.pixclock = get_val(node, "pixclock");
++ panel.mode.left_margin = get_val(node, "left_margin");
++ panel.mode.right_margin = get_val(node, "right_margin");
++ panel.mode.upper_margin = get_val(node, "upper_margin");
++ panel.mode.lower_margin = get_val(node, "lower_margin");
++ panel.mode.hsync_len = get_val(node, "hsync_len");
++ panel.mode.vsync_len = get_val(node, "vsync_len");
++ panel.mode.sync = get_val(node, "sync");
++ panel.bpp = get_val(node, "bpp");
++ panel.width = (signed short) get_val(node, "width");
++ panel.height = (signed short) get_val(node, "height");
++
++ panel.mode.vmode = get_string_lookup(node, "vmode", vmode_lookups);
++ panel.tim2 = get_string_lookup(node, "tim2", tim2_lookups);
++ panel.cntl = get_string_lookup(node, "cntl", cntl_lookups);
++ panel.caps = get_string_lookup(node, "caps", caps_lookups);
++
++ return &panel;
++}
++
++struct clcd_panel *clcdfb_get_panel(const char *name)
++{
++ struct device_node *node = NULL;
++ const char *mode;
++ struct clcd_panel *panel = NULL;
++
++ do {
++ node = of_find_compatible_node(node, NULL, "panel");
++ if (node)
++ if (of_property_read_string(node, "mode", &mode) == 0)
++ if (strcmp(mode, name) == 0) {
++ panel = getPanel(node);
++ panel->mode.name = name;
++ }
++ } while (node != NULL);
++
++ return panel;
++}
++
++#ifdef CONFIG_OF
++static int clcdfb_dt_init(struct clcd_fb *fb)
++{
++ int err = 0;
++ struct device_node *node;
++ const char *mode;
++ dma_addr_t dma;
++ u32 use_dma;
++ const __be32 *prop;
++ int len, na, ns;
++ phys_addr_t fb_base, fb_size;
++
++ node = fb->dev->dev.of_node;
++ if (!node)
++ return -ENODEV;
++
++ na = of_n_addr_cells(node);
++ ns = of_n_size_cells(node);
++
++ if (def_mode && strlen(def_mode) > 0) {
++ fb->panel = clcdfb_get_panel(def_mode);
++ if (!fb->panel)
++ printk(KERN_ERR "CLCD: invalid mode specified on the command line (%s)\n", def_mode);
++ }
++
++ if (!fb->panel) {
++ if (WARN_ON(of_property_read_string(node, "mode", &mode)))
++ return -ENODEV;
++ fb->panel = clcdfb_get_panel(mode);
++ }
++
++ if (!fb->panel)
++ return -EINVAL;
++ fb->fb.fix.smem_len = fb->panel->mode.xres * fb->panel->mode.yres * 4;
++
++ fb->board->name = "Device Tree CLCD PL111";
++ fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888;
++ fb->board->check = clcdfb_check;
++ fb->board->decode = clcdfb_decode;
++
++ if (of_property_read_u32(node, "use_dma", &use_dma))
++ use_dma = 0;
++
++ if (use_dma) {
++ fb->fb.screen_base = clcdfb_dma_alloc(&fb->dev->dev,
++ fb->fb.fix.smem_len,
++ &dma, GFP_KERNEL);
++ if (!fb->fb.screen_base) {
++ pr_err("CLCD: unable to map framebuffer\n");
++ return -ENOMEM;
++ }
++
++ fb->fb.fix.smem_start = dma;
++ fb->board->mmap = clcdfb_mmap_dma;
++ fb->board->remove = clcdfb_remove_dma;
++ } else {
++ prop = of_get_property(node, "framebuffer", &len);
++ if (WARN_ON(!prop || len < (na + ns) * sizeof(*prop)))
++ return -EINVAL;
++
++ fb_base = of_read_number(prop, na);
++ fb_size = of_read_number(prop + na, ns);
++
++ fb->fb.fix.smem_start = fb_base;
++ fb->fb.screen_base = ioremap_wc(fb_base, fb_size);
++ fb->board->mmap = clcdfb_mmap_io;
++ fb->board->remove = clcdfb_remove_io;
++ }
++
++ return err;
++}
++#endif /* CONFIG_OF */
++
+ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
+ {
+ struct clcd_board *board = dev_get_platdata(&dev->dev);
+ struct clcd_fb *fb;
+ int ret;
+
+- if (!board)
+- return -EINVAL;
++ if (!board) {
++#ifdef CONFIG_OF
++ if (dev->dev.of_node) {
++ board = kzalloc(sizeof(struct clcd_board), GFP_KERNEL);
++ if (!board)
++ return -ENOMEM;
++ board->setup = clcdfb_dt_init;
++ } else
++#endif
++ return -EINVAL;
++ }
+
+ ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (ret)
+diff -Nur linux-3.14.40.orig/drivers/video/arm-hdlcd.c linux-3.14.40/drivers/video/arm-hdlcd.c
+--- linux-3.14.40.orig/drivers/video/arm-hdlcd.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/arm-hdlcd.c 2015-05-01 14:58:05.679427001 -0500
+@@ -0,0 +1,844 @@
++/*
++ * drivers/video/arm-hdlcd.c
++ *
++ * Copyright (C) 2011 ARM Limited
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ * ARM HDLCD Controller
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/of.h>
++#include <linux/fb.h>
++#include <linux/clk.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/dma-mapping.h>
++#include <linux/platform_device.h>
++#include <linux/memblock.h>
++#include <linux/arm-hdlcd.h>
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#endif
++
++#include "edid.h"
++
++#ifdef CONFIG_SERIAL_AMBA_PCU_UART
++int get_edid(u8 *msgbuf);
++#else
++#endif
++
++#define to_hdlcd_device(info) container_of(info, struct hdlcd_device, fb)
++
++static struct of_device_id hdlcd_of_matches[] = {
++ { .compatible = "arm,hdlcd" },
++ {},
++};
++
++/* Framebuffer size. */
++static unsigned long framebuffer_size;
++
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++static unsigned long buffer_underrun_events;
++static DEFINE_SPINLOCK(hdlcd_underrun_lock);
++
++static void hdlcd_underrun_set(unsigned long val)
++{
++ spin_lock(&hdlcd_underrun_lock);
++ buffer_underrun_events = val;
++ spin_unlock(&hdlcd_underrun_lock);
++}
++
++static unsigned long hdlcd_underrun_get(void)
++{
++ unsigned long val;
++ spin_lock(&hdlcd_underrun_lock);
++ val = buffer_underrun_events;
++ spin_unlock(&hdlcd_underrun_lock);
++ return val;
++}
++
++#ifdef CONFIG_PROC_FS
++static int hdlcd_underrun_show(struct seq_file *m, void *v)
++{
++ unsigned char underrun_string[32];
++ snprintf(underrun_string, 32, "%lu\n", hdlcd_underrun_get());
++ seq_puts(m, underrun_string);
++ return 0;
++}
++
++static int proc_hdlcd_underrun_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, hdlcd_underrun_show, NULL);
++}
++
++static const struct file_operations proc_hdlcd_underrun_operations = {
++ .open = proc_hdlcd_underrun_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int hdlcd_underrun_init(void)
++{
++ hdlcd_underrun_set(0);
++ proc_create("hdlcd_underrun", 0, NULL, &proc_hdlcd_underrun_operations);
++ return 0;
++}
++static void hdlcd_underrun_close(void)
++{
++ remove_proc_entry("hdlcd_underrun", NULL);
++}
++#else
++static int hdlcd_underrun_init(void) { return 0; }
++static void hdlcd_underrun_close(void) { }
++#endif
++#endif
++
++static char *fb_mode = "1680x1050-32@60\0\0\0\0\0";
++
++static struct fb_var_screeninfo cached_var_screeninfo;
++
++static struct fb_videomode hdlcd_default_mode = {
++ .refresh = 60,
++ .xres = 1680,
++ .yres = 1050,
++ .pixclock = 8403,
++ .left_margin = 80,
++ .right_margin = 48,
++ .upper_margin = 21,
++ .lower_margin = 3,
++ .hsync_len = 32,
++ .vsync_len = 6,
++ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ .vmode = FB_VMODE_NONINTERLACED
++};
++
++static inline void hdlcd_enable(struct hdlcd_device *hdlcd)
++{
++ dev_dbg(hdlcd->dev, "HDLCD: output enabled\n");
++ writel(1, hdlcd->base + HDLCD_REG_COMMAND);
++}
++
++static inline void hdlcd_disable(struct hdlcd_device *hdlcd)
++{
++ dev_dbg(hdlcd->dev, "HDLCD: output disabled\n");
++ writel(0, hdlcd->base + HDLCD_REG_COMMAND);
++}
++
++static int hdlcd_set_bitfields(struct hdlcd_device *hdlcd,
++ struct fb_var_screeninfo *var)
++{
++ int ret = 0;
++
++ memset(&var->transp, 0, sizeof(var->transp));
++ var->red.msb_right = 0;
++ var->green.msb_right = 0;
++ var->blue.msb_right = 0;
++ var->blue.offset = 0;
++
++ switch (var->bits_per_pixel) {
++ case 8:
++ /* pseudocolor */
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ break;
++ case 16:
++ /* 565 format */
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ break;
++ case 32:
++ var->transp.length = 8;
++ case 24:
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ if (!ret) {
++		if (var->bits_per_pixel != 32)
++ {
++ var->green.offset = var->blue.length;
++ var->red.offset = var->green.offset + var->green.length;
++ }
++ else
++ {
++			/* Previously, the byte ordering for 32-bit color was
++			 * (msb)<alpha><red><green><blue>(lsb),
++			 * but this does not match what Android expects and
++			 * the colors come out wrong. Instead, use
++			 * <alpha><blue><green><red>.
++			 * Since we tell fb what we are doing, console,
++			 * X and directfb access should work fine.
++			 */
++ var->green.offset = var->red.length;
++ var->blue.offset = var->green.offset + var->green.length;
++ var->transp.offset = var->blue.offset + var->blue.length;
++ }
++ }
++
++ return ret;
++}
++
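Concretely, for the 32 bpp case above (and assuming var->red.offset is left at 0, since that branch never writes it), the field layout works out as:

    red.offset    = 0
    green.offset  = red.length                   = 8
    blue.offset   = green.offset + green.length  = 16
    transp.offset = blue.offset  + blue.length   = 24

i.e. bits 31..24 hold alpha, then blue, green and red towards the LSB, matching the <alpha><blue><green><red> ordering described in the comment; on a little-endian system the bytes land in memory as R, G, B, A.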
++static int hdlcd_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ int bytes_per_pixel = var->bits_per_pixel / 8;
++
++#ifdef HDLCD_NO_VIRTUAL_SCREEN
++ var->yres_virtual = var->yres;
++#else
++ var->yres_virtual = 2 * var->yres;
++#endif
++
++ if ((var->xres_virtual * bytes_per_pixel * var->yres_virtual) > hdlcd->fb.fix.smem_len)
++ return -ENOMEM;
++
++ if (var->xres > HDLCD_MAX_XRES || var->yres > HDLCD_MAX_YRES)
++ return -EINVAL;
++
++ /* make sure the bitfields are set appropriately */
++ return hdlcd_set_bitfields(hdlcd, var);
++}
++
++/* prototype */
++static int hdlcd_pan_display(struct fb_var_screeninfo *var,
++ struct fb_info *info);
++
++#define WRITE_HDLCD_REG(reg, value) writel((value), hdlcd->base + (reg))
++#define READ_HDLCD_REG(reg) readl(hdlcd->base + (reg))
++
++static int hdlcd_set_par(struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ int bytes_per_pixel = hdlcd->fb.var.bits_per_pixel / 8;
++ int polarities;
++ int old_yoffset;
++
++ /* check for shortcuts */
++ old_yoffset = cached_var_screeninfo.yoffset;
++ cached_var_screeninfo.yoffset = info->var.yoffset;
++ if (!memcmp(&info->var, &cached_var_screeninfo,
++ sizeof(struct fb_var_screeninfo))) {
++		if (old_yoffset != info->var.yoffset) {
++			/* we only changed yoffset, and we already
++			 * recorded it a couple of lines up
++			 */
++ hdlcd_pan_display(&info->var, info);
++ }
++ /* or no change */
++ return 0;
++ }
++
++ hdlcd->fb.fix.line_length = hdlcd->fb.var.xres * bytes_per_pixel;
++
++ if (hdlcd->fb.var.bits_per_pixel >= 16)
++ hdlcd->fb.fix.visual = FB_VISUAL_TRUECOLOR;
++ else
++ hdlcd->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
++
++ memcpy(&cached_var_screeninfo, &info->var, sizeof(struct fb_var_screeninfo));
++
++ polarities = HDLCD_POLARITY_DATAEN |
++#ifndef CONFIG_ARCH_TUSCAN
++ HDLCD_POLARITY_PIXELCLK |
++#endif
++ HDLCD_POLARITY_DATA;
++ polarities |= (hdlcd->fb.var.sync & FB_SYNC_HOR_HIGH_ACT) ? HDLCD_POLARITY_HSYNC : 0;
++ polarities |= (hdlcd->fb.var.sync & FB_SYNC_VERT_HIGH_ACT) ? HDLCD_POLARITY_VSYNC : 0;
++
++ hdlcd_disable(hdlcd);
++
++ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_LENGTH, hdlcd->fb.var.xres * bytes_per_pixel);
++ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_PITCH, hdlcd->fb.var.xres * bytes_per_pixel);
++ WRITE_HDLCD_REG(HDLCD_REG_FB_LINE_COUNT, hdlcd->fb.var.yres - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_SYNC, hdlcd->fb.var.vsync_len - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_BACK_PORCH, hdlcd->fb.var.upper_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_DATA, hdlcd->fb.var.yres - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_V_FRONT_PORCH, hdlcd->fb.var.lower_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_SYNC, hdlcd->fb.var.hsync_len - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_BACK_PORCH, hdlcd->fb.var.left_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_DATA, hdlcd->fb.var.xres - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_H_FRONT_PORCH, hdlcd->fb.var.right_margin - 1);
++ WRITE_HDLCD_REG(HDLCD_REG_POLARITIES, polarities);
++ WRITE_HDLCD_REG(HDLCD_REG_PIXEL_FORMAT, (bytes_per_pixel - 1) << 3);
++#ifdef HDLCD_RED_DEFAULT_COLOUR
++ WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, (0x00ff0000 | (hdlcd->fb.var.red.length & 0xf) << 8) \
++ | hdlcd->fb.var.red.offset);
++#else
++ WRITE_HDLCD_REG(HDLCD_REG_RED_SELECT, ((hdlcd->fb.var.red.length & 0xf) << 8) | hdlcd->fb.var.red.offset);
++#endif
++ WRITE_HDLCD_REG(HDLCD_REG_GREEN_SELECT, ((hdlcd->fb.var.green.length & 0xf) << 8) | hdlcd->fb.var.green.offset);
++ WRITE_HDLCD_REG(HDLCD_REG_BLUE_SELECT, ((hdlcd->fb.var.blue.length & 0xf) << 8) | hdlcd->fb.var.blue.offset);
++
++ clk_set_rate(hdlcd->clk, (1000000000 / hdlcd->fb.var.pixclock) * 1000);
++ clk_enable(hdlcd->clk);
++
++ hdlcd_enable(hdlcd);
++
++ return 0;
++}
++
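Plugging the default 1680x1050 mode into hdlcd_set_par() shows the minus-one convention the timing registers use, and the resulting pixel clock request:

    H_DATA        = 1680 - 1 = 1679      V_DATA        = 1050 - 1 = 1049
    H_SYNC        =   32 - 1 =   31      V_SYNC        =    6 - 1 =    5
    H_BACK_PORCH  =   80 - 1 =   79      V_BACK_PORCH  =   21 - 1 =   20
    H_FRONT_PORCH =   48 - 1 =   47      V_FRONT_PORCH =    3 - 1 =    2

    clk rate = (1000000000 / 8403) * 1000 = 119,005,000 Hz ≈ 119.0 MHz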
++static int hdlcd_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
++ unsigned int blue, unsigned int transp, struct fb_info *info)
++{
++ if (regno < 16) {
++ u32 *pal = info->pseudo_palette;
++
++ pal[regno] = ((red >> 8) << info->var.red.offset) |
++ ((green >> 8) << info->var.green.offset) |
++ ((blue >> 8) << info->var.blue.offset);
++ }
++
++ return 0;
++}
++
++static irqreturn_t hdlcd_irq(int irq, void *data)
++{
++ struct hdlcd_device *hdlcd = data;
++ unsigned long irq_mask, irq_status;
++
++ irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK);
++ irq_status = READ_HDLCD_REG(HDLCD_REG_INT_STATUS);
++
++ /* acknowledge interrupt(s) */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_CLEAR, irq_status);
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ if (irq_status & HDLCD_INTERRUPT_UNDERRUN) {
++ /* increment the count */
++ hdlcd_underrun_set(hdlcd_underrun_get() + 1);
++ }
++#endif
++ if (irq_status & HDLCD_INTERRUPT_VSYNC) {
++ /* disable future VSYNC interrupts */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask & ~HDLCD_INTERRUPT_VSYNC);
++
++ complete(&hdlcd->vsync_completion);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int hdlcd_wait_for_vsync(struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ unsigned long irq_mask;
++ int err;
++
++ /* enable VSYNC interrupt */
++ irq_mask = READ_HDLCD_REG(HDLCD_REG_INT_MASK);
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, irq_mask | HDLCD_INTERRUPT_VSYNC);
++
++ err = wait_for_completion_interruptible_timeout(&hdlcd->vsync_completion,
++ msecs_to_jiffies(100));
++
++ if (!err)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++static int hdlcd_blank(int blank_mode, struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++
++ switch (blank_mode) {
++ case FB_BLANK_POWERDOWN:
++ clk_disable(hdlcd->clk);
++ case FB_BLANK_NORMAL:
++ hdlcd_disable(hdlcd);
++ break;
++ case FB_BLANK_UNBLANK:
++ clk_enable(hdlcd->clk);
++ hdlcd_enable(hdlcd);
++ break;
++ case FB_BLANK_VSYNC_SUSPEND:
++ case FB_BLANK_HSYNC_SUSPEND:
++ default:
++ return 1;
++ }
++
++ return 0;
++}
++
++static void hdlcd_mmap_open(struct vm_area_struct *vma)
++{
++}
++
++static void hdlcd_mmap_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct hdlcd_mmap_ops = {
++ .open = hdlcd_mmap_open,
++ .close = hdlcd_mmap_close,
++};
++
++static int hdlcd_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++ unsigned long off;
++ unsigned long start;
++ unsigned long len = hdlcd->fb.fix.smem_len;
++
++ if (vma->vm_end - vma->vm_start == 0)
++ return 0;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++
++ off = vma->vm_pgoff << PAGE_SHIFT;
++ if ((off >= len) || (vma->vm_end - vma->vm_start + off) > len)
++ return -EINVAL;
++
++ start = hdlcd->fb.fix.smem_start;
++ off += start;
++
++ vma->vm_pgoff = off >> PAGE_SHIFT;
++ vma->vm_flags |= VM_IO;
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_ops = &hdlcd_mmap_ops;
++ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static int hdlcd_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct hdlcd_device *hdlcd = to_hdlcd_device(info);
++
++ hdlcd->fb.var.yoffset = var->yoffset;
++ WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start +
++ (var->yoffset * hdlcd->fb.fix.line_length));
++
++ hdlcd_wait_for_vsync(info);
++
++ return 0;
++}
++
++static int hdlcd_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
++{
++ int err;
++
++ switch (cmd) {
++ case FBIO_WAITFORVSYNC:
++ err = hdlcd_wait_for_vsync(info);
++ break;
++ default:
++ err = -ENOIOCTLCMD;
++ break;
++ }
++
++ return err;
++}
++
++static struct fb_ops hdlcd_ops = {
++ .owner = THIS_MODULE,
++ .fb_check_var = hdlcd_check_var,
++ .fb_set_par = hdlcd_set_par,
++ .fb_setcolreg = hdlcd_setcolreg,
++ .fb_blank = hdlcd_blank,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_mmap = hdlcd_mmap,
++ .fb_pan_display = hdlcd_pan_display,
++ .fb_ioctl = hdlcd_ioctl,
++ .fb_compat_ioctl = hdlcd_ioctl
++};
++
++static int hdlcd_setup(struct hdlcd_device *hdlcd)
++{
++ u32 version;
++ int err = -EFAULT;
++
++ hdlcd->fb.device = hdlcd->dev;
++
++ hdlcd->clk = clk_get(hdlcd->dev, NULL);
++ if (IS_ERR(hdlcd->clk)) {
++ dev_err(hdlcd->dev, "HDLCD: unable to find clock data\n");
++ return PTR_ERR(hdlcd->clk);
++ }
++
++ err = clk_prepare(hdlcd->clk);
++ if (err)
++ goto clk_prepare_err;
++
++ hdlcd->base = ioremap_nocache(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len);
++ if (!hdlcd->base) {
++ dev_err(hdlcd->dev, "HDLCD: unable to map registers\n");
++ goto remap_err;
++ }
++
++ hdlcd->fb.pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
++ if (!hdlcd->fb.pseudo_palette) {
++ dev_err(hdlcd->dev, "HDLCD: unable to allocate pseudo_palette memory\n");
++ err = -ENOMEM;
++ goto kmalloc_err;
++ }
++
++ version = readl(hdlcd->base + HDLCD_REG_VERSION);
++ if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
++ dev_err(hdlcd->dev, "HDLCD: unknown product id: 0x%x\n", version);
++ err = -EINVAL;
++ goto kmalloc_err;
++ }
++ dev_info(hdlcd->dev, "HDLCD: found ARM HDLCD version r%dp%d\n",
++ (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
++ version & HDLCD_VERSION_MINOR_MASK);
++
++ strcpy(hdlcd->fb.fix.id, "hdlcd");
++ hdlcd->fb.fbops = &hdlcd_ops;
++ hdlcd->fb.flags = FBINFO_FLAG_DEFAULT/* | FBINFO_VIRTFB*/;
++
++ hdlcd->fb.fix.type = FB_TYPE_PACKED_PIXELS;
++ hdlcd->fb.fix.type_aux = 0;
++ hdlcd->fb.fix.xpanstep = 0;
++ hdlcd->fb.fix.ypanstep = 1;
++ hdlcd->fb.fix.ywrapstep = 0;
++ hdlcd->fb.fix.accel = FB_ACCEL_NONE;
++
++ hdlcd->fb.var.nonstd = 0;
++ hdlcd->fb.var.activate = FB_ACTIVATE_NOW;
++ hdlcd->fb.var.height = -1;
++ hdlcd->fb.var.width = -1;
++ hdlcd->fb.var.accel_flags = 0;
++
++ init_completion(&hdlcd->vsync_completion);
++
++ if (hdlcd->edid) {
++ /* build modedb from EDID */
++ fb_edid_to_monspecs(hdlcd->edid, &hdlcd->fb.monspecs);
++ fb_videomode_to_modelist(hdlcd->fb.monspecs.modedb,
++ hdlcd->fb.monspecs.modedb_len,
++ &hdlcd->fb.modelist);
++ fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode,
++ hdlcd->fb.monspecs.modedb,
++ hdlcd->fb.monspecs.modedb_len,
++ &hdlcd_default_mode, 32);
++ } else {
++ hdlcd->fb.monspecs.hfmin = 0;
++ hdlcd->fb.monspecs.hfmax = 100000;
++ hdlcd->fb.monspecs.vfmin = 0;
++ hdlcd->fb.monspecs.vfmax = 400;
++ hdlcd->fb.monspecs.dclkmin = 1000000;
++ hdlcd->fb.monspecs.dclkmax = 100000000;
++ fb_find_mode(&hdlcd->fb.var, &hdlcd->fb, fb_mode, NULL, 0, &hdlcd_default_mode, 32);
++ }
++
++ dev_info(hdlcd->dev, "using %dx%d-%d@%d mode\n", hdlcd->fb.var.xres,
++ hdlcd->fb.var.yres, hdlcd->fb.var.bits_per_pixel,
++ hdlcd->fb.mode ? hdlcd->fb.mode->refresh : 60);
++ hdlcd->fb.var.xres_virtual = hdlcd->fb.var.xres;
++#ifdef HDLCD_NO_VIRTUAL_SCREEN
++ hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres;
++#else
++ hdlcd->fb.var.yres_virtual = hdlcd->fb.var.yres * 2;
++#endif
++
++ /* initialise and set the palette */
++ if (fb_alloc_cmap(&hdlcd->fb.cmap, NR_PALETTE, 0)) {
++ dev_err(hdlcd->dev, "failed to allocate cmap memory\n");
++ err = -ENOMEM;
++ goto setup_err;
++ }
++ fb_set_cmap(&hdlcd->fb.cmap, &hdlcd->fb);
++
++ /* Allow max number of outstanding requests with the largest beat burst */
++ WRITE_HDLCD_REG(HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
++ /* Set the framebuffer base to start of allocated memory */
++ WRITE_HDLCD_REG(HDLCD_REG_FB_BASE, hdlcd->fb.fix.smem_start);
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ /* turn on underrun interrupt for counting */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, HDLCD_INTERRUPT_UNDERRUN);
++#else
++ /* Ensure interrupts are disabled */
++ WRITE_HDLCD_REG(HDLCD_REG_INT_MASK, 0);
++#endif
++ fb_set_var(&hdlcd->fb, &hdlcd->fb.var);
++
++ if (!register_framebuffer(&hdlcd->fb)) {
++ return 0;
++ }
++
++ dev_err(hdlcd->dev, "HDLCD: cannot register framebuffer\n");
++
++ fb_dealloc_cmap(&hdlcd->fb.cmap);
++setup_err:
++ iounmap(hdlcd->base);
++kmalloc_err:
++ kfree(hdlcd->fb.pseudo_palette);
++remap_err:
++ clk_unprepare(hdlcd->clk);
++clk_prepare_err:
++ clk_put(hdlcd->clk);
++ return err;
++}
++
++static inline unsigned char atohex(u8 data)
++{
++ if (!isxdigit(data))
++ return 0;
++ /* truncate the upper nibble and add 9 to non-digit values */
++ return (data > 0x39) ? ((data & 0xf) + 9) : (data & 0xf);
++}
++
++/* EDID data is passed from the device tree as a literal string containing a
++   hexadecimal dump of the data, possibly interspersed with whitespace. */
++static int parse_edid_data(struct hdlcd_device *hdlcd, const u8 *edid_data, int data_len)
++{
++ int i, j;
++
++ if (!edid_data)
++ return -EINVAL;
++
++ hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL);
++ if (!hdlcd->edid)
++ return -ENOMEM;
++
++ for (i = 0, j = 0; i < data_len; i++) {
++ if (isspace(edid_data[i]))
++ continue;
++ hdlcd->edid[j++] = atohex(edid_data[i]);
++ if (j >= EDID_LENGTH)
++ break;
++ }
++
++ if (j < EDID_LENGTH) {
++ kfree(hdlcd->edid);
++ hdlcd->edid = NULL;
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int hdlcd_probe(struct platform_device *pdev)
++{
++ int err = 0, i;
++ struct hdlcd_device *hdlcd;
++ struct resource *mem;
++#ifdef CONFIG_OF
++ struct device_node *of_node;
++#endif
++
++ memset(&cached_var_screeninfo, 0, sizeof(struct fb_var_screeninfo));
++
++ dev_dbg(&pdev->dev, "HDLCD: probing\n");
++
++ hdlcd = kzalloc(sizeof(*hdlcd), GFP_KERNEL);
++ if (!hdlcd)
++ return -ENOMEM;
++
++#ifdef CONFIG_OF
++ of_node = pdev->dev.of_node;
++ if (of_node) {
++ int len;
++ const u8 *edid;
++ const __be32 *prop = of_get_property(of_node, "mode", &len);
++ if (prop)
++ strncpy(fb_mode, (char *)prop, len);
++ prop = of_get_property(of_node, "framebuffer", &len);
++ if (prop) {
++ hdlcd->fb.fix.smem_start = of_read_ulong(prop,
++ of_n_addr_cells(of_node));
++ prop += of_n_addr_cells(of_node);
++ framebuffer_size = of_read_ulong(prop,
++ of_n_size_cells(of_node));
++ if (framebuffer_size > HDLCD_MAX_FRAMEBUFFER_SIZE)
++ framebuffer_size = HDLCD_MAX_FRAMEBUFFER_SIZE;
++ dev_dbg(&pdev->dev, "HDLCD: phys_addr = 0x%lx, size = 0x%lx\n",
++ hdlcd->fb.fix.smem_start, framebuffer_size);
++ }
++ edid = of_get_property(of_node, "edid", &len);
++ if (edid) {
++ err = parse_edid_data(hdlcd, edid, len);
++#ifdef CONFIG_SERIAL_AMBA_PCU_UART
++ } else {
++ /* ask the firmware to fetch the EDID */
++ dev_dbg(&pdev->dev, "HDLCD: Requesting EDID data\n");
++ hdlcd->edid = kzalloc(EDID_LENGTH, GFP_KERNEL);
++ if (!hdlcd->edid)
++ return -ENOMEM;
++ err = get_edid(hdlcd->edid);
++#endif /* CONFIG_SERIAL_AMBA_PCU_UART */
++ }
++ if (err)
++ dev_info(&pdev->dev, "HDLCD: Failed to parse EDID data\n");
++ }
++#endif /* CONFIG_OF */
++
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!mem) {
++ dev_err(&pdev->dev, "HDLCD: cannot get platform resources\n");
++ err = -EINVAL;
++ goto resource_err;
++ }
++
++ i = platform_get_irq(pdev, 0);
++ if (i < 0) {
++ dev_err(&pdev->dev, "HDLCD: no irq defined for vsync\n");
++ err = -ENOENT;
++ goto resource_err;
++ } else {
++ err = request_irq(i, hdlcd_irq, 0, dev_name(&pdev->dev), hdlcd);
++ if (err) {
++ dev_err(&pdev->dev, "HDLCD: unable to request irq\n");
++ goto resource_err;
++ }
++ hdlcd->irq = i;
++ }
++
++ if (!request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev))) {
++ err = -ENXIO;
++ goto request_err;
++ }
++
++ if (!hdlcd->fb.fix.smem_start) {
++ dev_err(&pdev->dev, "platform did not allocate frame buffer memory\n");
++ err = -ENOMEM;
++ goto memalloc_err;
++ }
++ hdlcd->fb.screen_base = ioremap_wc(hdlcd->fb.fix.smem_start, framebuffer_size);
++ if (!hdlcd->fb.screen_base) {
++ dev_err(&pdev->dev, "unable to ioremap framebuffer\n");
++ err = -ENOMEM;
++ goto probe_err;
++ }
++
++ hdlcd->fb.screen_size = framebuffer_size;
++ hdlcd->fb.fix.smem_len = framebuffer_size;
++ hdlcd->fb.fix.mmio_start = mem->start;
++ hdlcd->fb.fix.mmio_len = resource_size(mem);
++
++ /* Clear the framebuffer */
++ memset(hdlcd->fb.screen_base, 0, framebuffer_size);
++
++ hdlcd->dev = &pdev->dev;
++
++ dev_dbg(&pdev->dev, "HDLCD: framebuffer virt base %p, phys base 0x%lX\n",
++ hdlcd->fb.screen_base, (unsigned long)hdlcd->fb.fix.smem_start);
++
++ err = hdlcd_setup(hdlcd);
++
++ if (err)
++ goto probe_err;
++
++ platform_set_drvdata(pdev, hdlcd);
++ return 0;
++
++probe_err:
++ iounmap(hdlcd->fb.screen_base);
++	memblock_free(hdlcd->fb.fix.smem_start, framebuffer_size);
++
++memalloc_err:
++ release_mem_region(mem->start, resource_size(mem));
++
++request_err:
++ free_irq(hdlcd->irq, hdlcd);
++
++resource_err:
++ kfree(hdlcd);
++
++ return err;
++}
++
++static int hdlcd_remove(struct platform_device *pdev)
++{
++ struct hdlcd_device *hdlcd = platform_get_drvdata(pdev);
++
++ clk_disable(hdlcd->clk);
++ clk_unprepare(hdlcd->clk);
++ clk_put(hdlcd->clk);
++
++ /* unmap memory */
++ iounmap(hdlcd->fb.screen_base);
++ iounmap(hdlcd->base);
++
++ /* deallocate fb memory */
++ fb_dealloc_cmap(&hdlcd->fb.cmap);
++ kfree(hdlcd->fb.pseudo_palette);
++	memblock_free(hdlcd->fb.fix.smem_start, hdlcd->fb.fix.smem_len);
++ release_mem_region(hdlcd->fb.fix.mmio_start, hdlcd->fb.fix.mmio_len);
++
++	free_irq(hdlcd->irq, hdlcd);
++ kfree(hdlcd);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int hdlcd_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ /* not implemented yet */
++ return 0;
++}
++
++static int hdlcd_resume(struct platform_device *pdev)
++{
++ /* not implemented yet */
++ return 0;
++}
++#else
++#define hdlcd_suspend NULL
++#define hdlcd_resume NULL
++#endif
++
++static struct platform_driver hdlcd_driver = {
++ .probe = hdlcd_probe,
++ .remove = hdlcd_remove,
++ .suspend = hdlcd_suspend,
++ .resume = hdlcd_resume,
++ .driver = {
++ .name = "hdlcd",
++ .owner = THIS_MODULE,
++ .of_match_table = hdlcd_of_matches,
++ },
++};
++
++static int __init hdlcd_init(void)
++{
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ int err = platform_driver_register(&hdlcd_driver);
++ if (!err)
++ hdlcd_underrun_init();
++ return err;
++#else
++ return platform_driver_register(&hdlcd_driver);
++#endif
++}
++
++void __exit hdlcd_exit(void)
++{
++#ifdef HDLCD_COUNT_BUFFERUNDERRUNS
++ hdlcd_underrun_close();
++#endif
++ platform_driver_unregister(&hdlcd_driver);
++}
++
++module_init(hdlcd_init);
++module_exit(hdlcd_exit);
++
++MODULE_AUTHOR("Liviu Dudau");
++MODULE_DESCRIPTION("ARM HDLCD core driver");
++MODULE_LICENSE("GPL v2");
+diff -Nur linux-3.14.40.orig/drivers/video/backlight/backlight.c linux-3.14.40/drivers/video/backlight/backlight.c
+--- linux-3.14.40.orig/drivers/video/backlight/backlight.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/video/backlight/backlight.c 2015-05-01 14:58:05.687427001 -0500
+@@ -41,6 +41,8 @@
+ {
+ struct backlight_device *bd;
+ struct fb_event *evdata = data;
++ int node = evdata->info->node;
++ int fb_blank = 0;
+
+ /* If we aren't interested in this event, skip it immediately ... */
+ if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK)
+@@ -51,12 +53,24 @@
+ if (bd->ops)
+ if (!bd->ops->check_fb ||
+ bd->ops->check_fb(bd, evdata->info)) {
+- bd->props.fb_blank = *(int *)evdata->data;
+- if (bd->props.fb_blank == FB_BLANK_UNBLANK)
+- bd->props.state &= ~BL_CORE_FBBLANK;
+- else
+- bd->props.state |= BL_CORE_FBBLANK;
+- backlight_update_status(bd);
++ fb_blank = *(int *)evdata->data;
++ if (fb_blank == FB_BLANK_UNBLANK &&
++ !bd->fb_bl_on[node]) {
++ bd->fb_bl_on[node] = true;
++ if (!bd->use_count++) {
++ bd->props.state &= ~BL_CORE_FBBLANK;
++ bd->props.fb_blank = FB_BLANK_UNBLANK;
++ backlight_update_status(bd);
++ }
++ } else if (fb_blank != FB_BLANK_UNBLANK &&
++ bd->fb_bl_on[node]) {
++ bd->fb_bl_on[node] = false;
++ if (!(--bd->use_count)) {
++ bd->props.state |= BL_CORE_FBBLANK;
++ bd->props.fb_blank = FB_BLANK_POWERDOWN;
++ backlight_update_status(bd);
++ }
++ }
+ }
+ mutex_unlock(&bd->ops_lock);
+ return 0;
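The effect of the change above is to reference-count unblank events per framebuffer node, so the backlight is switched off only once every framebuffer that unblanked it has blanked again. The snippet below is a self-contained sketch of that bookkeeping with invented names (it assumes, as the patch does elsewhere, that the backlight device grows fb_bl_on[] and use_count fields); it is an illustration only, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_FB 8

    struct bl { bool fb_on[MAX_FB]; int use_count; bool powered; };

    /* Mirror of the notifier logic: power on at the first unblanked fb,
     * power off only when the last previously-unblanked fb blanks again. */
    static void fb_blank_event(struct bl *bl, int node, bool unblank)
    {
    	if (unblank && !bl->fb_on[node]) {
    		bl->fb_on[node] = true;
    		if (!bl->use_count++)
    			bl->powered = true;
    	} else if (!unblank && bl->fb_on[node]) {
    		bl->fb_on[node] = false;
    		if (!--bl->use_count)
    			bl->powered = false;
    	}
    }

    int main(void)
    {
    	struct bl bl = { 0 };
    	fb_blank_event(&bl, 0, true);   /* fb0 unblanks -> backlight on  */
    	fb_blank_event(&bl, 1, true);   /* fb1 unblanks -> still on      */
    	fb_blank_event(&bl, 0, false);  /* fb0 blanks   -> still on      */
    	fb_blank_event(&bl, 1, false);  /* fb1 blanks   -> backlight off */
    	printf("powered = %d\n", bl.powered);
    	return 0;
    }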
+diff -Nur linux-3.14.40.orig/drivers/video/Kconfig linux-3.14.40/drivers/video/Kconfig
+--- linux-3.14.40.orig/drivers/video/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/video/Kconfig 2015-05-01 14:58:05.707427001 -0500
+@@ -39,6 +39,11 @@
+ config HDMI
+ bool
+
++config VEXPRESS_DVI_CONTROL
++ bool "Versatile Express DVI control"
++ depends on FB && VEXPRESS_CONFIG
++ default y
++
+ menuconfig FB
+ tristate "Support for frame buffer devices"
+ ---help---
+@@ -327,6 +332,21 @@
+ here and read <file:Documentation/kbuild/modules.txt>. The module
+ will be called amba-clcd.
+
++config FB_ARMHDLCD
++ tristate "ARM High Definition LCD support"
++ depends on FB && ARM
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ help
++ This framebuffer device driver is for the ARM High Definition
++ Colour LCD controller.
++
++ If you want to compile this as a module (=code which can be
++ inserted into and removed from the running kernel), say M
++ here and read <file:Documentation/kbuild/modules.txt>. The module
++ will be called arm-hdlcd.
++
+ config FB_ACORN
+ bool "Acorn VIDC support"
+ depends on (FB = y) && ARM && ARCH_ACORN
+@@ -2491,6 +2511,10 @@
+ source "drivers/video/mmp/Kconfig"
+ source "drivers/video/backlight/Kconfig"
+
++if ARCH_MXC
++source "drivers/video/mxc/Kconfig"
++endif
++
+ if VT
+ source "drivers/video/console/Kconfig"
+ endif
+diff -Nur linux-3.14.40.orig/drivers/video/Makefile linux-3.14.40/drivers/video/Makefile
+--- linux-3.14.40.orig/drivers/video/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/video/Makefile 2015-05-01 14:58:05.723427001 -0500
+@@ -53,6 +53,7 @@
+ obj-$(CONFIG_FB_SAVAGE) += savage/
+ obj-$(CONFIG_FB_GEODE) += geode/
+ obj-$(CONFIG_FB_MBX) += mbx/
++obj-$(CONFIG_FB_MXC) += mxc/
+ obj-$(CONFIG_FB_NEOMAGIC) += neofb.o
+ obj-$(CONFIG_FB_3DFX) += tdfxfb.o
+ obj-$(CONFIG_FB_CONTROL) += controlfb.o
+@@ -99,6 +100,7 @@
+ obj-$(CONFIG_FB_PVR2) += pvr2fb.o
+ obj-$(CONFIG_FB_VOODOO1) += sstfb.o
+ obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
++obj-$(CONFIG_FB_ARMHDLCD) += arm-hdlcd.o
+ obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
+ obj-$(CONFIG_FB_68328) += 68328fb.o
+ obj-$(CONFIG_FB_GBE) += gbefb.o
+@@ -178,3 +180,6 @@
+ ifeq ($(CONFIG_OF),y)
+ obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
+ endif
++
++# platform specific output drivers
++obj-$(CONFIG_VEXPRESS_DVI_CONTROL) += vexpress-dvi.o
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/Kconfig linux-3.14.40/drivers/video/mxc/Kconfig
+--- linux-3.14.40.orig/drivers/video/mxc/Kconfig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/Kconfig 2015-05-01 14:58:05.723427001 -0500
+@@ -0,0 +1,48 @@
++config FB_MXC
++ tristate "MXC Framebuffer support"
++ depends on FB
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ select FB_MODE_HELPERS
++ default y
++ help
++ This is a framebuffer device for the MXC LCD Controller.
++ See <http://www.linux-fbdev.org/> for information on framebuffer
++ devices.
++
++ If you plan to use the LCD display with your MXC system, say
++ Y here.
++
++config FB_MXC_SYNC_PANEL
++ depends on FB_MXC
++ tristate "Synchronous Panel Framebuffer"
++
++config FB_MXC_LDB
++ tristate "MXC LDB"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3
++
++config FB_MXC_MIPI_DSI
++ tristate "MXC MIPI_DSI"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3
++
++config FB_MXC_TRULY_WVGA_SYNC_PANEL
++ tristate "TRULY WVGA Panel"
++ depends on FB_MXC_SYNC_PANEL
++ depends on FB_MXC_MIPI_DSI
++
++config FB_MXC_HDMI
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3
++ depends on I2C
++ tristate "MXC HDMI driver support"
++ select MFD_MXC_HDMI
++ help
++ Driver for the on-chip MXC HDMI controller.
++
++config FB_MXC_EDID
++ depends on FB_MXC && I2C
++ tristate "MXC EDID support"
++ default y
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/ldb.c linux-3.14.40/drivers/video/mxc/ldb.c
+--- linux-3.14.40.orig/drivers/video/mxc/ldb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/ldb.c 2015-05-01 14:58:05.727427001 -0500
+@@ -0,0 +1,1036 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file mxc_ldb.c
++ *
++ * @brief This file contains the LDB driver device interface and fops
++ * functions.
++ */
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/ipu.h>
++#include <linux/mxcfb.h>
++#include <linux/regulator/consumer.h>
++#include <linux/spinlock.h>
++#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
++#include "mxc_dispdrv.h"
++
++#define DISPDRV_LDB "ldb"
++
++#define LDB_BGREF_RMODE_MASK 0x00008000
++#define LDB_BGREF_RMODE_INT 0x00008000
++#define LDB_BGREF_RMODE_EXT 0x0
++
++#define LDB_DI1_VS_POL_MASK 0x00000400
++#define LDB_DI1_VS_POL_ACT_LOW 0x00000400
++#define LDB_DI1_VS_POL_ACT_HIGH 0x0
++#define LDB_DI0_VS_POL_MASK 0x00000200
++#define LDB_DI0_VS_POL_ACT_LOW 0x00000200
++#define LDB_DI0_VS_POL_ACT_HIGH 0x0
++
++#define LDB_BIT_MAP_CH1_MASK 0x00000100
++#define LDB_BIT_MAP_CH1_JEIDA 0x00000100
++#define LDB_BIT_MAP_CH1_SPWG 0x0
++#define LDB_BIT_MAP_CH0_MASK 0x00000040
++#define LDB_BIT_MAP_CH0_JEIDA 0x00000040
++#define LDB_BIT_MAP_CH0_SPWG 0x0
++
++#define LDB_DATA_WIDTH_CH1_MASK 0x00000080
++#define LDB_DATA_WIDTH_CH1_24 0x00000080
++#define LDB_DATA_WIDTH_CH1_18 0x0
++#define LDB_DATA_WIDTH_CH0_MASK 0x00000020
++#define LDB_DATA_WIDTH_CH0_24 0x00000020
++#define LDB_DATA_WIDTH_CH0_18 0x0
++
++#define LDB_CH1_MODE_MASK 0x0000000C
++#define LDB_CH1_MODE_EN_TO_DI1 0x0000000C
++#define LDB_CH1_MODE_EN_TO_DI0 0x00000004
++#define LDB_CH1_MODE_DISABLE 0x0
++#define LDB_CH0_MODE_MASK 0x00000003
++#define LDB_CH0_MODE_EN_TO_DI1 0x00000003
++#define LDB_CH0_MODE_EN_TO_DI0 0x00000001
++#define LDB_CH0_MODE_DISABLE 0x0
++
++#define LDB_SPLIT_MODE_EN 0x00000010
++
++enum {
++ IMX6_LDB,
++};
++
++enum {
++ LDB_IMX6 = 1,
++};
++
++struct fsl_mxc_ldb_platform_data {
++ int devtype;
++ u32 ext_ref;
++#define LDB_SPL_DI0 1
++#define LDB_SPL_DI1 2
++#define LDB_DUL_DI0 3
++#define LDB_DUL_DI1 4
++#define LDB_SIN0 5
++#define LDB_SIN1 6
++#define LDB_SEP0 7
++#define LDB_SEP1 8
++ int mode;
++ int ipu_id;
++ int disp_id;
++
++	/* only used in separate mode */
++ int sec_ipu_id;
++ int sec_disp_id;
++};
++
++struct ldb_data {
++ struct platform_device *pdev;
++ struct mxc_dispdrv_handle *disp_ldb;
++ uint32_t *reg;
++ uint32_t *control_reg;
++ uint32_t *gpr3_reg;
++ uint32_t control_reg_data;
++ struct regulator *lvds_bg_reg;
++ int mode;
++ bool inited;
++ struct ldb_setting {
++ struct clk *di_clk;
++ struct clk *ldb_di_clk;
++ struct clk *div_3_5_clk;
++ struct clk *div_7_clk;
++ struct clk *div_sel_clk;
++ bool active;
++ bool clk_en;
++ int ipu;
++ int di;
++ uint32_t ch_mask;
++ uint32_t ch_val;
++ } setting[2];
++ struct notifier_block nb;
++};
++
++static int g_ldb_mode;
++
++static struct fb_videomode ldb_modedb[] = {
++ {
++ "LDB-WXGA", 60, 1280, 800, 14065,
++ 40, 40,
++ 10, 3,
++ 80, 10,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-XGA", 60, 1024, 768, 15385,
++ 220, 40,
++ 21, 7,
++ 60, 10,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-1080P60", 60, 1920, 1080, 7692,
++ 100, 40,
++ 30, 3,
++ 10, 2,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++};
++static int ldb_modedb_sz = ARRAY_SIZE(ldb_modedb);
++
++static inline int is_imx6_ldb(struct fsl_mxc_ldb_platform_data *plat_data)
++{
++ return (plat_data->devtype == LDB_IMX6);
++}
++
++static int bits_per_pixel(int pixel_fmt)
++{
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ return 24;
++ break;
++ case IPU_PIX_FMT_BGR666:
++ case IPU_PIX_FMT_RGB666:
++ case IPU_PIX_FMT_LVDS666:
++ return 18;
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static int valid_mode(int pixel_fmt)
++{
++ return ((pixel_fmt == IPU_PIX_FMT_RGB24) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR24) ||
++ (pixel_fmt == IPU_PIX_FMT_LVDS666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB666) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR666));
++}
++
++static int parse_ldb_mode(char *mode)
++{
++ int ldb_mode;
++
++ if (!strcmp(mode, "spl0"))
++ ldb_mode = LDB_SPL_DI0;
++ else if (!strcmp(mode, "spl1"))
++ ldb_mode = LDB_SPL_DI1;
++ else if (!strcmp(mode, "dul0"))
++ ldb_mode = LDB_DUL_DI0;
++ else if (!strcmp(mode, "dul1"))
++ ldb_mode = LDB_DUL_DI1;
++ else if (!strcmp(mode, "sin0"))
++ ldb_mode = LDB_SIN0;
++ else if (!strcmp(mode, "sin1"))
++ ldb_mode = LDB_SIN1;
++ else if (!strcmp(mode, "sep0"))
++ ldb_mode = LDB_SEP0;
++ else if (!strcmp(mode, "sep1"))
++ ldb_mode = LDB_SEP1;
++ else
++ ldb_mode = -EINVAL;
++
++ return ldb_mode;
++}
++
++#ifndef MODULE
++/*
++ * "ldb=spl0/1" -- split mode on DI0/1
++ * "ldb=dul0/1" -- dual mode on DI0/1
++ * "ldb=sin0/1" -- single mode on LVDS0/1
++ * "ldb=sep0/1" -- separate mode beginning from LVDS0/1
++ *
++ * There are two LVDS channels (LVDS0 and LVDS1) that can carry video
++ * data; these two channels can be used in split/dual/single/separate mode.
++ *
++ * Split mode means display data from DI0 or DI1 is sent to both channels,
++ * LVDS0+LVDS1.
++ * Dual mode means display data from DI0 or DI1 is duplicated on LVDS0
++ * and LVDS1, i.e. LVDS0 and LVDS1 carry the same content.
++ * Single mode means only DI0/DI1->LVDS0 or DI0/DI1->LVDS1 is used.
++ * Separate mode means DI0/DI1->LVDS0 and DI0/DI1->LVDS1 can work
++ * at the same time.
++ */
++static int __init ldb_setup(char *options)
++{
++ g_ldb_mode = parse_ldb_mode(options);
++ return (g_ldb_mode < 0) ? 0 : 1;
++}
++__setup("ldb=", ldb_setup);
++#endif
++
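As a usage example, booting with ldb=sep0 on the kernel command line selects separate mode starting from LVDS0 (each DI drives its own LVDS channel), while ldb=dul0 duplicates DI0's data onto both LVDS0 and LVDS1. An unrecognised value makes parse_ldb_mode() return -EINVAL and the setting is effectively ignored.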
++static int ldb_get_of_property(struct platform_device *pdev,
++ struct fsl_mxc_ldb_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ u32 ipu_id, disp_id;
++ u32 sec_ipu_id, sec_disp_id;
++ char *mode;
++ u32 ext_ref;
++
++ err = of_property_read_string(np, "mode", (const char **)&mode);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property mode fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ext_ref", &ext_ref);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ext_ref fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "sec_ipu_id", &sec_ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property sec_ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "sec_disp_id", &sec_disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property sec_disp_id fail\n");
++ return err;
++ }
++
++ plat_data->mode = parse_ldb_mode(mode);
++ plat_data->ext_ref = ext_ref;
++ plat_data->ipu_id = ipu_id;
++ plat_data->disp_id = disp_id;
++ plat_data->sec_ipu_id = sec_ipu_id;
++ plat_data->sec_disp_id = sec_disp_id;
++
++ return err;
++}
++
++static int find_ldb_setting(struct ldb_data *ldb, struct fb_info *fbi)
++{
++ char *id_di[] = {
++ "DISP3 BG",
++ "DISP3 BG - DI1",
++ };
++ char id[16];
++ int i;
++
++ for (i = 0; i < 2; i++) {
++ if (ldb->setting[i].active) {
++ memset(id, 0, 16);
++ memcpy(id, id_di[ldb->setting[i].di],
++ strlen(id_di[ldb->setting[i].di]));
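++			/* patch the IPU number into the fb id string,
++			 * e.g. "DISP3 BG" becomes "DISP4 BG" for IPU1 */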
++ id[4] += ldb->setting[i].ipu;
++ if (!strcmp(id, fbi->fix.id))
++ return i;
++ }
++ }
++ return -EINVAL;
++}
++
++static int ldb_disp_setup(struct mxc_dispdrv_handle *disp, struct fb_info *fbi)
++{
++ uint32_t reg, val;
++ uint32_t pixel_clk, rounded_pixel_clk;
++ struct clk *ldb_clk_parent;
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int setting_idx, di;
++ int ret;
++
++ setting_idx = find_ldb_setting(ldb, fbi);
++ if (setting_idx < 0)
++ return setting_idx;
++
++ di = ldb->setting[setting_idx].di;
++
++ /* restore channel mode setting */
++ val = readl(ldb->control_reg);
++ val |= ldb->setting[setting_idx].ch_val;
++ writel(val, ldb->control_reg);
++ dev_dbg(&ldb->pdev->dev, "LDB setup, control reg:0x%x\n",
++ readl(ldb->control_reg));
++
++ /* vsync setup */
++ reg = readl(ldb->control_reg);
++ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) {
++ if (di == 0)
++ reg = (reg & ~LDB_DI0_VS_POL_MASK)
++ | LDB_DI0_VS_POL_ACT_HIGH;
++ else
++ reg = (reg & ~LDB_DI1_VS_POL_MASK)
++ | LDB_DI1_VS_POL_ACT_HIGH;
++ } else {
++ if (di == 0)
++ reg = (reg & ~LDB_DI0_VS_POL_MASK)
++ | LDB_DI0_VS_POL_ACT_LOW;
++ else
++ reg = (reg & ~LDB_DI1_VS_POL_MASK)
++ | LDB_DI1_VS_POL_ACT_LOW;
++ }
++ writel(reg, ldb->control_reg);
++
++ /* clk setup */
++ if (ldb->setting[setting_idx].clk_en)
++ clk_disable_unprepare(ldb->setting[setting_idx].ldb_di_clk);
++ pixel_clk = (PICOS2KHZ(fbi->var.pixclock)) * 1000UL;
++ ldb_clk_parent = clk_get_parent(ldb->setting[setting_idx].ldb_di_clk);
++ if (IS_ERR(ldb_clk_parent)) {
++ dev_err(&ldb->pdev->dev, "get ldb di parent clk fail\n");
++ return PTR_ERR(ldb_clk_parent);
++ }
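++	/* the LVDS serial clock runs at 7x the pixel clock; in split mode each
++	 * channel carries half of the pixels, so the parent only needs 3.5x */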
++ if ((ldb->mode == LDB_SPL_DI0) || (ldb->mode == LDB_SPL_DI1))
++ ret = clk_set_rate(ldb_clk_parent, pixel_clk * 7 / 2);
++ else
++ ret = clk_set_rate(ldb_clk_parent, pixel_clk * 7);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "set ldb parent clk fail:%d\n", ret);
++ return ret;
++ }
++ rounded_pixel_clk = clk_round_rate(ldb->setting[setting_idx].ldb_di_clk,
++ pixel_clk);
++ dev_dbg(&ldb->pdev->dev, "pixel_clk:%d, rounded_pixel_clk:%d\n",
++ pixel_clk, rounded_pixel_clk);
++ ret = clk_set_rate(ldb->setting[setting_idx].ldb_di_clk,
++ rounded_pixel_clk);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "set ldb di clk fail:%d\n", ret);
++ return ret;
++ }
++ ret = clk_prepare_enable(ldb->setting[setting_idx].ldb_di_clk);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "enable ldb di clk fail:%d\n", ret);
++ return ret;
++ }
++
++ if (!ldb->setting[setting_idx].clk_en)
++ ldb->setting[setting_idx].clk_en = true;
++
++ return 0;
++}
++
++int ldb_fb_event(struct notifier_block *nb, unsigned long val, void *v)
++{
++ struct ldb_data *ldb = container_of(nb, struct ldb_data, nb);
++ struct fb_event *event = v;
++ struct fb_info *fbi = event->info;
++ int index;
++ uint32_t data;
++
++ index = find_ldb_setting(ldb, fbi);
++ if (index < 0)
++ return 0;
++
++ fbi->mode = (struct fb_videomode *)fb_match_mode(&fbi->var,
++ &fbi->modelist);
++
++ if (!fbi->mode) {
++ dev_warn(&ldb->pdev->dev,
++ "LDB: can not find mode for xres=%d, yres=%d\n",
++ fbi->var.xres, fbi->var.yres);
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ data = readl(ldb->control_reg);
++ data &= ~ldb->setting[index].ch_mask;
++ writel(data, ldb->control_reg);
++ }
++ return 0;
++ }
++
++ switch (val) {
++ case FB_EVENT_BLANK:
++ {
++ if (*((int *)event->data) == FB_BLANK_UNBLANK) {
++ if (!ldb->setting[index].clk_en) {
++ clk_enable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = true;
++ }
++ } else {
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ data = readl(ldb->control_reg);
++ data &= ~ldb->setting[index].ch_mask;
++ writel(data, ldb->control_reg);
++ dev_dbg(&ldb->pdev->dev,
++ "LDB blank, control reg:0x%x\n",
++ readl(ldb->control_reg));
++ }
++ }
++ break;
++ }
++ case FB_EVENT_SUSPEND:
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ }
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++#define LVDS_MUX_CTL_WIDTH 2
++#define LVDS_MUX_CTL_MASK 3
++#define LVDS0_MUX_CTL_OFFS 6
++#define LVDS1_MUX_CTL_OFFS 8
++#define LVDS0_MUX_CTL_MASK (LVDS_MUX_CTL_MASK << 6)
++#define LVDS1_MUX_CTL_MASK (LVDS_MUX_CTL_MASK << 8)
++#define ROUTE_IPU_DI(ipu, di) (((ipu << 1) | di) & LVDS_MUX_CTL_MASK)
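++/* Each LVDS channel has a 2-bit mux field in IOMUXC GPR3 (bits 6-7 for
++ * LVDS0, bits 8-9 for LVDS1) selecting which IPU/DI pair drives it; the
++ * field value is (ipu << 1) | di, as encoded by ROUTE_IPU_DI().
++ */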
++static int ldb_ipu_ldb_route(int ipu, int di, struct ldb_data *ldb)
++{
++ uint32_t reg;
++ int channel;
++ int shift;
++ int mode = ldb->mode;
++
++ reg = readl(ldb->gpr3_reg);
++ if (mode < LDB_SIN0) {
++ reg &= ~(LVDS0_MUX_CTL_MASK | LVDS1_MUX_CTL_MASK);
++ reg |= (ROUTE_IPU_DI(ipu, di) << LVDS0_MUX_CTL_OFFS) |
++ (ROUTE_IPU_DI(ipu, di) << LVDS1_MUX_CTL_OFFS);
++ dev_dbg(&ldb->pdev->dev,
++ "Dual/Split mode both channels route to IPU%d-DI%d\n",
++ ipu, di);
++ } else if ((mode == LDB_SIN0) || (mode == LDB_SIN1)) {
++ reg &= ~(LVDS0_MUX_CTL_MASK | LVDS1_MUX_CTL_MASK);
++ channel = mode - LDB_SIN0;
++ shift = LVDS0_MUX_CTL_OFFS + channel * LVDS_MUX_CTL_WIDTH;
++ reg |= ROUTE_IPU_DI(ipu, di) << shift;
++ dev_dbg(&ldb->pdev->dev,
++ "Single mode channel %d route to IPU%d-DI%d\n",
++ channel, ipu, di);
++ } else {
++ static bool first = true;
++
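++		/* in separate mode this runs once per display: the first call
++		 * configures the channel named by the mode (LVDS0 for sep0,
++		 * LVDS1 for sep1), the second call configures the other one */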
++ if (first) {
++ if (mode == LDB_SEP0) {
++ reg &= ~LVDS0_MUX_CTL_MASK;
++ channel = 0;
++ } else {
++ reg &= ~LVDS1_MUX_CTL_MASK;
++ channel = 1;
++ }
++ first = false;
++ } else {
++ if (mode == LDB_SEP0) {
++ reg &= ~LVDS1_MUX_CTL_MASK;
++ channel = 1;
++ } else {
++ reg &= ~LVDS0_MUX_CTL_MASK;
++ channel = 0;
++ }
++ }
++
++ shift = LVDS0_MUX_CTL_OFFS + channel * LVDS_MUX_CTL_WIDTH;
++ reg |= ROUTE_IPU_DI(ipu, di) << shift;
++
++ dev_dbg(&ldb->pdev->dev,
++ "Separate mode channel %d route to IPU%d-DI%d\n",
++ channel, ipu, di);
++ }
++ writel(reg, ldb->gpr3_reg);
++
++ return 0;
++}
++
++static int ldb_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret = 0, i, lvds_channel = 0;
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ struct fsl_mxc_ldb_platform_data *plat_data = ldb->pdev->dev.platform_data;
++ struct resource *res;
++ uint32_t reg, setting_idx;
++ uint32_t ch_mask = 0, ch_val = 0;
++ uint32_t ipu_id, disp_id;
++ char di_clk[] = "ipu1_di0_sel";
++ char ldb_clk[] = "ldb_di0";
++ char div_3_5_clk[] = "di0_div_3_5";
++ char div_7_clk[] = "di0_div_7";
++ char div_sel_clk[] = "di0_div_sel";
++
++	/* if the input format is not valid, fall back to RGB666 */
++	if (!valid_mode(setting->if_fmt)) {
++		dev_warn(&ldb->pdev->dev, "Input pixel format not valid,"
++			" use default RGB666\n");
++ setting->if_fmt = IPU_PIX_FMT_RGB666;
++ }
++
++ if (!ldb->inited) {
++ setting_idx = 0;
++ res = platform_get_resource(ldb->pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&ldb->pdev->dev, "get iomem fail.\n");
++ return -ENOMEM;
++ }
++
++ ldb->reg = devm_ioremap(&ldb->pdev->dev, res->start,
++ resource_size(res));
++ ldb->control_reg = ldb->reg + 2;
++ ldb->gpr3_reg = ldb->reg + 3;
++
++ /* ipu selected by platform data setting */
++ setting->dev_id = plat_data->ipu_id;
++
++ reg = readl(ldb->control_reg);
++
++		/* reference resistor select */
++ reg &= ~LDB_BGREF_RMODE_MASK;
++ if (plat_data->ext_ref)
++ reg |= LDB_BGREF_RMODE_EXT;
++ else
++ reg |= LDB_BGREF_RMODE_INT;
++
++		/* TODO: now only use SPWG data mapping for both channels */
++ reg &= ~(LDB_BIT_MAP_CH0_MASK | LDB_BIT_MAP_CH1_MASK);
++ reg |= LDB_BIT_MAP_CH0_SPWG | LDB_BIT_MAP_CH1_SPWG;
++
++ /* channel mode setting */
++ reg &= ~(LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ reg &= ~(LDB_DATA_WIDTH_CH0_MASK | LDB_DATA_WIDTH_CH1_MASK);
++
++ if (bits_per_pixel(setting->if_fmt) == 24)
++ reg |= LDB_DATA_WIDTH_CH0_24 | LDB_DATA_WIDTH_CH1_24;
++ else
++ reg |= LDB_DATA_WIDTH_CH0_18 | LDB_DATA_WIDTH_CH1_18;
++
++ if (g_ldb_mode >= LDB_SPL_DI0)
++ ldb->mode = g_ldb_mode;
++ else
++ ldb->mode = plat_data->mode;
++
++ if ((ldb->mode == LDB_SIN0) || (ldb->mode == LDB_SIN1)) {
++ ret = ldb->mode - LDB_SIN0;
++ if (plat_data->disp_id != ret) {
++ dev_warn(&ldb->pdev->dev,
++ "change IPU DI%d to IPU DI%d for LDB "
++ "channel%d.\n",
++ plat_data->disp_id, ret, ret);
++ plat_data->disp_id = ret;
++ }
++ } else if (((ldb->mode == LDB_SEP0) || (ldb->mode == LDB_SEP1))
++ && is_imx6_ldb(plat_data)) {
++ if (plat_data->disp_id == plat_data->sec_disp_id) {
++ dev_err(&ldb->pdev->dev,
++					"For LVDS separate mode, "
++					"two DIs should be different!\n");
++ return -EINVAL;
++ }
++
++ if (((!plat_data->disp_id) && (ldb->mode == LDB_SEP1))
++ || ((plat_data->disp_id) &&
++ (ldb->mode == LDB_SEP0))) {
++ dev_dbg(&ldb->pdev->dev,
++					"LVDS separate mode: "
++					"swap DI configuration!\n");
++ ipu_id = plat_data->ipu_id;
++ disp_id = plat_data->disp_id;
++ plat_data->ipu_id = plat_data->sec_ipu_id;
++ plat_data->disp_id = plat_data->sec_disp_id;
++ plat_data->sec_ipu_id = ipu_id;
++ plat_data->sec_disp_id = disp_id;
++ }
++ }
++
++ if (ldb->mode == LDB_SPL_DI0) {
++ reg |= LDB_SPLIT_MODE_EN | LDB_CH0_MODE_EN_TO_DI0
++ | LDB_CH1_MODE_EN_TO_DI0;
++ setting->disp_id = 0;
++ } else if (ldb->mode == LDB_SPL_DI1) {
++ reg |= LDB_SPLIT_MODE_EN | LDB_CH0_MODE_EN_TO_DI1
++ | LDB_CH1_MODE_EN_TO_DI1;
++ setting->disp_id = 1;
++ } else if (ldb->mode == LDB_DUL_DI0) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ reg |= LDB_CH0_MODE_EN_TO_DI0 | LDB_CH1_MODE_EN_TO_DI0;
++ setting->disp_id = 0;
++ } else if (ldb->mode == LDB_DUL_DI1) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ reg |= LDB_CH0_MODE_EN_TO_DI1 | LDB_CH1_MODE_EN_TO_DI1;
++ setting->disp_id = 1;
++ } else if (ldb->mode == LDB_SIN0) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ setting->disp_id = plat_data->disp_id;
++ if (setting->disp_id == 0)
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ ch_mask = LDB_CH0_MODE_MASK;
++ ch_val = reg & LDB_CH0_MODE_MASK;
++ } else if (ldb->mode == LDB_SIN1) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ setting->disp_id = plat_data->disp_id;
++ if (setting->disp_id == 0)
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = LDB_CH1_MODE_MASK;
++ ch_val = reg & LDB_CH1_MODE_MASK;
++ } else { /* separate mode*/
++ setting->disp_id = plat_data->disp_id;
++
++ /* first output is LVDS0 or LVDS1 */
++ if (ldb->mode == LDB_SEP0)
++ lvds_channel = 0;
++ else
++ lvds_channel = 1;
++
++ reg &= ~LDB_SPLIT_MODE_EN;
++
++ if ((lvds_channel == 0) && (setting->disp_id == 0))
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else if ((lvds_channel == 0) && (setting->disp_id == 1))
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ else if ((lvds_channel == 1) && (setting->disp_id == 0))
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = lvds_channel ? LDB_CH1_MODE_MASK :
++ LDB_CH0_MODE_MASK;
++ ch_val = reg & ch_mask;
++
++ if (bits_per_pixel(setting->if_fmt) == 24) {
++ if (lvds_channel == 0)
++ reg &= ~LDB_DATA_WIDTH_CH1_24;
++ else
++ reg &= ~LDB_DATA_WIDTH_CH0_24;
++ } else {
++ if (lvds_channel == 0)
++ reg &= ~LDB_DATA_WIDTH_CH1_18;
++ else
++ reg &= ~LDB_DATA_WIDTH_CH0_18;
++ }
++ }
++
++ writel(reg, ldb->control_reg);
++ if (ldb->mode < LDB_SIN0) {
++ ch_mask = LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK;
++ ch_val = reg & (LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ }
++ } else { /* second time for separate mode */
++ if ((ldb->mode == LDB_SPL_DI0) ||
++ (ldb->mode == LDB_SPL_DI1) ||
++ (ldb->mode == LDB_DUL_DI0) ||
++ (ldb->mode == LDB_DUL_DI1) ||
++ (ldb->mode == LDB_SIN0) ||
++ (ldb->mode == LDB_SIN1)) {
++			dev_err(&ldb->pdev->dev, "for second ldb disp, "
++				"ldb mode should be in separate mode\n");
++ return -EINVAL;
++ }
++
++ setting_idx = 1;
++ if (is_imx6_ldb(plat_data)) {
++ setting->dev_id = plat_data->sec_ipu_id;
++ setting->disp_id = plat_data->sec_disp_id;
++ } else {
++ setting->dev_id = plat_data->ipu_id;
++ setting->disp_id = !plat_data->disp_id;
++ }
++ if (setting->disp_id == ldb->setting[0].di) {
++			dev_err(&ldb->pdev->dev, "Err: for second ldb disp in "
++				"separate mode, DI should be different!\n");
++ return -EINVAL;
++ }
++
++ /* second output is LVDS0 or LVDS1 */
++ if (ldb->mode == LDB_SEP0)
++ lvds_channel = 1;
++ else
++ lvds_channel = 0;
++
++ reg = readl(ldb->control_reg);
++ if ((lvds_channel == 0) && (setting->disp_id == 0))
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else if ((lvds_channel == 0) && (setting->disp_id == 1))
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ else if ((lvds_channel == 1) && (setting->disp_id == 0))
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = lvds_channel ? LDB_CH1_MODE_MASK :
++ LDB_CH0_MODE_MASK;
++ ch_val = reg & ch_mask;
++
++ if (bits_per_pixel(setting->if_fmt) == 24) {
++ if (lvds_channel == 0)
++ reg |= LDB_DATA_WIDTH_CH0_24;
++ else
++ reg |= LDB_DATA_WIDTH_CH1_24;
++ } else {
++ if (lvds_channel == 0)
++ reg |= LDB_DATA_WIDTH_CH0_18;
++ else
++ reg |= LDB_DATA_WIDTH_CH1_18;
++ }
++ writel(reg, ldb->control_reg);
++ }
++
++ /* get clocks */
++ if (is_imx6_ldb(plat_data) &&
++ ((ldb->mode == LDB_SEP0) || (ldb->mode == LDB_SEP1))) {
++ ldb_clk[6] += lvds_channel;
++ div_3_5_clk[2] += lvds_channel;
++ div_7_clk[2] += lvds_channel;
++ div_sel_clk[2] += lvds_channel;
++ } else {
++ ldb_clk[6] += setting->disp_id;
++ div_3_5_clk[2] += setting->disp_id;
++ div_7_clk[2] += setting->disp_id;
++ div_sel_clk[2] += setting->disp_id;
++ }
++ ldb->setting[setting_idx].ldb_di_clk = clk_get(&ldb->pdev->dev,
++ ldb_clk);
++ if (IS_ERR(ldb->setting[setting_idx].ldb_di_clk)) {
++ dev_err(&ldb->pdev->dev, "get ldb clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].ldb_di_clk);
++ }
++
++ ldb->setting[setting_idx].div_3_5_clk = clk_get(&ldb->pdev->dev,
++ div_3_5_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_3_5_clk)) {
++ dev_err(&ldb->pdev->dev, "get div 3.5 clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_3_5_clk);
++ }
++ ldb->setting[setting_idx].div_7_clk = clk_get(&ldb->pdev->dev,
++ div_7_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_7_clk)) {
++ dev_err(&ldb->pdev->dev, "get div 7 clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_7_clk);
++ }
++
++ ldb->setting[setting_idx].div_sel_clk = clk_get(&ldb->pdev->dev,
++ div_sel_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_sel_clk)) {
++ dev_err(&ldb->pdev->dev, "get div sel clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_sel_clk);
++ }
++
++ di_clk[3] += setting->dev_id;
++ di_clk[7] += setting->disp_id;
++ ldb->setting[setting_idx].di_clk = clk_get(&ldb->pdev->dev,
++ di_clk);
++ if (IS_ERR(ldb->setting[setting_idx].di_clk)) {
++ dev_err(&ldb->pdev->dev, "get di clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].di_clk);
++ }
++
++ ldb->setting[setting_idx].ch_mask = ch_mask;
++ ldb->setting[setting_idx].ch_val = ch_val;
++
++ if (is_imx6_ldb(plat_data))
++ ldb_ipu_ldb_route(setting->dev_id, setting->disp_id, ldb);
++
++	/* must use one of the specific video modes defined by this driver */
++ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
++ ldb_modedb, ldb_modedb_sz, NULL, setting->default_bpp);
++ if (ret != 1)
++ fb_videomode_to_var(&setting->fbi->var, &ldb_modedb[0]);
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < ldb_modedb_sz; i++) {
++ struct fb_videomode m;
++ fb_var_to_videomode(&m, &setting->fbi->var);
++ if (fb_mode_is_equal(&m, &ldb_modedb[i])) {
++ fb_add_videomode(&ldb_modedb[i],
++ &setting->fbi->modelist);
++ break;
++ }
++ }
++
++ ldb->setting[setting_idx].ipu = setting->dev_id;
++ ldb->setting[setting_idx].di = setting->disp_id;
++
++ return ret;
++}
++
++static int ldb_post_disp_init(struct mxc_dispdrv_handle *disp,
++ int ipu_id, int disp_id)
++{
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int setting_idx = ldb->inited ? 1 : 0;
++ int ret = 0;
++
++ if (!ldb->inited) {
++ ldb->nb.notifier_call = ldb_fb_event;
++ fb_register_client(&ldb->nb);
++ }
++
++ ret = clk_set_parent(ldb->setting[setting_idx].di_clk,
++ ldb->setting[setting_idx].ldb_di_clk);
++ if (ret) {
++		dev_err(&ldb->pdev->dev, "failed to set ldb_di clk as "
++				"the parent of ipu_di clk\n");
++ return ret;
++ }
++
++ if ((ldb->mode == LDB_SPL_DI0) || (ldb->mode == LDB_SPL_DI1)) {
++ ret = clk_set_parent(ldb->setting[setting_idx].div_sel_clk,
++ ldb->setting[setting_idx].div_3_5_clk);
++ if (ret) {
++			dev_err(&ldb->pdev->dev, "failed to set div 3.5 clk as "
++					"the parent of div sel clk\n");
++ return ret;
++ }
++ } else {
++ ret = clk_set_parent(ldb->setting[setting_idx].div_sel_clk,
++ ldb->setting[setting_idx].div_7_clk);
++ if (ret) {
++			dev_err(&ldb->pdev->dev, "failed to set div 7 clk as "
++					"the parent of div sel clk\n");
++ return ret;
++ }
++ }
++
++ /* save active ldb setting for fb notifier */
++ ldb->setting[setting_idx].active = true;
++
++ ldb->inited = true;
++ return ret;
++}
++
++static void ldb_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int i;
++
++ writel(0, ldb->control_reg);
++
++ for (i = 0; i < 2; i++) {
++ clk_disable(ldb->setting[i].ldb_di_clk);
++ clk_put(ldb->setting[i].ldb_di_clk);
++ clk_put(ldb->setting[i].div_3_5_clk);
++ clk_put(ldb->setting[i].div_7_clk);
++ clk_put(ldb->setting[i].div_sel_clk);
++ }
++
++ fb_unregister_client(&ldb->nb);
++}
++
++static struct mxc_dispdrv_driver ldb_drv = {
++ .name = DISPDRV_LDB,
++ .init = ldb_disp_init,
++ .post_init = ldb_post_disp_init,
++ .deinit = ldb_disp_deinit,
++ .setup = ldb_disp_setup,
++};
++
++static int ldb_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++ uint32_t data;
++
++ if (!ldb->inited)
++ return 0;
++ data = readl(ldb->control_reg);
++ ldb->control_reg_data = data;
++ data &= ~(LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ writel(data, ldb->control_reg);
++
++ return 0;
++}
++
++static int ldb_resume(struct platform_device *pdev)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++
++ if (!ldb->inited)
++ return 0;
++ writel(ldb->control_reg_data, ldb->control_reg);
++
++ return 0;
++}
++
++static struct platform_device_id imx_ldb_devtype[] = {
++ {
++ .name = "ldb-imx6",
++ .driver_data = LDB_IMX6,
++ }, {
++ /* sentinel */
++ }
++};
++
++static const struct of_device_id imx_ldb_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ldb", .data = &imx_ldb_devtype[IMX6_LDB],},
++ { /* sentinel */ }
++};
++
++/*!
++ * This function is called by the driver framework to initialize the LDB
++ * device.
++ *
++ * @param pdev The device structure for the LDB passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int ldb_probe(struct platform_device *pdev)
++{
++ int ret = 0;
++ struct ldb_data *ldb;
++ struct fsl_mxc_ldb_platform_data *plat_data;
++ const struct of_device_id *of_id =
++ of_match_device(imx_ldb_dt_ids, &pdev->dev);
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ ldb = devm_kzalloc(&pdev->dev, sizeof(struct ldb_data), GFP_KERNEL);
++ if (!ldb)
++ return -ENOMEM;
++
++ plat_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct fsl_mxc_ldb_platform_data),
++ GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ plat_data->devtype = pdev->id_entry->driver_data;
++
++ ret = ldb_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get ldb of property fail\n");
++ return ret;
++ }
++
++ ldb->pdev = pdev;
++ ldb->disp_ldb = mxc_dispdrv_register(&ldb_drv);
++ mxc_dispdrv_setdata(ldb->disp_ldb, ldb);
++
++ dev_set_drvdata(&pdev->dev, ldb);
++
++ dev_dbg(&pdev->dev, "%s exit\n", __func__);
++ return ret;
++}
++
++static int ldb_remove(struct platform_device *pdev)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++
++ if (!ldb->inited)
++ return 0;
++ mxc_dispdrv_puthandle(ldb->disp_ldb);
++ mxc_dispdrv_unregister(ldb->disp_ldb);
++ return 0;
++}
++
++static struct platform_driver mxcldb_driver = {
++ .driver = {
++ .name = "mxc_ldb",
++ .of_match_table = imx_ldb_dt_ids,
++ },
++ .probe = ldb_probe,
++ .remove = ldb_remove,
++ .suspend = ldb_suspend,
++ .resume = ldb_resume,
++};
++
++static int __init ldb_init(void)
++{
++ return platform_driver_register(&mxcldb_driver);
++}
++
++static void __exit ldb_uninit(void)
++{
++ platform_driver_unregister(&mxcldb_driver);
++}
++
++module_init(ldb_init);
++module_exit(ldb_uninit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC LDB driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/Makefile linux-3.14.40/drivers/video/mxc/Makefile
+--- linux-3.14.40.orig/drivers/video/mxc/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/Makefile 2015-05-01 14:58:05.727427001 -0500
+@@ -0,0 +1,6 @@
++obj-$(CONFIG_FB_MXC_LDB) += ldb.o
++obj-$(CONFIG_FB_MXC_MIPI_DSI) += mipi_dsi.o
++obj-$(CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL) += mxcfb_hx8369_wvga.o
++obj-$(CONFIG_FB_MXC_HDMI) += mxc_hdmi.o
++obj-$(CONFIG_FB_MXC_EDID) += mxc_edid.o
++obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxc_dispdrv.o mxc_lcdif.o mxc_ipuv3_fb.o
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mipi_dsi.c linux-3.14.40/drivers/video/mxc/mipi_dsi.c
+--- linux-3.14.40.orig/drivers/video/mxc/mipi_dsi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mipi_dsi.c 2015-05-01 14:58:05.731427001 -0500
+@@ -0,0 +1,953 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/ipu.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/mipi_dsi.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/backlight.h>
++#include <linux/of_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/reset.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <video/mipi_display.h>
++
++#include "mxc_dispdrv.h"
++#include "mipi_dsi.h"
++
++#define DISPDRV_MIPI "mipi_dsi"
++#define ROUND_UP(x) ((x)+1)
++#define NS2PS_RATIO (1000)
++#define NUMBER_OF_CHUNKS (0x8)
++#define NULL_PKT_SIZE (0x8)
++#define PHY_BTA_MAXTIME (0xd00)
++#define PHY_LP2HS_MAXTIME (0x40)
++#define PHY_HS2LP_MAXTIME (0x40)
++#define PHY_STOP_WAIT_TIME (0x20)
++#define DSI_CLKMGR_CFG_CLK_DIV (0x107)
++#define DSI_GEN_PLD_DATA_BUF_ENTRY (0x10)
++#define MIPI_MUX_CTRL(v) (((v) & 0x3) << 4)
++#define MIPI_LCD_SLEEP_MODE_DELAY (120)
++#define MIPI_DSI_REG_RW_TIMEOUT (20)
++#define MIPI_DSI_PHY_TIMEOUT (10)
++
++static struct mipi_dsi_match_lcd mipi_dsi_lcd_db[] = {
++#ifdef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++ {
++ "TRULY-WVGA",
++ {mipid_hx8369_get_lcd_videomode, mipid_hx8369_lcd_setup}
++ },
++#endif
++ {
++ "", {NULL, NULL}
++ }
++};
++
++struct _mipi_dsi_phy_pll_clk {
++ u32 max_phy_clk;
++ u32 config;
++};
++
++/* configure data for DPHY PLL 27M reference clk out */
++static const struct _mipi_dsi_phy_pll_clk mipi_dsi_phy_pll_clk_table[] = {
++ {1000, 0x74}, /* 950-1000MHz */
++ {950, 0x54}, /* 900-950Mhz */
++ {900, 0x34}, /* 850-900Mhz */
++ {850, 0x14}, /* 800-850MHz */
++ {800, 0x32}, /* 750-800MHz */
++ {750, 0x12}, /* 700-750Mhz */
++ {700, 0x30}, /* 650-700Mhz */
++ {650, 0x10}, /* 600-650MHz */
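++/* fields: name, refresh, xres, yres, pixclock (ps), left/right margin,
++ * upper/lower margin, hsync/vsync len, sync, vmode, flag */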
++ {600, 0x2e}, /* 550-600MHz */
++ {550, 0x0e}, /* 500-550Mhz */
++ {500, 0x2c}, /* 450-500Mhz */
++ {450, 0x0c}, /* 400-450MHz */
++ {400, 0x4a}, /* 360-400MHz */
++ {360, 0x2a}, /* 330-360Mhz */
++ {330, 0x48}, /* 300-330Mhz */
++ {300, 0x28}, /* 270-300MHz */
++ {270, 0x08}, /* 250-270MHz */
++ {250, 0x46}, /* 240-250Mhz */
++ {240, 0x26}, /* 210-240Mhz */
++ {210, 0x06}, /* 200-210MHz */
++ {200, 0x44}, /* 180-200MHz */
++ {180, 0x24}, /* 160-180MHz */
++ {160, 0x04}, /* 150-160MHz */
++};
++
++static int valid_mode(int pixel_fmt)
++{
++ return ((pixel_fmt == IPU_PIX_FMT_RGB24) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR24) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB565) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB332));
++}
++
++static inline void mipi_dsi_read_register(struct mipi_dsi_info *mipi_dsi,
++ u32 reg, u32 *val)
++{
++ *val = ioread32(mipi_dsi->mmio_base + reg);
++ dev_dbg(&mipi_dsi->pdev->dev, "read_reg:0x%02x, val:0x%08x.\n",
++ reg, *val);
++}
++
++static inline void mipi_dsi_write_register(struct mipi_dsi_info *mipi_dsi,
++ u32 reg, u32 val)
++{
++ iowrite32(val, mipi_dsi->mmio_base + reg);
++ dev_dbg(&mipi_dsi->pdev->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n",
++ reg, val);
++}
++
++int mipi_dsi_pkt_write(struct mipi_dsi_info *mipi_dsi,
++ u8 data_type, const u32 *buf, int len)
++{
++ u32 val;
++ u32 status = 0;
++ int write_len = len;
++ uint32_t timeout = 0;
++
++ if (len) {
++ /* generic long write command */
++ while (len / DSI_GEN_PLD_DATA_BUF_SIZE) {
++ mipi_dsi_write_register(mipi_dsi,
++ MIPI_DSI_GEN_PLD_DATA, *buf);
++ buf++;
++ len -= DSI_GEN_PLD_DATA_BUF_SIZE;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ while ((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ }
++ }
++ /* write the remainder bytes */
++ if (len > 0) {
++ while ((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ }
++ mipi_dsi_write_register(mipi_dsi,
++ MIPI_DSI_GEN_PLD_DATA, *buf);
++ }
++
++ val = data_type | ((write_len & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ } else {
++ /* generic short write command */
++ val = data_type | ((*buf & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ }
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &status);
++ while ((status & DSI_CMD_PKT_STATUS_GEN_CMD_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_CMD_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &status);
++ }
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_GEN_HDR, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &status);
++ while (!((status & DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY) ==
++ DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY) ||
++ !((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY)) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &status);
++ }
++
++ return 0;
++}
++
++int mipi_dsi_pkt_read(struct mipi_dsi_info *mipi_dsi,
++ u8 data_type, u32 *buf, int len)
++{
++ u32 val;
++ int read_len = 0;
++ uint32_t timeout = 0;
++
++ if (!len) {
++ mipi_dbg("%s, len = 0 invalid error!\n", __func__);
++ return -EINVAL;
++ }
++
++ val = data_type | ((*buf & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ memset(buf, 0, len);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_GEN_HDR, val);
++
++	/* wait for the cmd to be sent out */
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &val);
++ while ((val & DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) !=
++ DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ }
++	/* wait for the entire response to be stored in the FIFO */
++ while ((val & DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) ==
++ DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ }
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &val);
++ while (!(val & DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY)) {
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_GEN_PLD_DATA, buf);
++ read_len += DSI_GEN_PLD_DATA_BUF_SIZE;
++ buf++;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ if (read_len == (DSI_GEN_PLD_DATA_BUF_ENTRY *
++ DSI_GEN_PLD_DATA_BUF_SIZE))
++ break;
++ }
++
++ if ((len <= read_len) &&
++ ((len + DSI_GEN_PLD_DATA_BUF_SIZE) >= read_len))
++ return 0;
++ else {
++ dev_err(&mipi_dsi->pdev->dev,
++ "actually read_len:%d != len:%d.\n", read_len, len);
++ return -ERANGE;
++ }
++}
++
++int mipi_dsi_dcs_cmd(struct mipi_dsi_info *mipi_dsi,
++ u8 cmd, const u32 *param, int num)
++{
++ int err = 0;
++ u32 buf[DSI_CMD_BUF_MAXSIZE];
++
++ switch (cmd) {
++ case MIPI_DCS_EXIT_SLEEP_MODE:
++ case MIPI_DCS_ENTER_SLEEP_MODE:
++ case MIPI_DCS_SET_DISPLAY_ON:
++ case MIPI_DCS_SET_DISPLAY_OFF:
++ buf[0] = cmd;
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_DCS_SHORT_WRITE, buf, 0);
++ break;
++
++ default:
++ dev_err(&mipi_dsi->pdev->dev,
++ "MIPI DSI DCS Command:0x%x Not supported!\n", cmd);
++ break;
++ }
++
++ return err;
++}
++
++static void mipi_dsi_dphy_init(struct mipi_dsi_info *mipi_dsi,
++ u32 cmd, u32 data)
++{
++ u32 val;
++ u32 timeout = 0;
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP, DSI_PWRUP_POWERUP);
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL1,
++ (0x10000 | cmd));
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 2);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL1, (0 | data));
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 2);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ val = DSI_PHY_RSTZ_EN_CLK | DSI_PHY_RSTZ_DISABLE_RST |
++ DSI_PHY_RSTZ_DISABLE_SHUTDOWN;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ while ((val & DSI_PHY_STATUS_LOCK) != DSI_PHY_STATUS_LOCK) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_PHY_TIMEOUT) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "Error: phy lock timeout!\n");
++ break;
++ }
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ }
++ timeout = 0;
++ while ((val & DSI_PHY_STATUS_STOPSTATE_CLK_LANE) !=
++ DSI_PHY_STATUS_STOPSTATE_CLK_LANE) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_PHY_TIMEOUT) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "Error: phy lock lane timeout!\n");
++ break;
++ }
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ }
++}
++
++static void mipi_dsi_enable_controller(struct mipi_dsi_info *mipi_dsi,
++ bool init)
++{
++ u32 val;
++ u32 lane_byte_clk_period;
++ struct fb_videomode *mode = mipi_dsi->mode;
++ struct mipi_lcd_config *lcd_config = mipi_dsi->lcd_config;
++
++ if (init) {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ,
++ DSI_PHY_RSTZ_RST);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CLKMGR_CFG,
++ DSI_CLKMGR_CFG_CLK_DIV);
++
++		val = 0;
++		if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT))
++			val |= DSI_DPI_CFG_VSYNC_ACT_LOW;
++ if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT))
++ val |= DSI_DPI_CFG_HSYNC_ACT_LOW;
++ if ((mode->sync & FB_SYNC_OE_LOW_ACT))
++ val |= DSI_DPI_CFG_DATAEN_ACT_LOW;
++ if (MIPI_RGB666_LOOSELY == lcd_config->dpi_fmt)
++ val |= DSI_DPI_CFG_EN18LOOSELY;
++ val |= (lcd_config->dpi_fmt & DSI_DPI_CFG_COLORCODE_MASK)
++ << DSI_DPI_CFG_COLORCODE_SHIFT;
++ val |= (lcd_config->virtual_ch & DSI_DPI_CFG_VID_MASK)
++ << DSI_DPI_CFG_VID_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_DPI_CFG, val);
++
++ val = DSI_PCKHDL_CFG_EN_BTA |
++ DSI_PCKHDL_CFG_EN_ECC_RX |
++ DSI_PCKHDL_CFG_EN_CRC_RX;
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PCKHDL_CFG, val);
++
++ val = (mode->xres & DSI_VID_PKT_CFG_VID_PKT_SZ_MASK)
++ << DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT;
++ val |= (NUMBER_OF_CHUNKS & DSI_VID_PKT_CFG_NUM_CHUNKS_MASK)
++ << DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT;
++ val |= (NULL_PKT_SIZE & DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK)
++ << DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_PKT_CFG, val);
++
++		/* enable LP mode when transmitting DCS commands and
++		 * enable DSI command mode */
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG,
++ MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER);
++
++ /* mipi lane byte clk period in ns unit */
++ lane_byte_clk_period = NS2PS_RATIO /
++ (lcd_config->max_phy_clk / BITS_PER_BYTE);
++ val = ROUND_UP(mode->hsync_len * mode->pixclock /
++ NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HSA_TIME_SHIFT;
++ val |= ROUND_UP(mode->left_margin * mode->pixclock /
++ NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HBP_TIME_SHIFT;
++ val |= ROUND_UP((mode->left_margin + mode->right_margin +
++ mode->hsync_len + mode->xres) * mode->pixclock
++ / NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HLINE_TIME_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_TMR_LINE_CFG, val);
++
++ val = ((mode->vsync_len & DSI_VTIMING_CFG_VSA_LINES_MASK)
++ << DSI_VTIMING_CFG_VSA_LINES_SHIFT);
++ val |= ((mode->upper_margin & DSI_VTIMING_CFG_VBP_LINES_MASK)
++ << DSI_VTIMING_CFG_VBP_LINES_SHIFT);
++ val |= ((mode->lower_margin & DSI_VTIMING_CFG_VFP_LINES_MASK)
++ << DSI_VTIMING_CFG_VFP_LINES_SHIFT);
++ val |= ((mode->yres & DSI_VTIMING_CFG_V_ACT_LINES_MASK)
++ << DSI_VTIMING_CFG_V_ACT_LINES_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VTIMING_CFG, val);
++
++ val = ((PHY_BTA_MAXTIME & DSI_PHY_TMR_CFG_BTA_TIME_MASK)
++ << DSI_PHY_TMR_CFG_BTA_TIME_SHIFT);
++ val |= ((PHY_LP2HS_MAXTIME & DSI_PHY_TMR_CFG_LP2HS_TIME_MASK)
++ << DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT);
++ val |= ((PHY_HS2LP_MAXTIME & DSI_PHY_TMR_CFG_HS2LP_TIME_MASK)
++ << DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TMR_CFG, val);
++
++ val = (((lcd_config->data_lane_num - 1) &
++ DSI_PHY_IF_CFG_N_LANES_MASK)
++ << DSI_PHY_IF_CFG_N_LANES_SHIFT);
++ val |= ((PHY_STOP_WAIT_TIME & DSI_PHY_IF_CFG_WAIT_TIME_MASK)
++ << DSI_PHY_IF_CFG_WAIT_TIME_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CFG, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST0, &val);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST1, &val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_ERROR_MSK0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_ERROR_MSK1, 0);
++
++ mipi_dsi_dphy_init(mipi_dsi, DSI_PHY_CLK_INIT_COMMAND,
++ mipi_dsi->dphy_pll_config);
++ } else {
++ mipi_dsi_dphy_init(mipi_dsi, DSI_PHY_CLK_INIT_COMMAND,
++ mipi_dsi->dphy_pll_config);
++ }
++}
++
++static void mipi_dsi_disable_controller(struct mipi_dsi_info *mipi_dsi)
++{
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP, DSI_PWRUP_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ, DSI_PHY_RSTZ_RST);
++}
++
++static irqreturn_t mipi_dsi_irq_handler(int irq, void *data)
++{
++ u32 mask0;
++ u32 mask1;
++ u32 status0;
++ u32 status1;
++ struct mipi_dsi_info *mipi_dsi;
++
++ mipi_dsi = (struct mipi_dsi_info *)data;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST0, &status0);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST1, &status1);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_MSK0, &mask0);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_MSK1, &mask1);
++
++ if ((status0 & (~mask0)) || (status1 & (~mask1))) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "mipi_dsi IRQ status0:0x%x, status1:0x%x!\n",
++ status0, status1);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static inline void mipi_dsi_set_mode(struct mipi_dsi_info *mipi_dsi,
++ bool cmd_mode)
++{
++ u32 val;
++
++ if (cmd_mode) {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, &val);
++ val |= MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_MODE_CFG, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_POWERUP);
++ } else {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++		/* Disable command mode when transferring video data */
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, &val);
++ val &= ~MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, val);
++ val = DSI_VID_MODE_CFG_EN | DSI_VID_MODE_CFG_EN_BURSTMODE |
++ DSI_VID_MODE_CFG_EN_LP_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_MODE_CFG, val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_POWERUP);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_TX_REQ_CLK_HS);
++ }
++}
++
++static int mipi_dsi_power_on(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (!mipi_dsi->dsi_power_on) {
++ clk_prepare_enable(mipi_dsi->dphy_clk);
++ clk_prepare_enable(mipi_dsi->cfg_clk);
++ mipi_dsi_enable_controller(mipi_dsi, false);
++ mipi_dsi_set_mode(mipi_dsi, false);
++		/* host sends pclk/hsync/vsync for two frames before sleep-out */
++ msleep((1000/mipi_dsi->mode->refresh + 1) << 1);
++ mipi_dsi_set_mode(mipi_dsi, true);
++ err = mipi_dsi_dcs_cmd(mipi_dsi, MIPI_DCS_EXIT_SLEEP_MODE,
++ NULL, 0);
++ if (err) {
++ dev_err(&mipi_dsi->pdev->dev,
++				"MIPI DSI DCS Command sleep-out error!\n");
++ }
++ msleep(MIPI_LCD_SLEEP_MODE_DELAY);
++ mipi_dsi_set_mode(mipi_dsi, false);
++ mipi_dsi->dsi_power_on = 1;
++ }
++
++ return 0;
++}
++
++void mipi_dsi_power_off(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (mipi_dsi->dsi_power_on) {
++ mipi_dsi_set_mode(mipi_dsi, true);
++ err = mipi_dsi_dcs_cmd(mipi_dsi, MIPI_DCS_ENTER_SLEEP_MODE,
++ NULL, 0);
++ if (err) {
++ dev_err(&mipi_dsi->pdev->dev,
++				"MIPI DSI DCS Command sleep-in error!\n");
++ }
++ /* To allow time for the supply voltages
++ * and clock circuits to stabilize.
++ */
++ msleep(5);
++ /* video stream timing on */
++ mipi_dsi_set_mode(mipi_dsi, false);
++ msleep(MIPI_LCD_SLEEP_MODE_DELAY);
++
++ mipi_dsi_set_mode(mipi_dsi, true);
++ mipi_dsi_disable_controller(mipi_dsi);
++ mipi_dsi->dsi_power_on = 0;
++ clk_disable_unprepare(mipi_dsi->dphy_clk);
++ clk_disable_unprepare(mipi_dsi->cfg_clk);
++ }
++}
++
++static int mipi_dsi_lcd_init(struct mipi_dsi_info *mipi_dsi,
++ struct mxc_dispdrv_setting *setting)
++{
++ int err;
++ int size;
++ int i;
++ struct fb_videomode *mipi_lcd_modedb;
++ struct fb_videomode mode;
++ struct device *dev = &mipi_dsi->pdev->dev;
++
++ for (i = 0; i < ARRAY_SIZE(mipi_dsi_lcd_db); i++) {
++ if (!strcmp(mipi_dsi->lcd_panel,
++ mipi_dsi_lcd_db[i].lcd_panel)) {
++ mipi_dsi->lcd_callback =
++ &mipi_dsi_lcd_db[i].lcd_callback;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(mipi_dsi_lcd_db)) {
++ dev_err(dev, "failed to find supported lcd panel.\n");
++ return -EINVAL;
++ }
++ /* get the videomode in the order: cmdline->platform data->driver */
++ mipi_dsi->lcd_callback->get_mipi_lcd_videomode(&mipi_lcd_modedb, &size,
++ &mipi_dsi->lcd_config);
++ err = fb_find_mode(&setting->fbi->var, setting->fbi,
++ setting->dft_mode_str,
++ mipi_lcd_modedb, size, NULL,
++ setting->default_bpp);
++ if (err != 1)
++ fb_videomode_to_var(&setting->fbi->var, mipi_lcd_modedb);
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < size; i++) {
++ fb_var_to_videomode(&mode, &setting->fbi->var);
++ if (fb_mode_is_equal(&mode, mipi_lcd_modedb + i)) {
++ err = fb_add_videomode(mipi_lcd_modedb + i,
++ &setting->fbi->modelist);
++ /* Note: only support fb mode from driver */
++ mipi_dsi->mode = mipi_lcd_modedb + i;
++ break;
++ }
++ }
++ if ((err < 0) || (size == i)) {
++ dev_err(dev, "failed to add videomode.\n");
++ return err;
++ }
++
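++	/* pick the PLL range covering the panel's max PHY clock: stop at the
++	 * first table entry below it, then step back one entry (the --i) */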
++ for (i = 0; i < ARRAY_SIZE(mipi_dsi_phy_pll_clk_table); i++) {
++ if (mipi_dsi_phy_pll_clk_table[i].max_phy_clk <
++ mipi_dsi->lcd_config->max_phy_clk)
++ break;
++ }
++ if ((i == ARRAY_SIZE(mipi_dsi_phy_pll_clk_table)) ||
++ (mipi_dsi->lcd_config->max_phy_clk >
++ mipi_dsi_phy_pll_clk_table[0].max_phy_clk)) {
++		dev_err(dev, "failed to find data in "
++				"mipi_dsi_phy_pll_clk_table.\n");
++ return -EINVAL;
++ }
++ mipi_dsi->dphy_pll_config = mipi_dsi_phy_pll_clk_table[--i].config;
++ dev_dbg(dev, "dphy_pll_config:0x%x.\n", mipi_dsi->dphy_pll_config);
++
++ return 0;
++}
++
++int mipi_dsi_enable(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (!mipi_dsi->lcd_inited) {
++ err = clk_prepare_enable(mipi_dsi->dphy_clk);
++ err |= clk_prepare_enable(mipi_dsi->cfg_clk);
++ if (err)
++ dev_err(&mipi_dsi->pdev->dev,
++ "clk enable error:%d!\n", err);
++ mipi_dsi_enable_controller(mipi_dsi, true);
++ err = mipi_dsi->lcd_callback->mipi_lcd_setup(
++ mipi_dsi);
++ if (err < 0) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "failed to init mipi lcd.");
++ clk_disable_unprepare(mipi_dsi->dphy_clk);
++ clk_disable_unprepare(mipi_dsi->cfg_clk);
++ return err;
++ }
++ mipi_dsi_set_mode(mipi_dsi, false);
++ mipi_dsi->dsi_power_on = 1;
++ mipi_dsi->lcd_inited = 1;
++ }
++ mipi_dsi_power_on(mipi_dsi->disp_mipi);
++
++ return 0;
++}
++
++static int mipi_dsi_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++ struct device *dev = &mipi_dsi->pdev->dev;
++ int ret = 0;
++
++ if (!valid_mode(setting->if_fmt)) {
++		dev_warn(dev, "Input pixel format not valid, "
++			"use default RGB24\n");
++ setting->if_fmt = IPU_PIX_FMT_RGB24;
++ }
++
++ setting->dev_id = mipi_dsi->dev_id;
++ setting->disp_id = mipi_dsi->disp_id;
++
++ ret = mipi_dsi_lcd_init(mipi_dsi, setting);
++ if (ret) {
++ dev_err(dev, "failed to init mipi dsi lcd\n");
++ return ret;
++ }
++
++ dev_dbg(dev, "MIPI DSI dispdrv inited!\n");
++ return ret;
++}
++
++static void mipi_dsi_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct mipi_dsi_info *mipi_dsi;
++
++ mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ mipi_dsi_power_off(mipi_dsi->disp_mipi);
++ if (mipi_dsi->bl)
++ backlight_device_unregister(mipi_dsi->bl);
++}
++
++static struct mxc_dispdrv_driver mipi_dsi_drv = {
++ .name = DISPDRV_MIPI,
++ .init = mipi_dsi_disp_init,
++ .deinit = mipi_dsi_disp_deinit,
++ .enable = mipi_dsi_enable,
++ .disable = mipi_dsi_power_off,
++};
++
++static int imx6q_mipi_dsi_get_mux(int dev_id, int disp_id)
++{
++ if (dev_id > 1 || disp_id > 1)
++ return -EINVAL;
++
++ return (dev_id << 5) | (disp_id << 4);
++}
++
++static struct mipi_dsi_bus_mux imx6q_mipi_dsi_mux[] = {
++ {
++ .reg = IOMUXC_GPR3,
++ .mask = IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
++ .get_mux = imx6q_mipi_dsi_get_mux,
++ },
++};
++
++static int imx6dl_mipi_dsi_get_mux(int dev_id, int disp_id)
++{
++ if (dev_id > 1 || disp_id > 1)
++ return -EINVAL;
++
++ /* MIPI DSI source is LCDIF */
++ if (dev_id)
++ disp_id = 0;
++
++ return (dev_id << 5) | (disp_id << 4);
++}
++
++static struct mipi_dsi_bus_mux imx6dl_mipi_dsi_mux[] = {
++ {
++ .reg = IOMUXC_GPR3,
++ .mask = IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
++ .get_mux = imx6dl_mipi_dsi_get_mux,
++ },
++};
++
++static const struct of_device_id imx_mipi_dsi_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mipi-dsi", .data = imx6q_mipi_dsi_mux, },
++ { .compatible = "fsl,imx6dl-mipi-dsi", .data = imx6dl_mipi_dsi_mux, },
++ { }
++};
++MODULE_DEVICE_TABLE(of, imx_mipi_dsi_dt_ids);
++
++/**
++ * This function is called by the driver framework to initialize the MIPI DSI
++ * device.
++ *
++ * @param pdev The device structure for the MIPI DSI passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int mipi_dsi_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id =
++ of_match_device(of_match_ptr(imx_mipi_dsi_dt_ids),
++ &pdev->dev);
++ struct mipi_dsi_info *mipi_dsi;
++ struct resource *res;
++ u32 dev_id, disp_id;
++ const char *lcd_panel;
++	int mux;
++ int ret = 0;
++
++ mipi_dsi = devm_kzalloc(&pdev->dev, sizeof(*mipi_dsi), GFP_KERNEL);
++ if (!mipi_dsi)
++ return -ENOMEM;
++
++ ret = of_property_read_string(np, "lcd_panel", &lcd_panel);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property lcd_panel\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "dev_id", &dev_id);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property dev_id\n");
++ return ret;
++ }
++ ret = of_property_read_u32(np, "disp_id", &disp_id);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property disp_id\n");
++ return ret;
++ }
++ mipi_dsi->dev_id = dev_id;
++ mipi_dsi->disp_id = disp_id;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "failed to get platform resource 0\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ resource_size(res), pdev->name))
++ return -EBUSY;
++
++ mipi_dsi->mmio_base = devm_ioremap(&pdev->dev, res->start,
++ resource_size(res));
++ if (!mipi_dsi->mmio_base)
++ return -EBUSY;
++
++ mipi_dsi->irq = platform_get_irq(pdev, 0);
++ if (mipi_dsi->irq < 0) {
++ dev_err(&pdev->dev, "failed get device irq\n");
++ return -ENODEV;
++ }
++
++ ret = devm_request_irq(&pdev->dev, mipi_dsi->irq,
++ mipi_dsi_irq_handler,
++ 0, "mipi_dsi", mipi_dsi);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ mipi_dsi->dphy_clk = devm_clk_get(&pdev->dev, "mipi_pllref_clk");
++ if (IS_ERR(mipi_dsi->dphy_clk)) {
++ dev_err(&pdev->dev, "failed to get dphy pll_ref_clk\n");
++ return PTR_ERR(mipi_dsi->dphy_clk);
++ }
++
++ mipi_dsi->cfg_clk = devm_clk_get(&pdev->dev, "mipi_cfg_clk");
++ if (IS_ERR(mipi_dsi->cfg_clk)) {
++ dev_err(&pdev->dev, "failed to get cfg_clk\n");
++ return PTR_ERR(mipi_dsi->cfg_clk);
++ }
++
++ mipi_dsi->disp_power_on = devm_regulator_get(&pdev->dev,
++ "disp-power-on");
++ if (!IS_ERR(mipi_dsi->disp_power_on)) {
++ ret = regulator_enable(mipi_dsi->disp_power_on);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to enable display "
++ "power regulator, err=%d\n", ret);
++ return ret;
++ }
++ } else {
++ mipi_dsi->disp_power_on = NULL;
++ }
++
++ ret = device_reset(&pdev->dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++ goto dev_reset_fail;
++ }
++
++ if (of_id)
++ mipi_dsi->bus_mux = of_id->data;
++
++ mipi_dsi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
++ if (IS_ERR(mipi_dsi->regmap)) {
++ dev_err(&pdev->dev, "failed to get parent regmap\n");
++ ret = PTR_ERR(mipi_dsi->regmap);
++ goto get_parent_regmap_fail;
++ }
++
++ mux = mipi_dsi->bus_mux->get_mux(dev_id, disp_id);
++ if (mux >= 0)
++ regmap_update_bits(mipi_dsi->regmap, mipi_dsi->bus_mux->reg,
++ mipi_dsi->bus_mux->mask, mux);
++ else
++ dev_warn(&pdev->dev, "invalid dev_id or disp_id muxing\n");
++
++ mipi_dsi->lcd_panel = kstrdup(lcd_panel, GFP_KERNEL);
++ if (!mipi_dsi->lcd_panel) {
++ dev_err(&pdev->dev, "failed to allocate lcd panel name\n");
++ ret = -ENOMEM;
++ goto kstrdup_fail;
++ }
++
++ mipi_dsi->pdev = pdev;
++ mipi_dsi->disp_mipi = mxc_dispdrv_register(&mipi_dsi_drv);
++ if (IS_ERR(mipi_dsi->disp_mipi)) {
++ dev_err(&pdev->dev, "mxc_dispdrv_register error\n");
++ ret = PTR_ERR(mipi_dsi->disp_mipi);
++ goto dispdrv_reg_fail;
++ }
++
++ mxc_dispdrv_setdata(mipi_dsi->disp_mipi, mipi_dsi);
++ dev_set_drvdata(&pdev->dev, mipi_dsi);
++
++ dev_info(&pdev->dev, "i.MX MIPI DSI driver probed\n");
++ return ret;
++
++dispdrv_reg_fail:
++ kfree(mipi_dsi->lcd_panel);
++kstrdup_fail:
++get_parent_regmap_fail:
++dev_reset_fail:
++ if (mipi_dsi->disp_power_on)
++ regulator_disable(mipi_dsi->disp_power_on);
++ return ret;
++}
++
++static void mipi_dsi_shutdown(struct platform_device *pdev)
++{
++ struct mipi_dsi_info *mipi_dsi = dev_get_drvdata(&pdev->dev);
++
++ mipi_dsi_power_off(mipi_dsi->disp_mipi);
++}
++
++static int mipi_dsi_remove(struct platform_device *pdev)
++{
++ struct mipi_dsi_info *mipi_dsi = dev_get_drvdata(&pdev->dev);
++
++ mxc_dispdrv_puthandle(mipi_dsi->disp_mipi);
++ mxc_dispdrv_unregister(mipi_dsi->disp_mipi);
++
++ if (mipi_dsi->disp_power_on)
++ regulator_disable(mipi_dsi->disp_power_on);
++
++ kfree(mipi_dsi->lcd_panel);
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver mipi_dsi_driver = {
++ .driver = {
++ .of_match_table = imx_mipi_dsi_dt_ids,
++ .name = "mxc_mipi_dsi",
++ },
++ .probe = mipi_dsi_probe,
++ .remove = mipi_dsi_remove,
++ .shutdown = mipi_dsi_shutdown,
++};
++
++static int __init mipi_dsi_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&mipi_dsi_driver);
++ if (err) {
++ pr_err("mipi_dsi_driver register failed\n");
++ return -ENODEV;
++ }
++ pr_info("MIPI DSI driver module loaded\n");
++ return 0;
++}
++
++static void __exit mipi_dsi_cleanup(void)
++{
++ platform_driver_unregister(&mipi_dsi_driver);
++}
++
++module_init(mipi_dsi_init);
++module_exit(mipi_dsi_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX MIPI DSI driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mipi_dsi.h linux-3.14.40/drivers/video/mxc/mipi_dsi.h
+--- linux-3.14.40.orig/drivers/video/mxc/mipi_dsi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mipi_dsi.h 2015-05-01 14:58:05.731427001 -0500
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MIPI_DSI_H__
++#define __MIPI_DSI_H__
++
++#include <linux/regmap.h>
++
++#ifdef DEBUG
++#define mipi_dbg(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#else
++#define mipi_dbg(fmt, ...)
++#endif
++
++#define DSI_CMD_BUF_MAXSIZE (32)
++
++/* DPI interface pixel color coding map */
++enum mipi_dsi_dpi_fmt {
++ MIPI_RGB565_PACKED = 0,
++ MIPI_RGB565_LOOSELY,
++ MIPI_RGB565_CONFIG3,
++ MIPI_RGB666_PACKED,
++ MIPI_RGB666_LOOSELY,
++ MIPI_RGB888,
++};
++
++struct mipi_lcd_config {
++ u32 virtual_ch;
++ u32 data_lane_num;
++ /* device max DPHY clock in MHz unit */
++ u32 max_phy_clk;
++ enum mipi_dsi_dpi_fmt dpi_fmt;
++};
++
++struct mipi_dsi_info;
++struct mipi_dsi_lcd_callback {
++ /* callback for lcd panel operation */
++ void (*get_mipi_lcd_videomode)(struct fb_videomode **, int *,
++ struct mipi_lcd_config **);
++ int (*mipi_lcd_setup)(struct mipi_dsi_info *);
++
++};
++
++struct mipi_dsi_match_lcd {
++ char *lcd_panel;
++ struct mipi_dsi_lcd_callback lcd_callback;
++};
++
++struct mipi_dsi_bus_mux {
++ int reg;
++ int mask;
++ int (*get_mux) (int dev_id, int disp_id);
++};
++
++/* driver private data */
++struct mipi_dsi_info {
++ struct platform_device *pdev;
++ void __iomem *mmio_base;
++ struct regmap *regmap;
++ const struct mipi_dsi_bus_mux *bus_mux;
++ int dsi_power_on;
++ int lcd_inited;
++ u32 dphy_pll_config;
++ int dev_id;
++ int disp_id;
++ char *lcd_panel;
++ int irq;
++ struct clk *dphy_clk;
++ struct clk *cfg_clk;
++ struct mxc_dispdrv_handle *disp_mipi;
++ struct fb_videomode *mode;
++ struct regulator *disp_power_on;
++ struct mipi_lcd_config *lcd_config;
++ /* board related power control */
++ struct backlight_device *bl;
++ /* callback for lcd panel operation */
++ struct mipi_dsi_lcd_callback *lcd_callback;
++};
++
++int mipi_dsi_pkt_write(struct mipi_dsi_info *mipi,
++ u8 data_type, const u32 *buf, int len);
++int mipi_dsi_pkt_read(struct mipi_dsi_info *mipi,
++ u8 data_type, u32 *buf, int len);
++int mipi_dsi_dcs_cmd(struct mipi_dsi_info *mipi,
++ u8 cmd, const u32 *param, int num);
++
++#ifdef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++void mipid_hx8369_get_lcd_videomode(struct fb_videomode **mode, int *size,
++ struct mipi_lcd_config **data);
++int mipid_hx8369_lcd_setup(struct mipi_dsi_info *);
++#endif
++
++#ifndef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++#error "Please configure MIPI LCD panel, we cannot find one!"
++#endif
++
++#endif
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxc_dispdrv.c linux-3.14.40/drivers/video/mxc/mxc_dispdrv.c
+--- linux-3.14.40.orig/drivers/video/mxc/mxc_dispdrv.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxc_dispdrv.c 2015-05-01 14:58:05.731427001 -0500
+@@ -0,0 +1,150 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_dispdrv.c
++ * @brief mxc display driver framework.
++ *
++ * A display device driver calls mxc_dispdrv_register(drv) in its dev_probe()
++ * function. Everything that would normally live in dev_probe() moves into
++ * mxc_dispdrv_driver->init(); init() should initialize the device and feed
++ * the resulting setting back to the caller.
++ * Deferred operations can be done in mxc_dispdrv_driver->post_init(),
++ * after dev_id and disp_id pass the usage check.
++ * Everything from dev_remove() moves into mxc_dispdrv_driver->deinit();
++ * dev_suspend() work moves into the fb_notifier for SUSPEND, if any;
++ * dev_resume() work moves into the fb_notifier for RESUME, if any.
++ *
++ * The ipuv3 fb driver calls mxc_dispdrv_gethandle(name, setting) before a fb
++ * is added, passing the fbi via the setting; after mxc_dispdrv_gethandle()
++ * returns, the fb driver has the basic settings for the fbi and the ipuv3
++ * hardware (ipu_id and disp_id).
++ *
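++ * A minimal sketch of a client driver (the "foo" names are illustrative
++ * only, not part of this framework):
++ *
++ *	static struct mxc_dispdrv_driver foo_drv = {
++ *		.name	= "foo",
++ *		.init	= foo_disp_init,
++ *		.deinit	= foo_disp_deinit,
++ *	};
++ *	handle = mxc_dispdrv_register(&foo_drv);
++ *	mxc_dispdrv_setdata(handle, foo_private_data);
++ *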
++ * @ingroup Framebuffer
++ */
++
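++/*
++ * Illustrative sketch only, not part of the framework: a minimal panel
++ * driver built on this interface might look roughly like the following
++ * (my_panel_init, my_panel_drv and the "my-panel" name are hypothetical):
++ *
++ *	static int my_panel_init(struct mxc_dispdrv_handle *h,
++ *				 struct mxc_dispdrv_setting *setting)
++ *	{
++ *		setting->dev_id = 0;
++ *		setting->disp_id = 0;
++ *		return 0;
++ *	}
++ *
++ *	static struct mxc_dispdrv_driver my_panel_drv = {
++ *		.name	= "my-panel",
++ *		.init	= my_panel_init,
++ *	};
++ *
++ *	handle = mxc_dispdrv_register(&my_panel_drv);
++ *
++ * The framebuffer side later picks it up with
++ * mxc_dispdrv_gethandle("my-panel", &setting).
++ */
++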
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include "mxc_dispdrv.h"
++
++static LIST_HEAD(dispdrv_list);
++static DEFINE_MUTEX(dispdrv_lock);
++
++struct mxc_dispdrv_entry {
++ /* Note: drv must always be the first element */
++ struct mxc_dispdrv_driver *drv;
++ bool active;
++ void *priv;
++ struct list_head list;
++};
++
++struct mxc_dispdrv_handle *mxc_dispdrv_register(struct mxc_dispdrv_driver *drv)
++{
++ struct mxc_dispdrv_entry *new;
++
++ mutex_lock(&dispdrv_lock);
++
++ new = kzalloc(sizeof(struct mxc_dispdrv_entry), GFP_KERNEL);
++ if (!new) {
++ mutex_unlock(&dispdrv_lock);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ new->drv = drv;
++ list_add_tail(&new->list, &dispdrv_list);
++
++ mutex_unlock(&dispdrv_lock);
++
++ return (struct mxc_dispdrv_handle *)new;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_register);
++
++int mxc_dispdrv_unregister(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ mutex_lock(&dispdrv_lock);
++ list_del(&entry->list);
++ mutex_unlock(&dispdrv_lock);
++ kfree(entry);
++ return 0;
++ } else
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_unregister);
++
++struct mxc_dispdrv_handle *mxc_dispdrv_gethandle(char *name,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret, found = 0;
++ struct mxc_dispdrv_entry *entry;
++
++ mutex_lock(&dispdrv_lock);
++ list_for_each_entry(entry, &dispdrv_list, list) {
++ if (!strcmp(entry->drv->name, name) && (entry->drv->init)) {
++ ret = entry->drv->init((struct mxc_dispdrv_handle *)
++ entry, setting);
++ if (ret >= 0) {
++ entry->active = true;
++ found = 1;
++ break;
++ }
++ }
++ }
++ mutex_unlock(&dispdrv_lock);
++
++ return found ? (struct mxc_dispdrv_handle *)entry : ERR_PTR(-ENODEV);
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_gethandle);
++
++void mxc_dispdrv_puthandle(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ mutex_lock(&dispdrv_lock);
++ if (entry && entry->active && entry->drv->deinit) {
++ entry->drv->deinit(handle);
++ entry->active = false;
++ }
++ mutex_unlock(&dispdrv_lock);
++
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_puthandle);
++
++int mxc_dispdrv_setdata(struct mxc_dispdrv_handle *handle, void *data)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ entry->priv = data;
++ return 0;
++ } else
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_setdata);
++
++void *mxc_dispdrv_getdata(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ return entry->priv;
++ } else
++ return ERR_PTR(-EINVAL);
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_getdata);
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxc_dispdrv.h linux-3.14.40/drivers/video/mxc/mxc_dispdrv.h
+--- linux-3.14.40.orig/drivers/video/mxc/mxc_dispdrv.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxc_dispdrv.h 2015-05-01 14:58:05.731427001 -0500
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __MXC_DISPDRV_H__
++#define __MXC_DISPDRV_H__
++#include <linux/fb.h>
++
++struct mxc_dispdrv_handle {
++ struct mxc_dispdrv_driver *drv;
++};
++
++struct mxc_dispdrv_setting {
++ /*input-feedback parameter*/
++ struct fb_info *fbi;
++ int if_fmt;
++ int default_bpp;
++ char *dft_mode_str;
++
++ /*feedback parameter*/
++ int dev_id;
++ int disp_id;
++};
++
++struct mxc_dispdrv_driver {
++ const char *name;
++ int (*init) (struct mxc_dispdrv_handle *, struct mxc_dispdrv_setting *);
++ /* deferred operations after dev_id and disp_id pass usage check */
++ int (*post_init) (struct mxc_dispdrv_handle *, int dev_id, int disp_id);
++ void (*deinit) (struct mxc_dispdrv_handle *);
++ /* display driver enable function for extension */
++ int (*enable) (struct mxc_dispdrv_handle *);
++ /* display driver disable function, called at early part of fb_blank */
++ void (*disable) (struct mxc_dispdrv_handle *);
++ /* display driver setup function, called at early part of fb_set_par */
++ int (*setup) (struct mxc_dispdrv_handle *, struct fb_info *fbi);
++};
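++
++/*
++ * mxc_dispdrv_gethandle() only matches registered drivers whose init
++ * callback is non-NULL, so a driver must provide at least .name and .init;
++ * .deinit is NULL-checked in mxc_dispdrv_puthandle() before it is called.
++ */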
++
++struct mxc_dispdrv_handle *mxc_dispdrv_register(struct mxc_dispdrv_driver *drv);
++int mxc_dispdrv_unregister(struct mxc_dispdrv_handle *handle);
++struct mxc_dispdrv_handle *mxc_dispdrv_gethandle(char *name,
++ struct mxc_dispdrv_setting *setting);
++void mxc_dispdrv_puthandle(struct mxc_dispdrv_handle *handle);
++int mxc_dispdrv_setdata(struct mxc_dispdrv_handle *handle, void *data);
++void *mxc_dispdrv_getdata(struct mxc_dispdrv_handle *handle);
++#endif
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxc_edid.c linux-3.14.40/drivers/video/mxc/mxc_edid.c
+--- linux-3.14.40.orig/drivers/video/mxc/mxc_edid.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxc_edid.c 2015-05-01 14:58:05.735427001 -0500
+@@ -0,0 +1,762 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_edid.c
++ *
++ * @brief MXC EDID driver
++ *
++ * @ingroup Framebuffer
++ */
++
++/*!
++ * Include files
++ */
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <video/mxc_edid.h>
++#include "../edid.h"
++
++#undef DEBUG /* define this for verbose EDID parsing output */
++#ifdef DEBUG
++#define DPRINTK(fmt, args...) printk(fmt, ## args)
++#else
++#define DPRINTK(fmt, args...)
++#endif
++
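++/*
++ * CEA-861 mode table, indexed by VIC.  Only a subset of the 64 entries is
++ * populated; the remaining indices stay zero-filled, and callers such as
++ * mxc_edid_parse_ext_blk() skip them by checking mxc_cea_mode[vic].xres.
++ */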
++const struct fb_videomode mxc_cea_mode[64] = {
++ /* #1: 640x480p@59.94/60Hz 4:3 */
++ [1] = {
++ NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #2: 720x480p@59.94/60Hz 4:3 */
++ [2] = {
++ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #3: 720x480p@59.94/60Hz 16:9 */
++ [3] = {
++ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #4: 1280x720p@59.94/60Hz 16:9 */
++ [4] = {
++ NULL, 60, 1280, 720, 13468, 220, 110, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++ /* #5: 1920x1080i@59.94/60Hz 16:9 */
++ [5] = {
++ NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #6: 720(1440)x480iH@59.94/60Hz 4:3 */
++ [6] = {
++ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #7: 720(1440)x480iH@59.94/60Hz 16:9 */
++ [7] = {
++ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #8: 720(1440)x240pH@59.94/60Hz 4:3 */
++ [8] = {
++ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #9: 720(1440)x240pH@59.94/60Hz 16:9 */
++ [9] = {
++ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #14: 1440x480p@59.94/60Hz 4:3 */
++ [14] = {
++ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #15: 1440x480p@59.94/60Hz 16:9 */
++ [15] = {
++ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #16: 1920x1080p@60Hz 16:9 */
++ [16] = {
++ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #17: 720x576pH@50Hz 4:3 */
++ [17] = {
++ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #18: 720x576pH@50Hz 16:9 */
++ [18] = {
++ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #19: 1280x720p@50Hz */
++ [19] = {
++ NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #20: 1920x1080i@50Hz */
++ [20] = {
++ NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #23: 720(1440)x288pH@50Hz 4:3 */
++ [23] = {
++ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #24: 720(1440)x288pH@50Hz 16:9 */
++ [24] = {
++ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #29: 720(1440)x576pH@50Hz 4:3 */
++ [29] = {
++ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #30: 720(1440)x576pH@50Hz 16:9 */
++ [30] = {
++ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #31: 1920x1080p@50Hz */
++ [31] = {
++ NULL, 50, 1920, 1080, 6734, 148, 528, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #32: 1920x1080p@23.98/24Hz */
++ [32] = {
++ NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #33: 1920x1080p@25Hz */
++ [33] = {
++ NULL, 25, 1920, 1080, 13468, 148, 528, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #34: 1920x1080p@30Hz */
++ [34] = {
++ NULL, 30, 1920, 1080, 13468, 148, 88, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #41: 1280x720p@100Hz 16:9 */
++ [41] = {
++ NULL, 100, 1280, 720, 6734, 220, 440, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++ /* #47: 1280x720p@119.88/120Hz 16:9 */
++ [47] = {
++ NULL, 120, 1280, 720, 6734, 220, 110, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++};
++
++/*
++ * We have a special version of fb_mode_is_equal that ignores
++ * pixclock, since for many CEA modes, 2 frequencies are supported
++ * e.g. 640x480 @ 60Hz or 59.94Hz
++ */
++int mxc_edid_fb_mode_is_equal(bool use_aspect,
++ const struct fb_videomode *mode1,
++ const struct fb_videomode *mode2)
++{
++ u32 mask;
++
++ if (use_aspect)
++ mask = ~0;
++ else
++ mask = ~FB_VMODE_ASPECT_MASK;
++
++ return (mode1->xres == mode2->xres &&
++ mode1->yres == mode2->yres &&
++ mode1->hsync_len == mode2->hsync_len &&
++ mode1->vsync_len == mode2->vsync_len &&
++ mode1->left_margin == mode2->left_margin &&
++ mode1->right_margin == mode2->right_margin &&
++ mode1->upper_margin == mode2->upper_margin &&
++ mode1->lower_margin == mode2->lower_margin &&
++ mode1->sync == mode2->sync &&
++ /* refresh check: 59.94Hz and 60Hz share the same entry
++ * in the mxc_cea_mode table */
++ abs(mode1->refresh - mode2->refresh) <= 1 &&
++ (mode1->vmode & mask) == (mode2->vmode & mask));
++}
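++
++/*
++ * Example (illustrative only): mxc_cea_mode[4] is 1280x720p@60Hz; a 59.94Hz
++ * variant recovered from a detailed timing descriptor gets an integer
++ * refresh of 59, so abs(60 - 59) <= 1 and the two still compare equal here,
++ * provided the blanking parameters match; with use_aspect == false the
++ * aspect-ratio bits of vmode are masked out as well.
++ */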
++
++static void get_detailed_timing(unsigned char *block,
++ struct fb_videomode *mode)
++{
++ mode->xres = H_ACTIVE;
++ mode->yres = V_ACTIVE;
++ mode->pixclock = PIXEL_CLOCK;
++ mode->pixclock /= 1000;
++ mode->pixclock = KHZ2PICOS(mode->pixclock);
++ mode->right_margin = H_SYNC_OFFSET;
++ mode->left_margin = (H_ACTIVE + H_BLANKING) -
++ (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
++ mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
++ V_SYNC_WIDTH;
++ mode->lower_margin = V_SYNC_OFFSET;
++ mode->hsync_len = H_SYNC_WIDTH;
++ mode->vsync_len = V_SYNC_WIDTH;
++ if (HSYNC_POSITIVE)
++ mode->sync |= FB_SYNC_HOR_HIGH_ACT;
++ if (VSYNC_POSITIVE)
++ mode->sync |= FB_SYNC_VERT_HIGH_ACT;
++ mode->refresh = PIXEL_CLOCK/((H_ACTIVE + H_BLANKING) *
++ (V_ACTIVE + V_BLANKING));
++ if (INTERLACED) {
++ mode->yres *= 2;
++ mode->upper_margin *= 2;
++ mode->lower_margin *= 2;
++ mode->vsync_len *= 2;
++ mode->vmode |= FB_VMODE_INTERLACED;
++ }
++ mode->flag = FB_MODE_IS_DETAILED;
++
++ if ((H_SIZE / 16) == (V_SIZE / 9))
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ else if ((H_SIZE / 4) == (V_SIZE / 3))
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++ else if ((mode->xres / 16) == (mode->yres / 9))
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ else if ((mode->xres / 4) == (mode->yres / 3))
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++
++ if (mode->vmode & FB_VMODE_ASPECT_16_9)
++ DPRINTK("Aspect ratio: 16:9\n");
++ if (mode->vmode & FB_VMODE_ASPECT_4_3)
++ DPRINTK("Aspect ratio: 4:3\n");
++ DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
++ DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
++ H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
++ DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
++ V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
++ DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
++ (VSYNC_POSITIVE) ? "+" : "-");
++}
++
++int mxc_edid_parse_ext_blk(unsigned char *edid,
++ struct mxc_edid_cfg *cfg,
++ struct fb_monspecs *specs)
++{
++ char detail_timing_desc_offset;
++ struct fb_videomode *mode, *m;
++ unsigned char index = 0x0;
++ unsigned char *block;
++ int i, num = 0, revision;
++
++ if (edid[index++] != 0x2) /* only support cea ext block now */
++ return -1;
++ revision = edid[index++];
++ DPRINTK("cea extent revision %d\n", revision);
++ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
++ if (mode == NULL)
++ return -1;
++
++ detail_timing_desc_offset = edid[index++];
++
++ if (revision >= 2) {
++ cfg->cea_underscan = (edid[index] >> 7) & 0x1;
++ cfg->cea_basicaudio = (edid[index] >> 6) & 0x1;
++ cfg->cea_ycbcr444 = (edid[index] >> 5) & 0x1;
++ cfg->cea_ycbcr422 = (edid[index] >> 4) & 0x1;
++
++ DPRINTK("CEA underscan %d\n", cfg->cea_underscan);
++ DPRINTK("CEA basicaudio %d\n", cfg->cea_basicaudio);
++ DPRINTK("CEA ycbcr444 %d\n", cfg->cea_ycbcr444);
++ DPRINTK("CEA ycbcr422 %d\n", cfg->cea_ycbcr422);
++ }
++
++ if (revision >= 3) {
++ /* short desc */
++ DPRINTK("CEA Short desc timmings\n");
++ index++;
++ while (index < detail_timing_desc_offset) {
++ unsigned char tagcode, blklen;
++
++ tagcode = (edid[index] >> 5) & 0x7;
++ blklen = (edid[index]) & 0x1f;
++
++ DPRINTK("Tagcode %x Len %d\n", tagcode, blklen);
++
++ switch (tagcode) {
++ case 0x2: /*Video data block*/
++ {
++ int cea_idx;
++ i = 0;
++ while (i < blklen) {
++ index++;
++ cea_idx = edid[index] & 0x7f;
++ if (cea_idx < ARRAY_SIZE(mxc_cea_mode) &&
++ (mxc_cea_mode[cea_idx].xres)) {
++ DPRINTK("Support CEA Format #%d\n", cea_idx);
++ mode[num] = mxc_cea_mode[cea_idx];
++ mode[num].flag |= FB_MODE_IS_STANDARD;
++ num++;
++ }
++ i++;
++ }
++ break;
++ }
++ case 0x3: /*Vendor specific data*/
++ {
++ unsigned char IEEE_reg_iden[3];
++ unsigned char deep_color;
++ unsigned char latency_present;
++ unsigned char I_latency_present;
++ unsigned char hdmi_video_present;
++ unsigned char hdmi_3d_present;
++ unsigned char hdmi_3d_multi_present;
++ unsigned char hdmi_vic_len;
++ unsigned char hdmi_3d_len;
++ unsigned char index_inc = 0;
++ unsigned char vsd_end;
++
++ vsd_end = index + blklen;
++
++ IEEE_reg_iden[0] = edid[index+1];
++ IEEE_reg_iden[1] = edid[index+2];
++ IEEE_reg_iden[2] = edid[index+3];
++ cfg->physical_address[0] = (edid[index+4] & 0xf0) >> 4;
++ cfg->physical_address[1] = (edid[index+4] & 0x0f);
++ cfg->physical_address[2] = (edid[index+5] & 0xf0) >> 4;
++ cfg->physical_address[3] = (edid[index+5] & 0x0f);
++
++ if ((IEEE_reg_iden[0] == 0x03) &&
++ (IEEE_reg_iden[1] == 0x0c) &&
++ (IEEE_reg_iden[2] == 0x00))
++ cfg->hdmi_cap = 1;
++
++ if (blklen > 5) {
++ deep_color = edid[index+6];
++ if (deep_color & 0x80)
++ cfg->vsd_support_ai = true;
++ if (deep_color & 0x40)
++ cfg->vsd_dc_48bit = true;
++ if (deep_color & 0x20)
++ cfg->vsd_dc_36bit = true;
++ if (deep_color & 0x10)
++ cfg->vsd_dc_30bit = true;
++ if (deep_color & 0x08)
++ cfg->vsd_dc_y444 = true;
++ if (deep_color & 0x01)
++ cfg->vsd_dvi_dual = true;
++ }
++
++ DPRINTK("VSD hdmi capability %d\n", cfg->hdmi_cap);
++ DPRINTK("VSD support ai %d\n", cfg->vsd_support_ai);
++ DPRINTK("VSD support deep color 48bit %d\n", cfg->vsd_dc_48bit);
++ DPRINTK("VSD support deep color 36bit %d\n", cfg->vsd_dc_36bit);
++ DPRINTK("VSD support deep color 30bit %d\n", cfg->vsd_dc_30bit);
++ DPRINTK("VSD support deep color y444 %d\n", cfg->vsd_dc_y444);
++ DPRINTK("VSD support dvi dual %d\n", cfg->vsd_dvi_dual);
++
++ if (blklen > 6)
++ cfg->vsd_max_tmdsclk_rate = edid[index+7] * 5;
++ DPRINTK("VSD MAX TMDS CLOCK RATE %d\n", cfg->vsd_max_tmdsclk_rate);
++
++ if (blklen > 7) {
++ latency_present = edid[index+8] >> 7;
++ I_latency_present = (edid[index+8] & 0x40) >> 6;
++ hdmi_video_present = (edid[index+8] & 0x20) >> 5;
++ cfg->vsd_cnc3 = (edid[index+8] & 0x8) >> 3;
++ cfg->vsd_cnc2 = (edid[index+8] & 0x4) >> 2;
++ cfg->vsd_cnc1 = (edid[index+8] & 0x2) >> 1;
++ cfg->vsd_cnc0 = edid[index+8] & 0x1;
++
++ DPRINTK("VSD cnc0 %d\n", cfg->vsd_cnc0);
++ DPRINTK("VSD cnc1 %d\n", cfg->vsd_cnc1);
++ DPRINTK("VSD cnc2 %d\n", cfg->vsd_cnc2);
++ DPRINTK("VSD cnc3 %d\n", cfg->vsd_cnc3);
++ DPRINTK("latency_present %d\n", latency_present);
++ DPRINTK("I_latency_present %d\n", I_latency_present);
++ DPRINTK("hdmi_video_present %d\n", hdmi_video_present);
++
++ } else {
++ index += blklen;
++ break;
++ }
++
++ index += 9;
++
++ /*latency present */
++ if (latency_present) {
++ cfg->vsd_video_latency = edid[index++];
++ cfg->vsd_audio_latency = edid[index++];
++
++ if (I_latency_present) {
++ cfg->vsd_I_video_latency = edid[index++];
++ cfg->vsd_I_audio_latency = edid[index++];
++ } else {
++ cfg->vsd_I_video_latency = cfg->vsd_video_latency;
++ cfg->vsd_I_audio_latency = cfg->vsd_audio_latency;
++ }
++
++ DPRINTK("VSD latency video_latency %d\n", cfg->vsd_video_latency);
++ DPRINTK("VSD latency audio_latency %d\n", cfg->vsd_audio_latency);
++ DPRINTK("VSD latency I_video_latency %d\n", cfg->vsd_I_video_latency);
++ DPRINTK("VSD latency I_audio_latency %d\n", cfg->vsd_I_audio_latency);
++ }
++
++ if (hdmi_video_present) {
++ hdmi_3d_present = edid[index] >> 7;
++ hdmi_3d_multi_present = (edid[index] & 0x60) >> 5;
++ index++;
++ hdmi_vic_len = (edid[index] & 0xe0) >> 5;
++ hdmi_3d_len = edid[index] & 0x1f;
++ index++;
++
++ DPRINTK("hdmi_3d_present %d\n", hdmi_3d_present);
++ DPRINTK("hdmi_3d_multi_present %d\n", hdmi_3d_multi_present);
++ DPRINTK("hdmi_vic_len %d\n", hdmi_vic_len);
++ DPRINTK("hdmi_3d_len %d\n", hdmi_3d_len);
++
++ if (hdmi_vic_len > 0) {
++ for (i = 0; i < hdmi_vic_len; i++) {
++ cfg->hdmi_vic[i] = edid[index++];
++ DPRINTK("HDMI_vic=%d\n", cfg->hdmi_vic[i]);
++ }
++ }
++
++ if (hdmi_3d_len > 0) {
++ if (hdmi_3d_present) {
++ if (hdmi_3d_multi_present == 0x1) {
++ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
++ index_inc = 2;
++ } else if (hdmi_3d_multi_present == 0x2) {
++ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
++ cfg->hdmi_3d_mask_all = (edid[index+2] << 8) | edid[index+3];
++ index_inc = 4;
++ } else
++ index_inc = 0;
++ }
++
++ DPRINTK("HDMI 3d struct all =0x%x\n", cfg->hdmi_3d_struct_all);
++ DPRINTK("HDMI 3d mask all =0x%x\n", cfg->hdmi_3d_mask_all);
++
++ /* Read 2D vic 3D_struct */
++ if ((hdmi_3d_len - index_inc) > 0) {
++ DPRINTK("Support 3D video format\n");
++ i = 0;
++ while ((hdmi_3d_len - index_inc) > 0) {
++
++ cfg->hdmi_3d_format[i].vic_order_2d = edid[index+index_inc] >> 4;
++ cfg->hdmi_3d_format[i].struct_3d = edid[index+index_inc] & 0x0f;
++ index_inc++;
++
++ if (cfg->hdmi_3d_format[i].struct_3d == 8) {
++ cfg->hdmi_3d_format[i].detail_3d = edid[index+index_inc] >> 4;
++ index_inc++;
++ } else if (cfg->hdmi_3d_format[i].struct_3d > 8) {
++ cfg->hdmi_3d_format[i].detail_3d = 0;
++ index_inc++;
++ }
++
++ DPRINTK("vic_order_2d=%d, 3d_struct=%d, 3d_detail=0x%x\n",
++ cfg->hdmi_3d_format[i].vic_order_2d,
++ cfg->hdmi_3d_format[i].struct_3d,
++ cfg->hdmi_3d_format[i].detail_3d);
++ i++;
++ }
++ }
++ index += index_inc;
++ }
++ }
++
++ index = vsd_end;
++
++ break;
++ }
++ case 0x1: /*Audio data block*/
++ {
++ u8 audio_format, max_ch, byte1, byte2, byte3;
++
++ i = 0;
++ cfg->max_channels = 0;
++ cfg->sample_rates = 0;
++ cfg->sample_sizes = 0;
++
++ while (i < blklen) {
++ byte1 = edid[index + 1];
++ byte2 = edid[index + 2];
++ byte3 = edid[index + 3];
++ index += 3;
++ i += 3;
++
++ audio_format = byte1 >> 3;
++ max_ch = (byte1 & 0x07) + 1;
++
++ DPRINTK("Audio Format Descriptor : %2d\n", audio_format);
++ DPRINTK("Max Number of Channels : %2d\n", max_ch);
++ DPRINTK("Sample Rates : %02x\n", byte2);
++
++ /* ALSA can't specify specific compressed
++ * formats, so only care about PCM for now. */
++ if (audio_format == AUDIO_CODING_TYPE_LPCM) {
++ if (max_ch > cfg->max_channels)
++ cfg->max_channels = max_ch;
++
++ cfg->sample_rates |= byte2;
++ cfg->sample_sizes |= byte3 & 0x7;
++ DPRINTK("Sample Sizes : %02x\n",
++ byte3 & 0x7);
++ }
++ }
++ break;
++ }
++ case 0x4: /*Speaker allocation block*/
++ {
++ i = 0;
++ while (i < blklen) {
++ cfg->speaker_alloc = edid[index + 1];
++ index += 3;
++ i += 3;
++ DPRINTK("Speaker Alloc : %02x\n", cfg->speaker_alloc);
++ }
++ break;
++ }
++ case 0x7: /*User extended block*/
++ default:
++ /* skip */
++ DPRINTK("Not handle block, tagcode = 0x%x\n", tagcode);
++ index += blklen;
++ break;
++ }
++
++ index++;
++ }
++ }
++
++ /* long desc */
++ DPRINTK("CEA long desc timmings\n");
++ index = detail_timing_desc_offset;
++ block = edid + index;
++ while (index < (EDID_LENGTH - DETAILED_TIMING_DESCRIPTION_SIZE)) {
++ if (!(block[0] == 0x00 && block[1] == 0x00)) {
++ get_detailed_timing(block, &mode[num]);
++ num++;
++ }
++ block += DETAILED_TIMING_DESCRIPTION_SIZE;
++ index += DETAILED_TIMING_DESCRIPTION_SIZE;
++ }
++
++ if (!num) {
++ kfree(mode);
++ return 0;
++ }
++
++ m = kmalloc((num + specs->modedb_len) *
++ sizeof(struct fb_videomode), GFP_KERNEL);
++ if (!m) {
++ kfree(mode);
++ return 0;
++ }
++
++ if (specs->modedb_len) {
++ memmove(m, specs->modedb,
++ specs->modedb_len * sizeof(struct fb_videomode));
++ kfree(specs->modedb);
++ }
++ memmove(m+specs->modedb_len, mode,
++ num * sizeof(struct fb_videomode));
++ kfree(mode);
++
++ specs->modedb_len += num;
++ specs->modedb = m;
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_edid_parse_ext_blk);
++
++static int mxc_edid_readblk(struct i2c_adapter *adp,
++ unsigned short addr, unsigned char *edid)
++{
++ int ret = 0, extblknum = 0;
++ unsigned char regaddr = 0x0;
++ struct i2c_msg msg[2] = {
++ {
++ .addr = addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &regaddr,
++ }, {
++ .addr = addr,
++ .flags = I2C_M_RD,
++ .len = EDID_LENGTH,
++ .buf = edid,
++ },
++ };
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++
++ if (edid[1] == 0x00)
++ return -ENOENT;
++
++ extblknum = edid[0x7E];
++
++ if (extblknum) {
++ regaddr = 128;
++ msg[1].buf = edid + EDID_LENGTH;
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID ext block\n");
++ return -EIO;
++ }
++ }
++
++ return extblknum;
++}
++
++static int mxc_edid_readsegblk(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, int seg_num)
++{
++ int ret = 0;
++ unsigned char segment = 0x1, regaddr = 0;
++ struct i2c_msg msg[3] = {
++ {
++ .addr = 0x30,
++ .flags = 0,
++ .len = 1,
++ .buf = &segment,
++ }, {
++ .addr = addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &regaddr,
++ }, {
++ .addr = addr,
++ .flags = I2C_M_RD,
++ .len = EDID_LENGTH,
++ .buf = edid,
++ },
++ };
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++
++ if (seg_num == 2) {
++ regaddr = 128;
++ msg[2].buf = edid + EDID_LENGTH;
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++ }
++
++ return ret;
++}
++
++int mxc_edid_var_to_vic(struct fb_var_screeninfo *var)
++{
++ int i;
++ struct fb_videomode m;
++
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ fb_var_to_videomode(&m, var);
++ if (mxc_edid_fb_mode_is_equal(false, &m, &mxc_cea_mode[i]))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(mxc_cea_mode))
++ return 0;
++
++ return i;
++}
++EXPORT_SYMBOL(mxc_edid_var_to_vic);
++
++int mxc_edid_mode_to_vic(const struct fb_videomode *mode)
++{
++ int i;
++ bool use_aspect = (mode->vmode & FB_VMODE_ASPECT_MASK);
++
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ if (mxc_edid_fb_mode_is_equal(use_aspect, mode, &mxc_cea_mode[i]))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(mxc_cea_mode))
++ return 0;
++
++ return i;
++}
++EXPORT_SYMBOL(mxc_edid_mode_to_vic);
++
++/* The caller must provide an edid buffer of at least 512 bytes (EDID_LENGTH * 4). */
++int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi)
++{
++ int ret = 0, extblknum;
++ if (!adp || !edid || !cfg || !fbi)
++ return -EINVAL;
++
++ memset(edid, 0, EDID_LENGTH*4);
++ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
++
++ extblknum = mxc_edid_readblk(adp, addr, edid);
++ if (extblknum < 0)
++ return extblknum;
++
++ /* edid first block parsing */
++ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
++ fb_edid_to_monspecs(edid, &fbi->monspecs);
++
++ if (extblknum) {
++ int i;
++
++ /* need read segment block? */
++ if (extblknum > 1) {
++ ret = mxc_edid_readsegblk(adp, addr,
++ edid + EDID_LENGTH*2, extblknum - 1);
++ if (ret < 0)
++ return ret;
++ }
++
++ for (i = 1; i <= extblknum; i++)
++ /* edid ext block parsing */
++ mxc_edid_parse_ext_blk(edid + i*EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_edid_read);
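++
++/*
++ * Illustrative usage sketch, assuming an i2c_adapter *adp for the DDC bus
++ * and the usual 0x50 EDID slave address:
++ *
++ *	u8 edid[EDID_LENGTH * 4];	// 512 bytes, as required above
++ *	struct mxc_edid_cfg cfg;
++ *	int ret;
++ *
++ *	ret = mxc_edid_read(adp, 0x50, edid, &cfg, fbi);
++ *	if (ret < 0)
++ *		return ret;
++ *	// on success, fbi->monspecs.modedb holds the parsed mode list
++ */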
++
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxcfb_hx8369_wvga.c linux-3.14.40/drivers/video/mxc/mxcfb_hx8369_wvga.c
+--- linux-3.14.40.orig/drivers/video/mxc/mxcfb_hx8369_wvga.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxcfb_hx8369_wvga.c 2015-05-01 14:58:05.735427001 -0500
+@@ -0,0 +1,449 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/mipi_dsi.h>
++#include <linux/mxcfb.h>
++#include <linux/backlight.h>
++#include <video/mipi_display.h>
++
++#include "mipi_dsi.h"
++
++#define MIPI_DSI_MAX_RET_PACK_SIZE (0x4)
++
++#define HX8369BL_MAX_BRIGHT (255)
++#define HX8369BL_DEF_BRIGHT (255)
++
++#define HX8369_MAX_DPHY_CLK (800)
++#define HX8369_ONE_DATA_LANE (0x1)
++#define HX8369_TWO_DATA_LANE (0x2)
++
++#define HX8369_CMD_SETEXTC (0xB9)
++#define HX8369_CMD_SETEXTC_LEN (0x4)
++#define HX8369_CMD_SETEXTC_PARAM_1 (0x6983ff)
++
++#define HX8369_CMD_GETHXID (0xF4)
++#define HX8369_CMD_GETHXID_LEN (0x4)
++#define HX8369_ID (0x69)
++#define HX8369_ID_MASK (0xFF)
++
++#define HX8369_CMD_SETDISP (0xB2)
++#define HX8369_CMD_SETDISP_LEN (16)
++#define HX8369_CMD_SETDISP_1_HALT (0x00)
++#define HX8369_CMD_SETDISP_2_RES_MODE (0x23)
++#define HX8369_CMD_SETDISP_3_BP (0x03)
++#define HX8369_CMD_SETDISP_4_FP (0x03)
++#define HX8369_CMD_SETDISP_5_SAP (0x70)
++#define HX8369_CMD_SETDISP_6_GENON (0x00)
++#define HX8369_CMD_SETDISP_7_GENOFF (0xff)
++#define HX8369_CMD_SETDISP_8_RTN (0x00)
++#define HX8369_CMD_SETDISP_9_TEI (0x00)
++#define HX8369_CMD_SETDISP_10_TEP_UP (0x00)
++#define HX8369_CMD_SETDISP_11_TEP_LOW (0x00)
++#define HX8369_CMD_SETDISP_12_BP_PE (0x03)
++#define HX8369_CMD_SETDISP_13_FP_PE (0x03)
++#define HX8369_CMD_SETDISP_14_RTN_PE (0x00)
++#define HX8369_CMD_SETDISP_15_GON (0x01)
++
++#define HX8369_CMD_SETCYC (0xB4)
++#define HX8369_CMD_SETCYC_LEN (6)
++#define HX8369_CMD_SETCYC_PARAM_1 (0x5f1d00)
++#define HX8369_CMD_SETCYC_PARAM_2 (0x060e)
++
++#define HX8369_CMD_SETGIP (0xD5)
++#define HX8369_CMD_SETGIP_LEN (27)
++#define HX8369_CMD_SETGIP_PARAM_1 (0x030400)
++#define HX8369_CMD_SETGIP_PARAM_2 (0x1c050100)
++#define HX8369_CMD_SETGIP_PARAM_3 (0x00030170)
++#define HX8369_CMD_SETGIP_PARAM_4 (0x51064000)
++#define HX8369_CMD_SETGIP_PARAM_5 (0x41000007)
++#define HX8369_CMD_SETGIP_PARAM_6 (0x07075006)
++#define HX8369_CMD_SETGIP_PARAM_7 (0x040f)
++
++#define HX8369_CMD_SETPOWER (0xB1)
++#define HX8369_CMD_SETPOWER_LEN (20)
++#define HX8369_CMD_SETPOWER_PARAM_1 (0x340001)
++#define HX8369_CMD_SETPOWER_PARAM_2 (0x0f0f0006)
++#define HX8369_CMD_SETPOWER_PARAM_3 (0x3f3f322a)
++#define HX8369_CMD_SETPOWER_PARAM_4 (0xe6013a07)
++#define HX8369_CMD_SETPOWER_PARAM_5 (0xe6e6e6e6)
++
++#define HX8369_CMD_SETVCOM (0xB6)
++#define HX8369_CMD_SETVCOM_LEN (3)
++#define HX8369_CMD_SETVCOM_PARAM_1 (0x5656)
++
++#define HX8369_CMD_SETPANEL (0xCC)
++#define HX8369_CMD_SETPANEL_PARAM_1 (0x02)
++
++#define HX8369_CMD_SETGAMMA (0xE0)
++#define HX8369_CMD_SETGAMMA_LEN (35)
++#define HX8369_CMD_SETGAMMA_PARAM_1 (0x221d00)
++#define HX8369_CMD_SETGAMMA_PARAM_2 (0x2e3f3d38)
++#define HX8369_CMD_SETGAMMA_PARAM_3 (0x0f0d064a)
++#define HX8369_CMD_SETGAMMA_PARAM_4 (0x16131513)
++#define HX8369_CMD_SETGAMMA_PARAM_5 (0x1d001910)
++#define HX8369_CMD_SETGAMMA_PARAM_6 (0x3f3d3822)
++#define HX8369_CMD_SETGAMMA_PARAM_7 (0x0d064a2e)
++#define HX8369_CMD_SETGAMMA_PARAM_8 (0x1315130f)
++#define HX8369_CMD_SETGAMMA_PARAM_9 (0x191016)
++
++#define HX8369_CMD_SETMIPI (0xBA)
++#define HX8369_CMD_SETMIPI_LEN (14)
++#define HX8369_CMD_SETMIPI_PARAM_1 (0xc6a000)
++#define HX8369_CMD_SETMIPI_PARAM_2 (0x10000a00)
++#define HX8369_CMD_SETMIPI_ONELANE (0x10 << 24)
++#define HX8369_CMD_SETMIPI_TWOLANE (0x11 << 24)
++#define HX8369_CMD_SETMIPI_PARAM_3 (0x00026f30)
++#define HX8369_CMD_SETMIPI_PARAM_4 (0x4018)
++
++#define HX8369_CMD_SETPIXEL_FMT (0x3A)
++#define HX8369_CMD_SETPIXEL_FMT_24BPP (0x77)
++#define HX8369_CMD_SETPIXEL_FMT_18BPP (0x66)
++#define HX8369_CMD_SETPIXEL_FMT_16BPP (0x55)
++
++#define HX8369_CMD_SETCLUMN_ADDR (0x2A)
++#define HX8369_CMD_SETCLUMN_ADDR_LEN (5)
++#define HX8369_CMD_SETCLUMN_ADDR_PARAM_1 (0xdf0000)
++#define HX8369_CMD_SETCLUMN_ADDR_PARAM_2 (0x01)
++
++#define HX8369_CMD_SETPAGE_ADDR (0x2B)
++#define HX8369_CMD_SETPAGE_ADDR_LEN (5)
++#define HX8369_CMD_SETPAGE_ADDR_PARAM_1 (0x1f0000)
++#define HX8369_CMD_SETPAGE_ADDR_PARAM_2 (0x03)
++
++#define HX8369_CMD_WRT_DISP_BRIGHT (0x51)
++#define HX8369_CMD_WRT_DISP_BRIGHT_PARAM_1 (0xFF)
++
++#define HX8369_CMD_WRT_CABC_MIN_BRIGHT (0x5E)
++#define HX8369_CMD_WRT_CABC_MIN_BRIGHT_PARAM_1 (0x20)
++
++#define HX8369_CMD_WRT_CABC_CTRL (0x55)
++#define HX8369_CMD_WRT_CABC_CTRL_PARAM_1 (0x1)
++
++#define HX8369_CMD_WRT_CTRL_DISP (0x53)
++#define HX8369_CMD_WRT_CTRL_DISP_PARAM_1 (0x24)
++
++#define CHECK_RETCODE(ret) \
++do { \
++ if (ret < 0) { \
++ dev_err(&mipi_dsi->pdev->dev, \
++ "%s ERR: ret:%d, line:%d.\n", \
++ __func__, ret, __LINE__); \
++ return ret; \
++ } \
++} while (0)
++
++static int hx8369bl_brightness;
++static int mipid_init_backlight(struct mipi_dsi_info *mipi_dsi);
++
++static struct fb_videomode truly_lcd_modedb[] = {
++ {
++ "TRULY-WVGA", 64, 480, 800, 37880,
++ 8, 8,
++ 6, 6,
++ 8, 6,
++ FB_SYNC_OE_LOW_ACT,
++ FB_VMODE_NONINTERLACED,
++ 0,
++ },
++};
++
++static struct mipi_lcd_config lcd_config = {
++ .virtual_ch = 0x0,
++ .data_lane_num = HX8369_TWO_DATA_LANE,
++ .max_phy_clk = HX8369_MAX_DPHY_CLK,
++ .dpi_fmt = MIPI_RGB888,
++};
++void mipid_hx8369_get_lcd_videomode(struct fb_videomode **mode, int *size,
++ struct mipi_lcd_config **data)
++{
++ *mode = &truly_lcd_modedb[0];
++ *size = ARRAY_SIZE(truly_lcd_modedb);
++ *data = &lcd_config;
++}
++
++int mipid_hx8369_lcd_setup(struct mipi_dsi_info *mipi_dsi)
++{
++ u32 buf[DSI_CMD_BUF_MAXSIZE];
++ int err;
++
++ dev_dbg(&mipi_dsi->pdev->dev, "MIPI DSI LCD setup.\n");
++ buf[0] = HX8369_CMD_SETEXTC | (HX8369_CMD_SETEXTC_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETEXTC_LEN);
++ CHECK_RETCODE(err);
++ buf[0] = MIPI_DSI_MAX_RET_PACK_SIZE;
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
++ buf, 0);
++ CHECK_RETCODE(err);
++ buf[0] = HX8369_CMD_GETHXID;
++ err = mipi_dsi_pkt_read(mipi_dsi,
++ MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM,
++ buf, HX8369_CMD_GETHXID_LEN);
++ if (!err && ((buf[0] & HX8369_ID_MASK) == HX8369_ID)) {
++ dev_info(&mipi_dsi->pdev->dev,
++ "MIPI DSI LCD ID:0x%x.\n", buf[0]);
++ } else {
++ dev_err(&mipi_dsi->pdev->dev,
++ "mipi_dsi_pkt_read err:%d, data:0x%x.\n",
++ err, buf[0]);
++ dev_info(&mipi_dsi->pdev->dev,
++ "MIPI DSI LCD not detected!\n");
++ return err;
++ }
++
++ /* set LCD resolution as 480RGBx800, DPI interface,
++ * display operation mode: RGB data bypass GRAM mode.
++ */
++ buf[0] = HX8369_CMD_SETDISP | (HX8369_CMD_SETDISP_1_HALT << 8) |
++ (HX8369_CMD_SETDISP_2_RES_MODE << 16) |
++ (HX8369_CMD_SETDISP_3_BP << 24);
++ buf[1] = HX8369_CMD_SETDISP_4_FP | (HX8369_CMD_SETDISP_5_SAP << 8) |
++ (HX8369_CMD_SETDISP_6_GENON << 16) |
++ (HX8369_CMD_SETDISP_7_GENOFF << 24);
++ buf[2] = HX8369_CMD_SETDISP_8_RTN | (HX8369_CMD_SETDISP_9_TEI << 8) |
++ (HX8369_CMD_SETDISP_10_TEP_UP << 16) |
++ (HX8369_CMD_SETDISP_11_TEP_LOW << 24);
++ buf[3] = HX8369_CMD_SETDISP_12_BP_PE |
++ (HX8369_CMD_SETDISP_13_FP_PE << 8) |
++ (HX8369_CMD_SETDISP_14_RTN_PE << 16) |
++ (HX8369_CMD_SETDISP_15_GON << 24);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETDISP_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set display waveform cycle */
++ buf[0] = HX8369_CMD_SETCYC | (HX8369_CMD_SETCYC_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETCYC_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETCYC_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set GIP timing output control */
++ buf[0] = HX8369_CMD_SETGIP | (HX8369_CMD_SETGIP_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETGIP_PARAM_2;
++ buf[2] = HX8369_CMD_SETGIP_PARAM_3;
++ buf[3] = HX8369_CMD_SETGIP_PARAM_4;
++ buf[4] = HX8369_CMD_SETGIP_PARAM_5;
++ buf[5] = HX8369_CMD_SETGIP_PARAM_6;
++ buf[6] = HX8369_CMD_SETGIP_PARAM_7;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETGIP_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set power: standby, DC etc. */
++ buf[0] = HX8369_CMD_SETPOWER | (HX8369_CMD_SETPOWER_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETPOWER_PARAM_2;
++ buf[2] = HX8369_CMD_SETPOWER_PARAM_3;
++ buf[3] = HX8369_CMD_SETPOWER_PARAM_4;
++ buf[4] = HX8369_CMD_SETPOWER_PARAM_5;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETPOWER_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set VCOM voltage. */
++ buf[0] = HX8369_CMD_SETVCOM | (HX8369_CMD_SETVCOM_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETVCOM_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set Panel: BGR/RGB or Inversion. */
++ buf[0] = HX8369_CMD_SETPANEL | (HX8369_CMD_SETPANEL_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM, buf, 0);
++ CHECK_RETCODE(err);
++
++ /* Set gamma curve related setting */
++ buf[0] = HX8369_CMD_SETGAMMA | (HX8369_CMD_SETGAMMA_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETGAMMA_PARAM_2;
++ buf[2] = HX8369_CMD_SETGAMMA_PARAM_3;
++ buf[3] = HX8369_CMD_SETGAMMA_PARAM_4;
++ buf[4] = HX8369_CMD_SETGAMMA_PARAM_5;
++ buf[5] = HX8369_CMD_SETGAMMA_PARAM_6;
++ buf[6] = HX8369_CMD_SETGAMMA_PARAM_7;
++ buf[7] = HX8369_CMD_SETGAMMA_PARAM_8;
++ buf[8] = HX8369_CMD_SETGAMMA_PARAM_9;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETGAMMA_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set MIPI: DPHYCMD & DSICMD, data lane number */
++ buf[0] = HX8369_CMD_SETMIPI | (HX8369_CMD_SETMIPI_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETMIPI_PARAM_2;
++ buf[2] = HX8369_CMD_SETMIPI_PARAM_3;
++ if (lcd_config.data_lane_num == HX8369_ONE_DATA_LANE)
++ buf[2] |= HX8369_CMD_SETMIPI_ONELANE;
++ else
++ buf[2] |= HX8369_CMD_SETMIPI_TWOLANE;
++ buf[3] = HX8369_CMD_SETMIPI_PARAM_4;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETMIPI_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set pixel format:24bpp */
++ buf[0] = HX8369_CMD_SETPIXEL_FMT;
++ switch (lcd_config.dpi_fmt) {
++ case MIPI_RGB565_PACKED:
++ case MIPI_RGB565_LOOSELY:
++ case MIPI_RGB565_CONFIG3:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_16BPP << 8);
++ break;
++
++ case MIPI_RGB666_LOOSELY:
++ case MIPI_RGB666_PACKED:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_18BPP << 8);
++ break;
++
++ case MIPI_RGB888:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_24BPP << 8);
++ break;
++
++ default:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_24BPP << 8);
++ break;
++ }
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ /* Set column address: 0~479 */
++ buf[0] = HX8369_CMD_SETCLUMN_ADDR |
++ (HX8369_CMD_SETCLUMN_ADDR_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETCLUMN_ADDR_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETCLUMN_ADDR_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set page address: 0~799 */
++ buf[0] = HX8369_CMD_SETPAGE_ADDR |
++ (HX8369_CMD_SETPAGE_ADDR_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETPAGE_ADDR_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETPAGE_ADDR_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set display brightness related */
++ buf[0] = HX8369_CMD_WRT_DISP_BRIGHT |
++ (HX8369_CMD_WRT_DISP_BRIGHT_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ buf[0] = HX8369_CMD_WRT_CABC_CTRL |
++ (HX8369_CMD_WRT_CABC_CTRL_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ buf[0] = HX8369_CMD_WRT_CTRL_DISP |
++ (HX8369_CMD_WRT_CTRL_DISP_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ /* exit sleep mode and set display on */
++ buf[0] = MIPI_DCS_EXIT_SLEEP_MODE;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++ /* To allow time for the supply voltages
++ * and clock circuits to stabilize.
++ */
++ msleep(5);
++ buf[0] = MIPI_DCS_SET_DISPLAY_ON;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ err = mipid_init_backlight(mipi_dsi);
++ return err;
++}
++
++static int mipid_bl_update_status(struct backlight_device *bl)
++{
++ u32 buf;
++ int brightness = bl->props.brightness;
++ struct mipi_dsi_info *mipi_dsi = bl_get_data(bl);
++
++ if (bl->props.power != FB_BLANK_UNBLANK ||
++ bl->props.fb_blank != FB_BLANK_UNBLANK)
++ brightness = 0;
++
++ buf = HX8369_CMD_WRT_DISP_BRIGHT |
++ ((brightness & HX8369BL_MAX_BRIGHT) << 8);
++ mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ &buf, 0);
++
++ hx8369bl_brightness = brightness & HX8369BL_MAX_BRIGHT;
++
++ dev_dbg(&bl->dev, "mipid backlight bringtness:%d.\n", brightness);
++ return 0;
++}
++
++static int mipid_bl_get_brightness(struct backlight_device *bl)
++{
++ return hx8369bl_brightness;
++}
++
++static int mipi_bl_check_fb(struct backlight_device *bl, struct fb_info *fbi)
++{
++ return 0;
++}
++
++static const struct backlight_ops mipid_lcd_bl_ops = {
++ .update_status = mipid_bl_update_status,
++ .get_brightness = mipid_bl_get_brightness,
++ .check_fb = mipi_bl_check_fb,
++};
++
++static int mipid_init_backlight(struct mipi_dsi_info *mipi_dsi)
++{
++ struct backlight_properties props;
++ struct backlight_device *bl;
++
++ if (mipi_dsi->bl) {
++ pr_debug("mipid backlight already init!\n");
++ return 0;
++ }
++ memset(&props, 0, sizeof(struct backlight_properties));
++ props.max_brightness = HX8369BL_MAX_BRIGHT;
++ props.type = BACKLIGHT_RAW;
++ bl = backlight_device_register("mipid-bl", &mipi_dsi->pdev->dev,
++ mipi_dsi, &mipid_lcd_bl_ops, &props);
++ if (IS_ERR(bl)) {
++ pr_err("error %ld on backlight register\n", PTR_ERR(bl));
++ return PTR_ERR(bl);
++ }
++ mipi_dsi->bl = bl;
++ bl->props.power = FB_BLANK_UNBLANK;
++ bl->props.fb_blank = FB_BLANK_UNBLANK;
++ bl->props.brightness = HX8369BL_DEF_BRIGHT;
++
++ mipid_bl_update_status(bl);
++ return 0;
++}
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxc_hdmi.c linux-3.14.40/drivers/video/mxc/mxc_hdmi.c
+--- linux-3.14.40.orig/drivers/video/mxc/mxc_hdmi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxc_hdmi.c 2015-05-01 14:58:05.735427001 -0500
+@@ -0,0 +1,3042 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++/*
++ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
++ * for SLISHDMI13T and SLIPHDMIT IP cores
++ *
++ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/uaccess.h>
++#include <linux/cpufreq.h>
++#include <linux/firmware.h>
++#include <linux/kthread.h>
++#include <linux/regulator/driver.h>
++#include <linux/fsl_devices.h>
++#include <linux/ipu.h>
++#include <linux/regmap.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/of_device.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++
++#include "../edid.h"
++#include <video/mxc_edid.h>
++#include <video/mxc_hdmi.h>
++#include "mxc_dispdrv.h"
++
++#include <linux/mfd/mxc-hdmi-core.h>
++
++#define DISPDRV_HDMI "hdmi"
++#define HDMI_EDID_LEN 512
++
++/* status codes for reading edid */
++#define HDMI_EDID_SUCCESS 0
++#define HDMI_EDID_FAIL -1
++#define HDMI_EDID_SAME -2
++#define HDMI_EDID_NO_MODES -3
++
++#define NUM_CEA_VIDEO_MODES 64
++#define DEFAULT_VIDEO_MODE 16 /* 1080P */
++
++#define RGB 0
++#define YCBCR444 1
++#define YCBCR422_16BITS 2
++#define YCBCR422_8BITS 3
++#define XVYCC444 4
++
++/*
++ * We follow a flowchart which is in the "Synopsys DesignWare Cores
++ * HDMI Transmitter Controller User Guide, 1.30a", section 3.1
++ * (dwc_hdmi_tx_user.pdf)
++ *
++ * Below are notes that say "HDMI Initialization Step X"
++ * These correspond to the flowchart.
++ */
++
++/*
++ * We are required to configure VGA mode before reading edid
++ * in HDMI Initialization Step B
++ */
++static const struct fb_videomode vga_mode = {
++ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
++ NULL, 60, 640, 480, 39721, 48, 16, 33, 10, 96, 2, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, FB_MODE_IS_VESA,
++};
++
++enum hdmi_datamap {
++ RGB444_8B = 0x01,
++ RGB444_10B = 0x03,
++ RGB444_12B = 0x05,
++ RGB444_16B = 0x07,
++ YCbCr444_8B = 0x09,
++ YCbCr444_10B = 0x0B,
++ YCbCr444_12B = 0x0D,
++ YCbCr444_16B = 0x0F,
++ YCbCr422_8B = 0x16,
++ YCbCr422_10B = 0x14,
++ YCbCr422_12B = 0x12,
++};
++
++enum hdmi_colorimetry {
++ eITU601,
++ eITU709,
++};
++
++struct hdmi_vmode {
++ bool mDVI;
++ bool mHSyncPolarity;
++ bool mVSyncPolarity;
++ bool mInterlaced;
++ bool mDataEnablePolarity;
++
++ unsigned int mPixelClock;
++ unsigned int mPixelRepetitionInput;
++ unsigned int mPixelRepetitionOutput;
++};
++
++struct hdmi_data_info {
++ unsigned int enc_in_format;
++ unsigned int enc_out_format;
++ unsigned int enc_color_depth;
++ unsigned int colorimetry;
++ unsigned int pix_repet_factor;
++ unsigned int hdcp_enable;
++ unsigned int rgb_out_enable;
++ unsigned int rgb_quant_range;
++ struct hdmi_vmode video_mode;
++};
++
++struct hdmi_phy_reg_config {
++ /* HDMI PHY register config for pass HCT */
++ u16 reg_vlev;
++ u16 reg_cksymtx;
++};
++
++struct mxc_hdmi {
++ struct platform_device *pdev;
++ struct platform_device *core_pdev;
++ struct mxc_dispdrv_handle *disp_mxc_hdmi;
++ struct fb_info *fbi;
++ struct clk *hdmi_isfr_clk;
++ struct clk *hdmi_iahb_clk;
++ struct timer_list jitter_timer;
++ struct work_struct hotplug_work;
++ struct delayed_work hdcp_hdp_work;
++
++ struct notifier_block nb;
++
++ struct hdmi_data_info hdmi_data;
++ int vic;
++ int edid_status;
++ struct mxc_edid_cfg edid_cfg;
++ u8 edid[HDMI_EDID_LEN];
++ bool fb_reg;
++ bool cable_plugin;
++ u8 blank;
++ bool dft_mode_set;
++ char *dft_mode_str;
++ int default_bpp;
++ u8 latest_intr_stat;
++ u8 plug_event;
++ u8 plug_mask;
++ bool irq_enabled;
++ spinlock_t irq_lock;
++ bool phy_enabled;
++ struct fb_videomode default_mode;
++ struct fb_videomode previous_non_vga_mode;
++ bool requesting_vga_for_initialization;
++
++ int *gpr_base;
++ int *gpr_hdmi_base;
++ int *gpr_sdma_base;
++ int cpu_type;
++ int cpu_version;
++ struct hdmi_phy_reg_config phy_config;
++
++ struct pinctrl *pinctrl;
++};
++
++static int hdmi_major;
++static struct class *hdmi_class;
++
++struct i2c_client *hdmi_i2c;
++struct mxc_hdmi *g_hdmi;
++
++static bool hdmi_inited;
++static bool hdcp_init;
++
++extern const struct fb_videomode mxc_cea_mode[64];
++extern void mxc_hdmi_cec_handle(u16 cec_stat);
++
++static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event);
++static void hdmi_enable_overflow_interrupts(void);
++static void hdmi_disable_overflow_interrupts(void);
++
++static char *rgb_quant_range = "default";
++module_param(rgb_quant_range, charp, S_IRUGO);
++MODULE_PARM_DESC(rgb_quant_range, "RGB Quant Range (default, limited, full)");
++
++static struct platform_device_id imx_hdmi_devtype[] = {
++ {
++ .name = "hdmi-imx6DL",
++ .driver_data = IMX6DL_HDMI,
++ }, {
++ .name = "hdmi-imx6Q",
++ .driver_data = IMX6Q_HDMI,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-hdmi-video", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
++ { .compatible = "fsl,imx6q-hdmi-video", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
++
++static inline int cpu_is_imx6dl(struct mxc_hdmi *hdmi)
++{
++ return hdmi->cpu_type == IMX6DL_HDMI;
++}
++#ifdef DEBUG
++static void dump_fb_videomode(struct fb_videomode *m)
++{
++ pr_debug("fb_videomode = %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
++ m->refresh, m->xres, m->yres, m->pixclock, m->left_margin,
++ m->right_margin, m->upper_margin, m->lower_margin,
++ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
++}
++#else
++static void dump_fb_videomode(struct fb_videomode *m)
++{}
++#endif
++
++static ssize_t mxc_hdmi_show_name(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ strcpy(buf, hdmi->fbi->fix.id);
++ sprintf(buf+strlen(buf), "\n");
++
++ return strlen(buf);
++}
++
++static DEVICE_ATTR(fb_name, S_IRUGO, mxc_hdmi_show_name, NULL);
++
++static ssize_t mxc_hdmi_show_state(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->cable_plugin == false)
++ strcpy(buf, "plugout\n");
++ else
++ strcpy(buf, "plugin\n");
++
++ return strlen(buf);
++}
++
++static DEVICE_ATTR(cable_state, S_IRUGO, mxc_hdmi_show_state, NULL);
++
++static ssize_t mxc_hdmi_show_edid(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int i, j, len = 0;
++
++ for (j = 0; j < HDMI_EDID_LEN/16; j++) {
++ for (i = 0; i < 16; i++)
++ len += sprintf(buf+len, "0x%02X ",
++ hdmi->edid[j*16 + i]);
++ len += sprintf(buf+len, "\n");
++ }
++
++ return len;
++}
++
++static DEVICE_ATTR(edid, S_IRUGO, mxc_hdmi_show_edid, NULL);
++
++static ssize_t mxc_hdmi_show_rgb_out_enable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->hdmi_data.rgb_out_enable == true)
++ strcpy(buf, "RGB out\n");
++ else
++ strcpy(buf, "YCbCr out\n");
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_rgb_out_enable(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ unsigned long value;
++ int ret;
++
++ ret = strict_strtoul(buf, 10, &value);
++ if (ret)
++ return ret;
++
++ hdmi->hdmi_data.rgb_out_enable = value;
++
++ /* Reconfig HDMI for output color space change */
++ mxc_hdmi_setup(hdmi, 0);
++
++ return count;
++}
++
++static DEVICE_ATTR(rgb_out_enable, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_rgb_out_enable,
++ mxc_hdmi_store_rgb_out_enable);
++
++static ssize_t mxc_hdmi_show_rgb_quant_range(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ switch (hdmi->hdmi_data.rgb_quant_range) {
++ case HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE:
++ strcpy(buf, "limited\n");
++ break;
++ case HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE:
++ strcpy(buf, "full\n");
++ break;
++ case HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT:
++ default:
++ strcpy(buf, "default\n");
++ break;
++ }
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_rgb_quant_range(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int ret = count;
++
++ if (sysfs_streq("limited", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE;
++ } else if (sysfs_streq("full", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE;
++ } else if (sysfs_streq("default", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT;
++ } else {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* Reconfig HDMI for output RGB Quant Range change if using RGB out */
++ if(hdmi->hdmi_data.rgb_out_enable)
++ mxc_hdmi_setup(hdmi, 0);
++out:
++ return ret;
++}
++
++static DEVICE_ATTR(rgb_quant_range, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_rgb_quant_range,
++ mxc_hdmi_store_rgb_quant_range);
++
++static ssize_t mxc_hdmi_show_hdcp_enable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->hdmi_data.hdcp_enable == false)
++ strcpy(buf, "hdcp disable\n");
++ else
++ strcpy(buf, "hdcp enable\n");
++
++ return strlen(buf);
++
++}
++
++static ssize_t mxc_hdmi_store_hdcp_enable(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++ unsigned long value;
++ int ret;
++
++ ret = strict_strtoul(buf, 10, &value);
++ if (ret)
++ return ret;
++
++ hdmi->hdmi_data.hdcp_enable = value;
++
++ /* Reconfig HDMI for HDCP */
++ mxc_hdmi_setup(hdmi, 0);
++
++ if (hdmi->hdmi_data.hdcp_enable == false) {
++ sprintf(event_string, "EVENT=hdcpdisable");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++ } else {
++ sprintf(event_string, "EVENT=hdcpenable");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++ }
++
++ return count;
++
++}
++
++static DEVICE_ATTR(hdcp_enable, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_hdcp_enable, mxc_hdmi_store_hdcp_enable);
++
++/*!
++ * This submodule is responsible for the video data synchronization.
++ * For example, for RGB 4:4:4 input, the data map is defined as
++ * pin{47~40} <==> R[7:0]
++ * pin{31~24} <==> G[7:0]
++ * pin{15~8} <==> B[7:0]
++ */
++static void hdmi_video_sample(struct mxc_hdmi *hdmi)
++{
++ int color_format = 0;
++ u8 val;
++
++ if (hdmi->hdmi_data.enc_in_format == RGB) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x01;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x03;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x05;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_format = 0x07;
++ else
++ return;
++ } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x09;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x0B;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x0D;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_format = 0x0F;
++ else
++ return;
++ } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x16;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x14;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x12;
++ else
++ return;
++ }
++
++ val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
++ ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) &
++ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
++ hdmi_writeb(val, HDMI_TX_INVID0);
++
++ /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */
++ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE |
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE |
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE;
++ hdmi_writeb(val, HDMI_TX_INSTUFFING);
++ hdmi_writeb(0x0, HDMI_TX_GYDATA0);
++ hdmi_writeb(0x0, HDMI_TX_GYDATA1);
++ hdmi_writeb(0x0, HDMI_TX_RCRDATA0);
++ hdmi_writeb(0x0, HDMI_TX_RCRDATA1);
++ hdmi_writeb(0x0, HDMI_TX_BCBDATA0);
++ hdmi_writeb(0x0, HDMI_TX_BCBDATA1);
++}
++
++static int isColorSpaceConversion(struct mxc_hdmi *hdmi)
++{
++ return (hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format) ||
++ (hdmi->hdmi_data.enc_out_format == RGB &&
++ ((hdmi->hdmi_data.rgb_quant_range == HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE) ||
++ (hdmi->hdmi_data.rgb_quant_range == HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT && hdmi->vic > 1)));
++}
++
++static int isColorSpaceDecimation(struct mxc_hdmi *hdmi)
++{
++ return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
++ (hdmi->hdmi_data.enc_in_format == RGB ||
++ hdmi->hdmi_data.enc_in_format == YCBCR444));
++}
++
++static int isColorSpaceInterpolation(struct mxc_hdmi *hdmi)
++{
++ return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
++ (hdmi->hdmi_data.enc_out_format == RGB
++ || hdmi->hdmi_data.enc_out_format == YCBCR444));
++}
++
++/*!
++ * update the color space conversion coefficients.
++ */
++static void update_csc_coeffs(struct mxc_hdmi *hdmi)
++{
++ unsigned short csc_coeff[3][4];
++ unsigned int csc_scale = 1;
++ u8 val;
++ bool coeff_selected = false;
++
++ if (isColorSpaceConversion(hdmi)) { /* csc needed */
++ if (hdmi->hdmi_data.enc_out_format == RGB) {
++ if (hdmi->hdmi_data.enc_in_format == RGB) {
++ csc_coeff[0][0] = 0x1b80;
++ csc_coeff[0][1] = 0x0000;
++ csc_coeff[0][2] = 0x0000;
++ csc_coeff[0][3] = 0x0020;
++
++ csc_coeff[1][0] = 0x0000;
++ csc_coeff[1][1] = 0x1b80;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x0020;
++
++ csc_coeff[2][0] = 0x0000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x1b80;
++ csc_coeff[2][3] = 0x0020;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU601) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x6926;
++ csc_coeff[0][2] = 0x74fd;
++ csc_coeff[0][3] = 0x010e;
++
++ csc_coeff[1][0] = 0x2000;
++ csc_coeff[1][1] = 0x2cdd;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x7e9a;
++
++ csc_coeff[2][0] = 0x2000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x38b4;
++ csc_coeff[2][3] = 0x7e3b;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x7106;
++ csc_coeff[0][2] = 0x7a02;
++ csc_coeff[0][3] = 0x00a7;
++
++ csc_coeff[1][0] = 0x2000;
++ csc_coeff[1][1] = 0x3264;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x7e6d;
++
++ csc_coeff[2][0] = 0x2000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x3b61;
++ csc_coeff[2][3] = 0x7e25;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ }
++ } else if (hdmi->hdmi_data.enc_in_format == RGB) {
++ if (hdmi->hdmi_data.colorimetry == eITU601) {
++ csc_coeff[0][0] = 0x2591;
++ csc_coeff[0][1] = 0x1322;
++ csc_coeff[0][2] = 0x074b;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x6535;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x7acc;
++ csc_coeff[1][3] = 0x0200;
++
++ csc_coeff[2][0] = 0x6acd;
++ csc_coeff[2][1] = 0x7534;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0200;
++
++ csc_scale = 0;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
++ csc_coeff[0][0] = 0x2dc5;
++ csc_coeff[0][1] = 0x0d9b;
++ csc_coeff[0][2] = 0x049e;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x62f0;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x7d11;
++ csc_coeff[1][3] = 0x0200;
++
++ csc_coeff[2][0] = 0x6756;
++ csc_coeff[2][1] = 0x78ab;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0200;
++
++ csc_scale = 0;
++ coeff_selected = true;
++ }
++ }
++ }
++
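++ /* No conversion matrix selected: fall back to identity
++ * coefficients (unity gain, zero offset) */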
++ if (!coeff_selected) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x0000;
++ csc_coeff[0][2] = 0x0000;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x0000;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x0000;
++
++ csc_coeff[2][0] = 0x0000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0000;
++
++ csc_scale = 1;
++ }
++
++ /* Update CSC parameters in HDMI CSC registers */
++ hdmi_writeb((unsigned char)(csc_coeff[0][0] & 0xFF),
++ HDMI_CSC_COEF_A1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][0] >> 8),
++ HDMI_CSC_COEF_A1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][1] & 0xFF),
++ HDMI_CSC_COEF_A2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][1] >> 8),
++ HDMI_CSC_COEF_A2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][2] & 0xFF),
++ HDMI_CSC_COEF_A3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][2] >> 8),
++ HDMI_CSC_COEF_A3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][3] & 0xFF),
++ HDMI_CSC_COEF_A4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][3] >> 8),
++ HDMI_CSC_COEF_A4_MSB);
++
++ hdmi_writeb((unsigned char)(csc_coeff[1][0] & 0xFF),
++ HDMI_CSC_COEF_B1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][0] >> 8),
++ HDMI_CSC_COEF_B1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][1] & 0xFF),
++ HDMI_CSC_COEF_B2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][1] >> 8),
++ HDMI_CSC_COEF_B2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][2] & 0xFF),
++ HDMI_CSC_COEF_B3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][2] >> 8),
++ HDMI_CSC_COEF_B3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][3] & 0xFF),
++ HDMI_CSC_COEF_B4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][3] >> 8),
++ HDMI_CSC_COEF_B4_MSB);
++
++ hdmi_writeb((unsigned char)(csc_coeff[2][0] & 0xFF),
++ HDMI_CSC_COEF_C1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][0] >> 8),
++ HDMI_CSC_COEF_C1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][1] & 0xFF),
++ HDMI_CSC_COEF_C2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][1] >> 8),
++ HDMI_CSC_COEF_C2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][2] & 0xFF),
++ HDMI_CSC_COEF_C3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][2] >> 8),
++ HDMI_CSC_COEF_C3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][3] & 0xFF),
++ HDMI_CSC_COEF_C4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][3] >> 8),
++ HDMI_CSC_COEF_C4_MSB);
++
++ val = hdmi_readb(HDMI_CSC_SCALE);
++ val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
++ val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
++ hdmi_writeb(val, HDMI_CSC_SCALE);
++}
++
++static void hdmi_video_csc(struct mxc_hdmi *hdmi)
++{
++ int color_depth = 0;
++ int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
++ int decimation = HDMI_CSC_CFG_DECMODE_DISABLE;
++ u8 val;
++
++ /* YCC422 interpolation to 444 mode */
++ if (isColorSpaceInterpolation(hdmi))
++ interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
++ else if (isColorSpaceDecimation(hdmi))
++ decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
++
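++ /* Map the encoder color depth to the CSC color depth field */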
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
++ else
++ return;
++
++ /* Configure the CSC registers */
++ hdmi_writeb(interpolation | decimation, HDMI_CSC_CFG);
++ val = hdmi_readb(HDMI_CSC_SCALE);
++ val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
++ val |= color_depth;
++ hdmi_writeb(val, HDMI_CSC_SCALE);
++
++ update_csc_coeffs(hdmi);
++}
++
++/*!
++ * The HDMI video packetizer packetizes the video data.
++ * For example, if the input is in YCC422 mode or the pixel repeater is
++ * used, the data must be repacked; otherwise this module can be bypassed.
++ */
++static void hdmi_video_packetize(struct mxc_hdmi *hdmi)
++{
++ unsigned int color_depth = 0;
++ unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
++ unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
++ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
++ u8 val;
++
++ if (hdmi_data->enc_out_format == RGB
++ || hdmi_data->enc_out_format == YCBCR444) {
++ if (hdmi_data->enc_color_depth == 0)
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
++ else if (hdmi_data->enc_color_depth == 8) {
++ color_depth = 4;
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
++ } else if (hdmi_data->enc_color_depth == 10)
++ color_depth = 5;
++ else if (hdmi_data->enc_color_depth == 12)
++ color_depth = 6;
++ else if (hdmi_data->enc_color_depth == 16)
++ color_depth = 7;
++ else
++ return;
++ } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
++ if (hdmi_data->enc_color_depth == 0 ||
++ hdmi_data->enc_color_depth == 8)
++ remap_size = HDMI_VP_REMAP_YCC422_16bit;
++ else if (hdmi_data->enc_color_depth == 10)
++ remap_size = HDMI_VP_REMAP_YCC422_20bit;
++ else if (hdmi_data->enc_color_depth == 12)
++ remap_size = HDMI_VP_REMAP_YCC422_24bit;
++ else
++ return;
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
++ } else
++ return;
++
++ /* Deep color is not used here because the IPU supports
++ * a maximum color depth of 24 bits per pixel */
++ color_depth = 0;
++
++ /* set the packetizer registers */
++ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
++ HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
++ ((hdmi_data->pix_repet_factor <<
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
++ hdmi_writeb(val, HDMI_VP_PR_CD);
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
++ val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ /* Data from pixel repeater block */
++ if (hdmi_data->pix_repet_factor > 1) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
++ HDMI_VP_CONF_BYPASS_SELECT_MASK);
++ val |= HDMI_VP_CONF_PR_EN_ENABLE |
++ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else { /* data from packetizer block */
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
++ HDMI_VP_CONF_BYPASS_SELECT_MASK);
++ val |= HDMI_VP_CONF_PR_EN_DISABLE |
++ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ }
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
++ val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ hdmi_writeb(remap_size, HDMI_VP_REMAP);
++
++ if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
++ HDMI_VP_CONF_PP_EN_ENABLE |
++ HDMI_VP_CONF_YCC422_EN_DISABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
++ HDMI_VP_CONF_PP_EN_DISABLE |
++ HDMI_VP_CONF_YCC422_EN_ENABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
++ HDMI_VP_CONF_PP_EN_DISABLE |
++ HDMI_VP_CONF_YCC422_EN_DISABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else {
++ return;
++ }
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
++ HDMI_VP_STUFF_YCC422_STUFFING_MASK);
++ val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
++ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
++ val |= output_select;
++ hdmi_writeb(val, HDMI_VP_CONF);
++}
++
++#if 0
++/* Force a fixed color screen */
++static void hdmi_video_force_output(struct mxc_hdmi *hdmi, unsigned char force)
++{
++ u8 val;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (force) {
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
++ hdmi_writeb(0xFF, HDMI_FC_DBGTMDS0); /* B */
++ val = hdmi_readb(HDMI_FC_DBGFORCE);
++ val |= HDMI_FC_DBGFORCE_FORCEVIDEO;
++ hdmi_writeb(val, HDMI_FC_DBGFORCE);
++ } else {
++ val = hdmi_readb(HDMI_FC_DBGFORCE);
++ val &= ~HDMI_FC_DBGFORCE_FORCEVIDEO;
++ hdmi_writeb(val, HDMI_FC_DBGFORCE);
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS0); /* B */
++ }
++}
++#endif
++
++static inline void hdmi_phy_test_clear(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
++ HDMI_PHY_TST0_TSTCLR_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_enable(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTEN_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
++ HDMI_PHY_TST0_TSTEN_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_clock(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
++ HDMI_PHY_TST0_TSTCLK_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_din(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ hdmi_writeb(bit, HDMI_PHY_TST1);
++}
++
++static inline void hdmi_phy_test_dout(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ hdmi_writeb(bit, HDMI_PHY_TST2);
++}
++
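++/* Poll the I2CMPHY done/error status bits, sleeping 1 ms per
++ * iteration, for at most msec milliseconds */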
++static bool hdmi_phy_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
++{
++ unsigned char val = 0;
++ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
++ while (val == 0) {
++ udelay(1000);
++ if (msec-- == 0)
++ return false;
++ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
++ }
++ return true;
++}
++
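++/* Write a 16-bit value to a PHY register through the PHY I2C
++ * master and wait for the transfer to complete */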
++static void hdmi_phy_i2c_write(struct mxc_hdmi *hdmi, unsigned short data,
++ unsigned char addr)
++{
++ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
++ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
++ hdmi_writeb((unsigned char)(data >> 8),
++ HDMI_PHY_I2CM_DATAO_1_ADDR);
++ hdmi_writeb((unsigned char)(data >> 0),
++ HDMI_PHY_I2CM_DATAO_0_ADDR);
++ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
++ HDMI_PHY_I2CM_OPERATION_ADDR);
++ hdmi_phy_wait_i2c_done(hdmi, 1000);
++}
++
++#if 0
++static unsigned short hdmi_phy_i2c_read(struct mxc_hdmi *hdmi,
++ unsigned char addr)
++{
++ unsigned short data;
++ unsigned char msb = 0, lsb = 0;
++ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
++ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
++ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_READ,
++ HDMI_PHY_I2CM_OPERATION_ADDR);
++ hdmi_phy_wait_i2c_done(hdmi, 1000);
++ msb = hdmi_readb(HDMI_PHY_I2CM_DATAI_1_ADDR);
++ lsb = hdmi_readb(HDMI_PHY_I2CM_DATAI_0_ADDR);
++ data = (msb << 8) | lsb;
++ return data;
++}
++
++static int hdmi_phy_i2c_write_verify(struct mxc_hdmi *hdmi, unsigned short data,
++ unsigned char addr)
++{
++ unsigned short val = 0;
++ hdmi_phy_i2c_write(hdmi, data, addr);
++ val = hdmi_phy_i2c_read(hdmi, addr);
++ return (val == data);
++}
++#endif
++
++static bool hdmi_edid_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
++{
++ unsigned char val = 0;
++ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
++ while (val == 0) {
++
++ udelay(1000);
++ if (msec-- == 0) {
++ dev_dbg(&hdmi->pdev->dev,
++ "HDMI EDID i2c operation time out!!\n");
++ return false;
++ }
++ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
++ }
++ return true;
++}
++
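++/* Read one EDID byte over DDC. EDID blocks are 128 bytes and two
++ * blocks share one E-DDC segment, so the segment pointer is
++ * blockno / 2 and the in-segment offset is (blockno % 2) * 0x80 + addr. */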
++static u8 hdmi_edid_i2c_read(struct mxc_hdmi *hdmi,
++ u8 addr, u8 blockno)
++{
++ u8 spointer = blockno / 2;
++ u8 edidaddress = ((blockno % 2) * 0x80) + addr;
++ u8 data;
++
++ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
++ hdmi_writeb(edidaddress, HDMI_I2CM_ADDRESS);
++ hdmi_writeb(spointer, HDMI_I2CM_SEGADDR);
++ if (spointer == 0)
++ hdmi_writeb(HDMI_I2CM_OPERATION_READ,
++ HDMI_I2CM_OPERATION);
++ else
++ hdmi_writeb(HDMI_I2CM_OPERATION_READ_EXT,
++ HDMI_I2CM_OPERATION);
++
++ hdmi_edid_wait_i2c_done(hdmi, 1000);
++ data = hdmi_readb(HDMI_I2CM_DATAI);
++ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
++ return data;
++}
++
++
++/* "Power-down enable (active low)"
++ * That mean that power up == 1! */
++static void mxc_hdmi_phy_enable_power(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_PDZ_OFFSET,
++ HDMI_PHY_CONF0_PDZ_MASK);
++}
++
++static void mxc_hdmi_phy_enable_tmds(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_ENTMDS_OFFSET,
++ HDMI_PHY_CONF0_ENTMDS_MASK);
++}
++
++static void mxc_hdmi_phy_gen2_pddq(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
++ HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
++}
++
++static void mxc_hdmi_phy_gen2_txpwron(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
++}
++
++#if 0
++static void mxc_hdmi_phy_gen2_enhpdrxsense(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK);
++}
++#endif
++
++static void mxc_hdmi_phy_sel_data_en_pol(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
++ HDMI_PHY_CONF0_SELDATAENPOL_MASK);
++}
++
++static void mxc_hdmi_phy_sel_interface_control(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_SELDIPIF_OFFSET,
++ HDMI_PHY_CONF0_SELDIPIF_MASK);
++}
++
++static int hdmi_phy_configure(struct mxc_hdmi *hdmi, unsigned char pRep,
++ unsigned char cRes, int cscOn)
++{
++ u8 val;
++ u8 msec;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* color resolution 0 is 8 bit colour depth */
++ if (cRes == 0)
++ cRes = 8;
++
++ if (pRep != 0)
++ return false;
++ else if (cRes != 8 && cRes != 12)
++ return false;
++
++ /* Enable csc path */
++ if (cscOn)
++ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
++ else
++ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS;
++
++ hdmi_writeb(val, HDMI_MC_FLOWCTRL);
++
++ /* gen2 tx power off */
++ mxc_hdmi_phy_gen2_txpwron(0);
++
++ /* gen2 pddq */
++ mxc_hdmi_phy_gen2_pddq(1);
++
++ /* PHY reset */
++ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
++ hdmi_writeb(HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
++
++ hdmi_writeb(HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
++
++ hdmi_phy_test_clear(hdmi, 1);
++ hdmi_writeb(HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
++ HDMI_PHY_I2CM_SLAVE_ADDR);
++ hdmi_phy_test_clear(hdmi, 0);
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock < 0) {
++ dev_dbg(&hdmi->pdev->dev, "Pixel clock (%d) must be positive\n",
++ hdmi->hdmi_data.video_mode.mPixelClock);
++ return false;
++ }
++
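++ /* PLL/MPLL divider (PHY reg 0x06) and gear shift (GMPCTRL,
++ * PHY reg 0x15) settings per pixel clock range */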
++ if (hdmi->hdmi_data.video_mode.mPixelClock <= 45250000) {
++ switch (cRes) {
++ case 8:
++ /* PLL/MPLL Cfg */
++ hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 92500000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 148500000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
++ break;
++ default:
++ return false;
++ }
++ }
++
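++ /* Current control (CURRCTRL, PHY reg 0x10) settings per
++ * pixel clock range */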
++ if (hdmi->hdmi_data.video_mode.mPixelClock <= 54000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 58400000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 72000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 74250000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 118800000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 216000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else {
++ dev_err(&hdmi->pdev->dev,
++ "Pixel clock %d - unsupported by HDMI\n",
++ hdmi->hdmi_data.video_mode.mPixelClock);
++ return false;
++ }
++
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
++ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
++ /* RESISTANCE TERM 133Ohm Cfg */
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
++ /* PREEMP Cfg 0.00 */
++ hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
++ /* TX/CK LVL 10 */
++ hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
++
++ /* Board specific setting for PHY register 0x09, 0x0e to pass HCT */
++ if (hdmi->phy_config.reg_cksymtx != 0)
++ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_cksymtx, 0x09);
++
++ if (hdmi->phy_config.reg_vlev != 0)
++ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_vlev, 0x0E);
++
++ /* REMOVE CLK TERM */
++ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock > 148500000) {
++ hdmi_phy_i2c_write(hdmi, 0x800b, 0x09);
++ hdmi_phy_i2c_write(hdmi, 0x0129, 0x0E);
++ }
++
++ mxc_hdmi_phy_enable_power(1);
++
++ /* toggle TMDS enable */
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_tmds(1);
++
++ /* gen2 tx power on */
++ mxc_hdmi_phy_gen2_txpwron(1);
++ mxc_hdmi_phy_gen2_pddq(0);
++
++ /* Wait for PHY PLL lock */
++ msec = 4;
++ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
++ while (val == 0) {
++ udelay(1000);
++ if (msec-- == 0) {
++ dev_dbg(&hdmi->pdev->dev, "PHY PLL not locked\n");
++ return false;
++ }
++ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
++ }
++
++ return true;
++}
++
++static void mxc_hdmi_phy_init(struct mxc_hdmi *hdmi)
++{
++ int i;
++ bool cscon = false;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Never do phy init if pixel clock is gated.
++ * Otherwise HDMI PHY will get messed up and generate an overflow
++ * interrupt that can't be cleared or detected by accessing the
++ * status register. */
++ if (!hdmi->fb_reg || !hdmi->cable_plugin
++ || (hdmi->blank != FB_BLANK_UNBLANK))
++ return;
++
++ /* Check whether CSC needs to be activated in HDMI mode */
++ cscon = (isColorSpaceConversion(hdmi) &&
++ !hdmi->hdmi_data.video_mode.mDVI);
++
++ /* HDMI Phy spec says to do the phy initialization sequence twice */
++ for (i = 0 ; i < 2 ; i++) {
++ mxc_hdmi_phy_sel_data_en_pol(1);
++ mxc_hdmi_phy_sel_interface_control(0);
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_power(0);
++
++ /* Enable CSC */
++ hdmi_phy_configure(hdmi, 0, 8, cscon);
++ }
++
++ hdmi->phy_enabled = true;
++ if (!hdmi->hdmi_data.video_mode.mDVI)
++ hdmi_enable_overflow_interrupts();
++}
++
++static void hdmi_config_AVI(struct mxc_hdmi *hdmi)
++{
++ u8 val;
++ u8 pix_fmt;
++ u8 under_scan;
++ u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
++ struct fb_videomode mode;
++ const struct fb_videomode *edid_mode;
++ bool aspect_16_9;
++
++ dev_dbg(&hdmi->pdev->dev, "set up AVI frame\n");
++
++ fb_var_to_videomode(&mode, &hdmi->fbi->var);
++ /* Use mode from list extracted from EDID to get aspect ratio */
++ if (!list_empty(&hdmi->fbi->modelist)) {
++ edid_mode = fb_find_nearest_mode(&mode, &hdmi->fbi->modelist);
++ if (edid_mode->vmode & FB_VMODE_ASPECT_16_9)
++ aspect_16_9 = true;
++ else
++ aspect_16_9 = false;
++ } else
++ aspect_16_9 = false;
++
++ /********************************************
++ * AVI Data Byte 1
++ ********************************************/
++ if (hdmi->hdmi_data.enc_out_format == YCBCR444)
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
++ else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
++ else
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
++
++ if (hdmi->edid_cfg.cea_underscan)
++ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN;
++ else
++ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
++
++ /*
++ * Active format identification data is present in the AVI InfoFrame.
++ * Under scan info, no bar data
++ */
++ val = pix_fmt | under_scan |
++ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
++ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
++
++ hdmi_writeb(val, HDMI_FC_AVICONF0);
++
++ /********************************************
++ * AVI Data Byte 2
++ ********************************************/
++
++ /* Set the Aspect Ratio */
++ if (aspect_16_9) {
++ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
++ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
++ } else {
++ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
++ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
++ }
++
++ /* Set up colorimetry */
++ if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
++ if (hdmi->hdmi_data.colorimetry == eITU601)
++ ext_colorimetry =
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ else /* hdmi->hdmi_data.colorimetry == eITU709 */
++ ext_colorimetry =
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
++ } else if (hdmi->hdmi_data.enc_out_format != RGB) {
++ if (hdmi->hdmi_data.colorimetry == eITU601)
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
++ else /* hdmi->hdmi_data.colorimetry == eITU709 */
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
++ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ } else { /* Carries no data */
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
++ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ }
++
++ val = colorimetry | coded_ratio | act_ratio;
++ hdmi_writeb(val, HDMI_FC_AVICONF1);
++
++ /********************************************
++ * AVI Data Byte 3
++ ********************************************/
++
++ val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
++ hdmi->hdmi_data.rgb_quant_range |
++ HDMI_FC_AVICONF2_SCALING_NONE;
++ hdmi_writeb(val, HDMI_FC_AVICONF2);
++
++ /********************************************
++ * AVI Data Byte 4
++ ********************************************/
++ hdmi_writeb(hdmi->vic, HDMI_FC_AVIVID);
++
++ /********************************************
++ * AVI Data Byte 5
++ ********************************************/
++
++ /* Set up input and output pixel repetition */
++ val = (((hdmi->hdmi_data.video_mode.mPixelRepetitionInput + 1) <<
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) &
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) |
++ ((hdmi->hdmi_data.video_mode.mPixelRepetitionOutput <<
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) &
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
++ hdmi_writeb(val, HDMI_FC_PRCONF);
++
++ /* IT Content and quantization range = don't care */
++ val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
++ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
++ hdmi_writeb(val, HDMI_FC_AVICONF3);
++
++ /********************************************
++ * AVI Data Bytes 6-13
++ ********************************************/
++ hdmi_writeb(0, HDMI_FC_AVIETB0);
++ hdmi_writeb(0, HDMI_FC_AVIETB1);
++ hdmi_writeb(0, HDMI_FC_AVISBB0);
++ hdmi_writeb(0, HDMI_FC_AVISBB1);
++ hdmi_writeb(0, HDMI_FC_AVIELB0);
++ hdmi_writeb(0, HDMI_FC_AVIELB1);
++ hdmi_writeb(0, HDMI_FC_AVISRB0);
++ hdmi_writeb(0, HDMI_FC_AVISRB1);
++}
++
++/*!
++ * this submodule is responsible for the video/audio data composition.
++ */
++static void hdmi_av_composer(struct mxc_hdmi *hdmi)
++{
++ u8 inv_val;
++ struct fb_info *fbi = hdmi->fbi;
++ struct fb_videomode fb_mode;
++ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
++ int hblank, vblank;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_var_to_videomode(&fb_mode, &fbi->var);
++
++ vmode->mHSyncPolarity = ((fb_mode.sync & FB_SYNC_HOR_HIGH_ACT) != 0);
++ vmode->mVSyncPolarity = ((fb_mode.sync & FB_SYNC_VERT_HIGH_ACT) != 0);
++ vmode->mInterlaced = ((fb_mode.vmode & FB_VMODE_INTERLACED) != 0);
++ vmode->mPixelClock = (fb_mode.xres + fb_mode.left_margin +
++ fb_mode.right_margin + fb_mode.hsync_len) * (fb_mode.yres +
++ fb_mode.upper_margin + fb_mode.lower_margin +
++ fb_mode.vsync_len) * fb_mode.refresh;
++
++ dev_dbg(&hdmi->pdev->dev, "final pixclk = %d\n", vmode->mPixelClock);
++
++ /* Set up HDMI_FC_INVIDCONF */
++ inv_val = (vmode->mVSyncPolarity ?
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
++
++ inv_val |= (vmode->mHSyncPolarity ?
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
++
++ inv_val |= (vmode->mDataEnablePolarity ?
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW);
++
++ if (hdmi->vic == 39)
++ inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
++ else
++ inv_val |= (vmode->mInterlaced ?
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
++
++ inv_val |= (vmode->mInterlaced ?
++ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
++ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
++
++ inv_val |= (vmode->mDVI ?
++ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
++ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
++
++ hdmi_writeb(inv_val, HDMI_FC_INVIDCONF);
++
++ /* Set up horizontal active pixel region width */
++ hdmi_writeb(fb_mode.xres >> 8, HDMI_FC_INHACTV1);
++ hdmi_writeb(fb_mode.xres, HDMI_FC_INHACTV0);
++
++ /* Set up vertical active pixel region height */
++ hdmi_writeb(fb_mode.yres >> 8, HDMI_FC_INVACTV1);
++ hdmi_writeb(fb_mode.yres, HDMI_FC_INVACTV0);
++
++ /* Set up horizontal blanking pixel region width */
++ hblank = fb_mode.left_margin + fb_mode.right_margin +
++ fb_mode.hsync_len;
++ hdmi_writeb(hblank >> 8, HDMI_FC_INHBLANK1);
++ hdmi_writeb(hblank, HDMI_FC_INHBLANK0);
++
++ /* Set up vertical blanking pixel region width */
++ vblank = fb_mode.upper_margin + fb_mode.lower_margin +
++ fb_mode.vsync_len;
++ hdmi_writeb(vblank, HDMI_FC_INVBLANK);
++
++ /* Set up HSYNC active edge delay width (in pixel clks) */
++ hdmi_writeb(fb_mode.right_margin >> 8, HDMI_FC_HSYNCINDELAY1);
++ hdmi_writeb(fb_mode.right_margin, HDMI_FC_HSYNCINDELAY0);
++
++ /* Set up VSYNC active edge delay (in lines) */
++ hdmi_writeb(fb_mode.lower_margin, HDMI_FC_VSYNCINDELAY);
++
++ /* Set up HSYNC active pulse width (in pixel clks) */
++ hdmi_writeb(fb_mode.hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
++ hdmi_writeb(fb_mode.hsync_len, HDMI_FC_HSYNCINWIDTH0);
++
++ /* Set up VSYNC active pulse width (in lines) */
++ hdmi_writeb(fb_mode.vsync_len, HDMI_FC_VSYNCINWIDTH);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static int mxc_edid_read_internal(struct mxc_hdmi *hdmi, unsigned char *edid,
++ struct mxc_edid_cfg *cfg, struct fb_info *fbi)
++{
++ int extblknum;
++ int i, j, ret;
++ unsigned char *ediddata = edid;
++ unsigned char tmpedid[EDID_LENGTH];
++
++ dev_info(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (!edid || !cfg || !fbi)
++ return -EINVAL;
++
++ /* Init the HDMI I2C master (DDC) for EDID reads */
++ hdmi_writeb(0x0, HDMI_I2CM_DIV);
++ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_HCNT_1_ADDR);
++ hdmi_writeb(0x79, HDMI_I2CM_SS_SCL_HCNT_0_ADDR);
++ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_LCNT_1_ADDR);
++ hdmi_writeb(0x91, HDMI_I2CM_SS_SCL_LCNT_0_ADDR);
++
++ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_HCNT_1_ADDR);
++ hdmi_writeb(0x0F, HDMI_I2CM_FS_SCL_HCNT_0_ADDR);
++ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_LCNT_1_ADDR);
++ hdmi_writeb(0x21, HDMI_I2CM_FS_SCL_LCNT_0_ADDR);
++
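++ /* 7-bit DDC addresses: 0x50 is the EDID EEPROM (0xA0/0xA1) and
++ * 0x30 is the E-DDC segment pointer (0x60) */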
++ hdmi_writeb(0x50, HDMI_I2CM_SLAVE);
++ hdmi_writeb(0x30, HDMI_I2CM_SEGADDR);
++
++ /* Unmask the EDID done interrupt */
++ hdmi_writeb(HDMI_I2CM_INT_DONE_POL,
++ HDMI_I2CM_INT);
++
++ hdmi_writeb(HDMI_I2CM_CTLINT_NAC_POL |
++ HDMI_I2CM_CTLINT_ARBITRATION_POL,
++ HDMI_I2CM_CTLINT);
++
++ /* Clear the EDID data buffer and configuration */
++ memset(edid, 0, EDID_LENGTH*4);
++ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
++
++ /* Check the first three bytes of the EDID header */
++ if (!(hdmi_edid_i2c_read(hdmi, 0, 0) == 0x00) ||
++ !(hdmi_edid_i2c_read(hdmi, 1, 0) == 0xFF) ||
++ !(hdmi_edid_i2c_read(hdmi, 2, 0) == 0xFF)) {
++ dev_info(&hdmi->pdev->dev, "EDID head check failed!");
++ return -ENOENT;
++ }
++
++ for (i = 0; i < 128; i++) {
++ *ediddata = hdmi_edid_i2c_read(hdmi, i, 0);
++ ediddata++;
++ }
++
++ extblknum = edid[0x7E];
++ if (extblknum < 0)
++ return extblknum;
++
++ if (extblknum) {
++ ediddata = edid + EDID_LENGTH;
++ for (i = 0; i < 128; i++) {
++ *ediddata = hdmi_edid_i2c_read(hdmi, i, 1);
++ ediddata++;
++ }
++ }
++
++ /* Parse the first EDID block */
++ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
++ fb_edid_to_monspecs(edid, &fbi->monspecs);
++
++ ret = mxc_edid_parse_ext_blk(edid + EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ if (ret < 0) {
++ fb_edid_add_monspecs(edid + EDID_LENGTH, &fbi->monspecs);
++ if (fbi->monspecs.modedb_len > 0)
++ hdmi->edid_cfg.hdmi_cap = false;
++ else
++ return -ENOENT;
++ }
++
++ /* Do we need to read additional segment blocks? */
++ if (extblknum > 1) {
++ for (j = 1; j <= extblknum; j++) {
++ for (i = 0; i < 128; i++)
++ *(tmpedid + 1) = hdmi_edid_i2c_read(hdmi, i, j);
++
++ /* edid ext block parsing */
++ ret = mxc_edid_parse_ext_blk(tmpedid + EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ if (ret < 0)
++ return -ENOENT;
++ }
++ }
++
++ return 0;
++}
++
++static int mxc_hdmi_read_edid(struct mxc_hdmi *hdmi)
++{
++ int ret;
++ u8 edid_old[HDMI_EDID_LEN];
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* save old edid */
++ memcpy(edid_old, hdmi->edid, HDMI_EDID_LEN);
++
++ /* Read EDID via the HDMI internal DDC master when HDCP is enabled */
++ if (!hdcp_init)
++ ret = mxc_edid_read(hdmi_i2c->adapter, hdmi_i2c->addr,
++ hdmi->edid, &hdmi->edid_cfg, hdmi->fbi);
++ else {
++
++ /* Disable HDCP clk */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++
++ ret = mxc_edid_read_internal(hdmi, hdmi->edid,
++ &hdmi->edid_cfg, hdmi->fbi);
++
++ /* Enable HDCP clk */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis &= ~HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++
++ }
++
++ if (ret < 0)
++ return HDMI_EDID_FAIL;
++
++ dev_info(&hdmi->pdev->dev, "%s HDMI in %s mode\n", __func__, hdmi->edid_cfg.hdmi_cap?"HDMI":"DVI");
++ hdmi->plug_event = hdmi->edid_cfg.hdmi_cap?HDMI_IH_PHY_STAT0_HPD:HDMI_DVI_IH_STAT;
++ hdmi->plug_mask = hdmi->edid_cfg.hdmi_cap?HDMI_PHY_HPD:HDMI_DVI_STAT;
++
++ if (!memcmp(edid_old, hdmi->edid, HDMI_EDID_LEN)) {
++ dev_info(&hdmi->pdev->dev, "same edid\n");
++ return HDMI_EDID_SAME;
++ }
++
++ if (hdmi->fbi->monspecs.modedb_len == 0) {
++ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
++ return HDMI_EDID_NO_MODES;
++ }
++
++ return HDMI_EDID_SUCCESS;
++}
++
++static void mxc_hdmi_phy_disable(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (!hdmi->phy_enabled)
++ return;
++
++ hdmi_disable_overflow_interrupts();
++
++ /* Setting PHY to reset status */
++ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
++
++ /* Power down PHY */
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_power(0);
++ mxc_hdmi_phy_gen2_txpwron(0);
++ mxc_hdmi_phy_gen2_pddq(1);
++
++ hdmi->phy_enabled = false;
++ dev_dbg(&hdmi->pdev->dev, "%s - exit\n", __func__);
++}
++
++/* HDMI Initialization Step B.4 */
++static void mxc_hdmi_enable_video_path(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* control period minimum duration */
++ hdmi_writeb(12, HDMI_FC_CTRLDUR);
++ hdmi_writeb(32, HDMI_FC_EXCTRLDUR);
++ hdmi_writeb(1, HDMI_FC_EXCTRLSPAC);
++
++ /* Set to fill TMDS data channels */
++ hdmi_writeb(0x0B, HDMI_FC_CH0PREAM);
++ hdmi_writeb(0x16, HDMI_FC_CH1PREAM);
++ hdmi_writeb(0x21, HDMI_FC_CH2PREAM);
++
++ /* Save CEC clock */
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++
++ /* Enable pixel clock and tmds data path */
++ clkdis = 0x7F & clkdis;
++ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ /* Enable csc path */
++ if (isColorSpaceConversion(hdmi) && !hdmi->hdmi_data.video_mode.mDVI) {
++ clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++}
++
++static void hdmi_enable_audio_clk(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++}
++
++/* Workaround to clear the overflow condition */
++static void mxc_hdmi_clear_overflow(struct mxc_hdmi *hdmi)
++{
++ int count;
++ u8 val;
++
++ /* TMDS software reset */
++ hdmi_writeb((u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
++
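++ /* Rewriting HDMI_FC_INVIDCONF retriggers the frame composer:
++ * i.MX6DL needs a single write, other variants five */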
++ val = hdmi_readb(HDMI_FC_INVIDCONF);
++
++ if (cpu_is_imx6dl(hdmi)) {
++ hdmi_writeb(val, HDMI_FC_INVIDCONF);
++ return;
++ }
++
++ for (count = 0 ; count < 5 ; count++)
++ hdmi_writeb(val, HDMI_FC_INVIDCONF);
++}
++
++static void hdmi_enable_overflow_interrupts(void)
++{
++ pr_debug("%s\n", __func__);
++ hdmi_writeb(0, HDMI_FC_MASK2);
++ hdmi_writeb(0, HDMI_IH_MUTE_FC_STAT2);
++}
++
++static void hdmi_disable_overflow_interrupts(void)
++{
++ pr_debug("%s\n", __func__);
++ hdmi_writeb(HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
++ HDMI_IH_MUTE_FC_STAT2);
++ hdmi_writeb(0xff, HDMI_FC_MASK2);
++}
++
++static void mxc_hdmi_notify_fb(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Warn if we are being notified before the framebuffer is registered */
++ WARN_ON(!hdmi->fb_reg);
++
++ /* disable the phy before ipu changes mode */
++ mxc_hdmi_phy_disable(hdmi);
++
++ /*
++ * Note that fb_set_var will block. During this time,
++ * FB_EVENT_MODE_CHANGE callback will happen.
++ * So by the end of this function, mxc_hdmi_setup()
++ * will be done.
++ */
++ hdmi->fbi->var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ hdmi->fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_set_var(hdmi->fbi, &hdmi->fbi->var);
++ hdmi->fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static void mxc_hdmi_edid_rebuild_modelist(struct mxc_hdmi *hdmi)
++{
++ int i;
++ struct fb_videomode *mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++ fb_add_videomode(&vga_mode, &hdmi->fbi->modelist);
++
++ for (i = 0; i < hdmi->fbi->monspecs.modedb_len; i++) {
++ /*
++ * Skip modes the HDMI path cannot handle: interlaced modes are
++ * not supported, and in HDMI mode only CEA modes (VIC != 0)
++ * are added to the modelist.
++ */
++ mode = &hdmi->fbi->monspecs.modedb[i];
++
++ if ((mode->vmode & FB_VMODE_INTERLACED) ||
++ (hdmi->edid_cfg.hdmi_cap &&
++ (mxc_edid_mode_to_vic(mode) == 0)))
++ continue;
++
++ dev_dbg(&hdmi->pdev->dev, "Added mode %d:", i);
++ dev_dbg(&hdmi->pdev->dev,
++ "xres = %d, yres = %d, freq = %d, vmode = %d, flag = %d\n",
++ hdmi->fbi->monspecs.modedb[i].xres,
++ hdmi->fbi->monspecs.modedb[i].yres,
++ hdmi->fbi->monspecs.modedb[i].refresh,
++ hdmi->fbi->monspecs.modedb[i].vmode,
++ hdmi->fbi->monspecs.modedb[i].flag);
++
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++}
++
++static void mxc_hdmi_default_edid_cfg(struct mxc_hdmi *hdmi)
++{
++ /* By default, assume the sink is HDMI-capable */
++ hdmi->edid_cfg.hdmi_cap = true;
++}
++
++static void mxc_hdmi_default_modelist(struct mxc_hdmi *hdmi)
++{
++ u32 i;
++ const struct fb_videomode *mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* If no EDID data could be read, set up a default modelist */
++ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
++ dev_info(&hdmi->pdev->dev, "create default modelist\n");
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++
++ /* Add all non-interlaced CEA modes to the default modelist */
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ mode = &mxc_cea_mode[i];
++ if (!(mode->vmode & FB_VMODE_INTERLACED) && (mode->xres != 0))
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++}
++
++static void mxc_hdmi_set_mode_to_vga_dvi(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi_disable_overflow_interrupts();
++
++ fb_videomode_to_var(&hdmi->fbi->var, &vga_mode);
++
++ hdmi->requesting_vga_for_initialization = true;
++ mxc_hdmi_notify_fb(hdmi);
++ hdmi->requesting_vga_for_initialization = false;
++}
++
++static void mxc_hdmi_set_mode(struct mxc_hdmi *hdmi)
++{
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++ struct fb_var_screeninfo var;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Set the default mode only once. */
++ if (!hdmi->dft_mode_set) {
++ fb_videomode_to_var(&var, &hdmi->default_mode);
++ hdmi->dft_mode_set = true;
++ } else
++ fb_videomode_to_var(&var, &hdmi->previous_non_vga_mode);
++
++ fb_var_to_videomode(&m, &var);
++ dump_fb_videomode(&m);
++
++ mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ if (!mode) {
++ pr_err("%s: could not find mode in modelist\n", __func__);
++ return;
++ }
++
++ /* If video mode same as previous, init HDMI again */
++ if (fb_mode_is_equal(&hdmi->previous_non_vga_mode, mode)) {
++ dev_dbg(&hdmi->pdev->dev,
++ "%s: Video mode same as previous\n", __func__);
++ /* update fbi mode in case modelist is updated */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++ /* update hdmi setting in case EDID data updated */
++ mxc_hdmi_setup(hdmi, 0);
++ } else {
++ dev_dbg(&hdmi->pdev->dev, "%s: New video mode\n", __func__);
++ mxc_hdmi_set_mode_to_vga_dvi(hdmi);
++ fb_videomode_to_var(&hdmi->fbi->var, mode);
++ dump_fb_videomode((struct fb_videomode *)mode);
++ mxc_hdmi_notify_fb(hdmi);
++ }
++
++}
++
++static void mxc_hdmi_cable_connected(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi->cable_plugin = true;
++
++ /* HDMI Initialization Step C */
++ hdmi->edid_status = mxc_hdmi_read_edid(hdmi);
++
++ /* Read EDID again if first EDID read failed */
++ if (hdmi->edid_status == HDMI_EDID_NO_MODES ||
++ hdmi->edid_status == HDMI_EDID_FAIL) {
++ dev_info(&hdmi->pdev->dev, "Read EDID again\n");
++ hdmi->edid_status = mxc_hdmi_read_edid(hdmi);
++ }
++
++ /* HDMI Initialization Steps D, E, F */
++ switch (hdmi->edid_status) {
++ case HDMI_EDID_SUCCESS:
++ mxc_hdmi_edid_rebuild_modelist(hdmi);
++ break;
++
++ /* Nothing to do if EDID same */
++ case HDMI_EDID_SAME:
++ break;
++
++ case HDMI_EDID_FAIL:
++ mxc_hdmi_default_edid_cfg(hdmi);
++ /* No break here */
++ case HDMI_EDID_NO_MODES:
++ default:
++ mxc_hdmi_default_modelist(hdmi);
++ break;
++ }
++
++ /* Save edid cfg for audio driver */
++ hdmi_set_edid_cfg(hdmi->edid_status, &hdmi->edid_cfg);
++
++ /* Setting video mode */
++ mxc_hdmi_set_mode(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static int mxc_hdmi_power_on(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ mxc_hdmi_phy_init(hdmi);
++ return 0;
++}
++
++static void mxc_hdmi_power_off(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ mxc_hdmi_phy_disable(hdmi);
++}
++
++static void mxc_hdmi_cable_disconnected(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Save CEC clock */
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++
++ /* Disable All HDMI clock */
++ hdmi_writeb(0xff & clkdis, HDMI_MC_CLKDIS);
++
++ mxc_hdmi_phy_disable(hdmi);
++
++ hdmi_disable_overflow_interrupts();
++
++ hdmi->cable_plugin = false;
++}
++
++static void hotplug_worker(struct work_struct *work)
++{
++ struct mxc_hdmi *hdmi =
++ container_of(work, struct mxc_hdmi, hotplug_work);
++ u32 hdmi_phy_stat0, hdmi_phy_pol0, hdmi_phy_mask0;
++ unsigned long flags;
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++
++ hdmi_phy_stat0 = hdmi_readb(HDMI_PHY_STAT0);
++ hdmi_phy_pol0 = hdmi_readb(HDMI_PHY_POL0);
++
++ if (hdmi->latest_intr_stat & hdmi->plug_event) {
++ /* Make HPD intr active low to capture unplug event or
++ * active high to capture plugin event */
++ hdmi_writeb((hdmi->plug_mask & ~hdmi_phy_pol0), HDMI_PHY_POL0);
++
++ /* check cable status */
++ if (hdmi_phy_stat0 & hdmi->plug_mask) {
++ /* Plugin event */
++ dev_dbg(&hdmi->pdev->dev, "EVENT=plugin\n");
++ mxc_hdmi_cable_connected(hdmi);
++
++ sprintf(event_string, "EVENT=plugin");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++#ifdef CONFIG_MXC_HDMI_CEC
++ mxc_hdmi_cec_handle(0x80);
++#endif
++ hdmi_set_cable_state(1);
++ } else {
++ /* Plugout event */
++ dev_dbg(&hdmi->pdev->dev, "EVENT=plugout\n");
++ hdmi_set_cable_state(0);
++ mxc_hdmi_abort_stream();
++ mxc_hdmi_cable_disconnected(hdmi);
++
++ sprintf(event_string, "EVENT=plugout");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++#ifdef CONFIG_MXC_HDMI_CEC
++ mxc_hdmi_cec_handle(0x100);
++#endif
++ }
++ }
++
++ /* Lock here to ensure the full powerdown sequence has
++ * completed before the next interrupt is processed */
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ /* Re-enable HPD interrupts */
++ hdmi_phy_mask0 = hdmi_readb(HDMI_PHY_MASK0);
++ hdmi_phy_mask0 &= ~hdmi->plug_mask;
++ hdmi_writeb(hdmi_phy_mask0, HDMI_PHY_MASK0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ if (hdmi_readb(HDMI_IH_FC_STAT2) & HDMI_IH_FC_STAT2_OVERFLOW_MASK)
++ mxc_hdmi_clear_overflow(hdmi);
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++}
++
++static void hotplug_work_launch(unsigned long data)
++{
++ struct mxc_hdmi *hdmi = (struct mxc_hdmi *)data;
++ pr_debug("%s\n", __func__);
++ schedule_work(&hdmi->hotplug_work);
++}
++
++static void hdcp_hdp_worker(struct work_struct *work)
++{
++ struct delayed_work *delay_work = to_delayed_work(work);
++ struct mxc_hdmi *hdmi =
++ container_of(delay_work, struct mxc_hdmi, hdcp_hdp_work);
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++
++ /* HDCP interrupt */
++ sprintf(event_string, "EVENT=hdcpint");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++
++ /* Unmute interrupts in HDCP application*/
++}
++
++static irqreturn_t mxc_hdmi_hotplug(int irq, void *data)
++{
++ struct mxc_hdmi *hdmi = data;
++ u8 val, intr_stat;
++ unsigned long flags;
++
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ /* Check and clear the packet overflow interrupt. */
++ if (hdmi_readb(HDMI_IH_FC_STAT2) &
++ HDMI_IH_FC_STAT2_OVERFLOW_MASK) {
++ mxc_hdmi_clear_overflow(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "Overflow interrupt received\n");
++ /* clear irq status */
++ hdmi_writeb(HDMI_IH_FC_STAT2_OVERFLOW_MASK,
++ HDMI_IH_FC_STAT2);
++ }
++
++ /*
++ * We cannot disable the irq itself (the audio driver has probably
++ * enabled it), so mask off the HDMI interrupts using the HDMI
++ * registers instead.
++ */
++ /* Capture status - used in hotplug_worker ISR */
++ intr_stat = hdmi_readb(HDMI_IH_PHY_STAT0);
++ if (intr_stat & hdmi->plug_event) {
++
++ dev_dbg(&hdmi->pdev->dev, "Hotplug interrupt received\n");
++ dev_dbg(&hdmi->pdev->dev, "intr_stat %u plug_event %u\n", intr_stat, hdmi->plug_event);
++ hdmi->latest_intr_stat = intr_stat;
++
++ /* Mute interrupts until handled */
++
++ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
++ val |= hdmi->plug_event;
++ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
++
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val |= hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ if(hdmi_inited) {
++ mod_timer(&hdmi->jitter_timer, jiffies + HZ);
++ }
++ }
++
++ /* Check HDCP interrupt state */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ val = hdmi_readb(HDMI_A_APIINTSTAT);
++ if (val != 0) {
++ /* Mute interrupts until interrupt handled */
++ val = 0xFF;
++ hdmi_writeb(val, HDMI_A_APIINTMSK);
++ schedule_delayed_work(&(hdmi->hdcp_hdp_work), msecs_to_jiffies(50));
++ }
++ }
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++ return IRQ_HANDLED;
++}
++
++static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event)
++{
++ struct fb_videomode m;
++ const struct fb_videomode *edid_mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++ dump_fb_videomode(&m);
++
++ dev_dbg(&hdmi->pdev->dev, "%s - video mode changed\n", __func__);
++
++ hdmi->vic = 0;
++ if (!hdmi->requesting_vga_for_initialization) {
++ /* Save mode if this isn't the result of requesting
++ * vga default. */
++ memcpy(&hdmi->previous_non_vga_mode, &m,
++ sizeof(struct fb_videomode));
++ if (!list_empty(&hdmi->fbi->modelist)) {
++ edid_mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ pr_debug("edid mode ");
++ dump_fb_videomode((struct fb_videomode *)edid_mode);
++ /* update fbi mode */
++ hdmi->fbi->mode = (struct fb_videomode *)edid_mode;
++ hdmi->vic = mxc_edid_mode_to_vic(edid_mode);
++ }
++ }
++
++ hdmi_disable_overflow_interrupts();
++
++ dev_dbg(&hdmi->pdev->dev, "CEA mode used vic=%d\n", hdmi->vic);
++ if (hdmi->edid_cfg.hdmi_cap || !hdmi->edid_status) {
++ hdmi_set_dvi_mode(0);
++ hdmi->hdmi_data.video_mode.mDVI = false;
++ } else {
++ hdmi_set_dvi_mode(1);
++ dev_dbg(&hdmi->pdev->dev, "CEA mode vic=%d work in DVI\n", hdmi->vic);
++ hdmi->hdmi_data.video_mode.mDVI = true;
++ }
++
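++ /* SD CEA modes (480i/480p/576i/576p) use ITU-R BT.601
++ * colorimetry; all other modes use BT.709 */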
++ if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
++ (hdmi->vic == 21) || (hdmi->vic == 22) ||
++ (hdmi->vic == 2) || (hdmi->vic == 3) ||
++ (hdmi->vic == 17) || (hdmi->vic == 18))
++ hdmi->hdmi_data.colorimetry = eITU601;
++ else
++ hdmi->hdmi_data.colorimetry = eITU709;
++
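++ /* 1440/2880-wide pixel-repeated CEA modes need output pixel
++ * repetition enabled */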
++ if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
++ (hdmi->vic == 12) || (hdmi->vic == 13) ||
++ (hdmi->vic == 14) || (hdmi->vic == 15) ||
++ (hdmi->vic == 25) || (hdmi->vic == 26) ||
++ (hdmi->vic == 27) || (hdmi->vic == 28) ||
++ (hdmi->vic == 29) || (hdmi->vic == 30) ||
++ (hdmi->vic == 35) || (hdmi->vic == 36) ||
++ (hdmi->vic == 37) || (hdmi->vic == 38))
++ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 1;
++ else
++ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 0;
++
++ hdmi->hdmi_data.video_mode.mPixelRepetitionInput = 0;
++
++ /* TODO: Get input format from IPU (via FB driver iface) */
++ hdmi->hdmi_data.enc_in_format = RGB;
++
++ hdmi->hdmi_data.enc_out_format = RGB;
++
++ /* YCbCr only enabled in HDMI mode */
++ if (!hdmi->hdmi_data.video_mode.mDVI &&
++ !hdmi->hdmi_data.rgb_out_enable) {
++ if (hdmi->edid_cfg.cea_ycbcr444)
++ hdmi->hdmi_data.enc_out_format = YCBCR444;
++ else if (hdmi->edid_cfg.cea_ycbcr422)
++ hdmi->hdmi_data.enc_out_format = YCBCR422_8BITS;
++ }
++
++ /* The IPU does not support deep color output */
++ hdmi->hdmi_data.enc_color_depth = 8;
++ hdmi->hdmi_data.pix_repet_factor = 0;
++ hdmi->hdmi_data.video_mode.mDataEnablePolarity = true;
++
++ /* HDMI Initialization Step B.1 */
++ hdmi_av_composer(hdmi);
++
++ /* HDMI Initialization Step B.2 */
++ mxc_hdmi_phy_init(hdmi);
++
++ /* HDMI Initialization Step B.3 */
++ mxc_hdmi_enable_video_path(hdmi);
++
++ /* not for DVI mode */
++ if (hdmi->hdmi_data.video_mode.mDVI)
++ dev_dbg(&hdmi->pdev->dev, "%s DVI mode\n", __func__);
++ else {
++ dev_dbg(&hdmi->pdev->dev, "%s CEA mode\n", __func__);
++
++ /* HDMI Initialization Step E - Configure audio */
++ hdmi_clk_regenerator_update_pixel_clock(hdmi->fbi->var.pixclock);
++ hdmi_enable_audio_clk(hdmi);
++
++ /* HDMI Initialization Step F - Configure AVI InfoFrame */
++ hdmi_config_AVI(hdmi);
++ }
++
++ hdmi_video_packetize(hdmi);
++ hdmi_video_csc(hdmi);
++ hdmi_video_sample(hdmi);
++
++ mxc_hdmi_clear_overflow(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n\n", __func__);
++
++}
++
++/* Wait until we are registered to enable interrupts */
++static void mxc_hdmi_fb_registered(struct mxc_hdmi *hdmi)
++{
++ unsigned long flags;
++
++ if (hdmi->fb_reg)
++ return;
++
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi_writeb(HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
++ HDMI_PHY_I2CM_INT_ADDR);
++
++ hdmi_writeb(HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
++ HDMI_PHY_I2CM_CTLINT_ADDR);
++
++ /* enable cable hot plug irq */
++ hdmi_writeb(~hdmi->plug_mask, HDMI_PHY_MASK0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ hdmi->fb_reg = true;
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++
++}
++
++static int mxc_hdmi_fb_event(struct notifier_block *nb,
++ unsigned long val, void *v)
++{
++ struct fb_event *event = v;
++ struct mxc_hdmi *hdmi = container_of(nb, struct mxc_hdmi, nb);
++
++ if (strcmp(event->info->fix.id, hdmi->fbi->fix.id))
++ return 0;
++
++ switch (val) {
++ case FB_EVENT_FB_REGISTERED:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_REGISTERED\n");
++ mxc_hdmi_fb_registered(hdmi);
++ hdmi_set_registered(1);
++ break;
++
++ case FB_EVENT_FB_UNREGISTERED:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_UNREGISTERED\n");
++ hdmi->fb_reg = false;
++ hdmi_set_registered(0);
++ break;
++
++ case FB_EVENT_MODE_CHANGE:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_MODE_CHANGE\n");
++ if (hdmi->fb_reg)
++ mxc_hdmi_setup(hdmi, val);
++ break;
++
++ case FB_EVENT_BLANK:
++ if ((*((int *)event->data) == FB_BLANK_UNBLANK) &&
++ (*((int *)event->data) != hdmi->blank)) {
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_BLANK - UNBLANK\n");
++
++ hdmi->blank = *((int *)event->data);
++
++ /* Re-enable HPD interrupts */
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val &= ~hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ if (hdmi->fb_reg && hdmi->cable_plugin)
++ mxc_hdmi_setup(hdmi, val);
++ hdmi_set_blank_state(1);
++ } else if (*((int *)event->data) != hdmi->blank) {
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_BLANK - BLANK\n");
++ hdmi_set_blank_state(0);
++ mxc_hdmi_abort_stream();
++
++ mxc_hdmi_phy_disable(hdmi);
++
++ if(hdmi->plug_mask == HDMI_DVI_STAT) {
++ u8 val;
++ pr_info("In DVI Mode disable interrupts\n");
++ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
++ val |= hdmi->plug_event;
++ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
++
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val |= hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ hdmi_set_dvi_mode(1);
++ }
++
++ hdmi->blank = *((int *)event->data);
++ } else
++ dev_dbg(&hdmi->pdev->dev,
++ "FB BLANK state no changed!\n");
++
++ break;
++
++ case FB_EVENT_SUSPEND:
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_SUSPEND\n");
++
++ if (hdmi->blank == FB_BLANK_UNBLANK) {
++ mxc_hdmi_phy_disable(hdmi);
++ clk_disable(hdmi->hdmi_iahb_clk);
++ clk_disable(hdmi->hdmi_isfr_clk);
++ }
++ break;
++
++ case FB_EVENT_RESUME:
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_RESUME\n");
++
++ if (hdmi->blank == FB_BLANK_UNBLANK) {
++ clk_enable(hdmi->hdmi_iahb_clk);
++ clk_enable(hdmi->hdmi_isfr_clk);
++ mxc_hdmi_phy_init(hdmi);
++ }
++ break;
++
++ }
++ return 0;
++}
++
++static void hdmi_init_route(struct mxc_hdmi *hdmi)
++{
++ uint32_t hdmi_mux_setting, reg;
++ int ipu_id, disp_id;
++
++ ipu_id = mxc_hdmi_ipu_id;
++ disp_id = mxc_hdmi_disp_id;
++
++ if ((ipu_id > 1) || (ipu_id < 0)) {
++ pr_err("Invalid IPU select for HDMI: %d. Set to 0\n", ipu_id);
++ ipu_id = 0;
++ }
++
++ if ((disp_id > 1) || (disp_id < 0)) {
++ pr_err("Invalid DI select for HDMI: %d. Set to 0\n", disp_id);
++ disp_id = 0;
++ }
++
++ reg = readl(hdmi->gpr_hdmi_base);
++
++ /* Configure the connection between IPU1/2 and HDMI */
++ hdmi_mux_setting = 2*ipu_id + disp_id;
++
++ /* GPR3, bits 2-3 = HDMI_MUX_CTL */
++ reg &= ~0xd;
++ reg |= hdmi_mux_setting << 2;
++
++ writel(reg, hdmi->gpr_hdmi_base);
++
++ /* Set HDMI event as SDMA event2 for HDMI audio */
++ reg = readl(hdmi->gpr_sdma_base);
++ reg |= 0x1;
++ writel(reg, hdmi->gpr_sdma_base);
++}
++
++static void hdmi_hdcp_get_property(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++
++ /* Check whether HDCP is enabled by the DTS. */
++ hdcp_init = of_property_read_bool(np, "fsl,hdcp");
++ if (hdcp_init)
++ dev_dbg(&pdev->dev, "hdcp enable\n");
++ else
++ dev_dbg(&pdev->dev, "hdcp disable\n");
++}
++
++static void hdmi_get_of_property(struct mxc_hdmi *hdmi)
++{
++ struct platform_device *pdev = hdmi->pdev;
++ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id =
++ of_match_device(imx_hdmi_dt_ids, &pdev->dev);
++ int ret;
++ u32 phy_reg_vlev = 0, phy_reg_cksymtx = 0;
++
++ if (of_id) {
++ pdev->id_entry = of_id->data;
++ hdmi->cpu_type = pdev->id_entry->driver_data;
++ }
++
++ /* The HDMI PHY register vlev and cksymtx properties are optional.
++ * They allow a specific board to pass the HCT electrical tests.
++ * Default values are set in the HDMI PHY config function
++ * if they are not defined in the device tree.
++ */
++ ret = of_property_read_u32(np, "fsl,phy_reg_vlev", &phy_reg_vlev);
++ if (ret)
++ dev_dbg(&pdev->dev, "No board specific HDMI PHY vlev\n");
++
++ ret = of_property_read_u32(np, "fsl,phy_reg_cksymtx", &phy_reg_cksymtx);
++ if (ret)
++ dev_dbg(&pdev->dev, "No board specific HDMI PHY cksymtx\n");
++
++ /* Specific phy config */
++ hdmi->phy_config.reg_cksymtx = phy_reg_cksymtx;
++ hdmi->phy_config.reg_vlev = phy_reg_vlev;
++
++}
++
++/* HDMI Initialization Step A */
++static int mxc_hdmi_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret = 0;
++ u32 i;
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ int irq = platform_get_irq(hdmi->pdev, 0);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Make sure HDMI display init runs only once */
++ if (hdmi_inited) {
++ dev_err(&hdmi->pdev->dev,
++ "Error: only one HDMI output is supported now!\n");
++ return -1;
++ }
++
++ hdmi_get_of_property(hdmi);
++
++ if (irq < 0)
++ return -ENODEV;
++
++ /* Set HDMI to the blank state by default */
++ hdmi->blank = FB_BLANK_POWERDOWN;
++
++ setting->dev_id = mxc_hdmi_ipu_id;
++ setting->disp_id = mxc_hdmi_disp_id;
++ setting->if_fmt = IPU_PIX_FMT_RGB24;
++
++ hdmi->dft_mode_str = setting->dft_mode_str;
++ hdmi->default_bpp = setting->default_bpp;
++ dev_dbg(&hdmi->pdev->dev, "%s - default mode %s bpp=%d\n",
++ __func__, hdmi->dft_mode_str, hdmi->default_bpp);
++
++ hdmi->fbi = setting->fbi;
++
++ hdmi_init_route(hdmi);
++
++ hdmi->hdmi_isfr_clk = clk_get(&hdmi->pdev->dev, "hdmi_isfr");
++ if (IS_ERR(hdmi->hdmi_isfr_clk)) {
++ ret = PTR_ERR(hdmi->hdmi_isfr_clk);
++ dev_err(&hdmi->pdev->dev,
++ "Unable to get HDMI clk: %d\n", ret);
++ goto egetclk1;
++ }
++
++ ret = clk_prepare_enable(hdmi->hdmi_isfr_clk);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Cannot enable HDMI isfr clock: %d\n", ret);
++ goto erate1;
++ }
++
++ hdmi->hdmi_iahb_clk = clk_get(&hdmi->pdev->dev, "hdmi_iahb");
++ if (IS_ERR(hdmi->hdmi_iahb_clk)) {
++ ret = PTR_ERR(hdmi->hdmi_iahb_clk);
++ dev_err(&hdmi->pdev->dev,
++ "Unable to get HDMI clk: %d\n", ret);
++ goto egetclk2;
++ }
++
++ ret = clk_prepare_enable(hdmi->hdmi_iahb_clk);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Cannot enable HDMI iahb clock: %d\n", ret);
++ goto erate2;
++ }
++
++ dev_dbg(&hdmi->pdev->dev, "Enabled HDMI clocks\n");
++
++ /* Init DDC pins for HDCP */
++ if (hdcp_init) {
++ hdmi->pinctrl = devm_pinctrl_get_select_default(&hdmi->pdev->dev);
++ if (IS_ERR(hdmi->pinctrl)) {
++ dev_err(&hdmi->pdev->dev, "can't get/select DDC pinctrl\n");
++ goto erate2;
++ }
++ }
++
++ /* Product and revision IDs */
++ dev_info(&hdmi->pdev->dev,
++ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
++ hdmi_readb(HDMI_DESIGN_ID),
++ hdmi_readb(HDMI_REVISION_ID),
++ hdmi_readb(HDMI_PRODUCT_ID0),
++ hdmi_readb(HDMI_PRODUCT_ID1));
++
++ /* To prevent overflows in HDMI_IH_FC_STAT2, set the clock regenerator
++ * N and CTS values before enabling the PHY */
++ hdmi_init_clk_regenerator();
++
++ INIT_LIST_HEAD(&hdmi->fbi->modelist);
++
++ spin_lock_init(&hdmi->irq_lock);
++
++ /* Set the default mode and modelist at display init time. */
++ fb_find_mode(&hdmi->fbi->var, hdmi->fbi,
++ hdmi->dft_mode_str, NULL, 0, NULL,
++ hdmi->default_bpp);
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++
++ /* Add all non-interlaced CEA modes to the default modelist */
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ mode = &mxc_cea_mode[i];
++ if (!(mode->vmode & FB_VMODE_INTERLACED) && (mode->xres != 0))
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++
++ /* Find a nearest mode in default modelist */
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++ dump_fb_videomode(&m);
++
++ hdmi->dft_mode_set = false;
++ /* Save default video mode */
++ memcpy(&hdmi->default_mode, &m, sizeof(struct fb_videomode));
++
++ mode = fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ if (!mode) {
++ pr_err("%s: could not find mode in modelist\n", __func__);
++ return -1;
++ }
++
++ fb_videomode_to_var(&hdmi->fbi->var, mode);
++
++ /* update fbi mode */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++
++ /* Default setting: HDMI works in HDMI (not DVI) mode */
++ hdmi->edid_cfg.hdmi_cap = true;
++
++ hdmi->plug_event = HDMI_DVI_IH_STAT;
++ hdmi->plug_mask = HDMI_DVI_STAT;
++
++ setup_timer(&hdmi->jitter_timer, hotplug_work_launch, (unsigned long)hdmi);
++ INIT_WORK(&hdmi->hotplug_work, hotplug_worker);
++ INIT_DELAYED_WORK(&hdmi->hdcp_hdp_work, hdcp_hdp_worker);
++
++ /* Configure registers related to HDMI interrupt
++ * generation before registering IRQ. */
++ hdmi_writeb(hdmi->plug_mask, HDMI_PHY_POL0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ hdmi->nb.notifier_call = mxc_hdmi_fb_event;
++ ret = fb_register_client(&hdmi->nb);
++ if (ret < 0)
++ goto efbclient;
++
++ memset(&hdmi->hdmi_data, 0, sizeof(struct hdmi_data_info));
++
++ /* By default, HDMI output works in RGB mode */
++ hdmi->hdmi_data.rgb_out_enable = true;
++
++ if (!strcasecmp(rgb_quant_range, "limited")) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE;
++ } else if (!strcasecmp(rgb_quant_range, "full")) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE;
++ } else {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT;
++ }
++
++ ret = devm_request_irq(&hdmi->pdev->dev, irq, mxc_hdmi_hotplug, IRQF_SHARED,
++ dev_name(&hdmi->pdev->dev), hdmi);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Unable to request irq: %d\n", ret);
++ goto ereqirq;
++ }
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_fb_name);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for fb name\n");
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_cable_state);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for cable state\n");
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_edid);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for edid\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_out_enable);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for rgb out enable\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_quant_range);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for rgb quant range\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_hdcp_enable);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++ "cound not create sys node for hdcp enable\n");
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++
++ hdmi_inited = true;
++
++ return ret;
++
++efbclient:
++ free_irq(irq, hdmi);
++ereqirq:
++ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
++erate2:
++ clk_put(hdmi->hdmi_iahb_clk);
++egetclk2:
++ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
++erate1:
++ clk_put(hdmi->hdmi_isfr_clk);
++egetclk1:
++ dev_dbg(&hdmi->pdev->dev, "%s error exit\n", __func__);
++
++ return ret;
++}
++
++static void mxc_hdmi_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_unregister_client(&hdmi->nb);
++
++ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
++ clk_put(hdmi->hdmi_isfr_clk);
++ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
++ clk_put(hdmi->hdmi_iahb_clk);
++
++ platform_device_unregister(hdmi->pdev);
++
++ hdmi_inited = false;
++}
++
++static struct mxc_dispdrv_driver mxc_hdmi_drv = {
++ .name = DISPDRV_HDMI,
++ .init = mxc_hdmi_disp_init,
++ .deinit = mxc_hdmi_disp_deinit,
++ .enable = mxc_hdmi_power_on,
++ .disable = mxc_hdmi_power_off,
++};
++
++
++static int mxc_hdmi_open(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static long mxc_hdmi_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ int ret = 0;
++
++ switch (cmd) {
++ case HDMI_IOC_GET_RESOURCE:
++ ret = copy_to_user(argp, &g_hdmi->hdmi_data,
++ sizeof(g_hdmi->hdmi_data)) ? -EFAULT : 0;
++ break;
++ case HDMI_IOC_GET_CPU_TYPE:
++ ret = put_user(g_hdmi->cpu_type, argp);
++ break;
++ default:
++ pr_debug("Unsupport cmd %d\n", cmd);
++ break;
++ }
++ return ret;
++}
++
++static int mxc_hdmi_release(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static const struct file_operations mxc_hdmi_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_hdmi_open,
++ .release = mxc_hdmi_release,
++ .unlocked_ioctl = mxc_hdmi_ioctl,
++};
++
++
++static int mxc_hdmi_probe(struct platform_device *pdev)
++{
++ struct mxc_hdmi *hdmi;
++ struct device *temp_class;
++ struct resource *res;
++ int ret = 0;
++
++ /* Check that the I2C driver is loaded and available,
++ * and check whether the HDCP function is enabled by the DTS */
++ hdmi_hdcp_get_property(pdev);
++ if (!hdmi_i2c && !hdcp_init)
++ return -ENODEV;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ hdmi = devm_kzalloc(&pdev->dev,
++ sizeof(struct mxc_hdmi),
++ GFP_KERNEL);
++ if (!hdmi) {
++ dev_err(&pdev->dev, "Cannot allocate device data\n");
++ ret = -ENOMEM;
++ goto ealloc;
++ }
++ g_hdmi = hdmi;
++
++ hdmi_major = register_chrdev(hdmi_major, "mxc_hdmi", &mxc_hdmi_fops);
++ if (hdmi_major < 0) {
++ printk(KERN_ERR "HDMI: unable to get a major for HDMI\n");
++ ret = -EBUSY;
++ goto ealloc;
++ }
++
++ hdmi_class = class_create(THIS_MODULE, "mxc_hdmi");
++ if (IS_ERR(hdmi_class)) {
++ ret = PTR_ERR(hdmi_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_class, NULL, MKDEV(hdmi_major, 0),
++ NULL, "mxc_hdmi");
++ if (IS_ERR(temp_class)) {
++ ret = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ hdmi->pdev = pdev;
++
++ hdmi->core_pdev = platform_device_alloc("mxc_hdmi_core", -1);
++ if (!hdmi->core_pdev) {
++ pr_err("%s failed platform_device_alloc for hdmi core\n",
++ __func__);
++ ret = -ENOMEM;
++ goto ecore;
++ }
++
++ hdmi->gpr_base = ioremap(res->start, resource_size(res));
++ if (!hdmi->gpr_base) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto eiomap;
++ }
++
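++ /* gpr_base + 3 points at IOMUXC GPR3, which carries the HDMI mux
++ * control; gpr_base itself is used for the SDMA event select
++ * (see hdmi_init_route()). */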
++ hdmi->gpr_hdmi_base = hdmi->gpr_base + 3;
++ hdmi->gpr_sdma_base = hdmi->gpr_base;
++
++ hdmi_inited = false;
++
++ hdmi->disp_mxc_hdmi = mxc_dispdrv_register(&mxc_hdmi_drv);
++ if (IS_ERR(hdmi->disp_mxc_hdmi)) {
++ dev_err(&pdev->dev, "Failed to register dispdrv - 0x%x\n",
++ (int)hdmi->disp_mxc_hdmi);
++ ret = (int)hdmi->disp_mxc_hdmi;
++ goto edispdrv;
++ }
++ mxc_dispdrv_setdata(hdmi->disp_mxc_hdmi, hdmi);
++
++ platform_set_drvdata(pdev, hdmi);
++
++ return 0;
++edispdrv:
++ iounmap(hdmi->gpr_base);
++eiomap:
++ platform_device_put(hdmi->core_pdev);
++ecore:
++err_out_class:
++ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
++ class_destroy(hdmi_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_major, "mxc_hdmi");
++ealloc:
++ return ret;
++}
++
++static int mxc_hdmi_remove(struct platform_device *pdev)
++{
++ struct mxc_hdmi *hdmi = platform_get_drvdata(pdev);
++ int irq = platform_get_irq(pdev, 0);
++
++ fb_unregister_client(&hdmi->nb);
++
++ mxc_dispdrv_puthandle(hdmi->disp_mxc_hdmi);
++ mxc_dispdrv_unregister(hdmi->disp_mxc_hdmi);
++ iounmap(hdmi->gpr_base);
++ /* No new work will be scheduled, wait for running ISR */
++ free_irq(irq, hdmi);
++ g_hdmi = NULL;
++
++ return 0;
++}
++
++static struct platform_driver mxc_hdmi_driver = {
++ .probe = mxc_hdmi_probe,
++ .remove = mxc_hdmi_remove,
++ .driver = {
++ .name = "mxc_hdmi",
++ .of_match_table = imx_hdmi_dt_ids,
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init mxc_hdmi_init(void)
++{
++ return platform_driver_register(&mxc_hdmi_driver);
++}
++module_init(mxc_hdmi_init);
++
++static void __exit mxc_hdmi_exit(void)
++{
++ if (hdmi_major > 0) {
++ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
++ class_destroy(hdmi_class);
++ unregister_chrdev(hdmi_major, "mxc_hdmi");
++ hdmi_major = 0;
++ }
++
++ platform_driver_unregister(&mxc_hdmi_driver);
++}
++module_exit(mxc_hdmi_exit);
++
++static int mxc_hdmi_i2c_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
++ return -ENODEV;
++
++ hdmi_i2c = client;
++
++ return 0;
++}
++
++static int mxc_hdmi_i2c_remove(struct i2c_client *client)
++{
++ hdmi_i2c = NULL;
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_i2c_match[] = {
++ { .compatible = "fsl,imx6-hdmi-i2c", },
++ { /* sentinel */ }
++};
++
++static const struct i2c_device_id mxc_hdmi_i2c_id[] = {
++ { "mxc_hdmi_i2c", 0 },
++ {},
++};
++MODULE_DEVICE_TABLE(i2c, mxc_hdmi_i2c_id);
++
++static struct i2c_driver mxc_hdmi_i2c_driver = {
++ .driver = {
++ .name = "mxc_hdmi_i2c",
++ .of_match_table = imx_hdmi_i2c_match,
++ },
++ .probe = mxc_hdmi_i2c_probe,
++ .remove = mxc_hdmi_i2c_remove,
++ .id_table = mxc_hdmi_i2c_id,
++};
++
++static int __init mxc_hdmi_i2c_init(void)
++{
++ return i2c_add_driver(&mxc_hdmi_i2c_driver);
++}
++
++static void __exit mxc_hdmi_i2c_exit(void)
++{
++ i2c_del_driver(&mxc_hdmi_i2c_driver);
++}
++
++module_init(mxc_hdmi_i2c_init);
++module_exit(mxc_hdmi_i2c_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxc_ipuv3_fb.c linux-3.14.40/drivers/video/mxc/mxc_ipuv3_fb.c
+--- linux-3.14.40.orig/drivers/video/mxc/mxc_ipuv3_fb.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxc_ipuv3_fb.c 2015-05-01 14:58:05.735427001 -0500
+@@ -0,0 +1,2578 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_ipuv3_fb.c
++ *
++ * @brief MXC Frame buffer driver for SDC
++ *
++ * @ingroup Framebuffer
++ */
++
++/*!
++ * Include files
++ */
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/fsl_devices.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/ipu.h>
++#include <linux/ipu-v3.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++
++#include "mxc_dispdrv.h"
++
++/*
++ * Driver name
++ */
++#define MXCFB_NAME "mxc_sdc_fb"
++
++/* Display port number */
++#define MXCFB_PORT_NUM 2
++/*!
++ * Structure containing the MXC specific framebuffer information.
++ */
++struct mxcfb_info {
++ int default_bpp;
++ int cur_blank;
++ int next_blank;
++ ipu_channel_t ipu_ch;
++ int ipu_id;
++ int ipu_di;
++ u32 ipu_di_pix_fmt;
++ bool ipu_int_clk;
++ bool overlay;
++ bool alpha_chan_en;
++ bool late_init;
++ bool first_set_par;
++ dma_addr_t alpha_phy_addr0;
++ dma_addr_t alpha_phy_addr1;
++ void *alpha_virt_addr0;
++ void *alpha_virt_addr1;
++ uint32_t alpha_mem_len;
++ uint32_t ipu_ch_irq;
++ uint32_t ipu_ch_nf_irq;
++ uint32_t ipu_alp_ch_irq;
++ uint32_t cur_ipu_buf;
++ uint32_t cur_ipu_alpha_buf;
++
++ u32 pseudo_palette[16];
++
++ bool mode_found;
++ struct completion flip_complete;
++ struct completion alpha_flip_complete;
++ struct completion vsync_complete;
++
++ void *ipu;
++ struct fb_info *ovfbi;
++
++ struct mxc_dispdrv_handle *dispdrv;
++
++ struct fb_var_screeninfo cur_var;
++};
++
++struct mxcfb_pfmt {
++ u32 fb_pix_fmt;
++ int bpp;
++ struct fb_bitfield red;
++ struct fb_bitfield green;
++ struct fb_bitfield blue;
++ struct fb_bitfield transp;
++};
++
++static const struct mxcfb_pfmt mxcfb_pfmts[] = {
++ /* pixel bpp red green blue transp */
++ {IPU_PIX_FMT_RGB565, 16, {11, 5, 0}, { 5, 6, 0}, { 0, 5, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_RGB24, 24, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_BGR24, 24, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_RGB32, 32, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, {24, 8, 0} },
++ {IPU_PIX_FMT_BGR32, 32, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, {24, 8, 0} },
++ {IPU_PIX_FMT_ABGR32, 32, {24, 8, 0}, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0} },
++};
++
++struct mxcfb_alloc_list {
++ struct list_head list;
++ dma_addr_t phy_addr;
++ void *cpu_addr;
++ u32 size;
++};
++
++enum {
++ BOTH_ON,
++ SRC_ON,
++ TGT_ON,
++ BOTH_OFF
++};
++
++static bool g_dp_in_use[2];
++LIST_HEAD(fb_alloc_list);
++
++ /* Return the default standard (RGB) pixel format */
++static uint32_t bpp_to_pixfmt(int bpp)
++{
++ uint32_t pixfmt = 0;
++
++ switch (bpp) {
++ case 24:
++ pixfmt = IPU_PIX_FMT_BGR24;
++ break;
++ case 32:
++ pixfmt = IPU_PIX_FMT_BGR32;
++ break;
++ case 16:
++ pixfmt = IPU_PIX_FMT_RGB565;
++ break;
++ }
++ return pixfmt;
++}
++
++static inline int bitfield_is_equal(struct fb_bitfield f1,
++ struct fb_bitfield f2)
++{
++ return !memcmp(&f1, &f2, sizeof(f1));
++}
++
++static int pixfmt_to_var(uint32_t pixfmt, struct fb_var_screeninfo *var)
++{
++ int i, ret = -1;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (pixfmt == mxcfb_pfmts[i].fb_pix_fmt) {
++ var->red = mxcfb_pfmts[i].red;
++ var->green = mxcfb_pfmts[i].green;
++ var->blue = mxcfb_pfmts[i].blue;
++ var->transp = mxcfb_pfmts[i].transp;
++ var->bits_per_pixel = mxcfb_pfmts[i].bpp;
++ ret = 0;
++ break;
++ }
++ }
++ return ret;
++}
++
++static int bpp_to_var(int bpp, struct fb_var_screeninfo *var)
++{
++ uint32_t pixfmt = 0;
++
++ pixfmt = bpp_to_pixfmt(bpp);
++ if (pixfmt)
++ return pixfmt_to_var(pixfmt, var);
++ else
++ return -1;
++}
++
++static int check_var_pixfmt(struct fb_var_screeninfo *var)
++{
++ int i, ret = -1;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (bitfield_is_equal(var->red, mxcfb_pfmts[i].red) &&
++ bitfield_is_equal(var->green, mxcfb_pfmts[i].green) &&
++ bitfield_is_equal(var->blue, mxcfb_pfmts[i].blue) &&
++ bitfield_is_equal(var->transp, mxcfb_pfmts[i].transp) &&
++ var->bits_per_pixel == mxcfb_pfmts[i].bpp) {
++ ret = 0;
++ break;
++ }
++ }
++ return ret;
++}
++
++static uint32_t fbi_to_pixfmt(struct fb_info *fbi)
++{
++ int i;
++ uint32_t pixfmt = 0;
++
++ if (fbi->var.nonstd)
++ return fbi->var.nonstd;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (bitfield_is_equal(fbi->var.red, mxcfb_pfmts[i].red) &&
++ bitfield_is_equal(fbi->var.green, mxcfb_pfmts[i].green) &&
++ bitfield_is_equal(fbi->var.blue, mxcfb_pfmts[i].blue) &&
++ bitfield_is_equal(fbi->var.transp, mxcfb_pfmts[i].transp)) {
++ pixfmt = mxcfb_pfmts[i].fb_pix_fmt;
++ break;
++ }
++ }
++
++ if (pixfmt == 0)
++ dev_err(fbi->device, "cannot get pixel format\n");
++
++ return pixfmt;
++}
++
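++/* Look up a registered framebuffer by its IPU channel and IPU id */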
++static struct fb_info *found_registered_fb(ipu_channel_t ipu_ch, int ipu_id)
++{
++ int i;
++ struct mxcfb_info *mxc_fbi;
++ struct fb_info *fbi = NULL;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ mxc_fbi =
++ ((struct mxcfb_info *)(registered_fb[i]->par));
++
++ if ((mxc_fbi->ipu_ch == ipu_ch) &&
++ (mxc_fbi->ipu_id == ipu_id)) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++ return fbi;
++}
++
++static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id);
++static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id);
++static int mxcfb_blank(int blank, struct fb_info *info);
++static int mxcfb_map_video_memory(struct fb_info *fbi);
++static int mxcfb_unmap_video_memory(struct fb_info *fbi);
++
++/*
++ * Set fixed framebuffer parameters based on variable settings.
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_set_fix(struct fb_info *info)
++{
++ struct fb_fix_screeninfo *fix = &info->fix;
++ struct fb_var_screeninfo *var = &info->var;
++
++ fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
++
++ fix->type = FB_TYPE_PACKED_PIXELS;
++ fix->accel = FB_ACCEL_NONE;
++ fix->visual = FB_VISUAL_TRUECOLOR;
++ fix->xpanstep = 1;
++ fix->ywrapstep = 1;
++ fix->ypanstep = 1;
++
++ return 0;
++}
++
++static int _setup_disp_channel1(struct fb_info *fbi)
++{
++ ipu_channel_params_t params;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ memset(&params, 0, sizeof(params));
++
++ if (mxc_fbi->ipu_ch == MEM_DC_SYNC) {
++ params.mem_dc_sync.di = mxc_fbi->ipu_di;
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ params.mem_dc_sync.interlaced = true;
++ params.mem_dc_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ params.mem_dc_sync.in_pixel_fmt = fbi_to_pixfmt(fbi);
++ } else {
++ params.mem_dp_bg_sync.di = mxc_fbi->ipu_di;
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ params.mem_dp_bg_sync.interlaced = true;
++ params.mem_dp_bg_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ params.mem_dp_bg_sync.in_pixel_fmt = fbi_to_pixfmt(fbi);
++ if (mxc_fbi->alpha_chan_en)
++ params.mem_dp_bg_sync.alpha_chan_en = true;
++ }
++ ipu_init_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, &params);
++
++ return 0;
++}
++
++static int _setup_disp_channel2(struct fb_info *fbi)
++{
++ int retval = 0;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++ int fb_stride;
++ unsigned long base;
++ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
++
++ switch (fbi_to_pixfmt(fbi)) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YUV444P:
++ fb_stride = fbi->var.xres_virtual;
++ break;
++ default:
++ fb_stride = fbi->fix.line_length;
++ }
++
++ base = fbi->fix.smem_start;
++ fr_xoff = fbi->var.xoffset;
++ fr_w = fbi->var.xres_virtual;
++ if (!(fbi->var.vmode & FB_VMODE_YWRAP)) {
++ dev_dbg(fbi->device, "Y wrap disabled\n");
++ fr_yoff = fbi->var.yoffset % fbi->var.yres;
++ fr_h = fbi->var.yres;
++ base += fbi->fix.line_length * fbi->var.yres *
++ (fbi->var.yoffset / fbi->var.yres);
++ } else {
++ dev_dbg(fbi->device, "Y wrap enabled\n");
++ fr_yoff = fbi->var.yoffset;
++ fr_h = fbi->var.yres_virtual;
++ }
++ base += fr_yoff * fb_stride + fr_xoff;
++
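++ /* Start on buffer 2 so the first pan display advances to buffer 0 */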
++ mxc_fbi->cur_ipu_buf = 2;
++ init_completion(&mxc_fbi->flip_complete);
++ /*
++ * We don't need to wait for vsync at the first time
++ * we do pan display after fb is initialized, as IPU will
++ * switch to the newly selected buffer automatically,
++ * so we call complete() for both mxc_fbi->flip_complete
++ * and mxc_fbi->alpha_flip_complete.
++ */
++ complete(&mxc_fbi->flip_complete);
++ if (mxc_fbi->alpha_chan_en) {
++ mxc_fbi->cur_ipu_alpha_buf = 1;
++ init_completion(&mxc_fbi->alpha_flip_complete);
++ complete(&mxc_fbi->alpha_flip_complete);
++ }
++
++ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(fbi),
++ fbi->var.xres, fbi->var.yres,
++ fb_stride,
++ fbi->var.rotate,
++ base,
++ base,
++ fbi->var.accel_flags &
++ FB_ACCEL_DOUBLE_FLAG ? 0 : base,
++ 0, 0);
++ if (retval) {
++ dev_err(fbi->device,
++ "ipu_init_channel_buffer error %d\n", retval);
++ return retval;
++ }
++
++ /* update u/v offset */
++ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(fbi),
++ fr_w,
++ fr_h,
++ fr_w,
++ 0, 0,
++ fr_yoff,
++ fr_xoff);
++
++ if (mxc_fbi->alpha_chan_en) {
++ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ IPU_PIX_FMT_GENERIC,
++ fbi->var.xres, fbi->var.yres,
++ fbi->var.xres,
++ fbi->var.rotate,
++ mxc_fbi->alpha_phy_addr1,
++ mxc_fbi->alpha_phy_addr0,
++ 0,
++ 0, 0);
++ if (retval) {
++ dev_err(fbi->device,
++ "ipu_init_channel_buffer error %d\n", retval);
++ return retval;
++ }
++ }
++
++ return retval;
++}
++
++static bool mxcfb_need_to_set_par(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ if ((fbi->var.activate & FB_ACTIVATE_FORCE) &&
++ (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
++ return true;
++
++ /*
++ * Ignore xoffset and yoffset update,
++ * because pan display handles this case.
++ */
++ mxc_fbi->cur_var.xoffset = fbi->var.xoffset;
++ mxc_fbi->cur_var.yoffset = fbi->var.yoffset;
++
++ return !!memcmp(&mxc_fbi->cur_var, &fbi->var,
++ sizeof(struct fb_var_screeninfo));
++}
++
++/*
++ * Set framebuffer parameters and change the operating mode.
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_set_par(struct fb_info *fbi)
++{
++ int retval = 0;
++ u32 mem_len, alpha_mem_len;
++ ipu_di_signal_cfg_t sig_cfg;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ int16_t ov_pos_x = 0, ov_pos_y = 0;
++ int ov_pos_ret = 0;
++ struct mxcfb_info *mxc_fbi_fg = NULL;
++ bool ovfbi_enable = false;
++
++ if (ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi)) &&
++ mxc_fbi->alpha_chan_en) {
++ dev_err(fbi->device, "Bad pixel format for "
++ "graphics plane fb\n");
++ return -EINVAL;
++ }
++
++ if (mxc_fbi->ovfbi)
++ mxc_fbi_fg = (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++
++ if (mxc_fbi->ovfbi && mxc_fbi_fg)
++ if (mxc_fbi_fg->next_blank == FB_BLANK_UNBLANK)
++ ovfbi_enable = true;
++
++ if (!mxcfb_need_to_set_par(fbi))
++ return 0;
++
++ dev_dbg(fbi->device, "Reconfiguring framebuffer\n");
++
++ if (fbi->var.xres == 0 || fbi->var.yres == 0)
++ return 0;
++
++ if (ovfbi_enable) {
++ ov_pos_ret = ipu_disp_get_window_pos(
++ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
++ &ov_pos_x, &ov_pos_y);
++ if (ov_pos_ret < 0)
++ dev_err(fbi->device, "Get overlay pos failed, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++
++ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
++ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
++ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
++ ipu_disable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++ }
++
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++
++ /*
++ * Disable the IPU HSP clock if the IPU common driver kept it
++ * enabled for an additional period of time.
++ */
++ if (mxc_fbi->first_set_par && mxc_fbi->late_init)
++ ipu_disable_hsp_clk(mxc_fbi->ipu);
++
++ mxcfb_set_fix(fbi);
++
++ mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
++ if (!fbi->fix.smem_start || (mem_len > fbi->fix.smem_len)) {
++ if (fbi->fix.smem_start)
++ mxcfb_unmap_video_memory(fbi);
++
++ if (mxcfb_map_video_memory(fbi) < 0)
++ return -ENOMEM;
++ }
++
++ if (mxc_fbi->first_set_par) {
++ /*
++ * Clear the screen in case the U-Boot fb pixel format is not
++ * the same as the kernel fb pixel format.
++ */
++ if (mxc_fbi->late_init)
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ mxc_fbi->first_set_par = false;
++ }
++
++ if (mxc_fbi->alpha_chan_en) {
++ alpha_mem_len = fbi->var.xres * fbi->var.yres;
++ if ((!mxc_fbi->alpha_phy_addr0 && !mxc_fbi->alpha_phy_addr1) ||
++ (alpha_mem_len > mxc_fbi->alpha_mem_len)) {
++ if (mxc_fbi->alpha_phy_addr0)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr0,
++ mxc_fbi->alpha_phy_addr0);
++ if (mxc_fbi->alpha_phy_addr1)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr1,
++ mxc_fbi->alpha_phy_addr1);
++
++ mxc_fbi->alpha_virt_addr0 =
++ dma_alloc_coherent(fbi->device,
++ alpha_mem_len,
++ &mxc_fbi->alpha_phy_addr0,
++ GFP_DMA | GFP_KERNEL);
++
++ mxc_fbi->alpha_virt_addr1 =
++ dma_alloc_coherent(fbi->device,
++ alpha_mem_len,
++ &mxc_fbi->alpha_phy_addr1,
++ GFP_DMA | GFP_KERNEL);
++ if (mxc_fbi->alpha_virt_addr0 == NULL ||
++ mxc_fbi->alpha_virt_addr1 == NULL) {
++ dev_err(fbi->device, "mxcfb: dma alloc for"
++ " alpha buffer failed.\n");
++ if (mxc_fbi->alpha_virt_addr0)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr0,
++ mxc_fbi->alpha_phy_addr0);
++ if (mxc_fbi->alpha_virt_addr1)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr1,
++ mxc_fbi->alpha_phy_addr1);
++ return -ENOMEM;
++ }
++ mxc_fbi->alpha_mem_len = alpha_mem_len;
++ }
++ }
++
++ if (mxc_fbi->next_blank != FB_BLANK_UNBLANK)
++ return retval;
++
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->setup) {
++ retval = mxc_fbi->dispdrv->drv->setup(mxc_fbi->dispdrv, fbi);
++ if (retval < 0) {
++ dev_err(fbi->device, "setup error, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++ return -EINVAL;
++ }
++ }
++
++ _setup_disp_channel1(fbi);
++ if (ovfbi_enable)
++ _setup_disp_channel1(mxc_fbi->ovfbi);
++
++ if (!mxc_fbi->overlay) {
++ uint32_t out_pixel_fmt;
++
++ memset(&sig_cfg, 0, sizeof(sig_cfg));
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ sig_cfg.interlaced = true;
++ out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ if (fbi->var.vmode & FB_VMODE_ODD_FLD_FIRST) /* PAL */
++ sig_cfg.odd_field_first = true;
++ if (mxc_fbi->ipu_int_clk)
++ sig_cfg.int_clk = true;
++ if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
++ sig_cfg.Hsync_pol = true;
++ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
++ sig_cfg.Vsync_pol = true;
++ if (!(fbi->var.sync & FB_SYNC_CLK_LAT_FALL))
++ sig_cfg.clk_pol = true;
++ if (fbi->var.sync & FB_SYNC_DATA_INVERT)
++ sig_cfg.data_pol = true;
++ if (!(fbi->var.sync & FB_SYNC_OE_LOW_ACT))
++ sig_cfg.enable_pol = true;
++ if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
++ sig_cfg.clkidle_en = true;
++
++ dev_dbg(fbi->device, "pixclock = %ul Hz\n",
++ (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
++
++ if (ipu_init_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di,
++ (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
++ fbi->var.xres, fbi->var.yres,
++ out_pixel_fmt,
++ fbi->var.left_margin,
++ fbi->var.hsync_len,
++ fbi->var.right_margin,
++ fbi->var.upper_margin,
++ fbi->var.vsync_len,
++ fbi->var.lower_margin,
++ 0, sig_cfg) != 0) {
++ dev_err(fbi->device,
++ "mxcfb: Error initializing panel.\n");
++ return -EINVAL;
++ }
++
++ fbi->mode =
++ (struct fb_videomode *)fb_match_mode(&fbi->var,
++ &fbi->modelist);
++
++ ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, 0, 0);
++ }
++
++ retval = _setup_disp_channel2(fbi);
++ if (retval) {
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ return retval;
++ }
++
++ if (ovfbi_enable) {
++ if (ov_pos_ret >= 0)
++ ipu_disp_set_window_pos(
++ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
++ ov_pos_x, ov_pos_y);
++ retval = _setup_disp_channel2(mxc_fbi->ovfbi);
++ if (retval) {
++ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ return retval;
++ }
++ }
++
++ ipu_enable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ if (ovfbi_enable)
++ ipu_enable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->enable) {
++ retval = mxc_fbi->dispdrv->drv->enable(mxc_fbi->dispdrv);
++ if (retval < 0) {
++ dev_err(fbi->device, "enable error, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++ return -EINVAL;
++ }
++ }
++
++ mxc_fbi->cur_var = fbi->var;
++
++ return retval;
++}
++
++static int _swap_channels(struct fb_info *fbi_from,
++ struct fb_info *fbi_to, bool both_on)
++{
++ int retval, tmp;
++ ipu_channel_t old_ch;
++ struct fb_info *ovfbi;
++ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
++ struct mxcfb_info *mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
++
++ if (both_on) {
++ ipu_disable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
++ }
++
++ /* switch the mxc fbi parameters */
++ old_ch = mxc_fbi_from->ipu_ch;
++ mxc_fbi_from->ipu_ch = mxc_fbi_to->ipu_ch;
++ mxc_fbi_to->ipu_ch = old_ch;
++ tmp = mxc_fbi_from->ipu_ch_irq;
++ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
++ mxc_fbi_to->ipu_ch_irq = tmp;
++ tmp = mxc_fbi_from->ipu_ch_nf_irq;
++ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
++ mxc_fbi_to->ipu_ch_nf_irq = tmp;
++ ovfbi = mxc_fbi_from->ovfbi;
++ mxc_fbi_from->ovfbi = mxc_fbi_to->ovfbi;
++ mxc_fbi_to->ovfbi = ovfbi;
++
++ _setup_disp_channel1(fbi_from);
++ retval = _setup_disp_channel2(fbi_from);
++ if (retval)
++ return retval;
++
++ /* switch between dp and dc, disable old idmac, enable new idmac */
++ retval = ipu_swap_channel(mxc_fbi_from->ipu, old_ch, mxc_fbi_from->ipu_ch);
++ ipu_uninit_channel(mxc_fbi_from->ipu, old_ch);
++
++ if (both_on) {
++ _setup_disp_channel1(fbi_to);
++ retval = _setup_disp_channel2(fbi_to);
++ if (retval)
++ return retval;
++ ipu_enable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
++ }
++
++ return retval;
++}
++
++static int swap_channels(struct fb_info *fbi_from)
++{
++ int i;
++ int swap_mode;
++ ipu_channel_t ch_to;
++ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
++ struct fb_info *fbi_to = NULL;
++ struct mxcfb_info *mxc_fbi_to;
++
++ /* what's the target channel? */
++ if (mxc_fbi_from->ipu_ch == MEM_BG_SYNC)
++ ch_to = MEM_DC_SYNC;
++ else
++ ch_to = MEM_BG_SYNC;
++
++ fbi_to = found_registered_fb(ch_to, mxc_fbi_from->ipu_id);
++ if (!fbi_to)
++ return -1;
++ mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
++
++ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
++ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq, fbi_from);
++ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq, fbi_to);
++ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
++ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
++ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq, fbi_from);
++ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq, fbi_to);
++
++ if (mxc_fbi_from->cur_blank == FB_BLANK_UNBLANK) {
++ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
++ swap_mode = BOTH_ON;
++ else
++ swap_mode = SRC_ON;
++ } else {
++ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
++ swap_mode = TGT_ON;
++ else
++ swap_mode = BOTH_OFF;
++ }
++
++ switch (swap_mode) {
++ case BOTH_ON:
++ /* disable target->switch src->enable target */
++ _swap_channels(fbi_from, fbi_to, true);
++ break;
++ case SRC_ON:
++ /* just switch src */
++ _swap_channels(fbi_from, fbi_to, false);
++ break;
++ case TGT_ON:
++ /* just switch target */
++ _swap_channels(fbi_to, fbi_from, false);
++ break;
++ case BOTH_OFF:
++ /* switch directly, nothing more needs to be done */
++ mxc_fbi_to->ipu_ch = mxc_fbi_from->ipu_ch;
++ mxc_fbi_from->ipu_ch = ch_to;
++ i = mxc_fbi_from->ipu_ch_irq;
++ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
++ mxc_fbi_to->ipu_ch_irq = i;
++ i = mxc_fbi_from->ipu_ch_nf_irq;
++ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
++ mxc_fbi_to->ipu_ch_nf_irq = i;
++ break;
++ default:
++ break;
++ }
++
++ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_from) != 0) {
++ dev_err(fbi_from->device, "Error registering irq %d\n",
++ mxc_fbi_from->ipu_ch_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
++ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_to) != 0) {
++ dev_err(fbi_to->device, "Error registering irq %d\n",
++ mxc_fbi_to->ipu_ch_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
++ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_from) != 0) {
++ dev_err(fbi_from->device, "Error registering irq %d\n",
++ mxc_fbi_from->ipu_ch_nf_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
++ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_to) != 0) {
++ dev_err(fbi_to->device, "Error registering irq %d\n",
++ mxc_fbi_to->ipu_ch_nf_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
++
++ return 0;
++}
++
++/*
++ * Check framebuffer variable parameters and adjust to valid values.
++ *
++ * @param var framebuffer variable parameters
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ u32 vtotal;
++ u32 htotal;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
++
++
++ if (var->xres == 0 || var->yres == 0)
++ return 0;
++
++ /* The FG window must not be bigger than the BG */
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ struct fb_info *fbi_tmp;
++ int bg_xres = 0, bg_yres = 0;
++ int16_t pos_x, pos_y;
++
++ bg_xres = var->xres;
++ bg_yres = var->yres;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp) {
++ bg_xres = fbi_tmp->var.xres;
++ bg_yres = fbi_tmp->var.yres;
++ }
++
++ ipu_disp_get_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, &pos_x, &pos_y);
++
++ if ((var->xres + pos_x) > bg_xres)
++ var->xres = bg_xres - pos_x;
++ if ((var->yres + pos_y) > bg_yres)
++ var->yres = bg_yres - pos_y;
++ }
++
++ if (var->rotate > IPU_ROTATE_VERT_FLIP)
++ var->rotate = IPU_ROTATE_NONE;
++
++ if (var->xres_virtual < var->xres)
++ var->xres_virtual = var->xres;
++
++ if (var->yres_virtual < var->yres)
++ var->yres_virtual = var->yres * 3;
++
++ if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
++ (var->bits_per_pixel != 16) && (var->bits_per_pixel != 12) &&
++ (var->bits_per_pixel != 8))
++ var->bits_per_pixel = 16;
++
++ if (check_var_pixfmt(var))
++ /* Fall back to default */
++ bpp_to_var(var->bits_per_pixel, var);
++
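++ /* No usable pixel clock was supplied: derive one for a 60 Hz
++ * refresh (total pixels per frame * 60 Hz, in kHz, converted to
++ * picoseconds below). */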
++ if (var->pixclock < 1000) {
++ htotal = var->xres + var->right_margin + var->hsync_len +
++ var->left_margin;
++ vtotal = var->yres + var->lower_margin + var->vsync_len +
++ var->upper_margin;
++ var->pixclock = (vtotal * htotal * 6UL) / 100UL;
++ var->pixclock = KHZ2PICOS(var->pixclock);
++ dev_dbg(info->device,
++ "pixclock set for 60Hz refresh = %u ps\n",
++ var->pixclock);
++ }
++
++ var->height = -1;
++ var->width = -1;
++ var->grayscale = 0;
++
++ return 0;
++}
++
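++/*
++ * Scale a 16-bit colour component down to the width of the given
++ * bitfield and shift it to the bitfield's offset.
++ */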
++static inline u_int _chan_to_field(u_int chan, struct fb_bitfield *bf)
++{
++ chan &= 0xffff;
++ chan >>= 16 - bf->length;
++ return chan << bf->offset;
++}
++
++static int mxcfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
++ u_int trans, struct fb_info *fbi)
++{
++ unsigned int val;
++ int ret = 1;
++
++ /*
++ * If greyscale is true, then we convert the RGB value
++ * to greyscale no matter what visual we are using.
++ */
++ if (fbi->var.grayscale)
++ red = green = blue = (19595 * red + 38470 * green +
++ 7471 * blue) >> 16;
++ switch (fbi->fix.visual) {
++ case FB_VISUAL_TRUECOLOR:
++ /*
++ * 16-bit True Colour. We encode the RGB value
++ * according to the RGB bitfield information.
++ */
++ if (regno < 16) {
++ u32 *pal = fbi->pseudo_palette;
++
++ val = _chan_to_field(red, &fbi->var.red);
++ val |= _chan_to_field(green, &fbi->var.green);
++ val |= _chan_to_field(blue, &fbi->var.blue);
++
++ pal[regno] = val;
++ ret = 0;
++ }
++ break;
++
++ case FB_VISUAL_STATIC_PSEUDOCOLOR:
++ case FB_VISUAL_PSEUDOCOLOR:
++ break;
++ }
++
++ return ret;
++}
++
++/*
++ * Function to handle custom ioctls for MXC framebuffer.
++ *
++ * @param inode inode struct
++ *
++ * @param file file struct
++ *
++ * @param cmd Ioctl command to handle
++ *
++ * @param arg User pointer to command arguments
++ *
++ * @param fbi framebuffer information pointer
++ */
++static int mxcfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
++{
++ int retval = 0;
++ int __user *argp = (void __user *)arg;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ switch (cmd) {
++ case MXCFB_SET_GBL_ALPHA:
++ {
++ struct mxcfb_gbl_alpha ga;
++
++ if (copy_from_user(&ga, (void *)arg, sizeof(ga))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ (bool)ga.enable,
++ ga.alpha)) {
++ retval = -EINVAL;
++ break;
++ }
++
++ if (ga.enable)
++ mxc_fbi->alpha_chan_en = false;
++
++ if (ga.enable)
++ dev_dbg(fbi->device,
++ "Set global alpha of %s to %d\n",
++ fbi->fix.id, ga.alpha);
++ break;
++ }
++ case MXCFB_SET_LOC_ALPHA:
++ {
++ struct mxcfb_loc_alpha la;
++ bool bad_pixfmt =
++ ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi));
++
++ if (copy_from_user(&la, (void *)arg, sizeof(la))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (la.enable && !la.alpha_in_pixel) {
++ struct fb_info *fbi_tmp;
++ ipu_channel_t ipu_ch;
++
++ if (bad_pixfmt) {
++ dev_err(fbi->device, "Bad pixel format "
++ "for graphics plane fb\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ mxc_fbi->alpha_chan_en = true;
++
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
++ ipu_ch = MEM_BG_SYNC;
++ else if (mxc_fbi->ipu_ch == MEM_BG_SYNC)
++ ipu_ch = MEM_FG_SYNC;
++ else {
++ retval = -EINVAL;
++ break;
++ }
++
++ fbi_tmp = found_registered_fb(ipu_ch, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ ((struct mxcfb_info *)(fbi_tmp->par))->alpha_chan_en = false;
++ } else
++ mxc_fbi->alpha_chan_en = false;
++
++ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ !(bool)la.enable, 0)) {
++ retval = -EINVAL;
++ break;
++ }
++
++ fbi->var.activate = (fbi->var.activate & ~FB_ACTIVATE_MASK) |
++ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
++ mxcfb_set_par(fbi);
++
++ la.alpha_phy_addr0 = mxc_fbi->alpha_phy_addr0;
++ la.alpha_phy_addr1 = mxc_fbi->alpha_phy_addr1;
++ if (copy_to_user((void *)arg, &la, sizeof(la))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (la.enable)
++ dev_dbg(fbi->device,
++ "Enable DP local alpha for %s\n",
++ fbi->fix.id);
++ break;
++ }
++ case MXCFB_SET_LOC_ALP_BUF:
++ {
++ unsigned long base;
++ uint32_t ipu_alp_ch_irq;
++
++ if (!(((mxc_fbi->ipu_ch == MEM_FG_SYNC) ||
++ (mxc_fbi->ipu_ch == MEM_BG_SYNC)) &&
++ (mxc_fbi->alpha_chan_en))) {
++ dev_err(fbi->device,
++ "Should use background or overlay "
++ "framebuffer to set the alpha buffer "
++ "number\n");
++ return -EINVAL;
++ }
++
++ if (get_user(base, argp))
++ return -EFAULT;
++
++ if (base != mxc_fbi->alpha_phy_addr0 &&
++ base != mxc_fbi->alpha_phy_addr1) {
++ dev_err(fbi->device,
++ "Wrong alpha buffer physical address "
++ "%lu\n", base);
++ return -EINVAL;
++ }
++
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
++ ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
++ else
++ ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
++
++ retval = wait_for_completion_timeout(
++ &mxc_fbi->alpha_flip_complete, HZ/2);
++ if (retval == 0) {
++ dev_err(fbi->device, "timeout when waiting for alpha flip irq\n");
++ retval = -ETIMEDOUT;
++ break;
++ }
++
++ mxc_fbi->cur_ipu_alpha_buf =
++ !mxc_fbi->cur_ipu_alpha_buf;
++ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf,
++ base) == 0) {
++ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf);
++ ipu_clear_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
++ } else {
++ dev_err(fbi->device,
++ "Error updating %s SDC alpha buf %d "
++ "to address=0x%08lX\n",
++ fbi->fix.id,
++ mxc_fbi->cur_ipu_alpha_buf, base);
++ }
++ break;
++ }
++ case MXCFB_SET_CLR_KEY:
++ {
++ struct mxcfb_color_key key;
++ if (copy_from_user(&key, (void *)arg, sizeof(key))) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = ipu_disp_set_color_key(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ key.enable,
++ key.color_key);
++ dev_dbg(fbi->device, "Set color key to 0x%08X\n",
++ key.color_key);
++ break;
++ }
++ case MXCFB_SET_GAMMA:
++ {
++ struct mxcfb_gamma gamma;
++ if (copy_from_user(&gamma, (void *)arg, sizeof(gamma))) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = ipu_disp_set_gamma_correction(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ gamma.enable,
++ gamma.constk,
++ gamma.slopek);
++ break;
++ }
++ case MXCFB_WAIT_FOR_VSYNC:
++ {
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ /* The BG channel must be powered on */
++ struct mxcfb_info *bg_mxcfbi = NULL;
++ struct fb_info *fbi_tmp;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
++
++ if (!bg_mxcfbi) {
++ retval = -EINVAL;
++ break;
++ }
++ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK) {
++ retval = -EINVAL;
++ break;
++ }
++ }
++ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK) {
++ retval = -EINVAL;
++ break;
++ }
++
++ init_completion(&mxc_fbi->vsync_complete);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ retval = wait_for_completion_interruptible_timeout(
++ &mxc_fbi->vsync_complete, 1 * HZ);
++ if (retval == 0) {
++ dev_err(fbi->device,
++ "MXCFB_WAIT_FOR_VSYNC: timeout %d\n",
++ retval);
++ retval = -ETIME;
++ } else if (retval > 0) {
++ retval = 0;
++ }
++ break;
++ }
++ case FBIO_ALLOC:
++ {
++ int size;
++ struct mxcfb_alloc_list *mem;
++
++ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
++ if (mem == NULL)
++ return -ENOMEM;
++
++ if (get_user(size, argp))
++ return -EFAULT;
++
++ mem->size = PAGE_ALIGN(size);
++
++ mem->cpu_addr = dma_alloc_coherent(fbi->device, size,
++ &mem->phy_addr,
++ GFP_KERNEL);
++ if (mem->cpu_addr == NULL) {
++ kfree(mem);
++ return -ENOMEM;
++ }
++
++ list_add(&mem->list, &fb_alloc_list);
++
++ dev_dbg(fbi->device, "allocated %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ if (put_user(mem->phy_addr, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case FBIO_FREE:
++ {
++ unsigned long offset;
++ struct mxcfb_alloc_list *mem;
++
++ if (get_user(offset, argp))
++ return -EFAULT;
++
++ retval = -EINVAL;
++ list_for_each_entry(mem, &fb_alloc_list, list) {
++ if (mem->phy_addr == offset) {
++ list_del(&mem->list);
++ dma_free_coherent(fbi->device,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ kfree(mem);
++ retval = 0;
++ break;
++ }
++ }
++
++ break;
++ }
++ case MXCFB_SET_OVERLAY_POS:
++ {
++ struct mxcfb_pos pos;
++ struct fb_info *bg_fbi = NULL;
++ struct mxcfb_info *bg_mxcfbi = NULL;
++
++ if (mxc_fbi->ipu_ch != MEM_FG_SYNC) {
++ dev_err(fbi->device, "Should use the overlay "
++ "framebuffer to set the position of "
++ "the overlay window\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (copy_from_user(&pos, (void *)arg, sizeof(pos))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ bg_fbi = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (bg_fbi)
++ bg_mxcfbi = ((struct mxcfb_info *)(bg_fbi->par));
++
++ if (bg_fbi == NULL) {
++ dev_err(fbi->device, "Cannot find the "
++ "background framebuffer\n");
++ retval = -ENOENT;
++ break;
++ }
++
++ /* if the fb is unblanked, check that the position fits the display */
++ if (mxc_fbi->cur_blank == FB_BLANK_UNBLANK) {
++ if (fbi->var.xres + pos.x > bg_fbi->var.xres) {
++ if (bg_fbi->var.xres < fbi->var.xres)
++ pos.x = 0;
++ else
++ pos.x = bg_fbi->var.xres - fbi->var.xres;
++ }
++ if (fbi->var.yres + pos.y > bg_fbi->var.yres) {
++ if (bg_fbi->var.yres < fbi->var.yres)
++ pos.y = 0;
++ else
++ pos.y = bg_fbi->var.yres - fbi->var.yres;
++ }
++ }
++
++ retval = ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ pos.x, pos.y);
++
++ if (copy_to_user((void *)arg, &pos, sizeof(pos))) {
++ retval = -EFAULT;
++ break;
++ }
++ break;
++ }
++ case MXCFB_GET_FB_IPU_CHAN:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_ch, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_DIFMT:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_di_pix_fmt, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_FB_IPU_DI:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_di, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_FB_BLANK:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->cur_blank, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_SET_DIFMT:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (get_user(mxc_fbi->ipu_di_pix_fmt, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case MXCFB_CSC_UPDATE:
++ {
++ struct mxcfb_csc_matrix csc;
++
++ if (copy_from_user(&csc, (void *) arg, sizeof(csc)))
++ return -EFAULT;
++
++ if ((mxc_fbi->ipu_ch != MEM_FG_SYNC) &&
++ (mxc_fbi->ipu_ch != MEM_BG_SYNC) &&
++ (mxc_fbi->ipu_ch != MEM_BG_ASYNC0))
++ return -EFAULT;
++ ipu_set_csc_coefficients(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ csc.param);
++ break;
++ }
++ default:
++ retval = -EINVAL;
++ }
++ return retval;
++}
++
++/*
++ * mxcfb_blank():
++ * Blank the display.
++ */
++static int mxcfb_blank(int blank, struct fb_info *info)
++{
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
++ int ret = 0;
++
++ dev_dbg(info->device, "blank = %d\n", blank);
++
++ if (mxc_fbi->cur_blank == blank)
++ return 0;
++
++ mxc_fbi->next_blank = blank;
++
++ switch (blank) {
++ case FB_BLANK_POWERDOWN:
++ case FB_BLANK_VSYNC_SUSPEND:
++ case FB_BLANK_HSYNC_SUSPEND:
++ case FB_BLANK_NORMAL:
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->disable)
++ mxc_fbi->dispdrv->drv->disable(mxc_fbi->dispdrv);
++ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
++ if (mxc_fbi->ipu_di >= 0)
++ ipu_uninit_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ break;
++ case FB_BLANK_UNBLANK:
++ info->var.activate = (info->var.activate & ~FB_ACTIVATE_MASK) |
++ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
++ ret = mxcfb_set_par(info);
++ break;
++ }
++ if (!ret)
++ mxc_fbi->cur_blank = blank;
++ return ret;
++}
++
++/*
++ * Pan or Wrap the Display
++ *
++ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
++ *
++ * @param var Variable screen buffer information
++ * @param info Framebuffer information pointer
++ */
++static int
++mxcfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par,
++ *mxc_graphic_fbi = NULL;
++ u_int y_bottom;
++ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
++ unsigned long base, active_alpha_phy_addr = 0;
++ bool loc_alpha_en = false;
++ int fb_stride;
++ int i;
++ int ret;
++
++ /* no pan display during fb blank */
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ struct mxcfb_info *bg_mxcfbi = NULL;
++ struct fb_info *fbi_tmp;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
++ if (!bg_mxcfbi)
++ return -EINVAL;
++ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK)
++ return -EINVAL;
++ }
++ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK)
++ return -EINVAL;
++
++ y_bottom = var->yoffset;
++
++ if (y_bottom > info->var.yres_virtual)
++ return -EINVAL;
++
++ switch (fbi_to_pixfmt(info)) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YUV444P:
++ fb_stride = info->var.xres_virtual;
++ break;
++ default:
++ fb_stride = info->fix.line_length;
++ }
++
++ base = info->fix.smem_start;
++ fr_xoff = var->xoffset;
++ fr_w = info->var.xres_virtual;
++ if (!(var->vmode & FB_VMODE_YWRAP)) {
++ dev_dbg(info->device, "Y wrap disabled\n");
++ fr_yoff = var->yoffset % info->var.yres;
++ fr_h = info->var.yres;
++ base += info->fix.line_length * info->var.yres *
++ (var->yoffset / info->var.yres);
++ } else {
++ dev_dbg(info->device, "Y wrap enabled\n");
++ fr_yoff = var->yoffset;
++ fr_h = info->var.yres_virtual;
++ }
++ base += fr_yoff * fb_stride + fr_xoff;
++
++ /* Check if DP local alpha is enabled and find the graphic fb */
++ if (mxc_fbi->ipu_ch == MEM_BG_SYNC || mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ for (i = 0; i < num_registered_fb; i++) {
++ char bg_id[] = "DISP3 BG";
++ char fg_id[] = "DISP3 FG";
++ char *idstr = registered_fb[i]->fix.id;
++ bg_id[4] += mxc_fbi->ipu_id;
++ fg_id[4] += mxc_fbi->ipu_id;
++ if ((strcmp(idstr, bg_id) == 0 ||
++ strcmp(idstr, fg_id) == 0) &&
++ ((struct mxcfb_info *)
++ (registered_fb[i]->par))->alpha_chan_en) {
++ loc_alpha_en = true;
++ mxc_graphic_fbi = (struct mxcfb_info *)
++ (registered_fb[i]->par);
++ active_alpha_phy_addr =
++ mxc_fbi->cur_ipu_alpha_buf ?
++ mxc_graphic_fbi->alpha_phy_addr1 :
++ mxc_graphic_fbi->alpha_phy_addr0;
++ dev_dbg(info->device, "Updating SDC alpha "
++ "buf %d address=0x%08lX\n",
++ !mxc_fbi->cur_ipu_alpha_buf,
++ active_alpha_phy_addr);
++ break;
++ }
++ }
++ }
++
++ ret = wait_for_completion_timeout(&mxc_fbi->flip_complete, HZ/2);
++ if (ret == 0) {
++ dev_err(info->device, "timeout when waiting for flip irq\n");
++ return -ETIMEDOUT;
++ }
++
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
++
++ dev_dbg(info->device, "Updating SDC %s buf %d address=0x%08lX\n",
++ info->fix.id, mxc_fbi->cur_ipu_buf, base);
++
++ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ mxc_fbi->cur_ipu_buf, base) == 0) {
++ /* Update the DP local alpha buffer only for graphic plane */
++ if (loc_alpha_en && mxc_graphic_fbi == mxc_fbi &&
++ ipu_update_channel_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf,
++ active_alpha_phy_addr) == 0) {
++ ipu_select_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf);
++ }
++
++ /* update u/v offset */
++ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(info),
++ fr_w,
++ fr_h,
++ fr_w,
++ 0, 0,
++ fr_yoff,
++ fr_xoff);
++
++ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ mxc_fbi->cur_ipu_buf);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ } else {
++ dev_err(info->device,
++ "Error updating SDC buf %d to address=0x%08lX, "
++ "current buf %d, buf0 ready %d, buf1 ready %d, "
++ "buf2 ready %d\n", mxc_fbi->cur_ipu_buf, base,
++ ipu_get_cur_buffer_idx(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 0),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 1),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 2));
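++ /* Undo the earlier buffer advance (adding 2 is -1 modulo 3) and
++ * restore the alpha buffer selection. */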
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ return -EBUSY;
++ }
++
++ dev_dbg(info->device, "Update complete\n");
++
++ info->var.yoffset = var->yoffset;
++
++ return 0;
++}
++
++/*
++ * Function to handle custom mmap for MXC framebuffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @param vma Pointer to vm_area_struct
++ */
++static int mxcfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
++{
++ bool found = false;
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ struct mxcfb_alloc_list *mem;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
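++ /* The request may map the main framebuffer, one of the two alpha buffers, or an entry from the driver's fb_alloc_list */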
++ if (offset < fbi->fix.smem_len) {
++ /* mapping framebuffer memory */
++ len = fbi->fix.smem_len - offset;
++ vma->vm_pgoff = (fbi->fix.smem_start + offset) >> PAGE_SHIFT;
++ } else if ((vma->vm_pgoff ==
++ (mxc_fbi->alpha_phy_addr0 >> PAGE_SHIFT)) ||
++ (vma->vm_pgoff ==
++ (mxc_fbi->alpha_phy_addr1 >> PAGE_SHIFT))) {
++ len = mxc_fbi->alpha_mem_len;
++ } else {
++ list_for_each_entry(mem, &fb_alloc_list, list) {
++ if (offset == mem->phy_addr) {
++ found = true;
++ len = mem->size;
++ break;
++ }
++ }
++ if (!found)
++ return -EINVAL;
++ }
++
++ len = PAGE_ALIGN(len);
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ /* make buffers bufferable */
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ vma->vm_flags |= VM_IO;
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
++ dev_dbg(fbi->device, "mmap remap_pfn_range failed\n");
++ return -ENOBUFS;
++ }
++
++ return 0;
++}
++
++/*!
++ * This structure contains the pointers to the control functions that are
++ * invoked by the core framebuffer driver to perform operations like
++ * blitting, rectangle filling, copy regions and cursor definition.
++ */
++static struct fb_ops mxcfb_ops = {
++ .owner = THIS_MODULE,
++ .fb_set_par = mxcfb_set_par,
++ .fb_check_var = mxcfb_check_var,
++ .fb_setcolreg = mxcfb_setcolreg,
++ .fb_pan_display = mxcfb_pan_display,
++ .fb_ioctl = mxcfb_ioctl,
++ .fb_mmap = mxcfb_mmap,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_blank = mxcfb_blank,
++};
++
++static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->flip_complete);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->vsync_complete);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mxcfb_alpha_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->alpha_flip_complete);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Suspends the framebuffer and blanks the screen (power management support).
++ */
++static int mxcfb_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++ int saved_blank;
++#ifdef CONFIG_FB_MXC_LOW_PWR_DISPLAY
++ void *fbmem;
++#endif
++
++ if (mxc_fbi->ovfbi) {
++ struct mxcfb_info *mxc_fbi_fg =
++ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++
++ console_lock();
++ fb_set_suspend(mxc_fbi->ovfbi, 1);
++ saved_blank = mxc_fbi_fg->cur_blank;
++ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
++ mxc_fbi_fg->next_blank = saved_blank;
++ console_unlock();
++ }
++
++ console_lock();
++ fb_set_suspend(fbi, 1);
++ saved_blank = mxc_fbi->cur_blank;
++ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
++ mxc_fbi->next_blank = saved_blank;
++ console_unlock();
++
++ return 0;
++}
++
++/*
++ * Resumes the framebuffer and unblanks the screen (power management support).
++ */
++static int mxcfb_resume(struct platform_device *pdev)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ console_lock();
++ mxcfb_blank(mxc_fbi->next_blank, fbi);
++ fb_set_suspend(fbi, 0);
++ console_unlock();
++
++ if (mxc_fbi->ovfbi) {
++ struct mxcfb_info *mxc_fbi_fg =
++ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++ console_lock();
++ mxcfb_blank(mxc_fbi_fg->next_blank, mxc_fbi->ovfbi);
++ fb_set_suspend(mxc_fbi->ovfbi, 0);
++ console_unlock();
++ }
++
++ return 0;
++}
++
++/*
++ * Main framebuffer functions
++ */
++
++/*!
++ * Allocates the DRAM memory for the frame buffer. This buffer is remapped
++ * into a non-cached, non-buffered, memory region to allow palette and pixel
++ * writes to occur without flushing the cache. Once this area is remapped,
++ * all virtual memory access to the video memory should occur at the new region.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxcfb_map_video_memory(struct fb_info *fbi)
++{
++ if (fbi->fix.smem_len < fbi->var.yres_virtual * fbi->fix.line_length)
++ fbi->fix.smem_len = fbi->var.yres_virtual *
++ fbi->fix.line_length;
++
++ fbi->screen_base = dma_alloc_writecombine(fbi->device,
++ fbi->fix.smem_len,
++ (dma_addr_t *)&fbi->fix.smem_start,
++ GFP_DMA | GFP_KERNEL);
++ if (fbi->screen_base == 0) {
++ dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
++ fbi->fix.smem_len = 0;
++ fbi->fix.smem_start = 0;
++ return -EBUSY;
++ }
++
++ dev_dbg(fbi->device, "allocated fb @ paddr=0x%08X, size=%d.\n",
++ (uint32_t) fbi->fix.smem_start, fbi->fix.smem_len);
++
++ fbi->screen_size = fbi->fix.smem_len;
++
++ /* Clear the screen */
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ return 0;
++}
++
++/*!
++ * De-allocates the DRAM memory for the frame buffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxcfb_unmap_video_memory(struct fb_info *fbi)
++{
++ dma_free_writecombine(fbi->device, fbi->fix.smem_len,
++ fbi->screen_base, fbi->fix.smem_start);
++ fbi->screen_base = 0;
++ fbi->fix.smem_start = 0;
++ fbi->fix.smem_len = 0;
++ return 0;
++}
++
++/*!
++ * Initializes the framebuffer information pointer. After allocating
++ * sufficient memory for the framebuffer structure, the fields are
++ * filled with custom information passed in from the configurable
++ * structures. This includes information such as bits per pixel,
++ * color maps, screen width/height and RGBA offsets.
++ *
++ * @return Framebuffer structure initialized with our information
++ */
++static struct fb_info *mxcfb_init_fbinfo(struct device *dev, struct fb_ops *ops)
++{
++ struct fb_info *fbi;
++ struct mxcfb_info *mxcfbi;
++
++ /*
++ * Allocate sufficient memory for the fb structure
++ */
++ fbi = framebuffer_alloc(sizeof(struct mxcfb_info), dev);
++ if (!fbi)
++ return NULL;
++
++ mxcfbi = (struct mxcfb_info *)fbi->par;
++
++ fbi->var.activate = FB_ACTIVATE_NOW;
++
++ fbi->fbops = ops;
++ fbi->flags = FBINFO_FLAG_DEFAULT;
++ fbi->pseudo_palette = mxcfbi->pseudo_palette;
++
++ /*
++ * Allocate colormap
++ */
++ fb_alloc_cmap(&fbi->cmap, 16, 0);
++
++ return fbi;
++}
++
++static ssize_t show_disp_chan(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++
++ if (mxcfbi->ipu_ch == MEM_BG_SYNC)
++ return sprintf(buf, "2-layer-fb-bg\n");
++ else if (mxcfbi->ipu_ch == MEM_FG_SYNC)
++ return sprintf(buf, "2-layer-fb-fg\n");
++ else if (mxcfbi->ipu_ch == MEM_DC_SYNC)
++ return sprintf(buf, "1-layer-fb\n");
++ else
++ return sprintf(buf, "err: no display chan\n");
++}
++
++static ssize_t swap_disp_chan(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++ struct mxcfb_info *fg_mxcfbi = NULL;
++
++ console_lock();
++ /* A swap can only happen between DP-BG and DC, and only while DP-FG is disabled */
++ if (((mxcfbi->ipu_ch == MEM_BG_SYNC) &&
++ (strstr(buf, "1-layer-fb") != NULL)) ||
++ ((mxcfbi->ipu_ch == MEM_DC_SYNC) &&
++ (strstr(buf, "2-layer-fb-bg") != NULL))) {
++ struct fb_info *fbi_fg;
++
++ fbi_fg = found_registered_fb(MEM_FG_SYNC, mxcfbi->ipu_id);
++ if (fbi_fg)
++ fg_mxcfbi = (struct mxcfb_info *)fbi_fg->par;
++
++ if (!fg_mxcfbi ||
++ fg_mxcfbi->cur_blank == FB_BLANK_UNBLANK) {
++ dev_err(dev,
++ "Can not switch while fb2(fb-fg) is on.\n");
++ console_unlock();
++ return count;
++ }
++
++ if (swap_channels(info) < 0)
++ dev_err(dev, "Swap display channel failed.\n");
++ }
++
++ console_unlock();
++ return count;
++}
++static DEVICE_ATTR(fsl_disp_property, S_IWUSR | S_IRUGO,
++ show_disp_chan, swap_disp_chan);
++
++static ssize_t show_disp_dev(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++
++ if (mxcfbi->ipu_ch == MEM_FG_SYNC)
++ return sprintf(buf, "overlay\n");
++ else
++ return sprintf(buf, "%s\n", mxcfbi->dispdrv->drv->name);
++}
++static DEVICE_ATTR(fsl_disp_dev_property, S_IRUGO, show_disp_dev, NULL);
++
++static int mxcfb_dispdrv_init(struct platform_device *pdev,
++ struct fb_info *fbi)
++{
++ struct ipuv3_fb_platform_data *plat_data = pdev->dev.platform_data;
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++ struct mxc_dispdrv_setting setting;
++ char disp_dev[32], *default_dev = "lcd";
++ int ret = 0;
++
++ setting.if_fmt = plat_data->interface_pix_fmt;
++ setting.dft_mode_str = plat_data->mode_str;
++ setting.default_bpp = plat_data->default_bpp;
++ if (!setting.default_bpp)
++ setting.default_bpp = 16;
++ setting.fbi = fbi;
++ if (!strlen(plat_data->disp_dev)) {
++ memcpy(disp_dev, default_dev, strlen(default_dev));
++ disp_dev[strlen(default_dev)] = '\0';
++ } else {
++ memcpy(disp_dev, plat_data->disp_dev,
++ strlen(plat_data->disp_dev));
++ disp_dev[strlen(plat_data->disp_dev)] = '\0';
++ }
++
++ dev_info(&pdev->dev, "register mxc display driver %s\n", disp_dev);
++
++ mxcfbi->dispdrv = mxc_dispdrv_gethandle(disp_dev, &setting);
++ if (IS_ERR(mxcfbi->dispdrv)) {
++ ret = PTR_ERR(mxcfbi->dispdrv);
++ dev_err(&pdev->dev, "NO mxc display driver found!\n");
++ return ret;
++ } else {
++ /* fix-up */
++ mxcfbi->ipu_di_pix_fmt = setting.if_fmt;
++ mxcfbi->default_bpp = setting.default_bpp;
++
++ /* setting */
++ mxcfbi->ipu_id = setting.dev_id;
++ mxcfbi->ipu_di = setting.disp_id;
++ dev_dbg(&pdev->dev, "di_pixfmt:0x%x, bpp:0x%x, di:%d, ipu:%d\n",
++ setting.if_fmt, setting.default_bpp,
++ setting.disp_id, setting.dev_id);
++ }
++
++ return ret;
++}
++
++/*
++ * Parse user specified options (`video=trident:')
++ * example:
++ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,bpp=16,noaccel
++ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,fbpix=RGB565
++ */
++static int mxcfb_option_setup(struct platform_device *pdev, struct fb_info *fbi)
++{
++ struct ipuv3_fb_platform_data *pdata = pdev->dev.platform_data;
++ char *options, *opt, *fb_mode_str = NULL;
++ char name[] = "mxcfb0";
++ uint32_t fb_pix_fmt = 0;
++
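++ /* "mxcfb0" -> "mxcfb<id>": look up the kernel command line options for this particular fb device */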
++ name[5] += pdev->id;
++ if (fb_get_options(name, &options)) {
++ dev_err(&pdev->dev, "Can't get fb option for %s!\n", name);
++ return -ENODEV;
++ }
++
++ if (!options || !*options)
++ return 0;
++
++ while ((opt = strsep(&options, ",")) != NULL) {
++ if (!*opt)
++ continue;
++
++ if (!strncmp(opt, "dev=", 4)) {
++ memcpy(pdata->disp_dev, opt + 4, strlen(opt) - 4);
++ pdata->disp_dev[strlen(opt) - 4] = '\0';
++ } else if (!strncmp(opt, "if=", 3)) {
++ if (!strncmp(opt+3, "RGB24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(opt+3, "BGR24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(opt+3, "GBR24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(opt+3, "RGB565", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(opt+3, "RGB666", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(opt+3, "YUV444", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(opt+3, "LVDS666", 7))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(opt+3, "YUYV16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(opt+3, "UYVY16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(opt+3, "YVYU16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(opt+3, "VYUY16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_VYUY;
++ } else if (!strncmp(opt, "fbpix=", 6)) {
++ if (!strncmp(opt+6, "RGB24", 5))
++ fb_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(opt+6, "BGR24", 5))
++ fb_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(opt+6, "RGB32", 5))
++ fb_pix_fmt = IPU_PIX_FMT_RGB32;
++ else if (!strncmp(opt+6, "BGR32", 5))
++ fb_pix_fmt = IPU_PIX_FMT_BGR32;
++ else if (!strncmp(opt+6, "ABGR32", 6))
++ fb_pix_fmt = IPU_PIX_FMT_ABGR32;
++ else if (!strncmp(opt+6, "RGB565", 6))
++ fb_pix_fmt = IPU_PIX_FMT_RGB565;
++
++ if (fb_pix_fmt) {
++ pixfmt_to_var(fb_pix_fmt, &fbi->var);
++ pdata->default_bpp =
++ fbi->var.bits_per_pixel;
++ }
++ } else if (!strncmp(opt, "int_clk", 7)) {
++ pdata->int_clk = true;
++ continue;
++ } else if (!strncmp(opt, "bpp=", 4)) {
++ /* bpp setting cannot overwrite fbpix setting */
++ if (fb_pix_fmt)
++ continue;
++
++ pdata->default_bpp =
++ simple_strtoul(opt + 4, NULL, 0);
++
++ fb_pix_fmt = bpp_to_pixfmt(pdata->default_bpp);
++ if (fb_pix_fmt)
++ pixfmt_to_var(fb_pix_fmt, &fbi->var);
++ } else
++ fb_mode_str = opt;
++ }
++
++ if (fb_mode_str)
++ pdata->mode_str = fb_mode_str;
++
++ return 0;
++}
++
++static int mxcfb_register(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++ struct fb_videomode m;
++ int ret = 0;
++ char bg0_id[] = "DISP3 BG";
++ char bg1_id[] = "DISP3 BG - DI1";
++ char fg_id[] = "DISP3 FG";
++
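++ /* Pick the fix.id string: DI0 -> "DISP3 BG", DI1 -> "DISP3 BG - DI1", overlay -> "DISP3 FG", with the digit adjusted per IPU */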
++ if (mxcfbi->ipu_di == 0) {
++ bg0_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, bg0_id);
++ } else if (mxcfbi->ipu_di == 1) {
++ bg1_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, bg1_id);
++ } else { /* Overlay */
++ fg_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, fg_id);
++ }
++
++ mxcfb_check_var(&fbi->var, fbi);
++
++ mxcfb_set_fix(fbi);
++
++ /* Add the first mode to the fbi modelist. */
++ if (!fbi->modelist.next || !fbi->modelist.prev)
++ INIT_LIST_HEAD(&fbi->modelist);
++ fb_var_to_videomode(&m, &fbi->var);
++ fb_add_videomode(&m, &fbi->modelist);
++
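++ /* EOF completes flip_complete, NFACK completes vsync_complete and the alpha EOF completes alpha_flip_complete (see the irq handlers above) */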
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering EOF irq handler.\n");
++ ret = -EBUSY;
++ goto err0;
++ }
++ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq);
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering NFACK irq handler.\n");
++ ret = -EBUSY;
++ goto err1;
++ }
++ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq);
++
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq,
++ mxcfb_alpha_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering alpha irq "
++ "handler.\n");
++ ret = -EBUSY;
++ goto err2;
++ }
++
++ if (!mxcfbi->late_init) {
++ fbi->var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_set_var(fbi, &fbi->var);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ if (ret < 0) {
++ dev_err(fbi->device, "Error fb_set_var ret:%d\n", ret);
++ goto err3;
++ }
++
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ ret = fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++ if (ret < 0) {
++ dev_err(fbi->device,
++ "Error fb_blank ret:%d\n", ret);
++ goto err4;
++ }
++ }
++ } else {
++ /*
++ * Set up the channel again even though the bootloader
++ * has already done so, so that set_par() can stop the
++ * channel cleanly and re-initialize it.
++ */
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ _setup_disp_channel1(fbi);
++ ipu_enable_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
++ console_unlock();
++ }
++ }
++
++
++ ret = register_framebuffer(fbi);
++ if (ret < 0)
++ goto err5;
++
++ return ret;
++err5:
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ if (!mxcfbi->late_init)
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ else {
++ ipu_disable_channel(mxcfbi->ipu, mxcfbi->ipu_ch,
++ true);
++ ipu_uninit_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
++ }
++ console_unlock();
++ }
++err4:
++err3:
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
++err2:
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
++err1:
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
++err0:
++ return ret;
++}
++
++static void mxcfb_unregister(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
++ if (mxcfbi->ipu_ch_irq)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
++ if (mxcfbi->ipu_ch_nf_irq)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
++
++ unregister_framebuffer(fbi);
++}
++
++static int mxcfb_setup_overlay(struct platform_device *pdev,
++ struct fb_info *fbi_bg, struct resource *res)
++{
++ struct fb_info *ovfbi;
++ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
++ struct mxcfb_info *mxcfbi_fg;
++ int ret = 0;
++
++ ovfbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
++ if (!ovfbi) {
++ ret = -ENOMEM;
++ goto init_ovfbinfo_failed;
++ }
++ mxcfbi_fg = (struct mxcfb_info *)ovfbi->par;
++
++ mxcfbi_fg->ipu = ipu_get_soc(mxcfbi_bg->ipu_id);
++ if (IS_ERR(mxcfbi_fg->ipu)) {
++ ret = -ENODEV;
++ goto get_ipu_failed;
++ }
++ mxcfbi_fg->ipu_id = mxcfbi_bg->ipu_id;
++ mxcfbi_fg->ipu_ch_irq = IPU_IRQ_FG_SYNC_EOF;
++ mxcfbi_fg->ipu_ch_nf_irq = IPU_IRQ_FG_SYNC_NFACK;
++ mxcfbi_fg->ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
++ mxcfbi_fg->ipu_ch = MEM_FG_SYNC;
++ mxcfbi_fg->ipu_di = -1;
++ mxcfbi_fg->ipu_di_pix_fmt = mxcfbi_bg->ipu_di_pix_fmt;
++ mxcfbi_fg->overlay = true;
++ mxcfbi_fg->cur_blank = mxcfbi_fg->next_blank = FB_BLANK_POWERDOWN;
++
++ /* Need dummy values until real panel is configured */
++ ovfbi->var.xres = 240;
++ ovfbi->var.yres = 320;
++
++ if (res && res->start && res->end) {
++ ovfbi->fix.smem_len = res->end - res->start + 1;
++ ovfbi->fix.smem_start = res->start;
++ ovfbi->screen_base = ioremap(
++ ovfbi->fix.smem_start,
++ ovfbi->fix.smem_len);
++ }
++
++ ret = mxcfb_register(ovfbi);
++ if (ret < 0)
++ goto register_ov_failed;
++
++ mxcfbi_bg->ovfbi = ovfbi;
++
++ return ret;
++
++register_ov_failed:
++get_ipu_failed:
++ fb_dealloc_cmap(&ovfbi->cmap);
++ framebuffer_release(ovfbi);
++init_ovfbinfo_failed:
++ return ret;
++}
++
++static void mxcfb_unsetup_overlay(struct fb_info *fbi_bg)
++{
++ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
++ struct fb_info *ovfbi = mxcfbi_bg->ovfbi;
++
++ mxcfb_unregister(ovfbi);
++
++ if (&ovfbi->cmap)
++ fb_dealloc_cmap(&ovfbi->cmap);
++ framebuffer_release(ovfbi);
++}
++
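++/* One busy flag per IPU/DI pair: each display interface can back at most one framebuffer */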
++static bool ipu_usage[2][2];
++static int ipu_test_set_usage(int ipu, int di)
++{
++ if (ipu_usage[ipu][di])
++ return -EBUSY;
++ else
++ ipu_usage[ipu][di] = true;
++ return 0;
++}
++
++static void ipu_clear_usage(int ipu, int di)
++{
++ ipu_usage[ipu][di] = false;
++}
++
++static int mxcfb_get_of_property(struct platform_device *pdev,
++ struct ipuv3_fb_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ const char *disp_dev;
++ const char *mode_str;
++ const char *pixfmt;
++ int err;
++ int len;
++ u32 bpp, int_clk;
++ u32 late_init;
++
++ err = of_property_read_string(np, "disp_dev", &disp_dev);
++ if (err < 0) {
++ dev_dbg(&pdev->dev, "get of property disp_dev fail\n");
++ return err;
++ }
++ err = of_property_read_string(np, "mode_str", &mode_str);
++ if (err < 0) {
++ dev_dbg(&pdev->dev, "get of property mode_str fail\n");
++ return err;
++ }
++ err = of_property_read_string(np, "interface_pix_fmt", &pixfmt);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property pix fmt fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "default_bpp", &bpp);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property bpp fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "int_clk", &int_clk);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property int_clk fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "late_init", &late_init);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property late_init fail\n");
++ return err;
++ }
++
++ if (!strncmp(pixfmt, "RGB24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(pixfmt, "BGR24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(pixfmt, "GBR24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(pixfmt, "RGB565", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(pixfmt, "RGB666", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(pixfmt, "YUV444", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(pixfmt, "LVDS666", 7))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(pixfmt, "YUYV16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(pixfmt, "UYVY16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(pixfmt, "YVYU16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(pixfmt, "VYUY16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_VYUY;
++ else {
++ dev_err(&pdev->dev, "err interface_pix_fmt!\n");
++ return -ENOENT;
++ }
++
++ len = min(sizeof(plat_data->disp_dev) - 1, strlen(disp_dev));
++ memcpy(plat_data->disp_dev, disp_dev, len);
++ plat_data->disp_dev[len] = '\0';
++ plat_data->mode_str = (char *)mode_str;
++ plat_data->default_bpp = bpp;
++ plat_data->int_clk = (bool)int_clk;
++ plat_data->late_init = (bool)late_init;
++ return err;
++}
++
++/*!
++ * Probe routine for the framebuffer driver. It is called during the
++ * driver binding process. The following functions are performed in
++ * this routine: Framebuffer initialization, Memory allocation and
++ * mapping, Framebuffer registration, IPU initialization.
++ *
++ * @return Appropriate error code to the kernel common code
++ */
++static int mxcfb_probe(struct platform_device *pdev)
++{
++ struct ipuv3_fb_platform_data *plat_data;
++ struct fb_info *fbi;
++ struct mxcfb_info *mxcfbi;
++ struct resource *res;
++ int ret = 0;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ pdev->id = of_alias_get_id(pdev->dev.of_node, "mxcfb");
++ if (pdev->id < 0) {
++ dev_err(&pdev->dev, "can not get alias id\n");
++ return pdev->id;
++ }
++
++ plat_data = devm_kzalloc(&pdev->dev, sizeof(struct
++ ipuv3_fb_platform_data), GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++
++ ret = mxcfb_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get mxcfb of property fail\n");
++ return ret;
++ }
++
++ /* Initialize FB structures */
++ fbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
++ if (!fbi) {
++ ret = -ENOMEM;
++ goto init_fbinfo_failed;
++ }
++
++ ret = mxcfb_option_setup(pdev, fbi);
++ if (ret)
++ goto get_fb_option_failed;
++
++ mxcfbi = (struct mxcfb_info *)fbi->par;
++ mxcfbi->ipu_int_clk = plat_data->int_clk;
++ mxcfbi->late_init = plat_data->late_init;
++ mxcfbi->first_set_par = true;
++ ret = mxcfb_dispdrv_init(pdev, fbi);
++ if (ret < 0)
++ goto init_dispdrv_failed;
++
++ ret = ipu_test_set_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "ipu%d-di%d already in use\n",
++ mxcfbi->ipu_id, mxcfbi->ipu_di);
++ goto ipu_in_busy;
++ }
++
++ if (mxcfbi->dispdrv->drv->post_init) {
++ ret = mxcfbi->dispdrv->drv->post_init(mxcfbi->dispdrv,
++ mxcfbi->ipu_id,
++ mxcfbi->ipu_di);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "post init failed\n");
++ goto post_init_failed;
++ }
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res && res->start && res->end) {
++ fbi->fix.smem_len = res->end - res->start + 1;
++ fbi->fix.smem_start = res->start;
++ fbi->screen_base = ioremap(fbi->fix.smem_start, fbi->fix.smem_len);
++ /* Do not clear the fb content drawn in bootloader. */
++ if (!mxcfbi->late_init)
++ memset(fbi->screen_base, 0, fbi->fix.smem_len);
++ }
++
++ mxcfbi->ipu = ipu_get_soc(mxcfbi->ipu_id);
++ if (IS_ERR(mxcfbi->ipu)) {
++ ret = -ENODEV;
++ goto get_ipu_failed;
++ }
++
++ /* The first fb on each IPU takes the DP path (background plus overlay with alpha); later ones use the plain DC path */
++ if (!g_dp_in_use[mxcfbi->ipu_id]) {
++ mxcfbi->ipu_ch_irq = IPU_IRQ_BG_SYNC_EOF;
++ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_BG_SYNC_NFACK;
++ mxcfbi->ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
++ mxcfbi->ipu_ch = MEM_BG_SYNC;
++ /* Unblank the primary fb only by default */
++ if (pdev->id == 0)
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_UNBLANK;
++ else
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
++
++ ret = mxcfb_register(fbi);
++ if (ret < 0)
++ goto mxcfb_register_failed;
++
++ ipu_disp_set_global_alpha(mxcfbi->ipu, mxcfbi->ipu_ch,
++ true, 0x80);
++ ipu_disp_set_color_key(mxcfbi->ipu, mxcfbi->ipu_ch, false, 0);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ ret = mxcfb_setup_overlay(pdev, fbi, res);
++
++ if (ret < 0) {
++ mxcfb_unregister(fbi);
++ goto mxcfb_setupoverlay_failed;
++ }
++
++ g_dp_in_use[mxcfbi->ipu_id] = true;
++
++ ret = device_create_file(mxcfbi->ovfbi->dev,
++ &dev_attr_fsl_disp_property);
++ if (ret)
++ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
++ "file for disp property\n",
++ ret);
++
++ ret = device_create_file(mxcfbi->ovfbi->dev,
++ &dev_attr_fsl_disp_dev_property);
++ if (ret)
++ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
++ "file for disp device "
++ "propety\n", ret);
++ } else {
++ mxcfbi->ipu_ch_irq = IPU_IRQ_DC_SYNC_EOF;
++ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_DC_SYNC_NFACK;
++ mxcfbi->ipu_alp_ch_irq = -1;
++ mxcfbi->ipu_ch = MEM_DC_SYNC;
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
++
++ ret = mxcfb_register(fbi);
++ if (ret < 0)
++ goto mxcfb_register_failed;
++ }
++
++ platform_set_drvdata(pdev, fbi);
++
++ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_property);
++ if (ret)
++ dev_err(&pdev->dev, "Error %d on creating file for disp "
++ "property\n", ret);
++
++ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
++ if (ret)
++ dev_err(&pdev->dev, "Error %d on creating file for disp "
++ " device propety\n", ret);
++
++ return 0;
++
++mxcfb_setupoverlay_failed:
++mxcfb_register_failed:
++get_ipu_failed:
++post_init_failed:
++ ipu_clear_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
++ipu_in_busy:
++init_dispdrv_failed:
++ fb_dealloc_cmap(&fbi->cmap);
++ framebuffer_release(fbi);
++get_fb_option_failed:
++init_fbinfo_failed:
++ return ret;
++}
++
++static int mxcfb_remove(struct platform_device *pdev)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi;
++
++ if (!fbi)
++ return 0;
++
++ mxc_fbi = fbi->par;
++
++ device_remove_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
++ device_remove_file(fbi->dev, &dev_attr_fsl_disp_property);
++ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
++ mxcfb_unregister(fbi);
++ mxcfb_unmap_video_memory(fbi);
++
++ if (mxc_fbi->ovfbi) {
++ device_remove_file(mxc_fbi->ovfbi->dev,
++ &dev_attr_fsl_disp_dev_property);
++ device_remove_file(mxc_fbi->ovfbi->dev,
++ &dev_attr_fsl_disp_property);
++ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
++ mxcfb_unsetup_overlay(fbi);
++ mxcfb_unmap_video_memory(mxc_fbi->ovfbi);
++ }
++
++ ipu_clear_usage(mxc_fbi->ipu_id, mxc_fbi->ipu_di);
++ if (&fbi->cmap)
++ fb_dealloc_cmap(&fbi->cmap);
++ framebuffer_release(fbi);
++ return 0;
++}
++
++static const struct of_device_id imx_mxcfb_dt_ids[] = {
++ { .compatible = "fsl,mxc_sdc_fb"},
++ { /* sentinel */ }
++};
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver mxcfb_driver = {
++ .driver = {
++ .name = MXCFB_NAME,
++ .of_match_table = imx_mxcfb_dt_ids,
++ },
++ .probe = mxcfb_probe,
++ .remove = mxcfb_remove,
++ .suspend = mxcfb_suspend,
++ .resume = mxcfb_resume,
++};
++
++/*!
++ * Main entry function for the framebuffer. The function registers the power
++ * management callback functions with the kernel and also registers the MXCFB
++ * callback functions with the core Linux framebuffer driver \b fbmem.c
++ *
++ * @return Error code indicating success or failure
++ */
++int __init mxcfb_init(void)
++{
++ return platform_driver_register(&mxcfb_driver);
++}
++
++void mxcfb_exit(void)
++{
++ platform_driver_unregister(&mxcfb_driver);
++}
++
++module_init(mxcfb_init);
++module_exit(mxcfb_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC framebuffer driver");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("fb");
+diff -Nur linux-3.14.40.orig/drivers/video/mxc/mxc_lcdif.c linux-3.14.40/drivers/video/mxc/mxc_lcdif.c
+--- linux-3.14.40.orig/drivers/video/mxc/mxc_lcdif.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/mxc/mxc_lcdif.c 2015-05-01 14:58:05.735427001 -0500
+@@ -0,0 +1,235 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/init.h>
++#include <linux/ipu.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/platform_device.h>
++
++#include "mxc_dispdrv.h"
++
++struct mxc_lcd_platform_data {
++ u32 default_ifmt;
++ u32 ipu_id;
++ u32 disp_id;
++};
++
++struct mxc_lcdif_data {
++ struct platform_device *pdev;
++ struct mxc_dispdrv_handle *disp_lcdif;
++};
++
++#define DISPDRV_LCD "lcd"
++
++static struct fb_videomode lcdif_modedb[] = {
++ {
++ /* 800x480 @ 57 Hz, pixel clk @ 27 MHz */
++ "CLAA-WVGA", 57, 800, 480, 37037, 40, 60, 10, 10, 20, 10,
++ FB_SYNC_CLK_LAT_FALL,
++ FB_VMODE_NONINTERLACED,
++ 0,},
++ {
++ /* 800x480 @ 60 Hz, pixel clk @ 32 MHz */
++ "SEIKO-WVGA", 60, 800, 480, 29850, 89, 164, 23, 10, 10, 10,
++ FB_SYNC_CLK_LAT_FALL,
++ FB_VMODE_NONINTERLACED,
++ 0,},
++};
++static int lcdif_modedb_sz = ARRAY_SIZE(lcdif_modedb);
++
++static int lcdif_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret, i;
++ struct mxc_lcdif_data *lcdif = mxc_dispdrv_getdata(disp);
++ struct mxc_lcd_platform_data *plat_data
++ = lcdif->pdev->dev.platform_data;
++ struct fb_videomode *modedb = lcdif_modedb;
++ int modedb_sz = lcdif_modedb_sz;
++
++ /* use platform defined ipu/di */
++ setting->dev_id = plat_data->ipu_id;
++ setting->disp_id = plat_data->disp_id;
++
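++ /* Try the requested mode string first; if nothing matches, fall back to the first built-in mode and the platform default interface format */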
++ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
++ modedb, modedb_sz, NULL, setting->default_bpp);
++ if (!ret) {
++ fb_videomode_to_var(&setting->fbi->var, &modedb[0]);
++ setting->if_fmt = plat_data->default_ifmt;
++ }
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < modedb_sz; i++) {
++ struct fb_videomode m;
++ fb_var_to_videomode(&m, &setting->fbi->var);
++ if (fb_mode_is_equal(&m, &modedb[i])) {
++ fb_add_videomode(&modedb[i],
++ &setting->fbi->modelist);
++ break;
++ }
++ }
++
++ return ret;
++}
++
++void lcdif_deinit(struct mxc_dispdrv_handle *disp)
++{
++ /*TODO*/
++}
++
++static struct mxc_dispdrv_driver lcdif_drv = {
++ .name = DISPDRV_LCD,
++ .init = lcdif_init,
++ .deinit = lcdif_deinit,
++};
++
++static int lcd_get_of_property(struct platform_device *pdev,
++ struct mxc_lcd_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ u32 ipu_id, disp_id;
++ const char *default_ifmt;
++
++ err = of_property_read_string(np, "default_ifmt", &default_ifmt);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property default_ifmt fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++
++ plat_data->ipu_id = ipu_id;
++ plat_data->disp_id = disp_id;
++ if (!strncmp(default_ifmt, "RGB24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(default_ifmt, "BGR24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(default_ifmt, "GBR24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(default_ifmt, "RGB565", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(default_ifmt, "RGB666", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(default_ifmt, "YUV444", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(default_ifmt, "LVDS666", 7))
++ plat_data->default_ifmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(default_ifmt, "YUYV16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(default_ifmt, "UYVY16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(default_ifmt, "YVYU16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(default_ifmt, "VYUY16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_VYUY;
++ else {
++ dev_err(&pdev->dev, "err default_ifmt!\n");
++ return -ENOENT;
++ }
++
++ return err;
++}
++
++static int mxc_lcdif_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct pinctrl *pinctrl;
++ struct mxc_lcdif_data *lcdif;
++ struct mxc_lcd_platform_data *plat_data;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ lcdif = devm_kzalloc(&pdev->dev, sizeof(struct mxc_lcdif_data),
++ GFP_KERNEL);
++ if (!lcdif)
++ return -ENOMEM;
++ plat_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct mxc_lcd_platform_data),
++ GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++
++ ret = lcd_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get lcd of property fail\n");
++ return ret;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "can't get/select pinctrl\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ lcdif->pdev = pdev;
++ lcdif->disp_lcdif = mxc_dispdrv_register(&lcdif_drv);
++ mxc_dispdrv_setdata(lcdif->disp_lcdif, lcdif);
++
++ dev_set_drvdata(&pdev->dev, lcdif);
++ dev_dbg(&pdev->dev, "%s exit\n", __func__);
++
++ return ret;
++}
++
++static int mxc_lcdif_remove(struct platform_device *pdev)
++{
++ struct mxc_lcdif_data *lcdif = dev_get_drvdata(&pdev->dev);
++
++ mxc_dispdrv_puthandle(lcdif->disp_lcdif);
++ mxc_dispdrv_unregister(lcdif->disp_lcdif);
++ kfree(lcdif);
++ return 0;
++}
++
++static const struct of_device_id imx_lcd_dt_ids[] = {
++ { .compatible = "fsl,lcd"},
++ { /* sentinel */ }
++};
++static struct platform_driver mxc_lcdif_driver = {
++ .driver = {
++ .name = "mxc_lcdif",
++ .of_match_table = imx_lcd_dt_ids,
++ },
++ .probe = mxc_lcdif_probe,
++ .remove = mxc_lcdif_remove,
++};
++
++static int __init mxc_lcdif_init(void)
++{
++ return platform_driver_register(&mxc_lcdif_driver);
++}
++
++static void __exit mxc_lcdif_exit(void)
++{
++ platform_driver_unregister(&mxc_lcdif_driver);
++}
++
++module_init(mxc_lcdif_init);
++module_exit(mxc_lcdif_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ipuv3 LCD extern port driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/drivers/video/mxsfb.c linux-3.14.40/drivers/video/mxsfb.c
+--- linux-3.14.40.orig/drivers/video/mxsfb.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/drivers/video/mxsfb.c 2015-05-01 14:58:05.755427001 -0500
+@@ -96,9 +96,10 @@
+ #define CTRL_DF24 (1 << 1)
+ #define CTRL_RUN (1 << 0)
+
+-#define CTRL1_FIFO_CLEAR (1 << 21)
+-#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
+-#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
++#define CTRL1_RECOVERY_ON_UNDERFLOW (1 << 24)
++#define CTRL1_FIFO_CLEAR (1 << 21)
++#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
++#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
+
+ #define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
+ #define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
+@@ -149,8 +150,8 @@
+ #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
+ #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
+
+-#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negtive edge sampling */
++#define FB_SYNC_OE_LOW_ACT 0x80000000
++#define FB_SYNC_CLK_LAT_FALL 0x40000000
+
+ enum mxsfb_devtype {
+ MXSFB_V3,
+@@ -178,7 +179,6 @@
+ unsigned ld_intf_width;
+ unsigned dotclk_delay;
+ const struct mxsfb_devdata *devdata;
+- u32 sync;
+ struct regulator *reg_lcd;
+ };
+
+@@ -275,9 +275,15 @@
+ if (var->yres < MIN_YRES)
+ var->yres = MIN_YRES;
+
+- var->xres_virtual = var->xres;
++ if (var->xres_virtual > var->xres) {
++ dev_dbg(fb_info->device, "stride not supported\n");
++ return -EINVAL;
++ }
+
+- var->yres_virtual = var->yres;
++ if (var->xres_virtual < var->xres)
++ var->xres_virtual = var->xres;
++ if (var->yres_virtual < var->yres)
++ var->yres_virtual = var->yres;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+@@ -344,6 +350,9 @@
+
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
+
++ /* Recovery on underflow */
++ writel(CTRL1_RECOVERY_ON_UNDERFLOW, host->base + LCDC_CTRL1 + REG_SET);
++
+ host->enabled = 1;
+ }
+
+@@ -392,14 +401,6 @@
+ int line_size, fb_size;
+ int reenable = 0;
+
+- line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
+- fb_size = fb_info->var.yres_virtual * line_size;
+-
+- if (fb_size > fb_info->fix.smem_len)
+- return -ENOMEM;
+-
+- fb_info->fix.line_length = line_size;
+-
+ /*
+ * It seems, you can't re-program the controller if it is still running.
+ * This may lead into shifted pictures (FIFO issue?).
+@@ -413,6 +414,19 @@
+ /* clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+
++ line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
++ fb_info->fix.line_length = line_size;
++ fb_size = fb_info->var.yres_virtual * line_size;
++
++ /* Reallocate memory */
++ if (!fb_info->fix.smem_start || (fb_size > fb_info->fix.smem_len)) {
++ if (fb_info->fix.smem_start)
++ mxsfb_unmap_videomem(fb_info);
++
++ if (mxsfb_map_videomem(fb_info) < 0)
++ return -ENOMEM;
++ }
++
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
+ CTRL_SET_BUS_WIDTH(host->ld_intf_width);
+
+@@ -459,9 +473,9 @@
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+- if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
++ if (!(fb_info->var.sync & FB_SYNC_OE_LOW_ACT))
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+- if (host->sync & MXSFB_SYNC_DOTCLK_FALLING_ACT)
++ if (fb_info->var.sync & FB_SYNC_CLK_LAT_FALL)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
+
+ writel(vdctrl0, host->base + LCDC_VDCTRL0);
+@@ -578,6 +592,34 @@
+ return 0;
+ }
+
++static int mxsfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++
++ if (offset < info->fix.smem_len) {
++ /* mapping framebuffer memory */
++ len = info->fix.smem_len - offset;
++ vma->vm_pgoff = (info->fix.smem_start + offset) >> PAGE_SHIFT;
++ } else
++ return -EINVAL;
++
++ len = PAGE_ALIGN(len);
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ /* make buffers bufferable */
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
++ dev_dbg(info->device, "mmap remap_pfn_range failed\n");
++ return -ENOBUFS;
++ }
++
++ return 0;
++}
++
+ static struct fb_ops mxsfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = mxsfb_check_var,
+@@ -585,6 +627,7 @@
+ .fb_setcolreg = mxsfb_setcolreg,
+ .fb_blank = mxsfb_blank,
+ .fb_pan_display = mxsfb_pan_display,
++ .fb_mmap = mxsfb_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+@@ -800,7 +843,62 @@
+ {
+ struct fb_info *fb_info = &host->fb_info;
+
+- free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
++ mxsfb_unmap_videomem(fb_info);
++}
++
++/*!
++ * Allocates the DRAM memory for the frame buffer. This buffer is remapped
++ * into a non-cached, non-buffered, memory region to allow palette and pixel
++ * writes to occur without flushing the cache. Once this area is remapped,
++ * all virtual memory access to the video memory should occur at the new region.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxsfb_map_videomem(struct fb_info *fbi)
++{
++ if (fbi->fix.smem_len < fbi->var.yres_virtual * fbi->fix.line_length)
++ fbi->fix.smem_len = fbi->var.yres_virtual *
++ fbi->fix.line_length;
++
++ fbi->screen_base = dma_alloc_writecombine(fbi->device,
++ fbi->fix.smem_len,
++ (dma_addr_t *)&fbi->fix.smem_start,
++ GFP_DMA | GFP_KERNEL);
++ if (fbi->screen_base == 0) {
++ dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
++ fbi->fix.smem_len = 0;
++ fbi->fix.smem_start = 0;
++ return -EBUSY;
++ }
++
++ dev_dbg(fbi->device, "allocated fb @ paddr=0x%08X, size=%d.\n",
++ (uint32_t) fbi->fix.smem_start, fbi->fix.smem_len);
++
++ fbi->screen_size = fbi->fix.smem_len;
++
++ /* Clear the screen */
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ return 0;
++}
++
++/*!
++ * De-allocates the DRAM memory for the frame buffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxsfb_unmap_videomem(struct fb_info *fbi)
++{
++ dma_free_writecombine(fbi->device, fbi->fix.smem_len,
++ fbi->screen_base, fbi->fix.smem_start);
++ fbi->screen_base = 0;
++ fbi->fix.smem_start = 0;
++ fbi->fix.smem_len = 0;
++ return 0;
+ }
+
+ static struct platform_device_id mxsfb_devtype[] = {
+diff -Nur linux-3.14.40.orig/drivers/video/vexpress-dvi.c linux-3.14.40/drivers/video/vexpress-dvi.c
+--- linux-3.14.40.orig/drivers/video/vexpress-dvi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/drivers/video/vexpress-dvi.c 2015-05-01 14:58:05.755427001 -0500
+@@ -0,0 +1,220 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * Copyright (C) 2012 ARM Limited
++ */
++
++#define pr_fmt(fmt) "vexpress-dvi: " fmt
++
++#include <linux/fb.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/vexpress.h>
++
++
++static struct vexpress_config_func *vexpress_dvimode_func;
++
++static struct {
++ u32 xres, yres, mode;
++} vexpress_dvi_dvimodes[] = {
++ { 640, 480, 0 }, /* VGA */
++ { 800, 600, 1 }, /* SVGA */
++ { 1024, 768, 2 }, /* XGA */
++ { 1280, 1024, 3 }, /* SXGA */
++ { 1600, 1200, 4 }, /* UXGA */
++ { 1920, 1080, 5 }, /* HD1080 */
++};
++
++static void vexpress_dvi_mode_set(struct fb_info *info, u32 xres, u32 yres)
++{
++ int err = -ENOENT;
++ int i;
++
++ if (!vexpress_dvimode_func)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(vexpress_dvi_dvimodes); i++) {
++ if (vexpress_dvi_dvimodes[i].xres == xres &&
++ vexpress_dvi_dvimodes[i].yres == yres) {
++ pr_debug("mode: %ux%u = %d\n", xres, yres,
++ vexpress_dvi_dvimodes[i].mode);
++ err = vexpress_config_write(vexpress_dvimode_func, 0,
++ vexpress_dvi_dvimodes[i].mode);
++ break;
++ }
++ }
++
++ if (err)
++ pr_warn("Failed to set %ux%u mode! (%d)\n", xres, yres, err);
++}
++
++
++static struct vexpress_config_func *vexpress_muxfpga_func;
++static int vexpress_dvi_fb = -1;
++
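++/* Route the selected fb's site through the motherboard MUXFPGA to the DVI output, then program a matching DVI mode */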
++static int vexpress_dvi_mux_set(struct fb_info *info)
++{
++ int err;
++ u32 site = vexpress_get_site_by_dev(info->device);
++
++ if (!vexpress_muxfpga_func)
++ return -ENXIO;
++
++ err = vexpress_config_write(vexpress_muxfpga_func, 0, site);
++ if (!err) {
++ pr_debug("Selected MUXFPGA input %d (fb%d)\n", site,
++ info->node);
++ vexpress_dvi_fb = info->node;
++ vexpress_dvi_mode_set(info, info->var.xres,
++ info->var.yres);
++ } else {
++ pr_warn("Failed to select MUXFPGA input %d (fb%d)! (%d)\n",
++ site, info->node, err);
++ }
++
++ return err;
++}
++
++static int vexpress_dvi_fb_select(int fb)
++{
++ int err;
++ struct fb_info *info;
++
++ /* fb0 is the default */
++ if (fb < 0)
++ fb = 0;
++
++ info = registered_fb[fb];
++ if (!info || !lock_fb_info(info))
++ return -ENODEV;
++
++ err = vexpress_dvi_mux_set(info);
++
++ unlock_fb_info(info);
++
++ return err;
++}
++
++static ssize_t vexpress_dvi_fb_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", vexpress_dvi_fb);
++}
++
++static ssize_t vexpress_dvi_fb_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ long value;
++ int err = kstrtol(buf, 0, &value);
++
++ if (!err)
++ err = vexpress_dvi_fb_select(value);
++
++ return err ? err : count;
++}
++
++DEVICE_ATTR(fb, S_IRUGO | S_IWUSR, vexpress_dvi_fb_show,
++ vexpress_dvi_fb_store);
++
++
++static int vexpress_dvi_fb_event_notify(struct notifier_block *self,
++ unsigned long action, void *data)
++{
++ struct fb_event *event = data;
++ struct fb_info *info = event->info;
++ struct fb_videomode *mode = event->data;
++
++ switch (action) {
++ case FB_EVENT_FB_REGISTERED:
++ if (vexpress_dvi_fb < 0)
++ vexpress_dvi_mux_set(info);
++ break;
++ case FB_EVENT_MODE_CHANGE:
++ case FB_EVENT_MODE_CHANGE_ALL:
++ if (info->node == vexpress_dvi_fb)
++ vexpress_dvi_mode_set(info, mode->xres, mode->yres);
++ break;
++ }
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block vexpress_dvi_fb_notifier = {
++ .notifier_call = vexpress_dvi_fb_event_notify,
++};
++static bool vexpress_dvi_fb_notifier_registered;
++
++
++enum vexpress_dvi_func { FUNC_MUXFPGA, FUNC_DVIMODE };
++
++static struct of_device_id vexpress_dvi_of_match[] = {
++ {
++ .compatible = "arm,vexpress-muxfpga",
++ .data = (void *)FUNC_MUXFPGA,
++ }, {
++ .compatible = "arm,vexpress-dvimode",
++ .data = (void *)FUNC_DVIMODE,
++ },
++ {}
++};
++
++static int vexpress_dvi_probe(struct platform_device *pdev)
++{
++ enum vexpress_dvi_func func;
++ const struct of_device_id *match =
++ of_match_device(vexpress_dvi_of_match, &pdev->dev);
++
++ if (match)
++ func = (enum vexpress_dvi_func)match->data;
++ else
++ func = pdev->id_entry->driver_data;
++
++ switch (func) {
++ case FUNC_MUXFPGA:
++ vexpress_muxfpga_func =
++ vexpress_config_func_get_by_dev(&pdev->dev);
++ device_create_file(&pdev->dev, &dev_attr_fb);
++ break;
++ case FUNC_DVIMODE:
++ vexpress_dvimode_func =
++ vexpress_config_func_get_by_dev(&pdev->dev);
++ break;
++ }
++
++ if (!vexpress_dvi_fb_notifier_registered) {
++ fb_register_client(&vexpress_dvi_fb_notifier);
++ vexpress_dvi_fb_notifier_registered = true;
++ }
++
++ vexpress_dvi_fb_select(vexpress_dvi_fb);
++
++ return 0;
++}
++
++static const struct platform_device_id vexpress_dvi_id_table[] = {
++ { .name = "vexpress-muxfpga", .driver_data = FUNC_MUXFPGA, },
++ { .name = "vexpress-dvimode", .driver_data = FUNC_DVIMODE, },
++ {}
++};
++
++static struct platform_driver vexpress_dvi_driver = {
++ .probe = vexpress_dvi_probe,
++ .driver = {
++ .name = "vexpress-dvi",
++ .of_match_table = vexpress_dvi_of_match,
++ },
++ .id_table = vexpress_dvi_id_table,
++};
++
++static int __init vexpress_dvi_init(void)
++{
++ return platform_driver_register(&vexpress_dvi_driver);
++}
++device_initcall(vexpress_dvi_init);
+diff -Nur linux-3.14.40.orig/firmware/imx/sdma/sdma-imx6q.bin.ihex linux-3.14.40/firmware/imx/sdma/sdma-imx6q.bin.ihex
+--- linux-3.14.40.orig/firmware/imx/sdma/sdma-imx6q.bin.ihex 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/firmware/imx/sdma/sdma-imx6q.bin.ihex 2015-05-01 14:58:05.755427001 -0500
+@@ -0,0 +1,116 @@
++:1000000053444D4101000000010000001C000000AD
++:1000100026000000B40000007A0600008202000002
++:10002000FFFFFFFF00000000FFFFFFFFFFFFFFFFDC
++:10003000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD0
++:10004000FFFFFFFFFFFFFFFF6A1A0000FFFFFFFF38
++:10005000EB020000BB180000FFFFFFFF08040000D8
++:10006000FFFFFFFFC0030000FFFFFFFFFFFFFFFFD9
++:10007000FFFFFFFFAB020000FFFFFFFF7B0300005D
++:10008000FFFFFFFFFFFFFFFF4C0400006E040000B6
++:10009000FFFFFFFF00180000FFFFFFFFFFFFFFFF54
++:1000A000000000000018000062180000161A00008E
++:1000B000061B0000E3C1DB57E35FE357F352016A1D
++:1000C0008F00D500017D8D00A005EB5D7804037DD8
++:1000D00079042C7D367C79041F7CEE56000F600677
++:1000E000057D0965437E0A62417E20980A623E7E54
++:1000F00009653C7E12051205AD026007037DFB55C4
++:10010000D36D2B98FB55041DD36DC86A2F7F011F3B
++:1001100003200048E47C5398FB55D76D1500057803
++:100120000962C86A0962C86AD76D5298FB55D76DD3
++:100130001500150005780A62C86A0A62C86AD76D98
++:100140005298FB55D76D15001500150005780B6208
++:10015000C86A0B62C86AD76D097CDF6D077F000033
++:10016000EB55004D077DFAC1E35706980700CC68B0
++:100170000C6813C20AC20398D9C1E3C1DB57E35F1D
++:10018000E357F352216A8F00D500017D8D00A00551
++:10019000EB5DFB567804037D79042A7D317C79047C
++:1001A000207C700B1103EB53000F6003057D096584
++:1001B000377E0A62357E86980A62327E0965307E15
++:1001C00012051205AD026007027C065A8E98265A67
++:1001D000277F011F03200048E87C700B1103135395
++:1001E000AF98150004780962065A0962265AAE983B
++:1001F0001500150004780A62065A0A62265AAE985B
++:1002000015001500150004780B62065A0B62265A79
++:10021000077C0000EB55004D067DFAC1E357699855
++:1002200007000C6813C20AC26698700B11031353BF
++:100230006C07017CD9C1FB5E8A066B07017CD9C1C2
++:10024000F35EDB59D3588F0110010F398B003CC18D
++:100250002B7DC05AC85B4EC1277C88038906E35CAE
++:10026000FF0D1105FF1DBC053E07004D187D7008F0
++:1002700011007E07097D7D07027D2852E698F8521D
++:10028000DB54BC02CC02097C7C07027D2852EF982B
++:10029000F852D354BC02CC02097D0004DD988B00D7
++:1002A000C052C85359C1D67D0002CD98FF08BF0087
++:1002B0007F07157D8804D500017D8D00A005EB5DCD
++:1002C0008F0212021202FF3ADA05027C3E071899E9
++:1002D000A402DD02027D3E0718995E071899EB55CE
++:1002E0009805EB5DF352FB546A07267D6C07017D90
++:1002F00055996B07577C6907047D6807027D010EDD
++:100300002F999358D600017D8E009355A005935DDB
++:10031000A00602780255045D1D7C004E087C69072A
++:10032000037D0255177E3C99045D147F8906935026
++:100330000048017D2799A099150006780255045DB3
++:100340004F070255245D2F07017CA09917006F0706
++:10035000017C012093559D000700A7D9F598D36C27
++:100360006907047D6807027D010E64999358D600E1
++:10037000017D8E009355A005935DA006027802557D
++:10038000C86D0F7C004E087C6907037D0255097E0D
++:100390007199C86D067F890693500048017D5C996C
++:1003A000A0999A99C36A6907047D6807027D010EC6
++:1003B00087999358D600017D8E009355A005935DD3
++:1003C000A0060278C865045D0F7C004E087C6907B2
++:1003D000037DC865097E9499045D067F8906935064
++:1003E0000048017D7F99A09993559D000700FF6CFF
++:1003F000A7D9F5980000E354EB55004D017CF59822
++:10040000DD98E354EB55FF0A1102FF1A7F07027CC7
++:10041000A005B4999D008C05BA05A0051002BA0488
++:10042000AD0454040600E3C1DB57FB52C36AF35228
++:10043000056A8F00D500017D8D00A005EB5D780475
++:10044000037D79042B7D1E7C7904337CEE56000FEE
++:10045000FB556007027DC36DD599041DC36DC8624D
++:100460003B7E6006027D10021202096A357F12028D
++:10047000096A327F1202096A2F7F011F0320004898
++:10048000E77C099AFB55C76D150015001500057826
++:10049000C8620B6AC8620B6AC76D089AFB55C76DC4
++:1004A000150015000578C8620A6AC8620A6AC76D35
++:1004B000089AFB55C76D15000578C862096AC862BD
++:1004C000096AC76D097C286A077F0000EB55004D5B
++:1004D000057DFAC1DB57BF9977C254040AC2BA99A5
++:1004E000D9C1E3C1DB57F352056A8F00D500017D06
++:1004F0008D00A005FB567804037D7904297D1F7CBF
++:1005000079042E7CE35D700D1105ED55000F600739
++:10051000027D0652329A2652337E6005027D100219
++:100520001202096A2D7F1202096A2A7F1202096AE1
++:10053000277F011F03200048EA7CE3555D9A1500E0
++:1005400015001500047806520B6A26520B6A5C9A55
++:1005500015001500047806520A6A26520A6A5C9A47
++:10056000150004780652096A2652096A097C286A2D
++:10057000077F0000DB57004D057DFAC1DB571B9A52
++:1005800077C254040AC2189AE3C1DB57F352056AD2
++:10059000FB568E02941AC36AC8626902247D941EB7
++:1005A000C36ED36EC8624802C86A9426981EC36E92
++:1005B000D36EC8624C02C86A9826C36E981EC36E7A
++:1005C000C8629826C36E6002097CC8626E02247DF0
++:1005D000096A1E7F0125004D257D849A286A187FAF
++:1005E00004627AC2B89AE36E8F00D805017D8D004F
++:1005F000A005C8626E02107D096A0A7F0120F97C9D
++:10060000286A067F0000004D0D7DFAC1DB576E9A07
++:10061000070004620C6AB59A286AFA7F04627AC2FB
++:1006200058045404286AF47F0AC26B9AD9C1E3C102
++:10063000DB57F352056AFB568E02941A0252690286
++:100640001D7D941E06524802065A9426981E065294
++:100650004C02065A9826981E065260020A7C98267A
++:1006600006526E02237D096A1D7F0125004D247DFF
++:10067000D19A286A177F04627AC2029B8F00D8053C
++:10068000017D8D00A00506526E02107D096A0A7F69
++:100690000120F97C286A067F0000004D0D7DFAC11B
++:1006A000DB57C19A070004620C6AFF9A286AFA7F36
++:1006B00004627AC258045404286AF47F0AC2BE9ABB
++:1006C000016E0B612F7E0B622D7E0B632B7E0C0D5A
++:1006D0001704170417049D04081DCC05017C0C0D9C
++:1006E000D16A000F4207C86FDD6F1C7F8E009D002E
++:1006F00001680B67177ED56B04080278C86F120774
++:10070000117C0B670F7E04080278C86F12070A7C01
++:10071000DD6F087FD169010FC86FDD6F037F0101B5
++:0E0720000004129B0700FF680C680002129B89
++:00000001FF
+diff -Nur linux-3.14.40.orig/firmware/Makefile linux-3.14.40/firmware/Makefile
+--- linux-3.14.40.orig/firmware/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/firmware/Makefile 2015-05-01 14:58:05.763427001 -0500
+@@ -61,6 +61,7 @@
+ radeon/RV770_pfp.bin radeon/RV770_me.bin \
+ radeon/RV730_pfp.bin radeon/RV730_me.bin \
+ radeon/RV710_pfp.bin radeon/RV710_me.bin
++fw-shipped-$(CONFIG_IMX_SDMA) += imx/sdma/sdma-imx6q.bin
+ fw-shipped-$(CONFIG_DVB_AV7110) += av7110/bootcode.bin
+ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
+ fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
+@@ -210,6 +211,8 @@
+ $(obj)/%: $(obj)/%.ihex | $(objtree)/$(obj)/$$(dir %)
+ $(call cmd,ihex)
+
++.NOTPARALLEL: $(obj)/%
++
+ # Don't depend on ihex2fw if we're installing and it already exists.
+ # Putting it after | in the dependencies doesn't seem sufficient when
+ # we're installing after a cross-compile, because ihex2fw has dependencies
+diff -Nur linux-3.14.40.orig/fs/btrfs/Kconfig linux-3.14.40/fs/btrfs/Kconfig
+--- linux-3.14.40.orig/fs/btrfs/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/fs/btrfs/Kconfig 2015-05-01 14:58:05.771427001 -0500
+@@ -1,5 +1,6 @@
+ config BTRFS_FS
+ tristate "Btrfs filesystem support"
++ select LIBCRC32C
+ select CRYPTO
+ select CRYPTO_CRC32C
+ select ZLIB_INFLATE
+diff -Nur linux-3.14.40.orig/fs/buffer.c linux-3.14.40/fs/buffer.c
+--- linux-3.14.40.orig/fs/buffer.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/fs/buffer.c 2015-05-01 14:58:05.791427001 -0500
+@@ -3110,7 +3110,7 @@
+ * until the buffer gets unlocked).
+ *
+ * ll_rw_block sets b_end_io to simple completion handler that marks
+- * the buffer up-to-date (if approriate), unlocks the buffer and wakes
++ * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
+ * any waiters.
+ *
+ * All of the buffers must be for the same device, and must also be a
+diff -Nur linux-3.14.40.orig/fs/compat_binfmt_elf.c linux-3.14.40/fs/compat_binfmt_elf.c
+--- linux-3.14.40.orig/fs/compat_binfmt_elf.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/fs/compat_binfmt_elf.c 2015-05-01 14:58:05.811427001 -0500
+@@ -88,6 +88,11 @@
+ #define ELF_HWCAP COMPAT_ELF_HWCAP
+ #endif
+
++#ifdef COMPAT_ELF_HWCAP2
++#undef ELF_HWCAP2
++#define ELF_HWCAP2 COMPAT_ELF_HWCAP2
++#endif
++
+ #ifdef COMPAT_ARCH_DLINFO
+ #undef ARCH_DLINFO
+ #define ARCH_DLINFO COMPAT_ARCH_DLINFO
+diff -Nur linux-3.14.40.orig/fs/debugfs/inode.c linux-3.14.40/fs/debugfs/inode.c
+--- linux-3.14.40.orig/fs/debugfs/inode.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/fs/debugfs/inode.c 2015-05-01 14:58:05.835427001 -0500
+@@ -367,7 +367,7 @@
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+- * directory dentry if set. If this paramater is NULL, then the
++ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+@@ -409,7 +409,7 @@
+ * @name: a pointer to a string containing the name of the directory to
+ * create.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+- * directory dentry if set. If this paramater is NULL, then the
++ * directory dentry if set. If this parameter is NULL, then the
+ * directory will be created in the root of the debugfs filesystem.
+ *
+ * This function creates a directory in debugfs with the given name.
+@@ -434,7 +434,7 @@
+ * @name: a pointer to a string containing the name of the symbolic link to
+ * create.
+ * @parent: a pointer to the parent dentry for this symbolic link. This
+- * should be a directory dentry if set. If this paramater is NULL,
++ * should be a directory dentry if set. If this parameter is NULL,
+ * then the symbolic link will be created in the root of the debugfs
+ * filesystem.
+ * @target: a pointer to a string containing the path to the target of the
+diff -Nur linux-3.14.40.orig/include/asm-generic/word-at-a-time.h linux-3.14.40/include/asm-generic/word-at-a-time.h
+--- linux-3.14.40.orig/include/asm-generic/word-at-a-time.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/asm-generic/word-at-a-time.h 2015-05-01 14:58:05.851427001 -0500
+@@ -50,7 +50,7 @@
+ }
+
+ #ifndef zero_bytemask
+-#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
++#define zero_bytemask(mask) (~1ul << __fls(mask))
+ #endif
+
+ #endif /* _ASM_WORD_AT_A_TIME_H */
+diff -Nur linux-3.14.40.orig/include/crypto/algapi.h linux-3.14.40/include/crypto/algapi.h
+--- linux-3.14.40.orig/include/crypto/algapi.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/crypto/algapi.h 2015-05-01 14:58:05.859427001 -0500
+@@ -100,9 +100,12 @@
+ void *page;
+ u8 *buffer;
+ u8 *iv;
++ unsigned int ivsize;
+
+ int flags;
+- unsigned int blocksize;
++ unsigned int walk_blocksize;
++ unsigned int cipher_blocksize;
++ unsigned int alignmask;
+ };
+
+ struct ablkcipher_walk {
+@@ -192,6 +195,10 @@
+ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ unsigned int blocksize);
++int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
++ struct blkcipher_walk *walk,
++ struct crypto_aead *tfm,
++ unsigned int blocksize);
+
+ int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err);
+diff -Nur linux-3.14.40.orig/include/drm/drm_fb_helper.h linux-3.14.40/include/drm/drm_fb_helper.h
+--- linux-3.14.40.orig/include/drm/drm_fb_helper.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/drm/drm_fb_helper.h 2015-05-01 14:58:05.879427001 -0500
+@@ -55,7 +55,7 @@
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+- * structure. Futhermore it also needs to allocate the drm
++ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
+ *
+diff -Nur linux-3.14.40.orig/include/dt-bindings/clock/imx6sl-clock.h linux-3.14.40/include/dt-bindings/clock/imx6sl-clock.h
+--- linux-3.14.40.orig/include/dt-bindings/clock/imx6sl-clock.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/dt-bindings/clock/imx6sl-clock.h 2015-05-01 14:58:05.883427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -71,8 +71,8 @@
+ #define IMX6SL_CLK_PERIPH 58
+ #define IMX6SL_CLK_PERIPH2 59
+ #define IMX6SL_CLK_OCRAM_PODF 60
+-#define IMX6SL_CLK_PERIPH_CLK2_PODF 61
+-#define IMX6SL_CLK_PERIPH2_CLK2_PODF 62
++#define IMX6SL_CLK_PERIPH_CLK2 61
++#define IMX6SL_CLK_PERIPH2_CLK2 62
+ #define IMX6SL_CLK_IPG 63
+ #define IMX6SL_CLK_CSI_PODF 64
+ #define IMX6SL_CLK_LCDIF_AXI_PODF 65
+@@ -145,6 +145,7 @@
+ #define IMX6SL_CLK_USDHC4 132
+ #define IMX6SL_CLK_PLL4_AUDIO_DIV 133
+ #define IMX6SL_CLK_SPBA 134
+-#define IMX6SL_CLK_END 135
++#define IMX6SL_CLK_UART_OSC_4M 135
++#define IMX6SL_CLK_END 136
+
+ #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
+diff -Nur linux-3.14.40.orig/include/linux/ahci_platform.h linux-3.14.40/include/linux/ahci_platform.h
+--- linux-3.14.40.orig/include/linux/ahci_platform.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/ahci_platform.h 2015-05-01 14:58:05.899427001 -0500
+@@ -19,15 +19,38 @@
+
+ struct device;
+ struct ata_port_info;
++struct ahci_host_priv;
++struct platform_device;
+
++/*
++ * Note ahci_platform_data is deprecated, it is only kept around for use
++ * by the old da850 and spear13xx ahci code.
++ * New drivers should instead declare their own platform_driver struct, and
++ * use ahci_platform* functions in their own probe, suspend and resume methods.
++ */
+ struct ahci_platform_data {
+ int (*init)(struct device *dev, void __iomem *addr);
+ void (*exit)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+- const struct ata_port_info *ata_port_info;
+- unsigned int force_port_map;
+- unsigned int mask_port_map;
+ };
+
++int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
++void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
++int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
++void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
++struct ahci_host_priv *ahci_platform_get_resources(
++ struct platform_device *pdev);
++int ahci_platform_init_host(struct platform_device *pdev,
++ struct ahci_host_priv *hpriv,
++ const struct ata_port_info *pi_template,
++ unsigned long host_flags,
++ unsigned int force_port_map,
++ unsigned int mask_port_map);
++
++int ahci_platform_suspend_host(struct device *dev);
++int ahci_platform_resume_host(struct device *dev);
++int ahci_platform_suspend(struct device *dev);
++int ahci_platform_resume(struct device *dev);
++
+ #endif /* _AHCI_PLATFORM_H */
+diff -Nur linux-3.14.40.orig/include/linux/amba/clcd.h linux-3.14.40/include/linux/amba/clcd.h
+--- linux-3.14.40.orig/include/linux/amba/clcd.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/amba/clcd.h 2015-05-01 14:58:05.919427001 -0500
+@@ -243,6 +243,9 @@
+ val |= CNTL_BGR;
+ }
+
++ /* Reset the current colour depth */
++ val &= ~CNTL_LCDBPP16_444;
++
+ switch (var->bits_per_pixel) {
+ case 1:
+ val |= CNTL_LCDBPP1;
+@@ -264,14 +267,15 @@
+ */
+ if (amba_part(fb->dev) == 0x110 ||
+ var->green.length == 5)
+- val |= CNTL_LCDBPP16;
++ val |= CNTL_LCDBPP16 | CNTL_BGR;
+ else if (var->green.length == 6)
+- val |= CNTL_LCDBPP16_565;
++ val |= CNTL_LCDBPP16_565 | CNTL_BGR;
+ else
+- val |= CNTL_LCDBPP16_444;
++ val |= CNTL_LCDBPP16_444 | CNTL_BGR;
+ break;
+ case 32:
+ val |= CNTL_LCDBPP24;
++ val &= ~CNTL_BGR;
+ break;
+ }
+
+diff -Nur linux-3.14.40.orig/include/linux/arm-hdlcd.h linux-3.14.40/include/linux/arm-hdlcd.h
+--- linux-3.14.40.orig/include/linux/arm-hdlcd.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/arm-hdlcd.h 2015-05-01 14:58:05.919427001 -0500
+@@ -0,0 +1,122 @@
++/*
++ * include/linux/arm-hdlcd.h
++ *
++ * Copyright (C) 2011 ARM Limited
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive
++ * for more details.
++ *
++ * ARM HDLCD Controller register definition
++ */
++
++#include <linux/fb.h>
++#include <linux/completion.h>
++
++/* register offsets */
++#define HDLCD_REG_VERSION 0x0000 /* ro */
++#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */
++#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */
++#define HDLCD_REG_INT_MASK 0x0018 /* rw */
++#define HDLCD_REG_INT_STATUS 0x001c /* ro */
++#define HDLCD_REG_USER_OUT 0x0020 /* rw */
++#define HDLCD_REG_FB_BASE 0x0100 /* rw */
++#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */
++#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */
++#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */
++#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */
++#define HDLCD_REG_V_SYNC 0x0200 /* rw */
++#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */
++#define HDLCD_REG_V_DATA 0x0208 /* rw */
++#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */
++#define HDLCD_REG_H_SYNC 0x0210 /* rw */
++#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */
++#define HDLCD_REG_H_DATA 0x0218 /* rw */
++#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */
++#define HDLCD_REG_POLARITIES 0x0220 /* rw */
++#define HDLCD_REG_COMMAND 0x0230 /* rw */
++#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */
++#define HDLCD_REG_BLUE_SELECT 0x0244 /* rw */
++#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */
++#define HDLCD_REG_RED_SELECT 0x024c /* rw */
++
++/* version */
++#define HDLCD_PRODUCT_ID 0x1CDC0000
++#define HDLCD_PRODUCT_MASK 0xFFFF0000
++#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00
++#define HDLCD_VERSION_MINOR_MASK 0x000000FF
++
++/* interrupts */
++#define HDLCD_INTERRUPT_DMA_END (1 << 0)
++#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1)
++#define HDLCD_INTERRUPT_VSYNC (1 << 2)
++#define HDLCD_INTERRUPT_UNDERRUN (1 << 3)
++
++/* polarity */
++#define HDLCD_POLARITY_VSYNC (1 << 0)
++#define HDLCD_POLARITY_HSYNC (1 << 1)
++#define HDLCD_POLARITY_DATAEN (1 << 2)
++#define HDLCD_POLARITY_DATA (1 << 3)
++#define HDLCD_POLARITY_PIXELCLK (1 << 4)
++
++/* commands */
++#define HDLCD_COMMAND_DISABLE (0 << 0)
++#define HDLCD_COMMAND_ENABLE (1 << 0)
++
++/* pixel format */
++#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31)
++#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31)
++#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3)
++
++/* bus options */
++#define HDLCD_BUS_BURST_MASK 0x01f
++#define HDLCD_BUS_MAX_OUTSTAND 0xf00
++#define HDLCD_BUS_BURST_NONE (0 << 0)
++#define HDLCD_BUS_BURST_1 (1 << 0)
++#define HDLCD_BUS_BURST_2 (1 << 1)
++#define HDLCD_BUS_BURST_4 (1 << 2)
++#define HDLCD_BUS_BURST_8 (1 << 3)
++#define HDLCD_BUS_BURST_16 (1 << 4)
++
++/* Max resolution supported is 4096x4096, 8 bit per color component,
++ 8 bit alpha, but we are going to choose the usual hardware default
++ (2048x2048, 32 bpp) and enable double buffering */
++#define HDLCD_MAX_XRES 2048
++#define HDLCD_MAX_YRES 2048
++#define HDLCD_MAX_FRAMEBUFFER_SIZE (HDLCD_MAX_XRES * HDLCD_MAX_YRES << 2)
++
++#define HDLCD_MEM_BASE (CONFIG_PAGE_OFFSET - 0x1000000)
++
++#define NR_PALETTE 256
++
++/* OEMs using HDLCD may wish to enable these settings if
++ * display disruption is apparent and you suspect HDLCD
++ * access to RAM may be starved.
++ */
++/* Turn HDLCD default color red instead of black so
++ * that it's easy to see pixel clock data underruns
++ * (compared to other visual disruption)
++ */
++//#define HDLCD_RED_DEFAULT_COLOUR
++/* Add a counter in the IRQ handler to count buffer underruns
++ * and /proc/hdlcd_underrun to read the counter
++ */
++//#define HDLCD_COUNT_BUFFERUNDERRUNS
++/* Restrict height to 1x screen size
++ *
++ */
++//#define HDLCD_NO_VIRTUAL_SCREEN
++
++#ifdef CONFIG_ANDROID
++#define HDLCD_NO_VIRTUAL_SCREEN
++#endif
++
++struct hdlcd_device {
++ struct fb_info fb;
++ struct device *dev;
++ struct clk *clk;
++ void __iomem *base;
++ int irq;
++ struct completion vsync_completion;
++ unsigned char *edid;
++};
+diff -Nur linux-3.14.40.orig/include/linux/backlight.h linux-3.14.40/include/linux/backlight.h
+--- linux-3.14.40.orig/include/linux/backlight.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/backlight.h 2015-05-01 14:58:05.927427001 -0500
+@@ -9,6 +9,7 @@
+ #define _LINUX_BACKLIGHT_H
+
+ #include <linux/device.h>
++#include <linux/fb.h>
+ #include <linux/mutex.h>
+ #include <linux/notifier.h>
+
+@@ -104,6 +105,11 @@
+ struct list_head entry;
+
+ struct device dev;
++
++ /* Multiple framebuffers may share one backlight device */
++ bool fb_bl_on[FB_MAX];
++
++ int use_count;
+ };
+
+ static inline void backlight_update_status(struct backlight_device *bd)
+diff -Nur linux-3.14.40.orig/include/linux/busfreq-imx6.h linux-3.14.40/include/linux/busfreq-imx6.h
+--- linux-3.14.40.orig/include/linux/busfreq-imx6.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/busfreq-imx6.h 2015-05-01 14:58:05.927427001 -0500
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_ARCH_MXC_BUSFREQ_H__
++#define __ASM_ARCH_MXC_BUSFREQ_H__
++
++/*
++ * This enumerates busfreq mode.
++ */
++enum bus_freq_mode {
++ BUS_FREQ_HIGH,
++ BUS_FREQ_MED,
++ BUS_FREQ_AUDIO,
++ BUS_FREQ_LOW,
++};
++void request_bus_freq(enum bus_freq_mode mode);
++void release_bus_freq(enum bus_freq_mode mode);
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/cgroup_subsys.h linux-3.14.40/include/linux/cgroup_subsys.h
+--- linux-3.14.40.orig/include/linux/cgroup_subsys.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/cgroup_subsys.h 2015-05-01 14:58:05.935427001 -0500
+@@ -39,6 +39,10 @@
+ SUBSYS(blkio)
+ #endif
+
++#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_BFQIO)
++SUBSYS(bfqio)
++#endif
++
+ #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
+ SUBSYS(perf)
+ #endif
+diff -Nur linux-3.14.40.orig/include/linux/clk-provider.h linux-3.14.40/include/linux/clk-provider.h
+--- linux-3.14.40.orig/include/linux/clk-provider.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/clk-provider.h 2015-05-01 14:58:05.947427001 -0500
+@@ -30,6 +30,13 @@
+ #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
+ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
+ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
++/*
++ * Basic mux clk, can't switch parent while there is another basic mux clk
++ * being its child. Otherwise, a glitch might be propagated to downstream
++ * clocks through this child mux.
++ */
++#define CLK_IS_BASIC_MUX BIT(9)
++
+
+ struct clk_hw;
+
+diff -Nur linux-3.14.40.orig/include/linux/cma.h linux-3.14.40/include/linux/cma.h
+--- linux-3.14.40.orig/include/linux/cma.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/cma.h 2015-05-01 14:58:05.947427001 -0500
+@@ -0,0 +1,27 @@
++#ifndef __CMA_H__
++#define __CMA_H__
++
++/*
++ * There is always at least global CMA area and a few optional
++ * areas configured in kernel .config.
++ */
++#ifdef CONFIG_CMA_AREAS
++#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
++
++#else
++#define MAX_CMA_AREAS (0)
++
++#endif
++
++struct cma;
++
++extern phys_addr_t cma_get_base(struct cma *cma);
++extern unsigned long cma_get_size(struct cma *cma);
++
++extern int __init cma_declare_contiguous(phys_addr_t size,
++ phys_addr_t base, phys_addr_t limit,
++ phys_addr_t alignment, unsigned int order_per_bit,
++ bool fixed, struct cma **res_cma);
++extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
++extern bool cma_release(struct cma *cma, struct page *pages, int count);
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/cpufeature.h linux-3.14.40/include/linux/cpufeature.h
+--- linux-3.14.40.orig/include/linux/cpufeature.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/cpufeature.h 2015-05-01 14:58:05.947427001 -0500
+@@ -0,0 +1,60 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __LINUX_CPUFEATURE_H
++#define __LINUX_CPUFEATURE_H
++
++#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
++
++#include <linux/mod_devicetable.h>
++#include <asm/cpufeature.h>
++
++/*
++ * Macros imported from <asm/cpufeature.h>:
++ * - cpu_feature(x) ordinal value of feature called 'x'
++ * - cpu_have_feature(u32 n) whether feature #n is available
++ * - MAX_CPU_FEATURES upper bound for feature ordinal values
++ * Optional:
++ * - CPU_FEATURE_TYPEFMT format string fragment for printing the cpu type
++ * - CPU_FEATURE_TYPEVAL set of values matching the format string above
++ */
++
++#ifndef CPU_FEATURE_TYPEFMT
++#define CPU_FEATURE_TYPEFMT "%s"
++#endif
++
++#ifndef CPU_FEATURE_TYPEVAL
++#define CPU_FEATURE_TYPEVAL ELF_PLATFORM
++#endif
++
++/*
++ * Use module_cpu_feature_match(feature, module_init_function) to
++ * declare that
++ * a) the module shall be probed upon discovery of CPU feature 'feature'
++ * (typically at boot time using udev)
++ * b) the module must not be loaded if CPU feature 'feature' is not present
++ * (not even by manual insmod).
++ *
++ * For a list of legal values for 'feature', please consult the file
++ * 'asm/cpufeature.h' of your favorite architecture.
++ */
++#define module_cpu_feature_match(x, __init) \
++static struct cpu_feature const cpu_feature_match_ ## x[] = \
++ { { .feature = cpu_feature(x) }, { } }; \
++MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
++ \
++static int cpu_feature_match_ ## x ## _init(void) \
++{ \
++ if (!cpu_have_feature(cpu_feature(x))) \
++ return -ENODEV; \
++ return __init(); \
++} \
++module_init(cpu_feature_match_ ## x ## _init)
++
++#endif
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/cpufreq.h linux-3.14.40/include/linux/cpufreq.h
+--- linux-3.14.40.orig/include/linux/cpufreq.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/cpufreq.h 2015-05-01 14:58:05.947427001 -0500
+@@ -429,6 +429,9 @@
+ #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
+ extern struct cpufreq_governor cpufreq_gov_ondemand;
+ #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
++#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
++extern struct cpufreq_governor cpufreq_gov_interactive;
++#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
+ #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
+ extern struct cpufreq_governor cpufreq_gov_conservative;
+ #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
+diff -Nur linux-3.14.40.orig/include/linux/cpu.h linux-3.14.40/include/linux/cpu.h
+--- linux-3.14.40.orig/include/linux/cpu.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/cpu.h 2015-05-01 14:58:05.959427001 -0500
+@@ -226,4 +226,11 @@
+ void arch_cpu_idle_exit(void);
+ void arch_cpu_idle_dead(void);
+
++#define IDLE_START 1
++#define IDLE_END 2
++
++void idle_notifier_register(struct notifier_block *n);
++void idle_notifier_unregister(struct notifier_block *n);
++void idle_notifier_call_chain(unsigned long val);
++
+ #endif /* _LINUX_CPU_H_ */
+diff -Nur linux-3.14.40.orig/include/linux/device_cooling.h linux-3.14.40/include/linux/device_cooling.h
+--- linux-3.14.40.orig/include/linux/device_cooling.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/device_cooling.h 2015-05-01 14:58:05.959427001 -0500
+@@ -0,0 +1,45 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#ifndef __DEVICE_THERMAL_H__
++#define __DEVICE_THERMAL_H__
++
++#include <linux/thermal.h>
++
++#ifdef CONFIG_DEVICE_THERMAL
++int register_devfreq_cooling_notifier(struct notifier_block *nb);
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb);
++struct thermal_cooling_device *devfreq_cooling_register(unsigned long max_state);
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev);
++#else
++static inline
++int register_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline
++struct thermal_cooling_device *devfreq_cooling_register(unsigned long max_state)
++{
++ return NULL;
++}
++
++static inline
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
++{
++ return;
++}
++#endif
++#endif /* __DEVICE_THERMAL_H__ */
+diff -Nur linux-3.14.40.orig/include/linux/dma-contiguous.h linux-3.14.40/include/linux/dma-contiguous.h
+--- linux-3.14.40.orig/include/linux/dma-contiguous.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/dma-contiguous.h 2015-05-01 14:58:05.963427001 -0500
+@@ -53,18 +53,13 @@
+
+ #ifdef __KERNEL__
+
++#include <linux/device.h>
++
+ struct cma;
+ struct page;
+-struct device;
+
+ #ifdef CONFIG_DMA_CMA
+
+-/*
+- * There is always at least global CMA area and a few optional device
+- * private areas configured in kernel .config.
+- */
+-#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+-
+ extern struct cma *dma_contiguous_default_area;
+
+ static inline struct cma *dev_get_cma_area(struct device *dev)
+@@ -88,7 +83,8 @@
+ void dma_contiguous_reserve(phys_addr_t addr_limit);
+
+ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+- phys_addr_t limit, struct cma **res_cma);
++ phys_addr_t limit, struct cma **res_cma,
++ bool fixed);
+
+ /**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+@@ -108,7 +104,7 @@
+ {
+ struct cma *cma;
+ int ret;
+- ret = dma_contiguous_reserve_area(size, base, limit, &cma);
++ ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
+ if (ret == 0)
+ dev_set_cma_area(dev, cma);
+
+@@ -122,8 +118,6 @@
+
+ #else
+
+-#define MAX_CMA_AREAS (0)
+-
+ static inline struct cma *dev_get_cma_area(struct device *dev)
+ {
+ return NULL;
+@@ -136,7 +130,9 @@
+ static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+ static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+- phys_addr_t limit, struct cma **res_cma) {
++ phys_addr_t limit, struct cma **res_cma,
++ bool fixed)
++{
+ return -ENOSYS;
+ }
+
+diff -Nur linux-3.14.40.orig/include/linux/dmaengine.h linux-3.14.40/include/linux/dmaengine.h
+--- linux-3.14.40.orig/include/linux/dmaengine.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/dmaengine.h 2015-05-01 14:58:05.983427001 -0500
+@@ -333,6 +333,8 @@
+ * @slave_id: Slave requester id. Only valid for slave channels. The dma
+ * slave peripheral will have unique id as dma requester which need to be
+ * pass as slave config.
++ * @dma_request0: this is the first dma request of this dma channel.
++ * @dma_request1: this is the second dma request of this dma channel.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+@@ -361,6 +363,8 @@
+ u32 dst_maxburst;
+ bool device_fc;
+ unsigned int slave_id;
++ int dma_request0;
++ int dma_request1;
+ };
+
+ /**
+diff -Nur linux-3.14.40.orig/include/linux/fsl_otp.h linux-3.14.40/include/linux/fsl_otp.h
+--- linux-3.14.40.orig/include/linux/fsl_otp.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/fsl_otp.h 2015-05-01 14:58:05.983427001 -0500
+@@ -0,0 +1,6 @@
++#ifndef _LINUX_FSL_OTP_H
++#define _LINUX_FSL_OTP_H
++
++int fsl_otp_readl(unsigned long offset, u32 *value);
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/ftrace.h linux-3.14.40/include/linux/ftrace.h
+--- linux-3.14.40.orig/include/linux/ftrace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/ftrace.h 2015-05-01 14:58:05.991427001 -0500
+@@ -605,25 +605,27 @@
+ #endif
+ }
+
+-#ifndef HAVE_ARCH_CALLER_ADDR
++/* All archs should have this, but we define it for consistency */
++#ifndef ftrace_return_address0
++# define ftrace_return_address0 __builtin_return_address(0)
++#endif
++
++/* Archs may use other ways for ADDR1 and beyond */
++#ifndef ftrace_return_address
+ # ifdef CONFIG_FRAME_POINTER
+-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
++# define ftrace_return_address(n) __builtin_return_address(n)
+ # else
+-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+-# define CALLER_ADDR1 0UL
+-# define CALLER_ADDR2 0UL
+-# define CALLER_ADDR3 0UL
+-# define CALLER_ADDR4 0UL
+-# define CALLER_ADDR5 0UL
+-# define CALLER_ADDR6 0UL
++# define ftrace_return_address(n) 0UL
+ # endif
+-#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
++#endif
++
++#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
++#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
++#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
++#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
++#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
++#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
++#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+
+ #ifdef CONFIG_IRQSOFF_TRACER
+ extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+diff -Nur linux-3.14.40.orig/include/linux/hardirq.h linux-3.14.40/include/linux/hardirq.h
+--- linux-3.14.40.orig/include/linux/hardirq.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/hardirq.h 2015-05-01 14:58:05.991427001 -0500
+@@ -9,6 +9,7 @@
+
+
+ extern void synchronize_irq(unsigned int irq);
++extern void synchronize_hardirq(unsigned int irq);
+
+ #if defined(CONFIG_TINY_RCU)
+
+diff -Nur linux-3.14.40.orig/include/linux/hsi/hsi.h linux-3.14.40/include/linux/hsi/hsi.h
+--- linux-3.14.40.orig/include/linux/hsi/hsi.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/hsi/hsi.h 2015-05-01 14:58:06.003427001 -0500
+@@ -178,7 +178,7 @@
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+- * @actual_len: Actual length of data transfered on completion
++ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel were to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: if true HSI will send/receive a break frame. Data buffers are
+diff -Nur linux-3.14.40.orig/include/linux/ipu.h linux-3.14.40/include/linux/ipu.h
+--- linux-3.14.40.orig/include/linux/ipu.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/ipu.h 2015-05-01 14:58:06.003427001 -0500
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
++ */
++/*!
++ * @file linux/ipu.h
++ *
++ * @brief This file contains the IPU driver API declarations.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef __LINUX_IPU_H__
++#define __LINUX_IPU_H__
++
++#include <linux/interrupt.h>
++#include <uapi/linux/ipu.h>
++
++unsigned int fmt_to_bpp(unsigned int pixelformat);
++cs_t colorspaceofpixel(int fmt);
++int need_csc(int ifmt, int ofmt);
++
++int ipu_queue_task(struct ipu_task *task);
++int ipu_check_task(struct ipu_task *task);
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/ipu-v3.h linux-3.14.40/include/linux/ipu-v3.h
+--- linux-3.14.40.orig/include/linux/ipu-v3.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/ipu-v3.h 2015-05-01 14:58:06.003427001 -0500
+@@ -0,0 +1,752 @@
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ */
++
++#ifndef __LINUX_IPU_V3_H_
++#define __LINUX_IPU_V3_H_
++
++#include <linux/ipu.h>
++
++/* IPU Driver channels definitions. */
++/* Note these are different from IDMA channels */
++#define IPU_MAX_CH 32
++#define _MAKE_CHAN(num, v_in, g_in, a_in, out) \
++ ((num << 24) | (v_in << 18) | (g_in << 12) | (a_in << 6) | out)
++#define _MAKE_ALT_CHAN(ch) (ch | (IPU_MAX_CH << 24))
++#define IPU_CHAN_ID(ch) (ch >> 24)
++#define IPU_CHAN_ALT(ch) (ch & 0x02000000)
++#define IPU_CHAN_ALPHA_IN_DMA(ch) ((uint32_t) (ch >> 6) & 0x3F)
++#define IPU_CHAN_GRAPH_IN_DMA(ch) ((uint32_t) (ch >> 12) & 0x3F)
++#define IPU_CHAN_VIDEO_IN_DMA(ch) ((uint32_t) (ch >> 18) & 0x3F)
++#define IPU_CHAN_OUT_DMA(ch) ((uint32_t) (ch & 0x3F))
++#define NO_DMA 0x3F
++#define ALT 1
++/*!
++ * Enumeration of IPU logical channels. An IPU logical channel is defined as a
++ * combination of an input (memory to IPU), output (IPU to memory), and/or
++ * secondary input IDMA channels and in some cases an Image Converter task.
++ * Some channels consist of only an input or output.
++ */
++typedef enum {
++ CHAN_NONE = -1,
++ MEM_ROT_ENC_MEM = _MAKE_CHAN(1, 45, NO_DMA, NO_DMA, 48),
++ MEM_ROT_VF_MEM = _MAKE_CHAN(2, 46, NO_DMA, NO_DMA, 49),
++ MEM_ROT_PP_MEM = _MAKE_CHAN(3, 47, NO_DMA, NO_DMA, 50),
++
++ MEM_PRP_ENC_MEM = _MAKE_CHAN(4, 12, 14, 17, 20),
++ MEM_PRP_VF_MEM = _MAKE_CHAN(5, 12, 14, 17, 21),
++ MEM_PP_MEM = _MAKE_CHAN(6, 11, 15, 18, 22),
++
++ MEM_DC_SYNC = _MAKE_CHAN(7, 28, NO_DMA, NO_DMA, NO_DMA),
++ MEM_DC_ASYNC = _MAKE_CHAN(8, 41, NO_DMA, NO_DMA, NO_DMA),
++ MEM_BG_SYNC = _MAKE_CHAN(9, 23, NO_DMA, 51, NO_DMA),
++ MEM_FG_SYNC = _MAKE_CHAN(10, 27, NO_DMA, 31, NO_DMA),
++
++ MEM_BG_ASYNC0 = _MAKE_CHAN(11, 24, NO_DMA, 52, NO_DMA),
++ MEM_FG_ASYNC0 = _MAKE_CHAN(12, 29, NO_DMA, 33, NO_DMA),
++ MEM_BG_ASYNC1 = _MAKE_ALT_CHAN(MEM_BG_ASYNC0),
++ MEM_FG_ASYNC1 = _MAKE_ALT_CHAN(MEM_FG_ASYNC0),
++
++ DIRECT_ASYNC0 = _MAKE_CHAN(13, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++ DIRECT_ASYNC1 = _MAKE_CHAN(14, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++
++ CSI_MEM0 = _MAKE_CHAN(15, NO_DMA, NO_DMA, NO_DMA, 0),
++ CSI_MEM1 = _MAKE_CHAN(16, NO_DMA, NO_DMA, NO_DMA, 1),
++ CSI_MEM2 = _MAKE_CHAN(17, NO_DMA, NO_DMA, NO_DMA, 2),
++ CSI_MEM3 = _MAKE_CHAN(18, NO_DMA, NO_DMA, NO_DMA, 3),
++
++ CSI_MEM = CSI_MEM0,
++
++ CSI_PRP_ENC_MEM = _MAKE_CHAN(19, NO_DMA, NO_DMA, NO_DMA, 20),
++ CSI_PRP_VF_MEM = _MAKE_CHAN(20, NO_DMA, NO_DMA, NO_DMA, 21),
++
++ /* for vdi mem->vdi->ic->mem , add graphics plane and alpha*/
++ MEM_VDI_PRP_VF_MEM_P = _MAKE_CHAN(21, 8, 14, 17, 21),
++ MEM_VDI_PRP_VF_MEM = _MAKE_CHAN(22, 9, 14, 17, 21),
++ MEM_VDI_PRP_VF_MEM_N = _MAKE_CHAN(23, 10, 14, 17, 21),
++
++ /* for vdi mem->vdi->mem */
++ MEM_VDI_MEM_P = _MAKE_CHAN(24, 8, NO_DMA, NO_DMA, 5),
++ MEM_VDI_MEM = _MAKE_CHAN(25, 9, NO_DMA, NO_DMA, 5),
++ MEM_VDI_MEM_N = _MAKE_CHAN(26, 10, NO_DMA, NO_DMA, 5),
++
++ /* fake channel for vdoa to link with IPU */
++ MEM_VDOA_MEM = _MAKE_CHAN(27, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++
++ MEM_PP_ADC = CHAN_NONE,
++ ADC_SYS2 = CHAN_NONE,
++
++} ipu_channel_t;
++
++/*!
++ * Enumeration of types of buffers for a logical channel.
++ */
++typedef enum {
++ IPU_OUTPUT_BUFFER = 0, /*!< Buffer for output from IPU */
++ IPU_ALPHA_IN_BUFFER = 1, /*!< Buffer for input to IPU */
++ IPU_GRAPH_IN_BUFFER = 2, /*!< Buffer for input to IPU */
++ IPU_VIDEO_IN_BUFFER = 3, /*!< Buffer for input to IPU */
++ IPU_INPUT_BUFFER = IPU_VIDEO_IN_BUFFER,
++ IPU_SEC_INPUT_BUFFER = IPU_GRAPH_IN_BUFFER,
++} ipu_buffer_t;
++
++#define IPU_PANEL_SERIAL 1
++#define IPU_PANEL_PARALLEL 2
++
++/*!
++ * Enumeration of ADC channel operation mode.
++ */
++typedef enum {
++ Disable,
++ WriteTemplateNonSeq,
++ ReadTemplateNonSeq,
++ WriteTemplateUnCon,
++ ReadTemplateUnCon,
++ WriteDataWithRS,
++ WriteDataWoRS,
++ WriteCmd
++} mcu_mode_t;
++
++/*!
++ * Enumeration of ADC channel addressing mode.
++ */
++typedef enum {
++ FullWoBE,
++ FullWithBE,
++ XY
++} display_addressing_t;
++
++/*!
++ * Union of initialization parameters for a logical channel.
++ */
++typedef union {
++ struct {
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ bool interlaced;
++ } csi_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ } csi_prp_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ } mem_prp_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ } mem_rot_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ ipu_motion_sel motion_sel;
++ enum v4l2_field field_fmt;
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ } csi_prp_vf_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ display_port_t disp;
++ uint32_t out_left;
++ uint32_t out_top;
++ } csi_prp_vf_adc;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ ipu_motion_sel motion_sel;
++ enum v4l2_field field_fmt;
++ } mem_prp_vf_mem;
++ struct {
++ uint32_t temp;
++ } mem_prp_vf_adc;
++ struct {
++ uint32_t temp;
++ } mem_rot_vf_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ } mem_pp_mem;
++ struct {
++ uint32_t temp;
++ } mem_rot_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ display_port_t disp;
++ uint32_t out_left;
++ uint32_t out_top;
++ } mem_pp_adc;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ } mem_dc_sync;
++ struct {
++ uint32_t temp;
++ } mem_sdc_fg;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ bool alpha_chan_en;
++ } mem_dp_bg_sync;
++ struct {
++ uint32_t temp;
++ } mem_sdc_bg;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ bool alpha_chan_en;
++ } mem_dp_fg_sync;
++ struct {
++ uint32_t di;
++ } direct_async;
++ struct {
++ display_port_t disp;
++ mcu_mode_t ch_mode;
++ uint32_t out_left;
++ uint32_t out_top;
++ } adc_sys1;
++ struct {
++ display_port_t disp;
++ mcu_mode_t ch_mode;
++ uint32_t out_left;
++ uint32_t out_top;
++ } adc_sys2;
++} ipu_channel_params_t;
++
++/*
++ * IPU_IRQF_ONESHOT - Interrupt is not reenabled after the irq handler finished.
++ */
++#define IPU_IRQF_NONE 0x00000000
++#define IPU_IRQF_ONESHOT 0x00000001
++
++/*!
++ * Enumeration of IPU interrupt sources.
++ */
++enum ipu_irq_line {
++ IPU_IRQ_CSI0_OUT_EOF = 0,
++ IPU_IRQ_CSI1_OUT_EOF = 1,
++ IPU_IRQ_CSI2_OUT_EOF = 2,
++ IPU_IRQ_CSI3_OUT_EOF = 3,
++ IPU_IRQ_VDIC_OUT_EOF = 5,
++ IPU_IRQ_VDI_P_IN_EOF = 8,
++ IPU_IRQ_VDI_C_IN_EOF = 9,
++ IPU_IRQ_VDI_N_IN_EOF = 10,
++ IPU_IRQ_PP_IN_EOF = 11,
++ IPU_IRQ_PRP_IN_EOF = 12,
++ IPU_IRQ_PRP_GRAPH_IN_EOF = 14,
++ IPU_IRQ_PP_GRAPH_IN_EOF = 15,
++ IPU_IRQ_PRP_ALPHA_IN_EOF = 17,
++ IPU_IRQ_PP_ALPHA_IN_EOF = 18,
++ IPU_IRQ_PRP_ENC_OUT_EOF = 20,
++ IPU_IRQ_PRP_VF_OUT_EOF = 21,
++ IPU_IRQ_PP_OUT_EOF = 22,
++ IPU_IRQ_BG_SYNC_EOF = 23,
++ IPU_IRQ_BG_ASYNC_EOF = 24,
++ IPU_IRQ_FG_SYNC_EOF = 27,
++ IPU_IRQ_DC_SYNC_EOF = 28,
++ IPU_IRQ_FG_ASYNC_EOF = 29,
++ IPU_IRQ_FG_ALPHA_SYNC_EOF = 31,
++
++ IPU_IRQ_FG_ALPHA_ASYNC_EOF = 33,
++ IPU_IRQ_DC_READ_EOF = 40,
++ IPU_IRQ_DC_ASYNC_EOF = 41,
++ IPU_IRQ_DC_CMD1_EOF = 42,
++ IPU_IRQ_DC_CMD2_EOF = 43,
++ IPU_IRQ_DC_MASK_EOF = 44,
++ IPU_IRQ_PRP_ENC_ROT_IN_EOF = 45,
++ IPU_IRQ_PRP_VF_ROT_IN_EOF = 46,
++ IPU_IRQ_PP_ROT_IN_EOF = 47,
++ IPU_IRQ_PRP_ENC_ROT_OUT_EOF = 48,
++ IPU_IRQ_PRP_VF_ROT_OUT_EOF = 49,
++ IPU_IRQ_PP_ROT_OUT_EOF = 50,
++ IPU_IRQ_BG_ALPHA_SYNC_EOF = 51,
++ IPU_IRQ_BG_ALPHA_ASYNC_EOF = 52,
++
++ IPU_IRQ_BG_SYNC_NFACK = 64 + 23,
++ IPU_IRQ_FG_SYNC_NFACK = 64 + 27,
++ IPU_IRQ_DC_SYNC_NFACK = 64 + 28,
++
++ IPU_IRQ_DP_SF_START = 448 + 2,
++ IPU_IRQ_DP_SF_END = 448 + 3,
++ IPU_IRQ_BG_SF_END = IPU_IRQ_DP_SF_END,
++ IPU_IRQ_DC_FC_0 = 448 + 8,
++ IPU_IRQ_DC_FC_1 = 448 + 9,
++ IPU_IRQ_DC_FC_2 = 448 + 10,
++ IPU_IRQ_DC_FC_3 = 448 + 11,
++ IPU_IRQ_DC_FC_4 = 448 + 12,
++ IPU_IRQ_DC_FC_6 = 448 + 13,
++ IPU_IRQ_VSYNC_PRE_0 = 448 + 14,
++ IPU_IRQ_VSYNC_PRE_1 = 448 + 15,
++
++ IPU_IRQ_COUNT
++};
++
++/*!
++ * Bitfield of Display Interface signal polarities.
++ */
++typedef struct {
++ unsigned datamask_en:1;
++ unsigned int_clk:1;
++ unsigned interlaced:1;
++ unsigned odd_field_first:1;
++ unsigned clksel_en:1;
++ unsigned clkidle_en:1;
++ unsigned data_pol:1; /* true = inverted */
++ unsigned clk_pol:1; /* true = rising edge */
++ unsigned enable_pol:1;
++ unsigned Hsync_pol:1; /* true = active high */
++ unsigned Vsync_pol:1;
++} ipu_di_signal_cfg_t;
++
++/*!
++ * Bitfield of CSI signal polarities and modes.
++ */
++
++typedef struct {
++ unsigned data_width:4;
++ unsigned clk_mode:3;
++ unsigned ext_vsync:1;
++ unsigned Vsync_pol:1;
++ unsigned Hsync_pol:1;
++ unsigned pixclk_pol:1;
++ unsigned data_pol:1;
++ unsigned sens_clksrc:1;
++ unsigned pack_tight:1;
++ unsigned force_eof:1;
++ unsigned data_en_pol:1;
++ unsigned data_fmt;
++ unsigned csi;
++ unsigned mclk;
++} ipu_csi_signal_cfg_t;
++
++/*!
++ * Enumeration of CSI data bus widths.
++ */
++enum {
++ IPU_CSI_DATA_WIDTH_4 = 0,
++ IPU_CSI_DATA_WIDTH_8 = 1,
++ IPU_CSI_DATA_WIDTH_10 = 3,
++ IPU_CSI_DATA_WIDTH_16 = 9,
++};
++
++/*!
++ * Enumeration of CSI clock modes.
++ */
++enum {
++ IPU_CSI_CLK_MODE_GATED_CLK,
++ IPU_CSI_CLK_MODE_NONGATED_CLK,
++ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
++ IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
++};
++
++enum {
++ IPU_CSI_MIPI_DI0,
++ IPU_CSI_MIPI_DI1,
++ IPU_CSI_MIPI_DI2,
++ IPU_CSI_MIPI_DI3,
++};
++
++typedef enum {
++ RGB,
++ YCbCr,
++ YUV
++} ipu_color_space_t;
++
++/*!
++ * Enumeration of ADC vertical sync mode.
++ */
++typedef enum {
++ VsyncNone,
++ VsyncInternal,
++ VsyncCSI,
++ VsyncExternal
++} vsync_t;
++
++typedef enum {
++ DAT,
++ CMD
++} cmddata_t;
++
++/*!
++ * Enumeration of ADC display update mode.
++ */
++typedef enum {
++ IPU_ADC_REFRESH_NONE,
++ IPU_ADC_AUTO_REFRESH,
++ IPU_ADC_AUTO_REFRESH_SNOOP,
++ IPU_ADC_SNOOPING,
++} ipu_adc_update_mode_t;
++
++/*!
++ * Enumeration of ADC display interface types (serial or parallel).
++ */
++enum {
++ IPU_ADC_IFC_MODE_SYS80_TYPE1,
++ IPU_ADC_IFC_MODE_SYS80_TYPE2,
++ IPU_ADC_IFC_MODE_SYS68K_TYPE1,
++ IPU_ADC_IFC_MODE_SYS68K_TYPE2,
++ IPU_ADC_IFC_MODE_3WIRE_SERIAL,
++ IPU_ADC_IFC_MODE_4WIRE_SERIAL,
++ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CLK,
++ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CS,
++};
++
++enum {
++ IPU_ADC_IFC_WIDTH_8,
++ IPU_ADC_IFC_WIDTH_16,
++};
++
++/*!
++ * Enumeration of ADC display interface burst mode.
++ */
++enum {
++ IPU_ADC_BURST_WCS,
++ IPU_ADC_BURST_WBLCK,
++ IPU_ADC_BURST_NONE,
++ IPU_ADC_BURST_SERIAL,
++};
++
++/*!
++ * Enumeration of ADC display interface RW signal timing modes.
++ */
++enum {
++ IPU_ADC_SER_NO_RW,
++ IPU_ADC_SER_RW_BEFORE_RS,
++ IPU_ADC_SER_RW_AFTER_RS,
++};
++
++/*!
++ * Bitfield of ADC signal polarities and modes.
++ */
++typedef struct {
++ unsigned data_pol:1;
++ unsigned clk_pol:1;
++ unsigned cs_pol:1;
++ unsigned rs_pol:1;
++ unsigned addr_pol:1;
++ unsigned read_pol:1;
++ unsigned write_pol:1;
++ unsigned Vsync_pol:1;
++ unsigned burst_pol:1;
++ unsigned burst_mode:2;
++ unsigned ifc_mode:3;
++ unsigned ifc_width:5;
++ unsigned ser_preamble_len:4;
++ unsigned ser_preamble:8;
++ unsigned ser_rw_mode:2;
++} ipu_adc_sig_cfg_t;
++
++/*!
++ * Enumeration of ADC template commands.
++ */
++enum {
++ RD_DATA,
++ RD_ACK,
++ RD_WAIT,
++ WR_XADDR,
++ WR_YADDR,
++ WR_ADDR,
++ WR_CMND,
++ WR_DATA,
++};
++
++/*!
++ * Enumeration of ADC template command flow control.
++ */
++enum {
++ SINGLE_STEP,
++ PAUSE,
++ STOP,
++};
++
++
++/*Define template constants*/
++#define ATM_ADDR_RANGE 0x20 /*offset address of DISP */
++#define TEMPLATE_BUF_SIZE 0x20 /*size of template */
++
++/*!
++ * Define to create ADC template command entry.
++ */
++#define ipu_adc_template_gen(oc, rs, fc, dat) (((rs) << 29) | ((fc) << 27) | \
++ ((oc) << 24) | (dat))
++
++typedef struct {
++ u32 reg;
++ u32 value;
++} ipu_lpmc_reg_t;
++
++#define IPU_LPMC_REG_READ 0x80000000L
++
++#define CSI_MCLK_VF 1
++#define CSI_MCLK_ENC 2
++#define CSI_MCLK_RAW 4
++#define CSI_MCLK_I2C 8
++
++struct ipu_soc;
++/* Common IPU API */
++struct ipu_soc *ipu_get_soc(int id);
++int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
++void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel);
++void ipu_disable_hsp_clk(struct ipu_soc *ipu);
++
++static inline bool ipu_can_rotate_in_place(ipu_rotate_mode_t rot)
++{
++#ifdef CONFIG_MXC_IPU_V3D
++ return (rot < IPU_ROTATE_HORIZ_FLIP);
++#else
++ return (rot < IPU_ROTATE_90_RIGHT);
++#endif
++}
++
++int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ ipu_rotate_mode_t rot_mode,
++ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
++ dma_addr_t phyaddr_2,
++ uint32_t u_offset, uint32_t v_offset);
++
++int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum, dma_addr_t phyaddr);
++
++int32_t ipu_update_channel_offset(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t vertical_offset, uint32_t horizontal_offset);
++
++int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum);
++int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum);
++
++int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
++int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
++
++int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel);
++int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum);
++void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum);
++uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type);
++int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel);
++int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop);
++int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch);
++uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
++
++int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi);
++int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi);
++
++int ipu_lowpwr_display_enable(void);
++int ipu_lowpwr_display_disable(void);
++
++int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq);
++void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq);
++void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq);
++int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
++ irqreturn_t(*handler) (int, void *),
++ uint32_t irq_flags, const char *devname, void *dev_id);
++void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id);
++bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq);
++void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
++int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t band_height);
++
++/* two stripe calculations */
++struct stripe_param{
++ unsigned int input_width; /* width of the input stripe */
++ unsigned int output_width; /* width of the output stripe */
++ unsigned int input_column; /* the first column on the input stripe */
++ unsigned int output_column; /* the first column on the output stripe */
++ unsigned int idr;
++	/* inverse downsizing ratio parameter; expressed as a power of 2 */
++ unsigned int irr;
++ /* inverse resizing ratio parameter; expressed as a multiple of 2^-13 */
++};
++int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
++ unsigned int output_frame_width,
++ const unsigned int maximal_stripe_width,
++ const unsigned long long cirr,
++ const unsigned int equal_stripes,
++ u32 input_pixelformat,
++ u32 output_pixelformat,
++ struct stripe_param *left,
++ struct stripe_param *right);
++
++/* SDC API */
++int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp,
++ uint32_t pixel_clk,
++ uint16_t width, uint16_t height,
++ uint32_t pixel_fmt,
++ uint16_t h_start_width, uint16_t h_sync_width,
++ uint16_t h_end_width, uint16_t v_start_width,
++ uint16_t v_sync_width, uint16_t v_end_width,
++ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig);
++
++void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp);
++
++int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t x_pos,
++ int16_t y_pos);
++int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t *x_pos,
++ int16_t *y_pos);
++int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ uint8_t alpha);
++int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ uint32_t colorKey);
++int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ int constk[], int slopek[]);
++
++int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
++ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig);
++void ipu_disp_direct_write(struct ipu_soc *ipu, ipu_channel_t channel, u32 value, u32 offset);
++void ipu_reset_disp_panel(struct ipu_soc *ipu);
++
++/* CMOS Sensor Interface API */
++int32_t ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
++ uint32_t pixel_fmt, ipu_csi_signal_cfg_t sig);
++
++int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi);
++
++int32_t ipu_csi_enable_mclk(struct ipu_soc *ipu, int src, bool flag, bool wait);
++
++static inline int32_t ipu_csi_enable_mclk_if(struct ipu_soc *ipu, int src, uint32_t csi,
++ bool flag, bool wait)
++{
++ return ipu_csi_enable_mclk(ipu, csi, flag, wait);
++}
++
++int ipu_csi_read_mclk_flag(void);
++
++void ipu_csi_flash_strobe(bool flag);
++
++void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi);
++
++void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi);
++
++void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi);
++
++uint32_t bytes_per_pixel(uint32_t fmt);
++
++bool ipu_ch_param_bad_alpha_pos(uint32_t fmt);
++
++struct ipuv3_fb_platform_data {
++ char disp_dev[32];
++ u32 interface_pix_fmt;
++ char *mode_str;
++ int default_bpp;
++ bool int_clk;
++
++ /* reserved mem */
++ resource_size_t res_base[2];
++ resource_size_t res_size[2];
++
++ /*
++ * Late init to avoid display channel being
++ * re-initialized as we've probably setup the
++ * channel in bootloader.
++ */
++ bool late_init;
++};
++
++#endif /* __LINUX_IPU_V3_H_ */
+diff -Nur linux-3.14.40.orig/include/linux/isl29023.h linux-3.14.40/include/linux/isl29023.h
+--- linux-3.14.40.orig/include/linux/isl29023.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/isl29023.h 2015-05-01 14:58:06.003427001 -0500
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __ISL29023_H__
++#define __ISL29023_H__
++
++#include <linux/types.h>
++
++#define ISL29023_PD_MODE 0x0
++#define ISL29023_ALS_ONCE_MODE 0x1
++#define ISL29023_IR_ONCE_MODE 0x2
++#define ISL29023_ALS_CONT_MODE 0x5
++#define ISL29023_IR_CONT_MODE 0x6
++
++#define ISL29023_INT_PERSISTS_1 0x0
++#define ISL29023_INT_PERSISTS_4 0x1
++#define ISL29023_INT_PERSISTS_8 0x2
++#define ISL29023_INT_PERSISTS_16 0x3
++
++#define ISL29023_RES_16 0x0
++#define ISL29023_RES_12 0x1
++#define ISL29023_RES_8 0x2
++#define ISL29023_RES_4 0x3
++
++#define ISL29023_RANGE_1K 0x0
++#define ISL29023_RANGE_4K 0x1
++#define ISL29023_RANGE_16K 0x2
++#define ISL29023_RANGE_64K 0x3
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/kfifo.h linux-3.14.40/include/linux/kfifo.h
+--- linux-3.14.40.orig/include/linux/kfifo.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/kfifo.h 2015-05-01 14:58:06.003427001 -0500
+@@ -722,7 +722,7 @@
+ /**
+ * kfifo_dma_out_finish - finish a DMA OUT operation
+ * @fifo: address of the fifo to be used
+- * @len: number of bytes transferd
++ * @len: number of bytes transferred
+ *
+ * This macro finish a DMA OUT operation. The out counter will be updated by
+ * the len parameter. No error checking will be done.
+diff -Nur linux-3.14.40.orig/include/linux/mailbox_client.h linux-3.14.40/include/linux/mailbox_client.h
+--- linux-3.14.40.orig/include/linux/mailbox_client.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mailbox_client.h 2015-05-01 14:58:06.003427001 -0500
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2014 Linaro Ltd.
++ * Author: Jassi Brar <jassisinghbrar@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __MAILBOX_CLIENT_H
++#define __MAILBOX_CLIENT_H
++
++#include <linux/of.h>
++
++struct mbox_chan;
++
++/**
++ * struct mbox_client - User of a mailbox
++ * @dev: The client device
++ * @chan_name: The "controller:channel" this client wants
++ * @rx_callback: Atomic callback to provide client the data received
++ * @tx_done: Atomic callback to tell client of data transmission
++ * @tx_block: If the mbox_send_message should block until data is
++ * transmitted.
++ * @tx_tout: Max block period in ms before TX is assumed failure
++ * @knows_txdone: if the client could run the TX state machine. Usually
++ * if the client receives some ACK packet for transmission.
++ * Unused if the controller already has TX_Done/RTR IRQ.
++ */
++struct mbox_client {
++ struct device *dev;
++ const char *chan_name;
++ void (*rx_callback)(struct mbox_client *cl, void *mssg);
++ void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
++ bool tx_block;
++ unsigned long tx_tout;
++ bool knows_txdone;
++};
++
++struct mbox_chan *mbox_request_channel(struct mbox_client *cl);
++int mbox_send_message(struct mbox_chan *chan, void *mssg);
++void mbox_client_txdone(struct mbox_chan *chan, int r);
++bool mbox_client_peek_data(struct mbox_chan *chan);
++void mbox_free_channel(struct mbox_chan *chan);
++
++#endif /* __MAILBOX_CLIENT_H */
+diff -Nur linux-3.14.40.orig/include/linux/mailbox_controller.h linux-3.14.40/include/linux/mailbox_controller.h
+--- linux-3.14.40.orig/include/linux/mailbox_controller.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mailbox_controller.h 2015-05-01 14:58:06.003427001 -0500
+@@ -0,0 +1,121 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __MAILBOX_CONTROLLER_H
++#define __MAILBOX_CONTROLLER_H
++
++#include <linux/of.h>
++
++struct mbox_chan;
++
++/**
++ * struct mbox_chan_ops - s/w representation of a communication chan
++ * @send_data: The API asks the MBOX controller driver, in atomic
++ * context try to transmit a message on the bus. Returns 0 if
++ * data is accepted for transmission, -EBUSY while rejecting
++ * if the remote hasn't yet read the last data sent. Actual
++ * transmission of data is reported by the controller via
++ * mbox_chan_txdone (if it has some TX ACK irq). It must not
++ * block.
++ * @startup: Called when a client requests the chan. The controller
++ * could ask clients for additional parameters of communication
++ * to be provided via client's chan_data. This call may
++ * block. After this call the Controller must forward any
++ * data received on the chan by calling mbox_chan_received_data.
++ * @shutdown: Called when a client relinquishes control of a chan.
++ *		This call may block too. The controller must not forward
++ * any received data anymore.
++ * @last_tx_done: If the controller sets 'txdone_poll', the API calls
++ * this to poll status of last TX. The controller must
++ * give priority to IRQ method over polling and never
++ * set both txdone_poll and txdone_irq. Only in polling
++ * mode 'send_data' is expected to return -EBUSY.
++ * Used only if txdone_poll:=true && txdone_irq:=false
++ * @peek_data: Atomic check for any received data. Return true if controller
++ * has some data to push to the client. False otherwise.
++ */
++struct mbox_chan_ops {
++ int (*send_data)(struct mbox_chan *chan, void *data);
++ int (*startup)(struct mbox_chan *chan);
++ void (*shutdown)(struct mbox_chan *chan);
++ bool (*last_tx_done)(struct mbox_chan *chan);
++ bool (*peek_data)(struct mbox_chan *chan);
++};
++
++/**
++ * struct mbox_controller - Controller of a class of communication chans
++ * @dev: Device backing this controller
++ * @controller_name: Literal name of the controller.
++ * @ops: Operators that work on each communication chan
++ * @chans: Null terminated array of chans.
++ * @txdone_irq: Indicates if the controller can report to API when
++ * the last transmitted data was read by the remote.
++ * Eg, if it has some TX ACK irq.
++ * @txdone_poll: If the controller can read but not report the TX
++ *			done. E.g., some register shows the TX status but
++ *			no interrupt is raised. Ignored if 'txdone_irq' is set.
++ * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
++ * last TX's status after these many millisecs
++ */
++struct mbox_controller {
++ struct device *dev;
++ struct mbox_chan_ops *ops;
++ struct mbox_chan *chans;
++ int num_chans;
++ bool txdone_irq;
++ bool txdone_poll;
++ unsigned txpoll_period;
++ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
++ const struct of_phandle_args *sp);
++ /*
++ * If the controller supports only TXDONE_BY_POLL,
++ * this timer polls all the links for txdone.
++ */
++ struct timer_list poll;
++ unsigned period;
++ /* Hook to add to the global controller list */
++ struct list_head node;
++};
++
++/*
++ * The length of circular buffer for queuing messages from a client.
++ * 'msg_count' tracks the number of buffered messages while 'msg_free'
++ * is the index where the next message would be buffered.
++ * We shouldn't need it too big because every transfer is interrupt
++ * triggered and if we have lots of data to transfer, the interrupt
++ * latencies are going to be the bottleneck, not the buffer length.
++ * Besides, mbox_send_message could be called from atomic context and
++ * the client could also queue another message from the notifier 'tx_done'
++ * of the last transfer done.
++ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
++ * print, it needs to be taken from config option or somesuch.
++ */
++#define MBOX_TX_QUEUE_LEN 20
++
++struct mbox_chan {
++ struct mbox_controller *mbox; /* Parent Controller */
++ unsigned txdone_method;
++
++ /* client */
++ struct mbox_client *cl;
++ struct completion tx_complete;
++
++ void *active_req;
++ unsigned msg_count, msg_free;
++ void *msg_data[MBOX_TX_QUEUE_LEN];
++ /* Access to the channel */
++ spinlock_t lock;
++
++ /* Private data for controller */
++ void *con_priv;
++};
++
++int mbox_controller_register(struct mbox_controller *mbox);
++void mbox_chan_received_data(struct mbox_chan *chan, void *data);
++void mbox_chan_txdone(struct mbox_chan *chan, int r);
++void mbox_controller_unregister(struct mbox_controller *mbox);
++
++#endif /* __MAILBOX_CONTROLLER_H */
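On the controller side, a driver fills in mbox_chan_ops, points a struct mbox_controller at its channel array and registers it; incoming data and TX completion are then reported back through mbox_chan_received_data() and mbox_chan_txdone(). A rough sketch of a single-channel controller, assuming an invented register layout (DEMO_*) and ordinary platform-device probe glue, not the actual i.MX driver:

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mailbox_controller.h>

#define DEMO_TX		0x00			/* hypothetical TX data register */
#define DEMO_RX		0x04			/* hypothetical RX data register */
#define DEMO_STAT	0x08			/* hypothetical status register */
#define DEMO_STAT_BUSY	BIT(0)			/* remote hasn't read the last TX word */
#define DEMO_STAT_RXRDY	BIT(1)			/* a word is waiting in DEMO_RX */

struct demo_mbox {
	void __iomem		*regs;
	struct mbox_chan	chan;
	struct mbox_controller	controller;
};

static int demo_send_data(struct mbox_chan *chan, void *data)
{
	struct demo_mbox *dm = chan->con_priv;

	if (readl(dm->regs + DEMO_STAT) & DEMO_STAT_BUSY)
		return -EBUSY;			/* reject, as the ops doc requires */

	writel(*(u32 *)data, dm->regs + DEMO_TX);
	return 0;				/* accepted; completion is signalled by irq */
}

static irqreturn_t demo_isr(int irq, void *p)
{
	struct demo_mbox *dm = p;
	u32 stat = readl(dm->regs + DEMO_STAT);

	if (stat & DEMO_STAT_RXRDY) {
		u32 msg = readl(dm->regs + DEMO_RX);

		mbox_chan_received_data(&dm->chan, &msg);
	}
	if (!(stat & DEMO_STAT_BUSY))
		mbox_chan_txdone(&dm->chan, 0);	/* remote has read our last word */

	return IRQ_HANDLED;
}

static struct mbox_chan_ops demo_ops = {
	.send_data	= demo_send_data,
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_mbox *dm;
	struct resource *res;
	int irq, ret;

	dm = devm_kzalloc(&pdev->dev, sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dm->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dm->regs))
		return PTR_ERR(dm->regs);

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, demo_isr, 0, "demo-mbox", dm);
	if (ret)
		return ret;

	dm->chan.con_priv	  = dm;
	dm->controller.dev	  = &pdev->dev;
	dm->controller.ops	  = &demo_ops;
	dm->controller.chans	  = &dm->chan;
	dm->controller.num_chans  = 1;
	dm->controller.txdone_irq = true;	/* demo_isr() signals mbox_chan_txdone() */

	return mbox_controller_register(&dm->controller);
}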
+diff -Nur linux-3.14.40.orig/include/linux/mailbox.h linux-3.14.40/include/linux/mailbox.h
+--- linux-3.14.40.orig/include/linux/mailbox.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mailbox.h 1969-12-31 18:00:00.000000000 -0600
+@@ -1,17 +0,0 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program. If not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-int pl320_ipc_transmit(u32 *data);
+-int pl320_ipc_register_notifier(struct notifier_block *nb);
+-int pl320_ipc_unregister_notifier(struct notifier_block *nb);
+diff -Nur linux-3.14.40.orig/include/linux/memblock.h linux-3.14.40/include/linux/memblock.h
+--- linux-3.14.40.orig/include/linux/memblock.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/memblock.h 2015-05-01 14:58:06.007427001 -0500
+@@ -221,6 +221,8 @@
+ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+ #define MEMBLOCK_ALLOC_ACCESSIBLE 0
+
++phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
++ phys_addr_t start, phys_addr_t end);
+ phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+ phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+diff -Nur linux-3.14.40.orig/include/linux/mfd/abx500/ab8500.h linux-3.14.40/include/linux/mfd/abx500/ab8500.h
+--- linux-3.14.40.orig/include/linux/mfd/abx500/ab8500.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mfd/abx500/ab8500.h 2015-05-01 14:58:06.015427001 -0500
+@@ -347,7 +347,6 @@
+ struct mutex lock;
+ struct mutex irq_lock;
+ atomic_t transfer_ongoing;
+- int irq_base;
+ int irq;
+ struct irq_domain *domain;
+ enum ab8500_version version;
+@@ -378,7 +377,6 @@
+ * @regulator: machine-specific constraints for regulators
+ */
+ struct ab8500_platform_data {
+- int irq_base;
+ void (*init) (struct ab8500 *);
+ struct ab8500_regulator_platform_data *regulator;
+ struct ab8500_codec_platform_data *codec;
+diff -Nur linux-3.14.40.orig/include/linux/mfd/dbx500-prcmu.h linux-3.14.40/include/linux/mfd/dbx500-prcmu.h
+--- linux-3.14.40.orig/include/linux/mfd/dbx500-prcmu.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mfd/dbx500-prcmu.h 2015-05-01 14:58:06.015427001 -0500
+@@ -183,8 +183,6 @@
+ bool enable_set_ddr_opp;
+ bool enable_ape_opp_100_voltage;
+ struct ab8500_platform_data *ab_platdata;
+- int ab_irq;
+- int irq_base;
+ u32 version_offset;
+ u32 legacy_offset;
+ u32 adt_offset;
+diff -Nur linux-3.14.40.orig/include/linux/mfd/mxc-hdmi-core.h linux-3.14.40/include/linux/mfd/mxc-hdmi-core.h
+--- linux-3.14.40.orig/include/linux/mfd/mxc-hdmi-core.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mfd/mxc-hdmi-core.h 2015-05-01 14:58:06.015427001 -0500
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __LINUX_MXC_HDMI_CORE_H_
++#define __LINUX_MXC_HDMI_CORE_H_
++
++#include <video/mxc_edid.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++
++#define IRQ_DISABLE_SUCCEED 0
++#define IRQ_DISABLE_FAIL 1
++
++bool hdmi_check_overflow(void);
++
++u8 hdmi_readb(unsigned int reg);
++void hdmi_writeb(u8 value, unsigned int reg);
++void hdmi_mask_writeb(u8 data, unsigned int addr, u8 shift, u8 mask);
++unsigned int hdmi_read4(unsigned int reg);
++void hdmi_write4(unsigned int value, unsigned int reg);
++
++void hdmi_irq_init(void);
++void hdmi_irq_enable(int irq);
++unsigned int hdmi_irq_disable(int irq);
++
++void hdmi_set_sample_rate(unsigned int rate);
++void hdmi_set_dma_mode(unsigned int dma_running);
++void hdmi_init_clk_regenerator(void);
++void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock);
++
++void hdmi_set_edid_cfg(int edid_status, struct mxc_edid_cfg *cfg);
++int hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg);
++
++extern int mxc_hdmi_ipu_id;
++extern int mxc_hdmi_disp_id;
++
++void hdmi_set_registered(int registered);
++int hdmi_get_registered(void);
++int mxc_hdmi_abort_stream(void);
++int mxc_hdmi_register_audio(struct snd_pcm_substream *substream);
++void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream);
++void hdmi_set_dvi_mode(unsigned int state);
++unsigned int hdmi_set_cable_state(unsigned int state);
++unsigned int hdmi_set_blank_state(unsigned int state);
++int check_hdmi_state(void);
++
++void hdmi_cec_start_device(void);
++void hdmi_cec_stop_device(void);
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h linux-3.14.40/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+--- linux-3.14.40.orig/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h 2015-05-01 14:58:06.031427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -122,7 +122,9 @@
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_MASK BIT(13)
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_ENET_RX_ER 0x0
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_GPIO_1 BIT(13)
+-#define IMX6Q_GPR1_GINT BIT(12)
++#define IMX6Q_GPR1_GINT_MASK BIT(12)
++#define IMX6Q_GPR1_GINT_CLEAR 0x0
++#define IMX6Q_GPR1_GINT_ASSERT BIT(12)
+ #define IMX6Q_GPR1_ADDRS3_MASK (0x3 << 10)
+ #define IMX6Q_GPR1_ADDRS3_32MB (0x0 << 10)
+ #define IMX6Q_GPR1_ADDRS3_64MB (0x1 << 10)
+diff -Nur linux-3.14.40.orig/include/linux/mipi_csi2.h linux-3.14.40/include/linux/mipi_csi2.h
+--- linux-3.14.40.orig/include/linux/mipi_csi2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mipi_csi2.h 2015-05-01 14:58:06.035427001 -0500
+@@ -0,0 +1,93 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __INCLUDE_MIPI_CSI2_H
++#define __INCLUDE_MIPI_CSI2_H
++
++/* MIPI CSI2 registers */
++#define MIPI_CSI2_REG(offset) (offset)
++
++#define MIPI_CSI2_VERSION MIPI_CSI2_REG(0x000)
++#define MIPI_CSI2_N_LANES MIPI_CSI2_REG(0x004)
++#define MIPI_CSI2_PHY_SHUTDOWNZ MIPI_CSI2_REG(0x008)
++#define MIPI_CSI2_DPHY_RSTZ MIPI_CSI2_REG(0x00c)
++#define MIPI_CSI2_CSI2_RESETN MIPI_CSI2_REG(0x010)
++#define MIPI_CSI2_PHY_STATE MIPI_CSI2_REG(0x014)
++#define MIPI_CSI2_DATA_IDS_1 MIPI_CSI2_REG(0x018)
++#define MIPI_CSI2_DATA_IDS_2 MIPI_CSI2_REG(0x01c)
++#define MIPI_CSI2_ERR1 MIPI_CSI2_REG(0x020)
++#define MIPI_CSI2_ERR2 MIPI_CSI2_REG(0x024)
++#define MIPI_CSI2_MASK1 MIPI_CSI2_REG(0x028)
++#define MIPI_CSI2_MASK2 MIPI_CSI2_REG(0x02c)
++#define MIPI_CSI2_PHY_TST_CTRL0 MIPI_CSI2_REG(0x030)
++#define MIPI_CSI2_PHY_TST_CTRL1 MIPI_CSI2_REG(0x034)
++#define MIPI_CSI2_SFT_RESET MIPI_CSI2_REG(0xf00)
++
++/* mipi data type */
++#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
++#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
++#define MIPI_DT_YUV422 0x1e /* UYVY... */
++#define MIPI_DT_RGB444 0x20
++#define MIPI_DT_RGB555 0x21
++#define MIPI_DT_RGB565 0x22
++#define MIPI_DT_RGB666 0x23
++#define MIPI_DT_RGB888 0x24
++#define MIPI_DT_RAW6 0x28
++#define MIPI_DT_RAW7 0x29
++#define MIPI_DT_RAW8 0x2a
++#define MIPI_DT_RAW10 0x2b
++#define MIPI_DT_RAW12 0x2c
++#define MIPI_DT_RAW14 0x2d
++
++
++struct mipi_csi2_info;
++/* mipi csi2 API */
++struct mipi_csi2_info *mipi_csi2_get_info(void);
++
++bool mipi_csi2_enable(struct mipi_csi2_info *info);
++
++bool mipi_csi2_disable(struct mipi_csi2_info *info);
++
++bool mipi_csi2_get_status(struct mipi_csi2_info *info);
++
++int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
++ unsigned int datatype);
++
++unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info);
++
++int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info);
++
++void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info);
++
++int mipi_csi2_reset(struct mipi_csi2_info *info);
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/mipi_dsi.h linux-3.14.40/include/linux/mipi_dsi.h
+--- linux-3.14.40.orig/include/linux/mipi_dsi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mipi_dsi.h 2015-05-01 14:58:06.035427001 -0500
+@@ -0,0 +1,171 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __INCLUDE_MIPI_DSI_H
++#define __INCLUDE_MIPI_DSI_H
++
++#define MIPI_DSI_VERSION (0x000)
++#define MIPI_DSI_PWR_UP (0x004)
++#define MIPI_DSI_CLKMGR_CFG (0x008)
++#define MIPI_DSI_DPI_CFG (0x00c)
++#define MIPI_DSI_DBI_CFG (0x010)
++#define MIPI_DSI_DBIS_CMDSIZE (0x014)
++#define MIPI_DSI_PCKHDL_CFG (0x018)
++#define MIPI_DSI_VID_MODE_CFG (0x01c)
++#define MIPI_DSI_VID_PKT_CFG (0x020)
++#define MIPI_DSI_CMD_MODE_CFG (0x024)
++#define MIPI_DSI_TMR_LINE_CFG (0x028)
++#define MIPI_DSI_VTIMING_CFG (0x02c)
++#define MIPI_DSI_PHY_TMR_CFG (0x030)
++#define MIPI_DSI_GEN_HDR (0x034)
++#define MIPI_DSI_GEN_PLD_DATA (0x038)
++#define MIPI_DSI_CMD_PKT_STATUS (0x03c)
++#define MIPI_DSI_TO_CNT_CFG (0x040)
++#define MIPI_DSI_ERROR_ST0 (0x044)
++#define MIPI_DSI_ERROR_ST1 (0x048)
++#define MIPI_DSI_ERROR_MSK0 (0x04c)
++#define MIPI_DSI_ERROR_MSK1 (0x050)
++#define MIPI_DSI_PHY_RSTZ (0x054)
++#define MIPI_DSI_PHY_IF_CFG (0x058)
++#define MIPI_DSI_PHY_IF_CTRL (0x05c)
++#define MIPI_DSI_PHY_STATUS (0x060)
++#define MIPI_DSI_PHY_TST_CTRL0 (0x064)
++#define MIPI_DSI_PHY_TST_CTRL1 (0x068)
++
++#define DSI_PWRUP_RESET (0x0 << 0)
++#define DSI_PWRUP_POWERUP (0x1 << 0)
++
++#define DSI_DPI_CFG_VID_SHIFT (0)
++#define DSI_DPI_CFG_VID_MASK (0x3)
++#define DSI_DPI_CFG_COLORCODE_SHIFT (2)
++#define DSI_DPI_CFG_COLORCODE_MASK (0x7)
++#define DSI_DPI_CFG_DATAEN_ACT_LOW (0x1 << 5)
++#define DSI_DPI_CFG_DATAEN_ACT_HIGH (0x0 << 5)
++#define DSI_DPI_CFG_VSYNC_ACT_LOW (0x1 << 6)
++#define DSI_DPI_CFG_VSYNC_ACT_HIGH (0x0 << 6)
++#define DSI_DPI_CFG_HSYNC_ACT_LOW (0x1 << 7)
++#define DSI_DPI_CFG_HSYNC_ACT_HIGH (0x0 << 7)
++#define DSI_DPI_CFG_SHUTD_ACT_LOW (0x1 << 8)
++#define DSI_DPI_CFG_SHUTD_ACT_HIGH (0x0 << 8)
++#define DSI_DPI_CFG_COLORMODE_ACT_LOW (0x1 << 9)
++#define DSI_DPI_CFG_COLORMODE_ACT_HIGH (0x0 << 9)
++#define DSI_DPI_CFG_EN18LOOSELY (0x1 << 10)
++
++#define DSI_PCKHDL_CFG_EN_EOTP_TX (0x1 << 0)
++#define DSI_PCKHDL_CFG_EN_EOTP_RX (0x1 << 1)
++#define DSI_PCKHDL_CFG_EN_BTA (0x1 << 2)
++#define DSI_PCKHDL_CFG_EN_ECC_RX (0x1 << 3)
++#define DSI_PCKHDL_CFG_EN_CRC_RX (0x1 << 4)
++#define DSI_PCKHDL_CFG_GEN_VID_RX_MASK (0x3)
++#define DSI_PCKHDL_CFG_GEN_VID_RX_SHIFT (5)
++
++#define DSI_VID_MODE_CFG_EN (0x1 << 0)
++#define DSI_VID_MODE_CFG_EN_BURSTMODE (0x3 << 1)
++#define DSI_VID_MODE_CFG_TYPE_MASK (0x3)
++#define DSI_VID_MODE_CFG_TYPE_SHIFT (1)
++#define DSI_VID_MODE_CFG_EN_LP_VSA (0x1 << 3)
++#define DSI_VID_MODE_CFG_EN_LP_VBP (0x1 << 4)
++#define DSI_VID_MODE_CFG_EN_LP_VFP (0x1 << 5)
++#define DSI_VID_MODE_CFG_EN_LP_VACT (0x1 << 6)
++#define DSI_VID_MODE_CFG_EN_LP_HBP (0x1 << 7)
++#define DSI_VID_MODE_CFG_EN_LP_HFP (0x1 << 8)
++#define DSI_VID_MODE_CFG_EN_MULTI_PKT (0x1 << 9)
++#define DSI_VID_MODE_CFG_EN_NULL_PKT (0x1 << 10)
++#define DSI_VID_MODE_CFG_EN_FRAME_ACK (0x1 << 11)
++#define DSI_VID_MODE_CFG_EN_LP_MODE (DSI_VID_MODE_CFG_EN_LP_VSA | \
++ DSI_VID_MODE_CFG_EN_LP_VBP | \
++ DSI_VID_MODE_CFG_EN_LP_VFP | \
++ DSI_VID_MODE_CFG_EN_LP_HFP | \
++ DSI_VID_MODE_CFG_EN_LP_HBP | \
++ DSI_VID_MODE_CFG_EN_LP_VACT)
++
++
++
++#define DSI_VID_PKT_CFG_VID_PKT_SZ_MASK (0x7ff)
++#define DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT (0)
++#define DSI_VID_PKT_CFG_NUM_CHUNKS_MASK (0x3ff)
++#define DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT (11)
++#define DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK (0x3ff)
++#define DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT (21)
++
++#define MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER (0x1FFF)
++#define MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE (0x1 << 0)
++
++#define DSI_TME_LINE_CFG_HSA_TIME_MASK (0x1ff)
++#define DSI_TME_LINE_CFG_HSA_TIME_SHIFT (0)
++#define DSI_TME_LINE_CFG_HBP_TIME_MASK (0x1ff)
++#define DSI_TME_LINE_CFG_HBP_TIME_SHIFT (9)
++#define DSI_TME_LINE_CFG_HLINE_TIME_MASK (0x3fff)
++#define DSI_TME_LINE_CFG_HLINE_TIME_SHIFT (18)
++
++#define DSI_VTIMING_CFG_VSA_LINES_MASK (0xf)
++#define DSI_VTIMING_CFG_VSA_LINES_SHIFT (0)
++#define DSI_VTIMING_CFG_VBP_LINES_MASK (0x3f)
++#define DSI_VTIMING_CFG_VBP_LINES_SHIFT (4)
++#define DSI_VTIMING_CFG_VFP_LINES_MASK (0x3f)
++#define DSI_VTIMING_CFG_VFP_LINES_SHIFT (10)
++#define DSI_VTIMING_CFG_V_ACT_LINES_MASK (0x7ff)
++#define DSI_VTIMING_CFG_V_ACT_LINES_SHIFT (16)
++
++#define DSI_PHY_TMR_CFG_BTA_TIME_MASK (0xfff)
++#define DSI_PHY_TMR_CFG_BTA_TIME_SHIFT (0)
++#define DSI_PHY_TMR_CFG_LP2HS_TIME_MASK (0xff)
++#define DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT (12)
++#define DSI_PHY_TMR_CFG_HS2LP_TIME_MASK (0xff)
++#define DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT (20)
++
++#define DSI_PHY_IF_CFG_N_LANES_MASK (0x3)
++#define DSI_PHY_IF_CFG_N_LANES_SHIFT (0)
++#define DSI_PHY_IF_CFG_WAIT_TIME_MASK (0xff)
++#define DSI_PHY_IF_CFG_WAIT_TIME_SHIFT (2)
++
++#define DSI_PHY_RSTZ_EN_CLK (0x1 << 2)
++#define DSI_PHY_RSTZ_DISABLE_RST (0x1 << 1)
++#define DSI_PHY_RSTZ_DISABLE_SHUTDOWN (0x1 << 0)
++#define DSI_PHY_RSTZ_RST (0x0)
++
++#define DSI_PHY_STATUS_LOCK (0x1 << 0)
++#define DSI_PHY_STATUS_STOPSTATE_CLK_LANE (0x1 << 2)
++
++#define DSI_GEN_HDR_TYPE_MASK (0xff)
++#define DSI_GEN_HDR_TYPE_SHIFT (0)
++#define DSI_GEN_HDR_DATA_MASK (0xffff)
++#define DSI_GEN_HDR_DATA_SHIFT (8)
++
++#define DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY (0x1 << 0)
++#define DSI_CMD_PKT_STATUS_GEN_CMD_FULL (0x1 << 1)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY (0x1 << 2)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL (0x1 << 3)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY (0x1 << 4)
++#define DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY (0x1 << 6)
++
++#define DSI_ERROR_MSK0_ALL_MASK (0x1fffff)
++#define DSI_ERROR_MSK1_ALL_MASK (0x3ffff)
++
++#define DSI_PHY_IF_CTRL_RESET (0x0)
++#define DSI_PHY_IF_CTRL_TX_REQ_CLK_HS (0x1 << 0)
++#define DSI_PHY_IF_CTRL_TX_REQ_CLK_ULPS (0x1 << 1)
++#define DSI_PHY_IF_CTRL_TX_EXIT_CLK_ULPS (0x1 << 2)
++#define DSI_PHY_IF_CTRL_TX_REQ_DATA_ULPS (0x1 << 3)
++#define DSI_PHY_IF_CTRL_TX_EXIT_DATA_ULPS (0x1 << 4)
++#define DSI_PHY_IF_CTRL_TX_TRIG_MASK (0xF)
++#define DSI_PHY_IF_CTRL_TX_TRIG_SHIFT (5)
++
++#define DSI_PHY_CLK_INIT_COMMAND (0x44)
++#define DSI_GEN_PLD_DATA_BUF_SIZE (0x4)
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/mmc/card.h linux-3.14.40/include/linux/mmc/card.h
+--- linux-3.14.40.orig/include/linux/mmc/card.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mmc/card.h 2015-05-01 14:58:06.039427001 -0500
+@@ -86,10 +86,13 @@
+ unsigned int data_sector_size; /* 512 bytes or 4KB */
+ unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
+ unsigned int boot_ro_lock; /* ro lock support */
++ unsigned int boot_size;
+ bool boot_ro_lockable;
+ u8 raw_exception_status; /* 54 */
+ u8 raw_partition_support; /* 160 */
+ u8 raw_rpmb_size_mult; /* 168 */
++ u8 boot_bus_width; /* 177 */
++ u8 boot_config; /* 179 */
+ u8 raw_erased_mem_count; /* 181 */
+ u8 raw_ext_csd_structure; /* 194 */
+ u8 raw_card_type; /* 196 */
+@@ -102,6 +105,7 @@
+ u8 raw_hc_erase_gap_size; /* 221 */
+ u8 raw_erase_timeout_mult; /* 223 */
+ u8 raw_hc_erase_grp_size; /* 224 */
++ u8 boot_info; /* 228 */
+ u8 raw_sec_trim_mult; /* 229 */
+ u8 raw_sec_erase_mult; /* 230 */
+ u8 raw_sec_feature_support;/* 231 */
+diff -Nur linux-3.14.40.orig/include/linux/mmc/host.h linux-3.14.40/include/linux/mmc/host.h
+--- linux-3.14.40.orig/include/linux/mmc/host.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mmc/host.h 2015-05-01 14:58:06.047427001 -0500
+@@ -282,6 +282,7 @@
+ MMC_CAP2_PACKED_WR)
+ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
+ #define MMC_CAP2_SANITIZE (1 << 15) /* Support Sanitize */
++#define MMC_CAP2_SDIO_NOTHREAD (1 << 16)
+
+ mmc_pm_flag_t pm_caps; /* supported pm features */
+
+@@ -297,6 +298,11 @@
+ unsigned long clkgate_delay;
+ #endif
+
++ /* card specific properties to deal with power and reset */
++ struct regulator *card_regulator; /* External VCC needed by the card */
++ struct gpio_desc *card_reset_gpios[2]; /* External resets, active low */
++ struct clk *card_clk; /* External clock needed by the card */
++
+ /* host specific block data */
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+ unsigned short max_segs; /* see blk_queue_max_segments */
+@@ -397,6 +403,8 @@
+ wake_up_process(host->sdio_irq_thread);
+ }
+
++void sdio_run_irqs(struct mmc_host *host);
++
+ #ifdef CONFIG_REGULATOR
+ int mmc_regulator_get_ocrmask(struct regulator *supply);
+ int mmc_regulator_set_ocr(struct mmc_host *mmc,
+diff -Nur linux-3.14.40.orig/include/linux/mmc/mmc.h linux-3.14.40/include/linux/mmc/mmc.h
+--- linux-3.14.40.orig/include/linux/mmc/mmc.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mmc/mmc.h 2015-05-01 14:58:06.063427001 -0500
+@@ -292,6 +292,7 @@
+ #define EXT_CSD_RPMB_MULT 168 /* RO */
+ #define EXT_CSD_BOOT_WP 173 /* R/W */
+ #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
++#define EXT_CSD_BOOT_BUS_WIDTH 177 /* R/W */
+ #define EXT_CSD_PART_CONFIG 179 /* R/W */
+ #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
+ #define EXT_CSD_BUS_WIDTH 183 /* R/W */
+@@ -313,6 +314,7 @@
+ #define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */
+ #define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */
+ #define EXT_CSD_BOOT_MULT 226 /* RO */
++#define EXT_CSD_BOOT_INFO		228	/* RO, 1 byte */
+ #define EXT_CSD_SEC_TRIM_MULT 229 /* RO */
+ #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */
+ #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */
+@@ -378,6 +380,29 @@
+ #define EXT_CSD_SEC_GB_CL_EN BIT(4)
+ #define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */
+
++#define EXT_CSD_BOOT_BUS_WIDTH_MASK (0x1F)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_MASK (0x3 << 3)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_SDR_NORMAL (0x0)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_SDR_HIGH (0x1)
++#define EXT_CSD_BOOT_BUS_WIDTH_MODE_DDR (0x2)
++#define EXT_CSD_BOOT_BUS_WIDTH_RST_WIDTH (1 << 2)
++#define EXT_CSD_BOOT_BUS_WIDTH_WIDTH_MASK (0x3)
++#define EXT_CSD_BOOT_BUS_WIDTH_1_SDR_4_DDR (0x0)
++#define EXT_CSD_BOOT_BUS_WIDTH_4_SDR_4_DDR (0x1)
++#define EXT_CSD_BOOT_BUS_WIDTH_8_SDR_8_DDR (0x2)
++
++#define EXT_CSD_BOOT_ACK_ENABLE (0x1 << 6)
++#define EXT_CSD_BOOT_PARTITION_ENABLE_MASK (0x7 << 3)
++#define EXT_CSD_BOOT_PARTITION_DISABLE (0x0)
++#define EXT_CSD_BOOT_PARTITION_PART1 (0x1 << 3)
++#define EXT_CSD_BOOT_PARTITION_PART2 (0x2 << 3)
++#define EXT_CSD_BOOT_PARTITION_USER (0x7 << 3)
++
++#define EXT_CSD_BOOT_PARTITION_ACCESS_MASK (0x7)
++#define EXT_CSD_BOOT_PARTITION_ACCESS_DISABLE (0x0)
++#define EXT_CSD_BOOT_PARTITION_ACCESS_PART1 (0x1)
++#define EXT_CSD_BOOT_PARTITION_ACCESS_PART2 (0x2)
++
+ #define EXT_CSD_RST_N_EN_MASK 0x3
+ #define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */
+
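The EXT_CSD_BOOT_* masks above describe the single PART_CONFIG byte (EXT_CSD[179], cached as boot_config in struct mmc_ext_csd earlier in this patch): the boot-partition enable field, the boot-ACK bit and the current partition-access field. A small decoding sketch, for illustration only (the helper name is made up):

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/mmc/mmc.h>

/* Split the cached boot_config byte (EXT_CSD[179]) into its fields. */
static void demo_parse_part_config(u8 part_config)
{
	u8 boot_part = (part_config & EXT_CSD_BOOT_PARTITION_ENABLE_MASK) >> 3;
	u8 access = part_config & EXT_CSD_BOOT_PARTITION_ACCESS_MASK;
	bool ack = part_config & EXT_CSD_BOOT_ACK_ENABLE;

	/* boot_part: 0 = boot disabled, 1/2 = boot partition 1/2, 7 = user area */
	pr_info("boot from %u, access %u, boot ack %sabled\n",
		boot_part, access, ack ? "en" : "dis");
}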
+diff -Nur linux-3.14.40.orig/include/linux/mmc/sdhci.h linux-3.14.40/include/linux/mmc/sdhci.h
+--- linux-3.14.40.orig/include/linux/mmc/sdhci.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mmc/sdhci.h 2015-05-01 14:58:06.067427001 -0500
+@@ -57,12 +57,8 @@
+ #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
+ /* Controller reports inverted write-protect state */
+ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+-/* Controller has nonstandard clock management */
+-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
+ /* Controller does not like fast PIO transfers */
+ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
+-/* Controller losing signal/interrupt enable states after reset */
+-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
+ /* Controller has to be forced to use block size of 2048 bytes */
+ #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+ /* Controller cannot do multi-block transfers */
+@@ -100,6 +96,7 @@
+ #define SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1<<5)
+ /* Controller does not support HS200 */
+ #define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
++#define SDHCI_QUIRK2_NOSTD_TIMEOUT_COUNTER (1<<7)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+@@ -145,6 +142,7 @@
+
+ bool runtime_suspended; /* Host is runtime suspended */
+ bool bus_on; /* Bus power prevents runtime suspend */
++ bool preset_enabled; /* Preset is enabled */
+
+ struct mmc_request *mrq; /* Current request */
+ struct mmc_command *cmd; /* Current command */
+@@ -162,8 +160,7 @@
+ dma_addr_t adma_addr; /* Mapped ADMA descr. table */
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+- struct tasklet_struct card_tasklet; /* Tasklet structures */
+- struct tasklet_struct finish_tasklet;
++ struct tasklet_struct finish_tasklet; /* Tasklet structures */
+
+ struct timer_list timer; /* Timer for timeouts */
+
+@@ -175,6 +172,13 @@
+ unsigned int ocr_avail_mmc;
+ u32 ocr_mask; /* available voltages */
+
++ unsigned timing; /* Current timing */
++
++ u32 thread_isr;
++
++ /* cached registers */
++ u32 ier;
++
+ wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
+ unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
+
+diff -Nur linux-3.14.40.orig/include/linux/mmc/sdio_ids.h linux-3.14.40/include/linux/mmc/sdio_ids.h
+--- linux-3.14.40.orig/include/linux/mmc/sdio_ids.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mmc/sdio_ids.h 2015-05-01 14:58:06.067427001 -0500
+@@ -31,6 +31,7 @@
+ #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+ #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+ #define SDIO_DEVICE_ID_BROADCOM_43362 43362
++#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+
+ #define SDIO_VENDOR_ID_INTEL 0x0089
+ #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
+diff -Nur linux-3.14.40.orig/include/linux/mod_devicetable.h linux-3.14.40/include/linux/mod_devicetable.h
+--- linux-3.14.40.orig/include/linux/mod_devicetable.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mod_devicetable.h 2015-05-01 14:58:06.083427001 -0500
+@@ -564,6 +564,15 @@
+ #define X86_MODEL_ANY 0
+ #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
+
++/*
++ * Generic table type for matching CPU features.
++ * @feature: the bit number of the feature (0 - 65535)
++ */
++
++struct cpu_feature {
++ __u16 feature;
++};
++
+ #define IPACK_ANY_FORMAT 0xff
+ #define IPACK_ANY_ID (~0)
+ struct ipack_device_id {
+diff -Nur linux-3.14.40.orig/include/linux/mtd/map.h linux-3.14.40/include/linux/mtd/map.h
+--- linux-3.14.40.orig/include/linux/mtd/map.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/mtd/map.h 2015-05-01 14:58:06.091427001 -0500
+@@ -438,7 +438,7 @@
+ if (map->cached)
+ memcpy(to, (char *)map->cached + from, len);
+ else
+- memcpy_fromio(to, map->virt + from, len);
++ memcpy(to, map->virt + from, len);
+ }
+
+ static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+diff -Nur linux-3.14.40.orig/include/linux/mxc_asrc.h linux-3.14.40/include/linux/mxc_asrc.h
+--- linux-3.14.40.orig/include/linux/mxc_asrc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mxc_asrc.h 2015-05-01 14:58:06.091427001 -0500
+@@ -0,0 +1,386 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ *
++ * @file mxc_asrc.h
++ *
++ * @brief i.MX Asynchronous Sample Rate Converter
++ *
++ * @ingroup Audio
++ */
++
++#ifndef __MXC_ASRC_H__
++#define __MXC_ASRC_H__
++
++#include <uapi/linux/mxc_asrc.h>
++#include <linux/scatterlist.h>
++
++#define ASRC_DMA_BUFFER_NUM 2
++#define ASRC_INPUTFIFO_THRESHOLD 32
++#define ASRC_OUTPUTFIFO_THRESHOLD 32
++#define ASRC_FIFO_THRESHOLD_MIN 0
++#define ASRC_FIFO_THRESHOLD_MAX 63
++#define ASRC_DMA_BUFFER_SIZE (1024 * 48 * 4)
++#define ASRC_MAX_BUFFER_SIZE (1024 * 48)
++#define ASRC_OUTPUT_LAST_SAMPLE_DEFAULT 8
++
++
++/* Ideal Ratio mode doesn't care about the outclk frequency, so it is fixed */
++#define ASRC_PRESCALER_IDEAL_RATIO 5
++/* SPDIF rxclk pulse rate is 128 * samplerate, so 2 ^ 7 */
++#define ASRC_PRESCALER_SPDIF_RX 7
++/* SPDIF txclk pulse rate is 64 * samplerate, so 2 ^ 6 */
++#define ASRC_PRESCALER_SPDIF_TX 6
++/* I2S bclk is 16 * 2 = 32, so 2 ^ 5 */
++#define ASRC_PRESCALER_I2S_16BIT 5
++/* I2S bclk is 24 * 2 = 48 -> 64, so 2 ^ 6 */
++#define ASRC_PRESCALER_I2S_24BIT 6
++
++
++#define REG_ASRCTR 0x00
++#define REG_ASRIER 0x04
++#define REG_ASRCNCR 0x0C
++#define REG_ASRCFG 0x10
++#define REG_ASRCSR 0x14
++
++#define REG_ASRCDR1 0x18
++#define REG_ASRCDR2 0x1C
++#define REG_ASRCDR(x) ((x < 2) ? REG_ASRCDR1 : REG_ASRCDR2)
++
++#define REG_ASRSTR 0x20
++#define REG_ASRRA 0x24
++#define REG_ASRRB 0x28
++#define REG_ASRRC 0x2C
++#define REG_ASRPM1 0x40
++#define REG_ASRPM2 0x44
++#define REG_ASRPM3 0x48
++#define REG_ASRPM4 0x4C
++#define REG_ASRPM5 0x50
++#define REG_ASRTFR1 0x54
++#define REG_ASRCCR 0x5C
++
++#define REG_ASRDIA 0x60
++#define REG_ASRDOA 0x64
++#define REG_ASRDIB 0x68
++#define REG_ASRDOB 0x6C
++#define REG_ASRDIC 0x70
++#define REG_ASRDOC 0x74
++#define REG_ASRDI(x) (REG_ASRDIA + (x << 3))
++#define REG_ASRDO(x) (REG_ASRDOA + (x << 3))
++
++#define REG_ASRIDRHA 0x80
++#define REG_ASRIDRLA 0x84
++#define REG_ASRIDRHB 0x88
++#define REG_ASRIDRLB 0x8C
++#define REG_ASRIDRHC 0x90
++#define REG_ASRIDRLC 0x94
++#define REG_ASRIDRH(x) (REG_ASRIDRHA + (x << 3))
++#define REG_ASRIDRL(x) (REG_ASRIDRLA + (x << 3))
++
++#define REG_ASR76K 0x98
++#define REG_ASR56K 0x9C
++
++#define REG_ASRMCRA 0xA0
++#define REG_ASRFSTA 0xA4
++#define REG_ASRMCRB 0xA8
++#define REG_ASRFSTB 0xAC
++#define REG_ASRMCRC 0xB0
++#define REG_ASRFSTC 0xB4
++#define REG_ASRMCR(x) (REG_ASRMCRA + (x << 3))
++#define REG_ASRFST(x) (REG_ASRFSTA + (x << 3))
++
++#define REG_ASRMCR1A 0xC0
++#define REG_ASRMCR1B 0xC4
++#define REG_ASRMCR1C 0xC8
++#define REG_ASRMCR1(x) (REG_ASRMCR1A + (x << 2))
++
++
++/* REG0 0x00 REG_ASRCTR */
++#define ASRCTR_ATSx_SHIFT(x) (20 + x)
++#define ASRCTR_ATSx_MASK(x) (1 << ASRCTR_ATSx_SHIFT(x))
++#define ASRCTR_ATS(x) (1 << ASRCTR_ATSx_SHIFT(x))
++#define ASRCTR_USRx_SHIFT(x) (14 + (x << 1))
++#define ASRCTR_USRx_MASK(x) (1 << ASRCTR_USRx_SHIFT(x))
++#define ASRCTR_USR(x) (1 << ASRCTR_USRx_SHIFT(x))
++#define ASRCTR_IDRx_SHIFT(x) (13 + (x << 1))
++#define ASRCTR_IDRx_MASK(x) (1 << ASRCTR_IDRx_SHIFT(x))
++#define ASRCTR_IDR(x) (1 << ASRCTR_IDRx_SHIFT(x))
++#define ASRCTR_SRST_SHIFT 4
++#define ASRCTR_SRST_MASK (1 << ASRCTR_SRST_SHIFT)
++#define ASRCTR_SRST (1 << ASRCTR_SRST_SHIFT)
++#define ASRCTR_ASRCEx_SHIFT(x) (1 + x)
++#define ASRCTR_ASRCEx_MASK(x) (1 << ASRCTR_ASRCEx_SHIFT(x))
++#define ASRCTR_ASRCE(x) (1 << ASRCTR_ASRCEx_SHIFT(x))
++#define ASRCTR_ASRCEN_SHIFT 0
++#define ASRCTR_ASRCEN_MASK (1 << ASRCTR_ASRCEN_SHIFT)
++#define ASRCTR_ASRCEN (1 << ASRCTR_ASRCEN_SHIFT)
++
++/* REG1 0x04 REG_ASRIER */
++#define ASRIER_AFPWE_SHIFT 7
++#define ASRIER_AFPWE_MASK (1 << ASRIER_AFPWE_SHIFT)
++#define ASRIER_AFPWE (1 << ASRIER_AFPWE_SHIFT)
++#define ASRIER_AOLIE_SHIFT 6
++#define ASRIER_AOLIE_MASK (1 << ASRIER_AOLIE_SHIFT)
++#define ASRIER_AOLIE (1 << ASRIER_AOLIE_SHIFT)
++#define ASRIER_ADOEx_SHIFT(x) (3 + x)
++#define ASRIER_ADOEx_MASK(x) (1 << ASRIER_ADOEx_SHIFT(x))
++#define ASRIER_ADOE(x) (1 << ASRIER_ADOEx_SHIFT(x))
++#define ASRIER_ADIEx_SHIFT(x) (0 + x)
++#define ASRIER_ADIEx_MASK(x) (1 << ASRIER_ADIEx_SHIFT(x))
++#define ASRIER_ADIE(x) (1 << ASRIER_ADIEx_SHIFT(x))
++
++/* REG2 0x0C REG_ASRCNCR */
++#define ASRCNCR_ANCx_SHIFT(x, b) (b * x)
++#define ASRCNCR_ANCx_MASK(x, b) (((1 << b) - 1) << ASRCNCR_ANCx_SHIFT(x, b))
++#define ASRCNCR_ANCx_get(x, v, b) ((v & ASRCNCR_ANCx_MASK(x, b)) >> ASRCNCR_ANCx_SHIFT(x, b))
++#define ASRCNCR_ANCx_set(x, v, b) ((v << ASRCNCR_ANCx_SHIFT(x, b)) & ASRCNCR_ANCx_MASK(x, b))
++
++/* REG3 0x10 REG_ASRCFG */
++#define ASRCFG_INIRQx_SHIFT(x) (21 + x)
++#define ASRCFG_INIRQx_MASK(x) (1 << ASRCFG_INIRQx_SHIFT(x))
++#define ASRCFG_INIRQx (1 << ASRCFG_INIRQx_SHIFT(x))
++#define ASRCFG_NDPRx_SHIFT(x) (18 + x)
++#define ASRCFG_NDPRx_MASK(x) (1 << ASRCFG_NDPRx_SHIFT(x))
++#define ASRCFG_NDPRx (1 << ASRCFG_NDPRx_SHIFT(x))
++#define ASRCFG_POSTMODx_SHIFT(x) (8 + (x << 2))
++#define ASRCFG_POSTMODx_WIDTH 2
++#define ASRCFG_POSTMODx_MASK(x) (((1 << ASRCFG_POSTMODx_WIDTH) - 1) << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMOD(x, v) ((v) << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_UP(x) (0 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_DCON(x) (1 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_DOWN(x) (2 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_PREMODx_SHIFT(x) (6 + (x << 2))
++#define ASRCFG_PREMODx_WIDTH 2
++#define ASRCFG_PREMODx_MASK(x) (((1 << ASRCFG_PREMODx_WIDTH) - 1) << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMOD(x, v) ((v) << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_UP(x) (0 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_DCON(x) (1 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_DOWN(x) (2 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_BYPASS(x) (3 << ASRCFG_PREMODx_SHIFT(x))
++
++/* REG4 0x14 REG_ASRCSR */
++#define ASRCSR_AxCSx_WIDTH 4
++#define ASRCSR_AxCSx_MASK ((1 << ASRCSR_AxCSx_WIDTH) - 1)
++#define ASRCSR_AOCSx_SHIFT(x) (12 + (x << 2))
++#define ASRCSR_AOCSx_MASK(x) (((1 << ASRCSR_AxCSx_WIDTH) - 1) << ASRCSR_AOCSx_SHIFT(x))
++#define ASRCSR_AOCS(x, v) ((v) << ASRCSR_AOCSx_SHIFT(x))
++#define ASRCSR_AICSx_SHIFT(x) (x << 2)
++#define ASRCSR_AICSx_MASK(x) (((1 << ASRCSR_AxCSx_WIDTH) - 1) << ASRCSR_AICSx_SHIFT(x))
++#define ASRCSR_AICS(x, v) ((v) << ASRCSR_AICSx_SHIFT(x))
++
++/* REG5&6 0x18 & 0x1C REG_ASRCDR1 & ASRCDR2 */
++#define ASRCDRx_AxCPx_WIDTH 3
++#define ASRCDRx_AICPx_SHIFT(x) (0 + (x % 2) * 6)
++#define ASRCDRx_AICPx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AICPx_SHIFT(x))
++#define ASRCDRx_AICP(x, v) ((v) << ASRCDRx_AICPx_SHIFT(x))
++#define ASRCDRx_AICDx_SHIFT(x) (3 + (x % 2) * 6)
++#define ASRCDRx_AICDx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AICDx_SHIFT(x))
++#define ASRCDRx_AICD(x, v) ((v) << ASRCDRx_AICDx_SHIFT(x))
++#define ASRCDRx_AOCPx_SHIFT(x) ((x < 2) ? 12 + x * 6 : 6)
++#define ASRCDRx_AOCPx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AOCPx_SHIFT(x))
++#define ASRCDRx_AOCP(x, v) ((v) << ASRCDRx_AOCPx_SHIFT(x))
++#define ASRCDRx_AOCDx_SHIFT(x) ((x < 2) ? 15 + x * 6 : 9)
++#define ASRCDRx_AOCDx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AOCDx_SHIFT(x))
++#define ASRCDRx_AOCD(x, v) ((v) << ASRCDRx_AOCDx_SHIFT(x))
++
++/* REG7 0x20 REG_ASRSTR */
++#define ASRSTR_DSLCNT_SHIFT 21
++#define ASRSTR_DSLCNT_MASK (1 << ASRSTR_DSLCNT_SHIFT)
++#define ASRSTR_DSLCNT (1 << ASRSTR_DSLCNT_SHIFT)
++#define ASRSTR_ATQOL_SHIFT 20
++#define ASRSTR_ATQOL_MASK (1 << ASRSTR_ATQOL_SHIFT)
++#define ASRSTR_ATQOL (1 << ASRSTR_ATQOL_SHIFT)
++#define ASRSTR_AOOLx_SHIFT(x) (17 + x)
++#define ASRSTR_AOOLx_MASK(x) (1 << ASRSTR_AOOLx_SHIFT(x))
++#define ASRSTR_AOOL(x) (1 << ASRSTR_AOOLx_SHIFT(x))
++#define ASRSTR_AIOLx_SHIFT(x) (14 + x)
++#define ASRSTR_AIOLx_MASK(x) (1 << ASRSTR_AIOLx_SHIFT(x))
++#define ASRSTR_AIOL(x) (1 << ASRSTR_AIOLx_SHIFT(x))
++#define ASRSTR_AODOx_SHIFT(x) (11 + x)
++#define ASRSTR_AODOx_MASK(x) (1 << ASRSTR_AODOx_SHIFT(x))
++#define ASRSTR_AODO(x) (1 << ASRSTR_AODOx_SHIFT(x))
++#define ASRSTR_AIDUx_SHIFT(x) (8 + x)
++#define ASRSTR_AIDUx_MASK(x) (1 << ASRSTR_AIDUx_SHIFT(x))
++#define ASRSTR_AIDU(x) (1 << ASRSTR_AIDUx_SHIFT(x))
++#define ASRSTR_FPWT_SHIFT 7
++#define ASRSTR_FPWT_MASK (1 << ASRSTR_FPWT_SHIFT)
++#define ASRSTR_FPWT (1 << ASRSTR_FPWT_SHIFT)
++#define ASRSTR_AOLE_SHIFT 6
++#define ASRSTR_AOLE_MASK (1 << ASRSTR_AOLE_SHIFT)
++#define ASRSTR_AOLE (1 << ASRSTR_AOLE_SHIFT)
++#define ASRSTR_AODEx_SHIFT(x) (3 + x)
++#define ASRSTR_AODFx_MASK(x) (1 << ASRSTR_AODEx_SHIFT(x))
++#define ASRSTR_AODF(x) (1 << ASRSTR_AODEx_SHIFT(x))
++#define ASRSTR_AIDEx_SHIFT(x) (0 + x)
++#define ASRSTR_AIDEx_MASK(x) (1 << ASRSTR_AIDEx_SHIFT(x))
++#define ASRSTR_AIDE(x) (1 << ASRSTR_AIDEx_SHIFT(x))
++
++/* REG10 0x54 REG_ASRTFR1 */
++#define ASRTFR1_TF_BASE_WIDTH 7
++#define ASRTFR1_TF_BASE_SHIFT 6
++#define ASRTFR1_TF_BASE_MASK (((1 << ASRTFR1_TF_BASE_WIDTH) - 1) << ASRTFR1_TF_BASE_SHIFT)
++#define ASRTFR1_TF_BASE(x) ((x) << ASRTFR1_TF_BASE_SHIFT)
++
++/*
++ * REG22 0xA0 REG_ASRMCRA
++ * REG24 0xA8 REG_ASRMCRB
++ * REG26 0xB0 REG_ASRMCRC
++ */
++#define ASRMCRx_ZEROBUFx_SHIFT 23
++#define ASRMCRx_ZEROBUFxCLR_MASK (1 << ASRMCRx_ZEROBUFx_SHIFT)
++#define ASRMCRx_ZEROBUFxCLR (1 << ASRMCRx_ZEROBUFx_SHIFT)
++#define ASRMCRx_EXTTHRSHx_SHIFT 22
++#define ASRMCRx_EXTTHRSHx_MASK (1 << ASRMCRx_EXTTHRSHx_SHIFT)
++#define ASRMCRx_EXTTHRSHx (1 << ASRMCRx_EXTTHRSHx_SHIFT)
++#define ASRMCRx_BUFSTALLx_SHIFT 21
++#define ASRMCRx_BUFSTALLx_MASK (1 << ASRMCRx_BUFSTALLx_SHIFT)
++#define ASRMCRx_BUFSTALLx (1 << ASRMCRx_BUFSTALLx_SHIFT)
++#define ASRMCRx_BYPASSPOLYx_SHIFT 20
++#define ASRMCRx_BYPASSPOLYx_MASK (1 << ASRMCRx_BYPASSPOLYx_SHIFT)
++#define ASRMCRx_BYPASSPOLYx (1 << ASRMCRx_BYPASSPOLYx_SHIFT)
++#define ASRMCRx_OUTFIFO_THRESHOLD_WIDTH 6
++#define ASRMCRx_OUTFIFO_THRESHOLD_SHIFT 12
++#define ASRMCRx_OUTFIFO_THRESHOLD_MASK (((1 << ASRMCRx_OUTFIFO_THRESHOLD_WIDTH) - 1) << ASRMCRx_OUTFIFO_THRESHOLD_SHIFT)
++#define ASRMCRx_OUTFIFO_THRESHOLD(v) (((v) << ASRMCRx_OUTFIFO_THRESHOLD_SHIFT) & ASRMCRx_OUTFIFO_THRESHOLD_MASK)
++#define ASRMCRx_RSYNIFx_SHIFT 11
++#define ASRMCRx_RSYNIFx_MASK (1 << ASRMCRx_RSYNIFx_SHIFT)
++#define ASRMCRx_RSYNIFx (1 << ASRMCRx_RSYNIFx_SHIFT)
++#define ASRMCRx_RSYNOFx_SHIFT 10
++#define ASRMCRx_RSYNOFx_MASK (1 << ASRMCRx_RSYNOFx_SHIFT)
++#define ASRMCRx_RSYNOFx (1 << ASRMCRx_RSYNOFx_SHIFT)
++#define ASRMCRx_INFIFO_THRESHOLD_WIDTH 6
++#define ASRMCRx_INFIFO_THRESHOLD_SHIFT 0
++#define ASRMCRx_INFIFO_THRESHOLD_MASK (((1 << ASRMCRx_INFIFO_THRESHOLD_WIDTH) - 1) << ASRMCRx_INFIFO_THRESHOLD_SHIFT)
++#define ASRMCRx_INFIFO_THRESHOLD(v) (((v) << ASRMCRx_INFIFO_THRESHOLD_SHIFT) & ASRMCRx_INFIFO_THRESHOLD_MASK)
++
++/*
++ * REG23 0xA4 REG_ASRFSTA
++ * REG25 0xAC REG_ASRFSTB
++ * REG27 0xB4 REG_ASRFSTC
++ */
++#define ASRFSTx_OAFx_SHIFT 23
++#define ASRFSTx_OAFx_MASK (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_OAFx (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_OUTPUT_FIFO_WIDTH 7
++#define ASRFSTx_OUTPUT_FIFO_SHIFT 12
++#define ASRFSTx_OUTPUT_FIFO_MASK (((1 << ASRFSTx_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTx_OUTPUT_FIFO_SHIFT)
++#define ASRFSTx_IAEx_SHIFT 11
++#define ASRFSTx_IAEx_MASK		(1 << ASRFSTx_IAEx_SHIFT)
++#define ASRFSTx_IAEx			(1 << ASRFSTx_IAEx_SHIFT)
++#define ASRFSTx_INPUT_FIFO_WIDTH 7
++#define ASRFSTx_INPUT_FIFO_SHIFT 0
++#define ASRFSTx_INPUT_FIFO_MASK ((1 << ASRFSTx_INPUT_FIFO_WIDTH) - 1)
++
++/* REG28 0xC0 & 0xC4 & 0xC8 REG_ASRMCR1x */
++#define ASRMCR1x_IWD_WIDTH 3
++#define ASRMCR1x_IWD_SHIFT 9
++#define ASRMCR1x_IWD_MASK (((1 << ASRMCR1x_IWD_WIDTH) - 1) << ASRMCR1x_IWD_SHIFT)
++#define ASRMCR1x_IWD(v) ((v) << ASRMCR1x_IWD_SHIFT)
++#define ASRMCR1x_IMSB_SHIFT 8
++#define ASRMCR1x_IMSB_MASK (1 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_IMSB_MSB (1 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_IMSB_LSB (0 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_OMSB_SHIFT 2
++#define ASRMCR1x_OMSB_MASK (1 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OMSB_MSB (1 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OMSB_LSB (0 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OSGN_SHIFT 1
++#define ASRMCR1x_OSGN_MASK (1 << ASRMCR1x_OSGN_SHIFT)
++#define ASRMCR1x_OSGN (1 << ASRMCR1x_OSGN_SHIFT)
++#define ASRMCR1x_OW16_SHIFT 0
++#define ASRMCR1x_OW16_MASK (1 << ASRMCR1x_OW16_SHIFT)
++#define ASRMCR1x_OW16(v) ((v) << ASRMCR1x_OW16_SHIFT)
++
++
++struct dma_block {
++ unsigned int index;
++ unsigned int length;
++ void *dma_vaddr;
++ dma_addr_t dma_paddr;
++ struct list_head queue;
++};
++
++struct asrc_p2p_params {
++ u32 p2p_rate; /* ASRC output rate for p2p */
++ enum asrc_word_width p2p_width; /* ASRC output wordwidth for p2p */
++};
++
++struct asrc_pair_params {
++ enum asrc_pair_index index;
++ struct completion input_complete;
++ struct completion output_complete;
++ struct completion lastperiod_complete;
++ struct dma_chan *input_dma_channel;
++ struct dma_chan *output_dma_channel;
++ unsigned int input_buffer_size;
++ unsigned int output_buffer_size;
++ unsigned int buffer_num;
++ unsigned int pair_hold;
++ unsigned int asrc_active;
++ unsigned int channel_nums;
++ struct dma_block input_dma_total;
++ struct dma_block input_dma[ASRC_DMA_BUFFER_NUM];
++ struct dma_block output_dma_total;
++ struct dma_block output_dma[ASRC_DMA_BUFFER_NUM];
++ struct dma_block output_last_period;
++ struct dma_async_tx_descriptor *desc_in;
++ struct dma_async_tx_descriptor *desc_out;
++ struct work_struct task_output_work;
++ unsigned int input_sg_nodes;
++ unsigned int output_sg_nodes;
++ struct scatterlist input_sg[4], output_sg[4];
++ enum asrc_word_width input_word_width;
++ enum asrc_word_width output_word_width;
++ u32 input_sample_rate;
++ u32 output_sample_rate;
++ u32 input_wm;
++ u32 output_wm;
++ unsigned int last_period_sample;
++};
++
++struct asrc_data {
++ struct asrc_pair asrc_pair[ASRC_PAIR_MAX_NUM];
++ struct proc_dir_entry *proc_asrc;
++ struct class *asrc_class;
++ struct regmap *regmap;
++ struct clk *asrc_clk;
++ struct clk *dma_clk;
++ unsigned long paddr;
++ unsigned int channel_bits;
++ int asrc_major;
++ int irq;
++ struct device *dev;
++};
++
++struct asrc_p2p_ops {
++ void (*asrc_p2p_start_conv)(enum asrc_pair_index);
++ void (*asrc_p2p_stop_conv)(enum asrc_pair_index);
++ int (*asrc_p2p_get_dma_request)(enum asrc_pair_index, bool);
++ u32 (*asrc_p2p_per_addr)(enum asrc_pair_index, bool);
++ int (*asrc_p2p_req_pair)(int, enum asrc_pair_index *index);
++ int (*asrc_p2p_config_pair)(struct asrc_config *config);
++ void (*asrc_p2p_release_pair)(enum asrc_pair_index);
++ void (*asrc_p2p_finish_conv)(enum asrc_pair_index);
++};
++
++extern void asrc_p2p_hook(struct asrc_p2p_ops *asrc_p2p_ct);
++
++extern int asrc_req_pair(int chn_num, enum asrc_pair_index *index);
++extern void asrc_release_pair(enum asrc_pair_index index);
++extern int asrc_config_pair(struct asrc_config *config);
++extern void asrc_get_status(struct asrc_status_flags *flags);
++extern void asrc_start_conv(enum asrc_pair_index index);
++extern void asrc_stop_conv(enum asrc_pair_index index);
++extern u32 asrc_get_per_addr(enum asrc_pair_index index, bool i);
++extern int asrc_get_dma_request(enum asrc_pair_index index, bool i);
++extern void asrc_finish_conv(enum asrc_pair_index index);
++extern int asrc_set_watermark(enum asrc_pair_index index,
++ u32 in_wm, u32 out_wm);
++
++#endif/* __MXC_ASRC_H__ */
+diff -Nur linux-3.14.40.orig/include/linux/mxcfb.h linux-3.14.40/include/linux/mxcfb.h
+--- linux-3.14.40.orig/include/linux/mxcfb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mxcfb.h 2015-05-01 14:58:06.091427001 -0500
+@@ -0,0 +1,46 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*
++ * @file linux/mxcfb.h
++ *
++ * @brief Global header file for the MXC Frame buffer
++ *
++ * @ingroup Framebuffer
++ */
++#ifndef __LINUX_MXCFB_H__
++#define __LINUX_MXCFB_H__
++
++#include <uapi/linux/mxcfb.h>
++
++extern struct fb_videomode mxcfb_modedb[];
++extern int mxcfb_modedb_sz;
++
++enum {
++ MXC_DISP_SPEC_DEV = 0,
++ MXC_DISP_DDC_DEV = 1,
++};
++
++enum {
++ MXCFB_REFRESH_OFF,
++ MXCFB_REFRESH_AUTO,
++ MXCFB_REFRESH_PARTIAL,
++};
++
++int mxcfb_set_refresh_mode(struct fb_info *fbi, int mode,
++ struct mxcfb_rect *update_region);
++int mxc_elcdif_frame_addr_setup(dma_addr_t phys);
++void mxcfb_elcdif_register_mode(const struct fb_videomode *modedb,
++ int num_modes, int dev_mode);
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/mxc_mlb.h linux-3.14.40/include/linux/mxc_mlb.h
+--- linux-3.14.40.orig/include/linux/mxc_mlb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mxc_mlb.h 2015-05-01 14:58:06.091427001 -0500
+@@ -0,0 +1,55 @@
++/*
++ * mxc_mlb.h
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _MXC_MLB_H
++#define _MXC_MLB_H
++
++/* define IOCTL command */
++#define MLB_DBG_RUNTIME _IO('S', 0x09)
++#define MLB_SET_FPS _IOW('S', 0x10, unsigned int)
++#define MLB_GET_VER _IOR('S', 0x11, unsigned long)
++#define MLB_SET_DEVADDR _IOR('S', 0x12, unsigned char)
++
++/*!
++ * set channel address for each logical channel
++ * the upper 16 bits are for the tx channel, the lower 16 bits for the rx channel
++ */
++#define MLB_CHAN_SETADDR _IOW('S', 0x13, unsigned int)
++#define MLB_CHAN_STARTUP _IO('S', 0x14)
++#define MLB_CHAN_SHUTDOWN _IO('S', 0x15)
++#define MLB_CHAN_GETEVENT _IOR('S', 0x16, unsigned long)
++
++#define MLB_SET_ISOC_BLKSIZE_188 _IO('S', 0x17)
++#define MLB_SET_ISOC_BLKSIZE_196 _IO('S', 0x18)
++#define MLB_SET_SYNC_QUAD _IOW('S', 0x19, unsigned int)
++#define MLB_IRQ_ENABLE _IO('S', 0x20)
++#define MLB_IRQ_DISABLE _IO('S', 0x21)
++
++/*!
++ * MLB event define
++ */
++enum {
++ MLB_EVT_TX_PROTO_ERR_CUR = 1 << 0,
++ MLB_EVT_TX_BRK_DETECT_CUR = 1 << 1,
++ MLB_EVT_TX_PROTO_ERR_PREV = 1 << 8,
++ MLB_EVT_TX_BRK_DETECT_PREV = 1 << 9,
++ MLB_EVT_RX_PROTO_ERR_CUR = 1 << 16,
++ MLB_EVT_RX_BRK_DETECT_CUR = 1 << 17,
++ MLB_EVT_RX_PROTO_ERR_PREV = 1 << 24,
++ MLB_EVT_RX_BRK_DETECT_PREV = 1 << 25,
++};
++
++
++#endif /* _MXC_MLB_H */
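The MLB_CHAN_SETADDR comment above describes how the two channel addresses are packed into the ioctl argument. A user-space sketch, assuming the header above is installed and that the driver reads the argument through the pointer (the helper name and parameters are made up):

#include <sys/ioctl.h>

/* Pack the tx/rx channel addresses as the comment describes and set them. */
static int demo_set_chan_addr(int fd, unsigned int tx_addr, unsigned int rx_addr)
{
	unsigned int chan_addr = (tx_addr << 16) | rx_addr;	/* tx in the upper 16 bits */

	return ioctl(fd, MLB_CHAN_SETADDR, &chan_addr);
}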
+diff -Nur linux-3.14.40.orig/include/linux/mxc_v4l2.h linux-3.14.40/include/linux/mxc_v4l2.h
+--- linux-3.14.40.orig/include/linux/mxc_v4l2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mxc_v4l2.h 2015-05-01 14:58:06.091427001 -0500
+@@ -0,0 +1,27 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @file linux/mxc_v4l2.h
++ *
++ * @brief MXC V4L2 private header file
++ *
++ * @ingroup MXC V4L2
++ */
++
++#ifndef __LINUX_MXC_V4L2_H__
++#define __LINUX_MXC_V4L2_H__
++
++#include <uapi/linux/mxc_v4l2.h>
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/mxc_vpu.h linux-3.14.40/include/linux/mxc_vpu.h
+--- linux-3.14.40.orig/include/linux/mxc_vpu.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/mxc_vpu.h 2015-05-01 14:58:06.095427001 -0500
+@@ -0,0 +1,118 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @defgroup VPU Video Processor Unit Driver
++ */
++
++/*!
++ * @file linux/mxc_vpu.h
++ *
++ * @brief VPU system initialization and file operation definition
++ *
++ * @ingroup VPU
++ */
++
++#ifndef __LINUX_MXC_VPU_H__
++#define __LINUX_MXC_VPU_H__
++
++#include <linux/fs.h>
++
++struct mxc_vpu_platform_data {
++ bool iram_enable;
++ int iram_size;
++ void (*reset) (void);
++ void (*pg) (int);
++};
++
++struct vpu_mem_desc {
++ u32 size;
++ dma_addr_t phy_addr;
++ u32 cpu_addr; /* cpu address to free the dma mem */
++ u32 virt_uaddr; /* virtual user space address */
++};
++
++#define VPU_IOC_MAGIC 'V'
++
++#define VPU_IOC_PHYMEM_ALLOC _IO(VPU_IOC_MAGIC, 0)
++#define VPU_IOC_PHYMEM_FREE _IO(VPU_IOC_MAGIC, 1)
++#define VPU_IOC_WAIT4INT _IO(VPU_IOC_MAGIC, 2)
++#define VPU_IOC_PHYMEM_DUMP _IO(VPU_IOC_MAGIC, 3)
++#define VPU_IOC_REG_DUMP _IO(VPU_IOC_MAGIC, 4)
++#define VPU_IOC_IRAM_SETTING _IO(VPU_IOC_MAGIC, 6)
++#define VPU_IOC_CLKGATE_SETTING _IO(VPU_IOC_MAGIC, 7)
++#define VPU_IOC_GET_WORK_ADDR _IO(VPU_IOC_MAGIC, 8)
++#define VPU_IOC_REQ_VSHARE_MEM _IO(VPU_IOC_MAGIC, 9)
++#define VPU_IOC_SYS_SW_RESET _IO(VPU_IOC_MAGIC, 11)
++#define VPU_IOC_GET_SHARE_MEM _IO(VPU_IOC_MAGIC, 12)
++#define VPU_IOC_QUERY_BITWORK_MEM _IO(VPU_IOC_MAGIC, 13)
++#define VPU_IOC_SET_BITWORK_MEM _IO(VPU_IOC_MAGIC, 14)
++#define VPU_IOC_PHYMEM_CHECK _IO(VPU_IOC_MAGIC, 15)
++#define VPU_IOC_LOCK_DEV _IO(VPU_IOC_MAGIC, 16)
++
++#define BIT_CODE_RUN 0x000
++#define BIT_CODE_DOWN 0x004
++#define BIT_INT_CLEAR 0x00C
++#define BIT_INT_STATUS 0x010
++#define BIT_CUR_PC 0x018
++#define BIT_INT_REASON 0x174
++
++#define MJPEG_PIC_STATUS_REG 0x3004
++#define MBC_SET_SUBBLK_EN 0x4A0
++
++#define BIT_WORK_CTRL_BUF_BASE 0x100
++#define BIT_WORK_CTRL_BUF_REG(i) (BIT_WORK_CTRL_BUF_BASE + i * 4)
++#define BIT_CODE_BUF_ADDR BIT_WORK_CTRL_BUF_REG(0)
++#define BIT_WORK_BUF_ADDR BIT_WORK_CTRL_BUF_REG(1)
++#define BIT_PARA_BUF_ADDR BIT_WORK_CTRL_BUF_REG(2)
++#define BIT_BIT_STREAM_CTRL BIT_WORK_CTRL_BUF_REG(3)
++#define BIT_FRAME_MEM_CTRL BIT_WORK_CTRL_BUF_REG(4)
++#define BIT_BIT_STREAM_PARAM BIT_WORK_CTRL_BUF_REG(5)
++
++#ifndef CONFIG_SOC_IMX6Q
++#define BIT_RESET_CTRL 0x11C
++#else
++#define BIT_RESET_CTRL 0x128
++#endif
++
++/* i could be 0, 1, 2, 3 */
++#define BIT_RD_PTR_BASE 0x120
++#define BIT_RD_PTR_REG(i) (BIT_RD_PTR_BASE + i * 8)
++#define BIT_WR_PTR_REG(i) (BIT_RD_PTR_BASE + i * 8 + 4)
++
++/* i could be 0, 1, 2, 3 */
++#define BIT_FRM_DIS_FLG_BASE (cpu_is_mx51() ? 0x150 : 0x140)
++#define BIT_FRM_DIS_FLG_REG(i) (BIT_FRM_DIS_FLG_BASE + i * 4)
++
++#define BIT_BUSY_FLAG 0x160
++#define BIT_RUN_COMMAND 0x164
++#define BIT_INT_ENABLE 0x170
++
++#define BITVAL_PIC_RUN 8
++
++#define VPU_SLEEP_REG_VALUE 10
++#define VPU_WAKE_REG_VALUE 11
++
++int vl2cc_init(u32 vl2cc_hw_base);
++void vl2cc_enable(void);
++void vl2cc_flush(void);
++void vl2cc_disable(void);
++void vl2cc_cleanup(void);
++
++int vl2cc_init(u32 vl2cc_hw_base);
++void vl2cc_enable(void);
++void vl2cc_flush(void);
++void vl2cc_disable(void);
++void vl2cc_cleanup(void);
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/phy.h linux-3.14.40/include/linux/phy.h
+--- linux-3.14.40.orig/include/linux/phy.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/phy.h 2015-05-01 14:58:06.095427001 -0500
+@@ -609,6 +609,7 @@
+ return phydev->drv->read_status(phydev);
+ }
+
++int genphy_config_init(struct phy_device *phydev);
+ int genphy_setup_forced(struct phy_device *phydev);
+ int genphy_restart_aneg(struct phy_device *phydev);
+ int genphy_config_aneg(struct phy_device *phydev);
+diff -Nur linux-3.14.40.orig/include/linux/pipe_fs_i.h linux-3.14.40/include/linux/pipe_fs_i.h
+--- linux-3.14.40.orig/include/linux/pipe_fs_i.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/pipe_fs_i.h 2015-05-01 14:58:06.111427001 -0500
+@@ -35,7 +35,7 @@
+ * @tmp_page: cached released page
+ * @readers: number of current readers of this pipe
+ * @writers: number of current writers of this pipe
+- * @files: number of struct file refering this pipe (protected by ->i_lock)
++ * @files: number of struct file referring this pipe (protected by ->i_lock)
+ * @waiting_writers: number of writers blocked waiting for room
+ * @r_counter: reader counter
+ * @w_counter: writer counter
+diff -Nur linux-3.14.40.orig/include/linux/pl320-ipc.h linux-3.14.40/include/linux/pl320-ipc.h
+--- linux-3.14.40.orig/include/linux/pl320-ipc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/pl320-ipc.h 2015-05-01 14:58:06.111427001 -0500
+@@ -0,0 +1,17 @@
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++int pl320_ipc_transmit(u32 *data);
++int pl320_ipc_register_notifier(struct notifier_block *nb);
++int pl320_ipc_unregister_notifier(struct notifier_block *nb);
+diff -Nur linux-3.14.40.orig/include/linux/platform_data/dma-imx.h linux-3.14.40/include/linux/platform_data/dma-imx.h
+--- linux-3.14.40.orig/include/linux/platform_data/dma-imx.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/platform_data/dma-imx.h 2015-05-01 14:58:06.131427001 -0500
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -40,6 +40,7 @@
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
++ IMX_DMATYPE_HDMI, /* HDMI Audio */
+ };
+
+ enum imx_dma_prio {
+@@ -49,9 +50,11 @@
+ };
+
+ struct imx_dma_data {
+- int dma_request; /* DMA request line */
++ int dma_request0; /* DMA request line */
++ int dma_request1;
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
++ void *data_addr1, *data_addr2;
+ };
+
+ static inline int imx_dma_is_ipu(struct dma_chan *chan)
+@@ -59,6 +62,11 @@
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+ }
+
++static inline int imx_dma_is_pxp(struct dma_chan *chan)
++{
++ return strstr(dev_name(chan->device->dev), "pxp") != NULL;
++}
++
+ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+ {
+ return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
+diff -Nur linux-3.14.40.orig/include/linux/power/imx6_usb_charger.h linux-3.14.40/include/linux/power/imx6_usb_charger.h
+--- linux-3.14.40.orig/include/linux/power/imx6_usb_charger.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/power/imx6_usb_charger.h 2015-05-01 14:58:06.131427001 -0500
+@@ -0,0 +1,80 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef __IMXUSB6_CHARGER_H
++#define __IMXUSB6_CHARGER_H
++
++#include <linux/power_supply.h>
++enum battery_charging_spec {
++ BATTERY_CHARGING_SPEC_NONE = 0,
++ BATTERY_CHARGING_SPEC_UNKNOWN,
++ BATTERY_CHARGING_SPEC_1_0,
++ BATTERY_CHARGING_SPEC_1_1,
++ BATTERY_CHARGING_SPEC_1_2,
++};
++
++struct usb_charger {
++ /* The anatop regmap */
++ struct regmap *anatop;
++ /* USB controller */
++ struct device *dev;
++ struct power_supply psy;
++ struct mutex lock;
++
++ /* Compliant with Battery Charging Specification version (if any) */
++ enum battery_charging_spec bc;
++
++ /* properties */
++ unsigned present:1;
++ unsigned online:1;
++ unsigned max_current;
++ int (*connect)(struct usb_charger *charger);
++ int (*disconnect)(struct usb_charger *charger);
++ int (*set_power)(struct usb_charger *charger, unsigned mA);
++
++ int (*detect)(struct usb_charger *charger);
++};
++
++#ifdef CONFIG_IMX6_USB_CHARGER
++extern void imx6_usb_remove_charger(struct usb_charger *charger);
++extern int imx6_usb_create_charger(struct usb_charger *charger,
++ const char *name);
++extern int imx6_usb_vbus_disconnect(struct usb_charger *charger);
++extern int imx6_usb_vbus_connect(struct usb_charger *charger);
++extern int imx6_usb_charger_detect_post(struct usb_charger *charger);
++#else
++void imx6_usb_remove_charger(struct usb_charger *charger)
++{
++
++}
++
++int imx6_usb_create_charger(struct usb_charger *charger,
++ const char *name)
++{
++ return -ENODEV;
++}
++
++int imx6_usb_vbus_disconnect(struct usb_charger *charger)
++{
++ return -ENODEV;
++}
++
++int imx6_usb_vbus_connect(struct usb_charger *charger)
++{
++ return -ENODEV;
++}
++int imx6_usb_charger_detect_post(struct usb_charger *charger)
++{
++ return -ENODEV;
++}
++#endif
++
++#endif /* __IMXUSB6_CHARGER_H */
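A hedged sketch of how a USB controller glue driver might drive this charger object across VBUS events follows; the "my_ci" structure and function names are illustrative assumptions, not part of the patch.

#include <linux/device.h>
#include <linux/power/imx6_usb_charger.h>

struct my_ci {
	struct usb_charger charger;	/* hypothetical controller state */
};

static int my_ci_probe_charger(struct my_ci *ci, struct regmap *anatop,
			       struct device *dev)
{
	ci->charger.anatop = anatop;
	ci->charger.dev = dev;
	return imx6_usb_create_charger(&ci->charger, "imx6_usb_charger");
}

static void my_ci_vbus_event(struct my_ci *ci, bool connected)
{
	if (connected) {
		imx6_usb_vbus_connect(&ci->charger);
		/* Run the post-detection step once the data lines settle. */
		imx6_usb_charger_detect_post(&ci->charger);
	} else {
		imx6_usb_vbus_disconnect(&ci->charger);
	}
}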
+diff -Nur linux-3.14.40.orig/include/linux/ptp_clock_kernel.h linux-3.14.40/include/linux/ptp_clock_kernel.h
+--- linux-3.14.40.orig/include/linux/ptp_clock_kernel.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/ptp_clock_kernel.h 2015-05-01 14:58:06.131427001 -0500
+@@ -49,7 +49,11 @@
+ * @n_alarm: The number of programmable alarms.
+ * @n_ext_ts: The number of external time stamp channels.
+ * @n_per_out: The number of programmable periodic signals.
++ * @n_pins: The number of programmable pins.
+ * @pps: Indicates whether the clock supports a PPS callback.
++ * @pin_config: Array of length 'n_pins'. If the number of
++ * programmable pins is nonzero, then drivers must
++ * allocate and initialize this array.
+ *
+ * clock operations
+ *
+@@ -70,6 +74,18 @@
+ * parameter request: Desired resource to enable or disable.
+ * parameter on: Caller passes one to enable or zero to disable.
+ *
++ * @verify: Confirm that a pin can perform a given function. The PTP
++ * Hardware Clock subsystem maintains the 'pin_config'
++ * array on behalf of the drivers, but the PHC subsystem
++ * assumes that every pin can perform every function. This
++ * hook gives drivers a way of telling the core about
++ * limitations on specific pins. This function must return
++ * zero if the function can be assigned to this pin, and
++ * nonzero otherwise.
++ * parameter pin: index of the pin in question.
++ * parameter func: the desired function to use.
++ * parameter chan: the function channel index to use.
++ *
+ * Drivers should embed their ptp_clock_info within a private
+ * structure, obtaining a reference to it using container_of().
+ *
+@@ -83,13 +99,17 @@
+ int n_alarm;
+ int n_ext_ts;
+ int n_per_out;
++ int n_pins;
+ int pps;
++ struct ptp_pin_desc *pin_config;
+ int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
+ int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
+ int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
+ int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
+ int (*enable)(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on);
++ int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
++ enum ptp_pin_function func, unsigned int chan);
+ };
+
+ struct ptp_clock;
+@@ -156,4 +176,17 @@
+
+ extern int ptp_clock_index(struct ptp_clock *ptp);
+
++/**
++ * ptp_find_pin() - obtain the pin index of a given auxiliary function
++ *
++ * @ptp: The clock obtained from ptp_clock_register().
++ * @func: One of the ptp_pin_function enumerated values.
++ * @chan: The particular functional channel to find.
++ * Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
++ * or -1 if the auxiliary function cannot be found.
++ */
++
++int ptp_find_pin(struct ptp_clock *ptp,
++ enum ptp_pin_function func, unsigned int chan);
++
+ #endif
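To illustrate the new pin interface described above, here is a minimal hedged sketch of a PHC driver advertising two programmable pins with a verify hook; the pin count, names and the "pin 0 is input-only" restriction are assumptions for the example.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

#define EXAMPLE_N_PINS 2	/* illustrative pin count */

static struct ptp_pin_desc example_pins[EXAMPLE_N_PINS];

static int example_verify(struct ptp_clock_info *ptp, unsigned int pin,
			  enum ptp_pin_function func, unsigned int chan)
{
	/* Suppose pin 0 is input-only: reject periodic output on it. */
	if (pin == 0 && func == PTP_PF_PEROUT)
		return -EOPNOTSUPP;
	return 0;
}

static struct ptp_clock_info example_caps = {
	.owner		= THIS_MODULE,
	.name		= "example_phc",
	.n_pins		= EXAMPLE_N_PINS,
	.pin_config	= example_pins,
	.verify		= example_verify,
};

static void example_init_pins(void)
{
	int i;

	/* The driver must allocate and initialize pin_config itself. */
	for (i = 0; i < EXAMPLE_N_PINS; i++) {
		snprintf(example_pins[i].name, sizeof(example_pins[i].name),
			 "example_pin%d", i);
		example_pins[i].index = i;
		example_pins[i].func = PTP_PF_NONE;
		example_pins[i].chan = 0;
	}
}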
+diff -Nur linux-3.14.40.orig/include/linux/pxp_device.h linux-3.14.40/include/linux/pxp_device.h
+--- linux-3.14.40.orig/include/linux/pxp_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/pxp_device.h 2015-05-01 14:58:06.131427001 -0500
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _PXP_DEVICE
++#define _PXP_DEVICE
++
++#include <linux/idr.h>
++#include <linux/hash.h>
++#include <uapi/linux/pxp_device.h>
++
++struct pxp_irq_info {
++ wait_queue_head_t waitq;
++ atomic_t irq_pending;
++ int hist_status;
++};
++
++struct pxp_buffer_hash {
++ struct hlist_head *hash_table;
++ u32 order;
++ spinlock_t hash_lock;
++};
++
++struct pxp_buf_obj {
++ uint32_t handle;
++
++ uint32_t size;
++ uint32_t mem_type;
++
++ unsigned long offset;
++ void *virtual;
++
++ struct hlist_node item;
++};
++
++struct pxp_chan_obj {
++ uint32_t handle;
++ struct dma_chan *chan;
++};
++
++/* File private data */
++struct pxp_file {
++ struct file *filp;
++
++ /* record allocated dma buffer */
++ struct idr buffer_idr;
++ spinlock_t buffer_lock;
++
++ /* record allocated dma channel */
++ struct idr channel_idr;
++ spinlock_t channel_lock;
++};
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/pxp_dma.h linux-3.14.40/include/linux/pxp_dma.h
+--- linux-3.14.40.orig/include/linux/pxp_dma.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/linux/pxp_dma.h 2015-05-01 14:58:06.131427001 -0500
+@@ -0,0 +1,72 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _PXP_DMA
++#define _PXP_DMA
++
++#include <uapi/linux/pxp_dma.h>
++
++struct pxp_tx_desc {
++ struct dma_async_tx_descriptor txd;
++ struct list_head tx_list;
++ struct list_head list;
++ int len;
++ union {
++ struct pxp_layer_param s0_param;
++ struct pxp_layer_param out_param;
++ struct pxp_layer_param ol_param;
++ } layer_param;
++ struct pxp_proc_data proc_data;
++
++ u32 hist_status; /* Histogram output status */
++
++ struct pxp_tx_desc *next;
++};
++
++struct pxp_channel {
++ struct dma_chan dma_chan;
++ dma_cookie_t completed; /* last completed cookie */
++ enum pxp_channel_status status;
++ void *client; /* Only one client per channel */
++ unsigned int n_tx_desc;
++ struct pxp_tx_desc *desc; /* allocated tx-descriptors */
++ struct list_head queue; /* queued tx-descriptors */
++ struct list_head list; /* track queued channel number */
++ spinlock_t lock; /* protects sg[0,1], queue,
++ * status, cookie, free_list
++ */
++ int active_buffer;
++ unsigned int eof_irq;
++ char eof_name[16]; /* EOF IRQ name for request_irq() */
++};
++
++#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
++#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
++
++void pxp_txd_ack(struct dma_async_tx_descriptor *txd,
++ struct pxp_channel *pxp_chan);
++
++#ifdef CONFIG_MXC_PXP_CLIENT_DEVICE
++int register_pxp_device(void);
++void unregister_pxp_device(void);
++#else
++int register_pxp_device(void) { return 0; }
++void unregister_pxp_device(void) {}
++#endif
++
++#endif
+diff -Nur linux-3.14.40.orig/include/linux/regulator/consumer.h linux-3.14.40/include/linux/regulator/consumer.h
+--- linux-3.14.40.orig/include/linux/regulator/consumer.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/regulator/consumer.h 2015-05-01 14:58:06.131427001 -0500
+@@ -2,6 +2,7 @@
+ * consumer.h -- SoC Regulator consumer support.
+ *
+ * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+@@ -105,6 +106,8 @@
+ #define REGULATOR_EVENT_FORCE_DISABLE 0x20
+ #define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+ #define REGULATOR_EVENT_DISABLE 0x80
++#define REGULATOR_EVENT_PRE_DISABLE 0x100
++#define REGULATOR_EVENT_ENABLE 0x200
+
+ struct regulator;
+
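The two new events let consumers react before a supply is cut and after it comes back. A hedged sketch of a consumer notifier handling them (the callback name is an assumption):

#include <linux/notifier.h>
#include <linux/regulator/consumer.h>

static int example_reg_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	switch (event) {
	case REGULATOR_EVENT_PRE_DISABLE:
		/* Quiesce the hardware before the supply actually drops. */
		break;
	case REGULATOR_EVENT_ENABLE:
		/* Supply is up again; safe to restore state. */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_reg_nb = {
	.notifier_call = example_reg_event,
};

/* Registered with: regulator_register_notifier(reg, &example_reg_nb); */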
+diff -Nur linux-3.14.40.orig/include/linux/reset.h linux-3.14.40/include/linux/reset.h
+--- linux-3.14.40.orig/include/linux/reset.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/reset.h 2015-05-01 14:58:06.131427001 -0500
+@@ -12,6 +12,13 @@
+ void reset_control_put(struct reset_control *rstc);
+ struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+
++#ifdef CONFIG_RESET_CONTROLLER
+ int device_reset(struct device *dev);
++#else
++static inline int device_reset(struct device *dev)
++{
++ return 0;
++}
++#endif /* CONFIG_RESET_CONTROLLER */
+
+ #endif
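The stub above means callers no longer need their own ifdefs: with CONFIG_RESET_CONTROLLER disabled, device_reset() simply returns 0. A short hedged usage sketch:

#include <linux/device.h>
#include <linux/reset.h>

/* Illustrative probe helper: safe to call unconditionally after this patch. */
static int example_probe_reset(struct device *dev)
{
	int ret = device_reset(dev);

	if (ret)
		dev_warn(dev, "reset failed: %d\n", ret);
	return ret;
}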
+diff -Nur linux-3.14.40.orig/include/linux/serial_core.h linux-3.14.40/include/linux/serial_core.h
+--- linux-3.14.40.orig/include/linux/serial_core.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/serial_core.h 2015-05-01 14:58:06.139427001 -0500
+@@ -285,6 +285,22 @@
+ /*
+ * Console helpers.
+ */
++struct earlycon_device {
++ struct console *con;
++ struct uart_port port;
++ char options[16]; /* e.g., 115200n8 */
++ unsigned int baud;
++};
++int setup_earlycon(char *buf, const char *match,
++ int (*setup)(struct earlycon_device *, const char *));
++
++#define EARLYCON_DECLARE(name, func) \
++static int __init name ## _setup_earlycon(char *buf) \
++{ \
++ return setup_earlycon(buf, __stringify(name), func); \
++} \
++early_param("earlycon", name ## _setup_earlycon);
++
+ struct uart_port *uart_get_console(struct uart_port *ports, int nr,
+ struct console *c);
+ void uart_parse_options(char *options, int *baud, int *parity, int *bits,
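As a hedged sketch of the earlycon hook added above, a UART driver could register an early console like this; the driver name and the empty write routine are placeholders, and the exact "earlycon=" option parsing depends on the backported setup_earlycon() implementation.

#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_core.h>

/* Placeholder output routine; a real driver would poke the UART FIFO here. */
static void example_early_write(struct console *con, const char *s,
				unsigned int n)
{
	/* ... write n bytes from s to the hardware ... */
}

static int __init example_early_console_setup(struct earlycon_device *device,
					      const char *opt)
{
	if (!device->port.membase && !device->port.iobase)
		return -ENODEV;

	device->con->write = example_early_write;
	return 0;
}
EARLYCON_DECLARE(example_uart, example_early_console_setup);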
+diff -Nur linux-3.14.40.orig/include/linux/skbuff.h linux-3.14.40/include/linux/skbuff.h
+--- linux-3.14.40.orig/include/linux/skbuff.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/skbuff.h 2015-05-01 14:58:06.143427001 -0500
+@@ -2038,7 +2038,7 @@
+ }
+
+ /**
+- * skb_frag_page - retrieve the page refered to by a paged fragment
++ * skb_frag_page - retrieve the page referred to by a paged fragment
+ * @frag: the paged fragment
+ *
+ * Returns the &struct page associated with @frag.
+diff -Nur linux-3.14.40.orig/include/linux/spi/spi.h linux-3.14.40/include/linux/spi/spi.h
+--- linux-3.14.40.orig/include/linux/spi/spi.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/spi/spi.h 2015-05-01 14:58:06.143427001 -0500
+@@ -234,7 +234,7 @@
+ * @mode_bits: flags understood by this controller driver
+ * @bits_per_word_mask: A mask indicating which values of bits_per_word are
+ * supported by the driver. Bit n indicates that a bits_per_word n+1 is
+- * suported. If set, the SPI core will reject any transfer with an
++ * supported. If set, the SPI core will reject any transfer with an
+ * unsupported bits_per_word. If not set, this value is simply ignored,
+ * and it's up to the individual driver to perform any validation.
+ * @min_speed_hz: Lowest supported transfer speed
+@@ -259,7 +259,7 @@
+ * @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+- * @xfer_completion: used by core tranfer_one_message()
++ * @xfer_completion: used by core transfer_one_message()
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+@@ -498,7 +498,7 @@
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+- * @tx_nbits: number of bits used for writting. If 0 the default
++ * @tx_nbits: number of bits used for writing. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @rx_nbits: number of bits used for reading. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+@@ -556,7 +556,7 @@
+ * by the results of previous messages and where the whole transaction
+ * ends when the chipselect goes intactive.
+ *
+- * When SPI can transfer in 1x,2x or 4x. It can get this tranfer information
++ * When SPI can transfer in 1x,2x or 4x. It can get this transfer information
+ * from device through @tx_nbits and @rx_nbits. In Bi-direction, these
+ * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
+ * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
+diff -Nur linux-3.14.40.orig/include/linux/syscalls.h linux-3.14.40/include/linux/syscalls.h
+--- linux-3.14.40.orig/include/linux/syscalls.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/syscalls.h 2015-05-01 14:58:06.147427001 -0500
+@@ -744,6 +744,9 @@
+ int newdfd, const char __user *newname, int flags);
+ asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
+ int newdfd, const char __user * newname);
++asmlinkage long sys_renameat2(int olddfd, const char __user *oldname,
++ int newdfd, const char __user *newname,
++ unsigned int flags);
+ asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
+diff -Nur linux-3.14.40.orig/include/linux/usb/chipidea.h linux-3.14.40/include/linux/usb/chipidea.h
+--- linux-3.14.40.orig/include/linux/usb/chipidea.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/usb/chipidea.h 2015-05-01 14:58:06.151427001 -0500
+@@ -18,6 +18,7 @@
+ unsigned long flags;
+ #define CI_HDRC_REGS_SHARED BIT(0)
+ #define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
++#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
+ #define CI_HDRC_DISABLE_STREAMING BIT(3)
+ /*
+ * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
+@@ -25,6 +26,7 @@
+ */
+ #define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
+ #define CI_HDRC_IMX28_WRITE_FIX BIT(5)
++#define CI_HDRC_IMX_EHCI_QUIRK BIT(6)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+@@ -42,4 +44,6 @@
+ /* Remove ci hdrc device */
+ void ci_hdrc_remove_device(struct platform_device *pdev);
+
++/* Get current available role */
++enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev);
+ #endif
+diff -Nur linux-3.14.40.orig/include/linux/usb/composite.h linux-3.14.40/include/linux/usb/composite.h
+--- linux-3.14.40.orig/include/linux/usb/composite.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/usb/composite.h 2015-05-01 14:58:06.151427001 -0500
+@@ -92,7 +92,7 @@
+ * @suspend: Notifies functions when the host stops sending USB traffic.
+ * @resume: Notifies functions when the host restarts USB traffic.
+ * @get_status: Returns function status as a reply to
+- * GetStatus() request when the recepient is Interface.
++ * GetStatus() request when the recipient is Interface.
+ * @func_suspend: callback to be called when
+ * SetFeature(FUNCTION_SUSPEND) is reseived
+ *
+diff -Nur linux-3.14.40.orig/include/linux/usb/phy.h linux-3.14.40/include/linux/usb/phy.h
+--- linux-3.14.40.orig/include/linux/usb/phy.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/linux/usb/phy.h 2015-05-01 14:58:06.155427001 -0500
+@@ -111,11 +111,23 @@
+ int (*set_suspend)(struct usb_phy *x,
+ int suspend);
+
++ /*
++ * Set wakeup enable for PHY, in that case, the PHY can be
++ * waken up from suspend status due to external events,
++	 * woken up from suspend due to external events,
++ */
++ int (*set_wakeup)(struct usb_phy *x, bool enabled);
++
+ /* notify phy connect status change */
+ int (*notify_connect)(struct usb_phy *x,
+ enum usb_device_speed speed);
+ int (*notify_disconnect)(struct usb_phy *x,
+ enum usb_device_speed speed);
++ int (*notify_suspend)(struct usb_phy *x,
++ enum usb_device_speed speed);
++ int (*notify_resume)(struct usb_phy *x,
++ enum usb_device_speed speed);
++
+ };
+
+ /**
+@@ -265,6 +277,15 @@
+ }
+
+ static inline int
++usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
++{
++ if (x && x->set_wakeup)
++ return x->set_wakeup(x, enabled);
++ else
++ return 0;
++}
++
++static inline int
+ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
+ {
+ if (x && x->notify_connect)
+@@ -281,6 +302,24 @@
+ else
+ return 0;
+ }
++
++static inline int usb_phy_notify_suspend
++ (struct usb_phy *x, enum usb_device_speed speed)
++{
++ if (x && x->notify_suspend)
++ return x->notify_suspend(x, speed);
++ else
++ return 0;
++}
++
++static inline int usb_phy_notify_resume
++ (struct usb_phy *x, enum usb_device_speed speed)
++{
++ if (x && x->notify_resume)
++ return x->notify_resume(x, speed);
++ else
++ return 0;
++}
+
+ /* notifiers */
+ static inline int
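A hedged sketch of how a host controller glue might use the new wrappers around bus suspend and resume (the function and parameter names are illustrative):

#include <linux/usb/phy.h>

static int example_bus_suspend(struct usb_phy *phy,
			       enum usb_device_speed speed)
{
	int ret;

	/* Let the PHY wake the controller on vbus/ID/linestate changes. */
	ret = usb_phy_set_wakeup(phy, true);
	if (ret)
		return ret;

	return usb_phy_notify_suspend(phy, speed);
}

static int example_bus_resume(struct usb_phy *phy,
			      enum usb_device_speed speed)
{
	usb_phy_set_wakeup(phy, false);
	return usb_phy_notify_resume(phy, speed);
}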
+diff -Nur linux-3.14.40.orig/include/net/cfg80211.h linux-3.14.40/include/net/cfg80211.h
+--- linux-3.14.40.orig/include/net/cfg80211.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/net/cfg80211.h 2015-05-01 14:58:06.167427001 -0500
+@@ -1729,7 +1729,7 @@
+ u8 *ssid;
+ size_t ssid_len;
+ enum nl80211_auth_type auth_type;
+- u8 *ie;
++ const u8 *ie;
+ size_t ie_len;
+ bool privacy;
+ enum nl80211_mfp mfp;
+@@ -3888,6 +3888,7 @@
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the IBSS joined
++ * @channel: the channel of the IBSS joined
+ * @gfp: allocation flags
+ *
+ * This function notifies cfg80211 that the device joined an IBSS or
+@@ -3897,7 +3898,8 @@
+ * with the locally generated beacon -- this guarantees that there is
+ * always a scan result for this IBSS. cfg80211 will handle the rest.
+ */
+-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
++void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel, gfp_t gfp);
+
+ /**
+ * cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
+diff -Nur linux-3.14.40.orig/include/net/mac80211.h linux-3.14.40/include/net/mac80211.h
+--- linux-3.14.40.orig/include/net/mac80211.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/net/mac80211.h 2015-05-01 14:58:06.191427001 -0500
+@@ -1895,7 +1895,7 @@
+ *
+ * Driver informs U-APSD client support by enabling
+ * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
+- * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
++ * uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
+ * Nullfunc frames and stay awake until the service period has ended. To
+ * utilize U-APSD, dynamic powersave is disabled for voip AC and all frames
+ * from that AC are transmitted with powersave enabled.
+@@ -2101,7 +2101,7 @@
+ * with the number of frames to be released and which TIDs they are
+ * to come from. In this case, the driver is responsible for setting
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
+- * to help the @more_data paramter is passed to tell the driver if
++ * to help the @more_data parameter is passed to tell the driver if
+ * there is more data on other TIDs -- the TIDs to release frames
+ * from are ignored since mac80211 doesn't know how many frames the
+ * buffers for those TIDs contain.
+@@ -2616,6 +2616,7 @@
+ * of queues to flush, which is useful if different virtual interfaces
+ * use different hardware queues; it may also indicate all queues.
+ * If the parameter @drop is set to %true, pending frames may be dropped.
++ * Note that vif can be NULL.
+ * The callback can sleep.
+ *
+ * @channel_switch: Drivers that need (or want) to offload the channel
+@@ -2662,7 +2663,7 @@
+ * parameters. In the case where the driver buffers some frames for
+ * sleeping stations mac80211 will use this callback to tell the driver
+ * to release some frames, either for PS-poll or uAPSD.
+- * Note that if the @more_data paramter is %false the driver must check
++ * Note that if the @more_data parameter is %false the driver must check
+ * if there are more frames on the given TIDs, and if there are more than
+ * the frames being released then it must still set the more-data bit in
+ * the frame. If the @more_data parameter is %true, then of course the
+@@ -2878,7 +2879,8 @@
+ struct netlink_callback *cb,
+ void *data, int len);
+ #endif
+- void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
++ void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ u32 queues, bool drop);
+ void (*channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+ int (*napi_poll)(struct ieee80211_hw *hw, int budget);
+diff -Nur linux-3.14.40.orig/include/net/rtnetlink.h linux-3.14.40/include/net/rtnetlink.h
+--- linux-3.14.40.orig/include/net/rtnetlink.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/net/rtnetlink.h 2015-05-01 14:58:06.199427001 -0500
+@@ -140,7 +140,7 @@
+ struct nlattr *tb[]);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+
+-extern const struct nla_policy ifla_policy[IFLA_MAX+1];
++int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
+
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+
+diff -Nur linux-3.14.40.orig/include/net/tso.h linux-3.14.40/include/net/tso.h
+--- linux-3.14.40.orig/include/net/tso.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/net/tso.h 2015-05-01 14:58:06.203427001 -0500
+@@ -0,0 +1,20 @@
++#ifndef _TSO_H
++#define _TSO_H
++
++#include <net/ip.h>
++
++struct tso_t {
++ int next_frag_idx;
++ void *data;
++ size_t size;
++ u16 ip_id;
++ u32 tcp_seq;
++};
++
++int tso_count_descs(struct sk_buff *skb);
++void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
++ int size, bool is_last);
++void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
++void tso_start(struct sk_buff *skb, struct tso_t *tso);
++
++#endif /* _TSO_H */
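The intended use of these helpers is a driver TX loop that emits one header plus a run of payload chunks per segment; tso_count_descs() gives the worst-case descriptor count so the ring can be checked up front. Below is a hedged sketch only: post_hdr()/post_data() stand in for the driver's real descriptor-filling code.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

static void example_xmit_tso(struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int mss = skb_shinfo(skb)->gso_size;
	int total = skb->len - hdr_len;
	char hdr[256];
	struct tso_t tso;

	tso_start(skb, &tso);

	while (total > 0) {
		int size = min(total, mss);

		total -= size;
		/* Per-segment L3/L4 headers go into a bounce buffer. */
		tso_build_hdr(skb, hdr, &tso, size, total == 0);
		/* post_hdr(hdr, hdr_len); */

		/* Payload is consumed one contiguous chunk at a time. */
		while (size > 0) {
			int chunk = min_t(int, size, tso.size);

			/* post_data(tso.data, chunk); */
			tso_build_data(skb, &tso, chunk);
			size -= chunk;
		}
	}
}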
+diff -Nur linux-3.14.40.orig/include/sound/wm8962.h linux-3.14.40/include/sound/wm8962.h
+--- linux-3.14.40.orig/include/sound/wm8962.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/sound/wm8962.h 2015-05-01 14:58:06.219427001 -0500
+@@ -55,6 +55,9 @@
+ * in a DC measurement configuration.
+ */
+ bool in4_dc_measure;
++
++ /* MCLK for wm8962 */
++ struct clk *codec_mclk;
+ };
+
+ #endif
+diff -Nur linux-3.14.40.orig/include/trace/events/cpufreq_interactive.h linux-3.14.40/include/trace/events/cpufreq_interactive.h
+--- linux-3.14.40.orig/include/trace/events/cpufreq_interactive.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/trace/events/cpufreq_interactive.h 2015-05-01 14:58:06.219427001 -0500
+@@ -0,0 +1,112 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM cpufreq_interactive
++
++#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_CPUFREQ_INTERACTIVE_H
++
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(set,
++ TP_PROTO(u32 cpu_id, unsigned long targfreq,
++ unsigned long actualfreq),
++ TP_ARGS(cpu_id, targfreq, actualfreq),
++
++ TP_STRUCT__entry(
++ __field( u32, cpu_id )
++ __field(unsigned long, targfreq )
++ __field(unsigned long, actualfreq )
++ ),
++
++ TP_fast_assign(
++ __entry->cpu_id = (u32) cpu_id;
++ __entry->targfreq = targfreq;
++ __entry->actualfreq = actualfreq;
++ ),
++
++ TP_printk("cpu=%u targ=%lu actual=%lu",
++ __entry->cpu_id, __entry->targfreq,
++ __entry->actualfreq)
++);
++
++DEFINE_EVENT(set, cpufreq_interactive_setspeed,
++ TP_PROTO(u32 cpu_id, unsigned long targfreq,
++ unsigned long actualfreq),
++ TP_ARGS(cpu_id, targfreq, actualfreq)
++);
++
++DECLARE_EVENT_CLASS(loadeval,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, cpu_id )
++ __field(unsigned long, load )
++ __field(unsigned long, curtarg )
++ __field(unsigned long, curactual )
++ __field(unsigned long, newtarg )
++ ),
++
++ TP_fast_assign(
++ __entry->cpu_id = cpu_id;
++ __entry->load = load;
++ __entry->curtarg = curtarg;
++ __entry->curactual = curactual;
++ __entry->newtarg = newtarg;
++ ),
++
++ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
++ __entry->cpu_id, __entry->load, __entry->curtarg,
++ __entry->curactual, __entry->newtarg)
++);
++
++DEFINE_EVENT(loadeval, cpufreq_interactive_target,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
++);
++
++DEFINE_EVENT(loadeval, cpufreq_interactive_already,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
++);
++
++DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
++ TP_PROTO(unsigned long cpu_id, unsigned long load,
++ unsigned long curtarg, unsigned long curactual,
++ unsigned long newtarg),
++ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
++);
++
++TRACE_EVENT(cpufreq_interactive_boost,
++ TP_PROTO(const char *s),
++ TP_ARGS(s),
++ TP_STRUCT__entry(
++ __string(s, s)
++ ),
++ TP_fast_assign(
++ __assign_str(s, s);
++ ),
++ TP_printk("%s", __get_str(s))
++);
++
++TRACE_EVENT(cpufreq_interactive_unboost,
++ TP_PROTO(const char *s),
++ TP_ARGS(s),
++ TP_STRUCT__entry(
++ __string(s, s)
++ ),
++ TP_fast_assign(
++ __assign_str(s, s);
++ ),
++ TP_printk("%s", __get_str(s))
++);
++
++#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff -Nur linux-3.14.40.orig/include/trace/events/thermal.h linux-3.14.40/include/trace/events/thermal.h
+--- linux-3.14.40.orig/include/trace/events/thermal.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/trace/events/thermal.h 2015-05-01 14:58:06.219427001 -0500
+@@ -0,0 +1,83 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM thermal
++
++#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_THERMAL_H
++
++#include <linux/thermal.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(thermal_temperature,
++
++ TP_PROTO(struct thermal_zone_device *tz),
++
++ TP_ARGS(tz),
++
++ TP_STRUCT__entry(
++ __string(thermal_zone, tz->type)
++ __field(int, id)
++ __field(int, temp_prev)
++ __field(int, temp)
++ ),
++
++ TP_fast_assign(
++ __assign_str(thermal_zone, tz->type);
++ __entry->id = tz->id;
++ __entry->temp_prev = tz->last_temperature;
++ __entry->temp = tz->temperature;
++ ),
++
++ TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
++ __get_str(thermal_zone), __entry->id, __entry->temp_prev,
++ __entry->temp)
++);
++
++TRACE_EVENT(cdev_update,
++
++ TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
++
++ TP_ARGS(cdev, target),
++
++ TP_STRUCT__entry(
++ __string(type, cdev->type)
++ __field(unsigned long, target)
++ ),
++
++ TP_fast_assign(
++ __assign_str(type, cdev->type);
++ __entry->target = target;
++ ),
++
++ TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
++);
++
++TRACE_EVENT(thermal_zone_trip,
++
++ TP_PROTO(struct thermal_zone_device *tz, int trip,
++ enum thermal_trip_type trip_type),
++
++ TP_ARGS(tz, trip, trip_type),
++
++ TP_STRUCT__entry(
++ __string(thermal_zone, tz->type)
++ __field(int, id)
++ __field(int, trip)
++ __field(enum thermal_trip_type, trip_type)
++ ),
++
++ TP_fast_assign(
++ __assign_str(thermal_zone, tz->type);
++ __entry->id = tz->id;
++ __entry->trip = trip;
++ __entry->trip_type = trip_type;
++ ),
++
++ TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
++ __get_str(thermal_zone), __entry->id, __entry->trip,
++ __entry->trip_type)
++);
++
++#endif /* _TRACE_THERMAL_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff -Nur linux-3.14.40.orig/include/uapi/linux/ipu.h linux-3.14.40/include/uapi/linux/ipu.h
+--- linux-3.14.40.orig/include/uapi/linux/ipu.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/ipu.h 2015-05-01 14:58:06.219427001 -0500
+@@ -0,0 +1,282 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
++ */
++/*!
++ * @file uapi/linux/ipu.h
++ *
++ * @brief This file contains the IPU driver API declarations.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef __ASM_ARCH_IPU_H__
++#define __ASM_ARCH_IPU_H__
++
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++#ifndef __KERNEL__
++#ifndef __cplusplus
++typedef unsigned char bool;
++#endif
++#define irqreturn_t int
++#define dma_addr_t int
++#define uint32_t unsigned int
++#define uint16_t unsigned short
++#define uint8_t unsigned char
++#define u32 unsigned int
++#define u8 unsigned char
++#define __u32 u32
++#endif
++
++/*!
++ * Enumeration of IPU rotation modes
++ */
++typedef enum {
++ /* Note the enum values correspond to BAM value */
++ IPU_ROTATE_NONE = 0,
++ IPU_ROTATE_VERT_FLIP = 1,
++ IPU_ROTATE_HORIZ_FLIP = 2,
++ IPU_ROTATE_180 = 3,
++ IPU_ROTATE_90_RIGHT = 4,
++ IPU_ROTATE_90_RIGHT_VFLIP = 5,
++ IPU_ROTATE_90_RIGHT_HFLIP = 6,
++ IPU_ROTATE_90_LEFT = 7,
++} ipu_rotate_mode_t;
++
++/*!
++ * Enumeration of VDI MOTION select
++ */
++typedef enum {
++ MED_MOTION = 0,
++ LOW_MOTION = 1,
++ HIGH_MOTION = 2,
++} ipu_motion_sel;
++
++/*!
++ * Enumeration of DI ports for ADC.
++ */
++typedef enum {
++ DISP0,
++ DISP1,
++ DISP2,
++ DISP3
++} display_port_t;
++
++/* IPU Pixel format definitions */
++/* Four-character-code (FOURCC) */
++#define fourcc(a, b, c, d)\
++ (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
++
++/*!
++ * @name IPU Pixel Formats
++ *
++ * Pixel formats are defined with ASCII FOURCC code. The pixel format codes are
++ * the same used by V4L2 API.
++ */
++
++/*! @{ */
++/*! @name Generic or Raw Data Formats */
++/*! @{ */
++#define IPU_PIX_FMT_GENERIC fourcc('I', 'P', 'U', '0') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_GENERIC_32 fourcc('I', 'P', 'U', '1') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_GENERIC_16 fourcc('I', 'P', 'U', '2') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_LVDS666 fourcc('L', 'V', 'D', '6') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_LVDS888 fourcc('L', 'V', 'D', '8') /*!< IPU Generic Data */
++/*! @} */
++/*! @name RGB Formats */
++/*! @{ */
++#define IPU_PIX_FMT_RGB332 fourcc('R', 'G', 'B', '1') /*!< 8 RGB-3-3-2 */
++#define IPU_PIX_FMT_RGB555 fourcc('R', 'G', 'B', 'O') /*!< 16 RGB-5-5-5 */
++#define IPU_PIX_FMT_RGB565 fourcc('R', 'G', 'B', 'P') /*!< 16 RGB-5-6-5 */
++#define IPU_PIX_FMT_RGB666 fourcc('R', 'G', 'B', '6') /*!< 18 RGB-6-6-6 */
++#define IPU_PIX_FMT_BGR666 fourcc('B', 'G', 'R', '6') /*!< 18 BGR-6-6-6 */
++#define IPU_PIX_FMT_BGR24 fourcc('B', 'G', 'R', '3') /*!< 24 BGR-8-8-8 */
++#define IPU_PIX_FMT_RGB24 fourcc('R', 'G', 'B', '3') /*!< 24 RGB-8-8-8 */
++#define IPU_PIX_FMT_GBR24 fourcc('G', 'B', 'R', '3') /*!< 24 GBR-8-8-8 */
++#define IPU_PIX_FMT_BGR32 fourcc('B', 'G', 'R', '4') /*!< 32 BGR-8-8-8-8 */
++#define IPU_PIX_FMT_BGRA32 fourcc('B', 'G', 'R', 'A') /*!< 32 BGR-8-8-8-8 */
++#define IPU_PIX_FMT_RGB32 fourcc('R', 'G', 'B', '4') /*!< 32 RGB-8-8-8-8 */
++#define IPU_PIX_FMT_RGBA32 fourcc('R', 'G', 'B', 'A') /*!< 32 RGB-8-8-8-8 */
++#define IPU_PIX_FMT_ABGR32 fourcc('A', 'B', 'G', 'R') /*!< 32 ABGR-8-8-8-8 */
++/*! @} */
++/*! @name YUV Interleaved Formats */
++/*! @{ */
++#define IPU_PIX_FMT_YUYV fourcc('Y', 'U', 'Y', 'V') /*!< 16 YUV 4:2:2 */
++#define IPU_PIX_FMT_UYVY fourcc('U', 'Y', 'V', 'Y') /*!< 16 YUV 4:2:2 */
++#define IPU_PIX_FMT_YVYU fourcc('Y', 'V', 'Y', 'U') /*!< 16 YVYU 4:2:2 */
++#define IPU_PIX_FMT_VYUY fourcc('V', 'Y', 'U', 'Y') /*!< 16 VYUY 4:2:2 */
++#define IPU_PIX_FMT_Y41P fourcc('Y', '4', '1', 'P') /*!< 12 YUV 4:1:1 */
++#define IPU_PIX_FMT_YUV444 fourcc('Y', '4', '4', '4') /*!< 24 YUV 4:4:4 */
++#define IPU_PIX_FMT_VYU444 fourcc('V', '4', '4', '4') /*!< 24 VYU 4:4:4 */
++/* two planes -- one Y, one Cb + Cr interleaved */
++#define IPU_PIX_FMT_NV12 fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
++/* two planes -- 12 tiled Y/CbCr 4:2:0 */
++#define IPU_PIX_FMT_TILED_NV12 fourcc('T', 'N', 'V', 'P')
++#define IPU_PIX_FMT_TILED_NV12F fourcc('T', 'N', 'V', 'F')
++
++/*! @} */
++/*! @name YUV Planar Formats */
++/*! @{ */
++#define IPU_PIX_FMT_GREY fourcc('G', 'R', 'E', 'Y') /*!< 8 Greyscale */
++#define IPU_PIX_FMT_YVU410P fourcc('Y', 'V', 'U', '9') /*!< 9 YVU 4:1:0 */
++#define IPU_PIX_FMT_YUV410P fourcc('Y', 'U', 'V', '9') /*!< 9 YUV 4:1:0 */
++#define IPU_PIX_FMT_YVU420P fourcc('Y', 'V', '1', '2') /*!< 12 YVU 4:2:0 */
++#define IPU_PIX_FMT_YUV420P fourcc('I', '4', '2', '0') /*!< 12 YUV 4:2:0 */
++#define IPU_PIX_FMT_YUV420P2 fourcc('Y', 'U', '1', '2') /*!< 12 YUV 4:2:0 */
++#define IPU_PIX_FMT_YVU422P fourcc('Y', 'V', '1', '6') /*!< 16 YVU 4:2:2 */
++#define IPU_PIX_FMT_YUV422P fourcc('4', '2', '2', 'P') /*!< 16 YUV 4:2:2 */
++/* non-interleaved 4:4:4 */
++#define IPU_PIX_FMT_YUV444P fourcc('4', '4', '4', 'P') /*!< 24 YUV 4:4:4 */
++/*! @} */
++#define IPU_PIX_FMT_TILED_NV12_MBALIGN (16)
++#define TILED_NV12_FRAME_SIZE(w, h) \
++ (ALIGN((w) * (h), SZ_4K) + ALIGN((w) * (h) / 2, SZ_4K))
++/* IPU device */
++typedef enum {
++ RGB_CS,
++ YUV_CS,
++ NULL_CS
++} cs_t;
++
++struct ipu_pos {
++ u32 x;
++ u32 y;
++};
++
++struct ipu_crop {
++ struct ipu_pos pos;
++ u32 w;
++ u32 h;
++};
++
++struct ipu_deinterlace {
++ bool enable;
++ u8 motion; /*see ipu_motion_sel*/
++#define IPU_DEINTERLACE_FIELD_TOP 0
++#define IPU_DEINTERLACE_FIELD_BOTTOM 1
++#define IPU_DEINTERLACE_FIELD_MASK \
++ (IPU_DEINTERLACE_FIELD_TOP | IPU_DEINTERLACE_FIELD_BOTTOM)
++ /* deinterlace frame rate double flags */
++#define IPU_DEINTERLACE_RATE_EN 0x80
++#define IPU_DEINTERLACE_RATE_FRAME1 0x40
++#define IPU_DEINTERLACE_RATE_MASK \
++ (IPU_DEINTERLACE_RATE_EN | IPU_DEINTERLACE_RATE_FRAME1)
++#define IPU_DEINTERLACE_MAX_FRAME 2
++ u8 field_fmt;
++};
++
++struct ipu_input {
++ u32 width;
++ u32 height;
++ u32 format;
++ struct ipu_crop crop;
++ dma_addr_t paddr;
++
++ struct ipu_deinterlace deinterlace;
++ dma_addr_t paddr_n; /*valid when deinterlace enable*/
++};
++
++struct ipu_alpha {
++#define IPU_ALPHA_MODE_GLOBAL 0
++#define IPU_ALPHA_MODE_LOCAL 1
++ u8 mode;
++ u8 gvalue; /* 0~255 */
++ dma_addr_t loc_alp_paddr;
++};
++
++struct ipu_colorkey {
++ bool enable;
++ u32 value; /* RGB 24bit */
++};
++
++struct ipu_overlay {
++ u32 width;
++ u32 height;
++ u32 format;
++ struct ipu_crop crop;
++ struct ipu_alpha alpha;
++ struct ipu_colorkey colorkey;
++ dma_addr_t paddr;
++};
++
++struct ipu_output {
++ u32 width;
++ u32 height;
++ u32 format;
++ u8 rotate;
++ struct ipu_crop crop;
++ dma_addr_t paddr;
++};
++
++struct ipu_task {
++ struct ipu_input input;
++ struct ipu_output output;
++
++ bool overlay_en;
++ struct ipu_overlay overlay;
++
++#define IPU_TASK_PRIORITY_NORMAL 0
++#define IPU_TASK_PRIORITY_HIGH 1
++ u8 priority;
++
++#define IPU_TASK_ID_ANY 0
++#define IPU_TASK_ID_VF 1
++#define IPU_TASK_ID_PP 2
++#define IPU_TASK_ID_MAX 3
++ u8 task_id;
++
++ int timeout;
++};
++
++enum {
++ IPU_CHECK_OK = 0,
++ IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN = 0x1,
++ IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN = 0x2,
++ IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN = 0x4,
++ IPU_CHECK_ERR_MIN,
++ IPU_CHECK_ERR_INPUT_CROP,
++ IPU_CHECK_ERR_OUTPUT_CROP,
++ IPU_CHECK_ERR_OVERLAY_CROP,
++ IPU_CHECK_ERR_INPUT_OVER_LIMIT,
++ IPU_CHECK_ERR_OV_OUT_NO_FIT,
++ IPU_CHECK_ERR_OVERLAY_WITH_VDI,
++ IPU_CHECK_ERR_PROC_NO_NEED,
++ IPU_CHECK_ERR_SPLIT_INPUTW_OVER,
++ IPU_CHECK_ERR_SPLIT_INPUTH_OVER,
++ IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER,
++ IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER,
++ IPU_CHECK_ERR_SPLIT_WITH_ROT,
++ IPU_CHECK_ERR_NOT_SUPPORT,
++ IPU_CHECK_ERR_NOT16ALIGN,
++ IPU_CHECK_ERR_W_DOWNSIZE_OVER,
++ IPU_CHECK_ERR_H_DOWNSIZE_OVER,
++};
++
++/* IOCTL commands */
++#define IPU_CHECK_TASK _IOWR('I', 0x1, struct ipu_task)
++#define IPU_QUEUE_TASK _IOW('I', 0x2, struct ipu_task)
++#define IPU_ALLOC _IOWR('I', 0x3, int)
++#define IPU_FREE _IOW('I', 0x4, int)
++
++#endif
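As a hedged userspace sketch of the ioctl interface declared above, a program could queue a single colour-space conversion like this; the "/dev/mxc_ipu" node name and the physical buffer addresses are assumptions outside this header.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ipu.h>

int example_ipu_convert(dma_addr_t in_paddr, dma_addr_t out_paddr)
{
	struct ipu_task task;
	int fd, ret;

	memset(&task, 0, sizeof(task));
	task.input.width   = 640;
	task.input.height  = 480;
	task.input.format  = IPU_PIX_FMT_YUV420P;
	task.input.paddr   = in_paddr;
	task.output.width  = 640;
	task.output.height = 480;
	task.output.format = IPU_PIX_FMT_RGB565;
	task.output.rotate = IPU_ROTATE_NONE;
	task.output.paddr  = out_paddr;

	fd = open("/dev/mxc_ipu", O_RDWR);
	if (fd < 0)
		return -1;

	/* Optionally validate first; the driver reports IPU_CHECK_* codes. */
	ioctl(fd, IPU_CHECK_TASK, &task);

	ret = ioctl(fd, IPU_QUEUE_TASK, &task);
	close(fd);
	return ret;
}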
+diff -Nur linux-3.14.40.orig/include/uapi/linux/isl29023.h linux-3.14.40/include/uapi/linux/isl29023.h
+--- linux-3.14.40.orig/include/uapi/linux/isl29023.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/isl29023.h 2015-05-01 14:58:06.223427001 -0500
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __UAPI_LINUX_ISL29023_H__
++#define __UAPI_LINUX_ISL29023_H__
++
++#include <linux/types.h>
++
++#define ISL29023_PD_MODE 0x0
++#define ISL29023_ALS_ONCE_MODE 0x1
++#define ISL29023_IR_ONCE_MODE 0x2
++#define ISL29023_ALS_CONT_MODE 0x5
++#define ISL29023_IR_CONT_MODE 0x6
++
++#define ISL29023_INT_PERSISTS_1 0x0
++#define ISL29023_INT_PERSISTS_4 0x1
++#define ISL29023_INT_PERSISTS_8 0x2
++#define ISL29023_INT_PERSISTS_16 0x3
++
++#define ISL29023_RES_16 0x0
++#define ISL29023_RES_12 0x1
++#define ISL29023_RES_8 0x2
++#define ISL29023_RES_4 0x3
++
++#define ISL29023_RANGE_1K 0x0
++#define ISL29023_RANGE_4K 0x1
++#define ISL29023_RANGE_16K 0x2
++#define ISL29023_RANGE_64K 0x3
++
++#endif
+diff -Nur linux-3.14.40.orig/include/uapi/linux/Kbuild linux-3.14.40/include/uapi/linux/Kbuild
+--- linux-3.14.40.orig/include/uapi/linux/Kbuild 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/uapi/linux/Kbuild 2015-05-01 14:58:06.223427001 -0500
+@@ -226,6 +226,7 @@
+ header-y += kvm_para.h
+ endif
+
++header-y += ipu.h
+ header-y += l2tp.h
+ header-y += libc-compat.h
+ header-y += limits.h
+@@ -253,6 +254,9 @@
+ header-y += msdos_fs.h
+ header-y += msg.h
+ header-y += mtio.h
++header-y += mxcfb.h
++header-y += mxc_asrc.h
++header-y += mxc_v4l2.h
+ header-y += n_r3964.h
+ header-y += nbd.h
+ header-y += ncp.h
+@@ -318,6 +322,8 @@
+ header-y += prctl.h
+ header-y += ptp_clock.h
+ header-y += ptrace.h
++header-y += pxp_dma.h
++header-y += pxp_device.h
+ header-y += qnx4_fs.h
+ header-y += qnxtypes.h
+ header-y += quota.h
+diff -Nur linux-3.14.40.orig/include/uapi/linux/mxc_asrc.h linux-3.14.40/include/uapi/linux/mxc_asrc.h
+--- linux-3.14.40.orig/include/uapi/linux/mxc_asrc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/mxc_asrc.h 2015-05-01 14:58:06.223427001 -0500
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ *
++ * @file mxc_asrc.h
++ *
++ * @brief i.MX Asynchronous Sample Rate Converter
++ *
++ * @ingroup Audio
++ */
++
++#ifndef __MXC_ASRC_UAPI_H__
++#define __MXC_ASRC_UAPI_H__
++
++#define ASRC_IOC_MAGIC 'C'
++
++#define ASRC_REQ_PAIR _IOWR(ASRC_IOC_MAGIC, 0, struct asrc_req)
++#define ASRC_CONFIG_PAIR _IOWR(ASRC_IOC_MAGIC, 1, struct asrc_config)
++#define ASRC_RELEASE_PAIR _IOW(ASRC_IOC_MAGIC, 2, enum asrc_pair_index)
++#define ASRC_CONVERT _IOW(ASRC_IOC_MAGIC, 3, struct asrc_convert_buffer)
++#define ASRC_START_CONV _IOW(ASRC_IOC_MAGIC, 4, enum asrc_pair_index)
++#define ASRC_STOP_CONV _IOW(ASRC_IOC_MAGIC, 5, enum asrc_pair_index)
++#define ASRC_STATUS _IOW(ASRC_IOC_MAGIC, 6, struct asrc_status_flags)
++#define ASRC_FLUSH _IOW(ASRC_IOC_MAGIC, 7, enum asrc_pair_index)
++
++enum asrc_pair_index {
++ ASRC_UNVALID_PAIR = -1,
++ ASRC_PAIR_A = 0,
++ ASRC_PAIR_B = 1,
++ ASRC_PAIR_C = 2,
++};
++
++#define ASRC_PAIR_MAX_NUM (ASRC_PAIR_C + 1)
++
++enum asrc_inclk {
++ INCLK_NONE = 0x03,
++ INCLK_ESAI_RX = 0x00,
++ INCLK_SSI1_RX = 0x01,
++ INCLK_SSI2_RX = 0x02,
++ INCLK_SSI3_RX = 0x07,
++ INCLK_SPDIF_RX = 0x04,
++ INCLK_MLB_CLK = 0x05,
++ INCLK_PAD = 0x06,
++ INCLK_ESAI_TX = 0x08,
++ INCLK_SSI1_TX = 0x09,
++ INCLK_SSI2_TX = 0x0a,
++ INCLK_SSI3_TX = 0x0b,
++ INCLK_SPDIF_TX = 0x0c,
++ INCLK_ASRCK1_CLK = 0x0f,
++};
++
++enum asrc_outclk {
++ OUTCLK_NONE = 0x03,
++ OUTCLK_ESAI_TX = 0x00,
++ OUTCLK_SSI1_TX = 0x01,
++ OUTCLK_SSI2_TX = 0x02,
++ OUTCLK_SSI3_TX = 0x07,
++ OUTCLK_SPDIF_TX = 0x04,
++ OUTCLK_MLB_CLK = 0x05,
++ OUTCLK_PAD = 0x06,
++ OUTCLK_ESAI_RX = 0x08,
++ OUTCLK_SSI1_RX = 0x09,
++ OUTCLK_SSI2_RX = 0x0a,
++ OUTCLK_SSI3_RX = 0x0b,
++ OUTCLK_SPDIF_RX = 0x0c,
++ OUTCLK_ASRCK1_CLK = 0x0f,
++};
++
++enum asrc_word_width {
++ ASRC_WIDTH_24_BIT = 0,
++ ASRC_WIDTH_16_BIT = 1,
++ ASRC_WIDTH_8_BIT = 2,
++};
++
++struct asrc_config {
++ enum asrc_pair_index pair;
++ unsigned int channel_num;
++ unsigned int buffer_num;
++ unsigned int dma_buffer_size;
++ unsigned int input_sample_rate;
++ unsigned int output_sample_rate;
++ enum asrc_word_width input_word_width;
++ enum asrc_word_width output_word_width;
++ enum asrc_inclk inclk;
++ enum asrc_outclk outclk;
++};
++
++struct asrc_pair {
++ unsigned int start_channel;
++ unsigned int chn_num;
++ unsigned int chn_max;
++ unsigned int active;
++ unsigned int overload_error;
++};
++
++struct asrc_req {
++ unsigned int chn_num;
++ enum asrc_pair_index index;
++};
++
++struct asrc_querybuf {
++ unsigned int buffer_index;
++ unsigned int input_length;
++ unsigned int output_length;
++ unsigned long input_offset;
++ unsigned long output_offset;
++};
++
++struct asrc_convert_buffer {
++ void *input_buffer_vaddr;
++ void *output_buffer_vaddr;
++ unsigned int input_buffer_length;
++ unsigned int output_buffer_length;
++};
++
++struct asrc_buffer {
++ unsigned int index;
++ unsigned int length;
++ unsigned int output_last_length;
++ int buf_valid;
++};
++
++struct asrc_status_flags {
++ enum asrc_pair_index index;
++ unsigned int overload_error;
++};
++
++#define ASRC_BUF_NA -35 /* ASRC DQ's buffer is NOT available */
++#define ASRC_BUF_AV 35 /* ASRC DQ's buffer is available */
++enum asrc_error_status {
++ ASRC_TASK_Q_OVERLOAD = 0x01,
++ ASRC_OUTPUT_TASK_OVERLOAD = 0x02,
++ ASRC_INPUT_TASK_OVERLOAD = 0x04,
++ ASRC_OUTPUT_BUFFER_OVERFLOW = 0x08,
++ ASRC_INPUT_BUFFER_UNDERRUN = 0x10,
++};
++#endif/* __MXC_ASRC_UAPI_H__ */
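A hedged userspace sketch of the pair request/configure/start sequence follows; the "/dev/mxc_asrc" node name, sample rates and buffer size are assumptions for illustration only.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mxc_asrc.h>

int example_asrc_open_pair(void)
{
	struct asrc_req req;
	struct asrc_config cfg;
	int fd;

	fd = open("/dev/mxc_asrc", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.chn_num = 2;			/* stereo */
	if (ioctl(fd, ASRC_REQ_PAIR, &req) < 0)
		goto err;

	memset(&cfg, 0, sizeof(cfg));
	cfg.pair = req.index;			/* pair granted by the driver */
	cfg.channel_num = 2;
	cfg.dma_buffer_size = 4096;
	cfg.input_sample_rate = 44100;
	cfg.output_sample_rate = 48000;
	cfg.input_word_width = ASRC_WIDTH_16_BIT;
	cfg.output_word_width = ASRC_WIDTH_16_BIT;
	cfg.inclk = INCLK_NONE;
	cfg.outclk = OUTCLK_ASRCK1_CLK;
	if (ioctl(fd, ASRC_CONFIG_PAIR, &cfg) < 0)
		goto err;

	if (ioctl(fd, ASRC_START_CONV, &req.index) < 0)
		goto err;

	return fd;
err:
	close(fd);
	return -1;
}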
+diff -Nur linux-3.14.40.orig/include/uapi/linux/mxcfb.h linux-3.14.40/include/uapi/linux/mxcfb.h
+--- linux-3.14.40.orig/include/uapi/linux/mxcfb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/mxcfb.h 2015-05-01 14:58:06.223427001 -0500
+@@ -0,0 +1,174 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*
++ * @file uapi/linux/mxcfb.h
++ *
++ * @brief Global header file for the MXC frame buffer
++ *
++ * @ingroup Framebuffer
++ */
++#ifndef __ASM_ARCH_MXCFB_H__
++#define __ASM_ARCH_MXCFB_H__
++
++#include <linux/fb.h>
++
++#define FB_SYNC_OE_LOW_ACT 0x80000000
++#define FB_SYNC_CLK_LAT_FALL 0x40000000
++#define FB_SYNC_DATA_INVERT 0x20000000
++#define FB_SYNC_CLK_IDLE_EN 0x10000000
++#define FB_SYNC_SHARP_MODE 0x08000000
++#define FB_SYNC_SWAP_RGB 0x04000000
++#define FB_ACCEL_TRIPLE_FLAG 0x00000000
++#define FB_ACCEL_DOUBLE_FLAG 0x00000001
++
++struct mxcfb_gbl_alpha {
++ int enable;
++ int alpha;
++};
++
++struct mxcfb_loc_alpha {
++ int enable;
++ int alpha_in_pixel;
++ unsigned long alpha_phy_addr0;
++ unsigned long alpha_phy_addr1;
++};
++
++struct mxcfb_color_key {
++ int enable;
++ __u32 color_key;
++};
++
++struct mxcfb_pos {
++ __u16 x;
++ __u16 y;
++};
++
++struct mxcfb_gamma {
++ int enable;
++ int constk[16];
++ int slopek[16];
++};
++
++struct mxcfb_rect {
++ __u32 top;
++ __u32 left;
++ __u32 width;
++ __u32 height;
++};
++
++#define GRAYSCALE_8BIT 0x1
++#define GRAYSCALE_8BIT_INVERTED 0x2
++#define GRAYSCALE_4BIT 0x3
++#define GRAYSCALE_4BIT_INVERTED 0x4
++
++#define AUTO_UPDATE_MODE_REGION_MODE 0
++#define AUTO_UPDATE_MODE_AUTOMATIC_MODE 1
++
++#define UPDATE_SCHEME_SNAPSHOT 0
++#define UPDATE_SCHEME_QUEUE 1
++#define UPDATE_SCHEME_QUEUE_AND_MERGE 2
++
++#define UPDATE_MODE_PARTIAL 0x0
++#define UPDATE_MODE_FULL 0x1
++
++#define WAVEFORM_MODE_AUTO 257
++
++#define TEMP_USE_AMBIENT 0x1000
++
++#define EPDC_FLAG_ENABLE_INVERSION 0x01
++#define EPDC_FLAG_FORCE_MONOCHROME 0x02
++#define EPDC_FLAG_USE_CMAP 0x04
++#define EPDC_FLAG_USE_ALT_BUFFER 0x100
++#define EPDC_FLAG_TEST_COLLISION 0x200
++#define EPDC_FLAG_GROUP_UPDATE 0x400
++#define EPDC_FLAG_USE_DITHERING_Y1 0x2000
++#define EPDC_FLAG_USE_DITHERING_Y4 0x4000
++
++#define FB_POWERDOWN_DISABLE -1
++
++struct mxcfb_alt_buffer_data {
++ __u32 phys_addr;
++ __u32 width; /* width of entire buffer */
++ __u32 height; /* height of entire buffer */
++ struct mxcfb_rect alt_update_region; /* region within buffer to update */
++};
++
++struct mxcfb_update_data {
++ struct mxcfb_rect update_region;
++ __u32 waveform_mode;
++ __u32 update_mode;
++ __u32 update_marker;
++ int temp;
++ unsigned int flags;
++ struct mxcfb_alt_buffer_data alt_buffer_data;
++};
++
++struct mxcfb_update_marker_data {
++ __u32 update_marker;
++ __u32 collision_test;
++};
++
++/*
++ * Structure used to define waveform modes for driver
++ * Needed for driver to perform auto-waveform selection
++ */
++struct mxcfb_waveform_modes {
++ int mode_init;
++ int mode_du;
++ int mode_gc4;
++ int mode_gc8;
++ int mode_gc16;
++ int mode_gc32;
++};
++
++/*
++ * Structure used to define a 5*3 matrix of parameters for
++ * setting IPU DP CSC module related to this framebuffer.
++ */
++struct mxcfb_csc_matrix {
++ int param[5][3];
++};
++
++#define MXCFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
++#define MXCFB_SET_GBL_ALPHA _IOW('F', 0x21, struct mxcfb_gbl_alpha)
++#define MXCFB_SET_CLR_KEY _IOW('F', 0x22, struct mxcfb_color_key)
++#define MXCFB_SET_OVERLAY_POS _IOWR('F', 0x24, struct mxcfb_pos)
++#define MXCFB_GET_FB_IPU_CHAN _IOR('F', 0x25, u_int32_t)
++#define MXCFB_SET_LOC_ALPHA _IOWR('F', 0x26, struct mxcfb_loc_alpha)
++#define MXCFB_SET_LOC_ALP_BUF _IOW('F', 0x27, unsigned long)
++#define MXCFB_SET_GAMMA _IOW('F', 0x28, struct mxcfb_gamma)
++#define MXCFB_GET_FB_IPU_DI _IOR('F', 0x29, u_int32_t)
++#define MXCFB_GET_DIFMT _IOR('F', 0x2A, u_int32_t)
++#define MXCFB_GET_FB_BLANK _IOR('F', 0x2B, u_int32_t)
++#define MXCFB_SET_DIFMT _IOW('F', 0x2C, u_int32_t)
++#define MXCFB_CSC_UPDATE _IOW('F', 0x2D, struct mxcfb_csc_matrix)
++
++/* IOCTLs for E-ink panel updates */
++#define MXCFB_SET_WAVEFORM_MODES _IOW('F', 0x2B, struct mxcfb_waveform_modes)
++#define MXCFB_SET_TEMPERATURE _IOW('F', 0x2C, int32_t)
++#define MXCFB_SET_AUTO_UPDATE_MODE _IOW('F', 0x2D, __u32)
++#define MXCFB_SEND_UPDATE _IOW('F', 0x2E, struct mxcfb_update_data)
++#define MXCFB_WAIT_FOR_UPDATE_COMPLETE _IOWR('F', 0x2F, struct mxcfb_update_marker_data)
++#define MXCFB_SET_PWRDOWN_DELAY _IOW('F', 0x30, int32_t)
++#define MXCFB_GET_PWRDOWN_DELAY _IOR('F', 0x31, int32_t)
++#define MXCFB_SET_UPDATE_SCHEME _IOW('F', 0x32, __u32)
++#define MXCFB_GET_WORK_BUFFER _IOWR('F', 0x34, unsigned long)
++#endif
+diff -Nur linux-3.14.40.orig/include/uapi/linux/mxc_mlb.h linux-3.14.40/include/uapi/linux/mxc_mlb.h
+--- linux-3.14.40.orig/include/uapi/linux/mxc_mlb.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/mxc_mlb.h 2015-05-01 14:58:06.223427001 -0500
+@@ -0,0 +1,55 @@
++/*
++ * mxc_mlb.h
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _MXC_MLB_UAPI_H
++#define _MXC_MLB_UAPI_H
++
++/* define IOCTL command */
++#define MLB_DBG_RUNTIME _IO('S', 0x09)
++#define MLB_SET_FPS _IOW('S', 0x10, unsigned int)
++#define MLB_GET_VER _IOR('S', 0x11, unsigned long)
++#define MLB_SET_DEVADDR _IOR('S', 0x12, unsigned char)
++
++/*!
++ * set channel address for each logical channel
++ * the MSB 16bits is for tx channel, the left LSB is for rx channel
++ */
++#define MLB_CHAN_SETADDR _IOW('S', 0x13, unsigned int)
++#define MLB_CHAN_STARTUP _IO('S', 0x14)
++#define MLB_CHAN_SHUTDOWN _IO('S', 0x15)
++#define MLB_CHAN_GETEVENT _IOR('S', 0x16, unsigned long)
++
++#define MLB_SET_ISOC_BLKSIZE_188 _IO('S', 0x17)
++#define MLB_SET_ISOC_BLKSIZE_196 _IO('S', 0x18)
++#define MLB_SET_SYNC_QUAD _IOW('S', 0x19, unsigned int)
++#define MLB_IRQ_ENABLE _IO('S', 0x20)
++#define MLB_IRQ_DISABLE _IO('S', 0x21)
++
++/*!
++ * MLB event define
++ */
++enum {
++ MLB_EVT_TX_PROTO_ERR_CUR = 1 << 0,
++ MLB_EVT_TX_BRK_DETECT_CUR = 1 << 1,
++ MLB_EVT_TX_PROTO_ERR_PREV = 1 << 8,
++ MLB_EVT_TX_BRK_DETECT_PREV = 1 << 9,
++ MLB_EVT_RX_PROTO_ERR_CUR = 1 << 16,
++ MLB_EVT_RX_BRK_DETECT_CUR = 1 << 17,
++ MLB_EVT_RX_PROTO_ERR_PREV = 1 << 24,
++ MLB_EVT_RX_BRK_DETECT_PREV = 1 << 25,
++};
++
++
++#endif /* _MXC_MLB_H */
+diff -Nur linux-3.14.40.orig/include/uapi/linux/mxc_v4l2.h linux-3.14.40/include/uapi/linux/mxc_v4l2.h
+--- linux-3.14.40.orig/include/uapi/linux/mxc_v4l2.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/mxc_v4l2.h 2015-05-01 14:58:06.223427001 -0500
+@@ -0,0 +1,56 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file uapi/linux/mxc_v4l2.h
++ *
++ * @brief MXC V4L2 private header file
++ *
++ * @ingroup MXC V4L2
++ */
++
++#ifndef __ASM_ARCH_MXC_V4L2_H__
++#define __ASM_ARCH_MXC_V4L2_H__
++
++/*
++ * For IPUv1 and IPUv3, V4L2_CID_MXC_ROT means encoder ioctl ID.
++ * And V4L2_CID_MXC_VF_ROT is viewfinder ioctl ID only for IPUv1 and IPUv3.
++ */
++#define V4L2_CID_MXC_ROT (V4L2_CID_PRIVATE_BASE + 0)
++#define V4L2_CID_MXC_FLASH (V4L2_CID_PRIVATE_BASE + 1)
++#define V4L2_CID_MXC_VF_ROT (V4L2_CID_PRIVATE_BASE + 2)
++#define V4L2_CID_MXC_MOTION (V4L2_CID_PRIVATE_BASE + 3)
++#define V4L2_CID_MXC_SWITCH_CAM (V4L2_CID_PRIVATE_BASE + 6)
++
++#define V4L2_MXC_ROTATE_NONE 0
++#define V4L2_MXC_ROTATE_VERT_FLIP 1
++#define V4L2_MXC_ROTATE_HORIZ_FLIP 2
++#define V4L2_MXC_ROTATE_180 3
++#define V4L2_MXC_ROTATE_90_RIGHT 4
++#define V4L2_MXC_ROTATE_90_RIGHT_VFLIP 5
++#define V4L2_MXC_ROTATE_90_RIGHT_HFLIP 6
++#define V4L2_MXC_ROTATE_90_LEFT 7
++
++struct v4l2_mxc_offset {
++ uint32_t u_offset;
++ uint32_t v_offset;
++};
++
++#endif
+diff -Nur linux-3.14.40.orig/include/uapi/linux/ptp_clock.h linux-3.14.40/include/uapi/linux/ptp_clock.h
+--- linux-3.14.40.orig/include/uapi/linux/ptp_clock.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/include/uapi/linux/ptp_clock.h 2015-05-01 14:58:06.271427001 -0500
+@@ -50,7 +50,8 @@
+ int n_ext_ts; /* Number of external time stamp channels. */
+ int n_per_out; /* Number of programmable periodic signals. */
+ int pps; /* Whether the clock supports a PPS callback. */
+- int rsv[15]; /* Reserved for future use. */
++ int n_pins; /* Number of input/output pins. */
++ int rsv[14]; /* Reserved for future use. */
+ };
+
+ struct ptp_extts_request {
+@@ -80,6 +81,40 @@
+ struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
+ };
+
++enum ptp_pin_function {
++ PTP_PF_NONE,
++ PTP_PF_EXTTS,
++ PTP_PF_PEROUT,
++ PTP_PF_PHYSYNC,
++};
++
++struct ptp_pin_desc {
++ /*
++ * Hardware specific human readable pin name. This field is
++ * set by the kernel during the PTP_PIN_GETFUNC ioctl and is
++ * ignored for the PTP_PIN_SETFUNC ioctl.
++ */
++ char name[64];
++ /*
++ * Pin index in the range of zero to ptp_clock_caps.n_pins - 1.
++ */
++ unsigned int index;
++ /*
++ * Which of the PTP_PF_xxx functions to use on this pin.
++ */
++ unsigned int func;
++ /*
++ * The specific channel to use for this function.
++ * This corresponds to the 'index' field of the
++ * PTP_EXTTS_REQUEST and PTP_PEROUT_REQUEST ioctls.
++ */
++ unsigned int chan;
++ /*
++ * Reserved for future use.
++ */
++ unsigned int rsv[5];
++};
++
+ #define PTP_CLK_MAGIC '='
+
+ #define PTP_CLOCK_GETCAPS _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
+@@ -87,6 +122,8 @@
+ #define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
+ #define PTP_ENABLE_PPS _IOW(PTP_CLK_MAGIC, 4, int)
+ #define PTP_SYS_OFFSET _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
++#define PTP_PIN_GETFUNC _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
++#define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
+
+ struct ptp_extts_event {
+ struct ptp_clock_time t; /* Time event occured. */
+diff -Nur linux-3.14.40.orig/include/uapi/linux/pxp_device.h linux-3.14.40/include/uapi/linux/pxp_device.h
+--- linux-3.14.40.orig/include/uapi/linux/pxp_device.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/pxp_device.h 2015-05-01 14:58:06.271427001 -0500
+@@ -0,0 +1,63 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _UAPI_PXP_DEVICE
++#define _UAPI_PXP_DEVICE
++
++#include <linux/pxp_dma.h>
++
++struct pxp_chan_handle {
++ unsigned int handle;
++ int hist_status;
++};
++
++struct pxp_mem_desc {
++ unsigned int handle;
++ unsigned int size;
++ dma_addr_t phys_addr;
++ unsigned int virt_uaddr; /* virtual user space address */
++ unsigned int mtype;
++};
++
++struct pxp_mem_flush {
++ unsigned int handle;
++ unsigned int type;
++};
++
++#define PXP_IOC_MAGIC 'P'
++
++#define PXP_IOC_GET_CHAN _IOR(PXP_IOC_MAGIC, 0, struct pxp_mem_desc)
++#define PXP_IOC_PUT_CHAN _IOW(PXP_IOC_MAGIC, 1, struct pxp_mem_desc)
++#define PXP_IOC_CONFIG_CHAN _IOW(PXP_IOC_MAGIC, 2, struct pxp_mem_desc)
++#define PXP_IOC_START_CHAN _IOW(PXP_IOC_MAGIC, 3, struct pxp_mem_desc)
++#define PXP_IOC_GET_PHYMEM _IOWR(PXP_IOC_MAGIC, 4, struct pxp_mem_desc)
++#define PXP_IOC_PUT_PHYMEM _IOW(PXP_IOC_MAGIC, 5, struct pxp_mem_desc)
++#define PXP_IOC_WAIT4CMPLT _IOWR(PXP_IOC_MAGIC, 6, struct pxp_mem_desc)
++#define PXP_IOC_FLUSH_PHYMEM _IOR(PXP_IOC_MAGIC, 7, struct pxp_mem_flush)
++
++/* Memory types supported*/
++#define MEMORY_TYPE_UNCACHED 0x0
++#define MEMORY_TYPE_WC 0x1
++#define MEMORY_TYPE_CACHED 0x2
++
++/* Cache flush operations */
++#define CACHE_CLEAN 0x1
++#define CACHE_INVALIDATE 0x2
++#define CACHE_FLUSH 0x4
++
++#endif
+diff -Nur linux-3.14.40.orig/include/uapi/linux/pxp_dma.h linux-3.14.40/include/uapi/linux/pxp_dma.h
+--- linux-3.14.40.orig/include/uapi/linux/pxp_dma.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/uapi/linux/pxp_dma.h 2015-05-01 14:58:06.271427001 -0500
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _UAPI_PXP_DMA
++#define _UAPI_PXP_DMA
++
++#include <linux/posix_types.h>
++#include <linux/types.h>
++
++#ifndef __KERNEL__
++typedef unsigned long dma_addr_t;
++typedef unsigned char bool;
++#endif
++
++/* PXP Pixel format definitions */
++/* Four-character-code (FOURCC) */
++#define fourcc(a, b, c, d)\
++ (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
++
++/*!
++ * @name PXP Pixel Formats
++ *
++ * Pixel formats are defined with ASCII FOURCC code. The pixel format codes are
++ * the same used by V4L2 API.
++ */
++
++/*! @} */
++/*! @name RGB Formats */
++/*! @{ */
++#define PXP_PIX_FMT_RGB332 fourcc('R', 'G', 'B', '1') /*!< 8 RGB-3-3-2 */
++#define PXP_PIX_FMT_RGB555 fourcc('R', 'G', 'B', 'O') /*!< 16 RGB-5-5-5 */
++#define PXP_PIX_FMT_RGB565 fourcc('R', 'G', 'B', 'P') /*!< 16 RGB-5-6-5 */
++#define PXP_PIX_FMT_RGB666 fourcc('R', 'G', 'B', '6') /*!< 18 RGB-6-6-6 */
++#define PXP_PIX_FMT_BGR666 fourcc('B', 'G', 'R', '6') /*!< 18 BGR-6-6-6 */
++#define PXP_PIX_FMT_BGR24 fourcc('B', 'G', 'R', '3') /*!< 24 BGR-8-8-8 */
++#define PXP_PIX_FMT_RGB24 fourcc('R', 'G', 'B', '3') /*!< 24 RGB-8-8-8 */
++#define PXP_PIX_FMT_BGR32 fourcc('B', 'G', 'R', '4') /*!< 32 BGR-8-8-8-8 */
++#define PXP_PIX_FMT_BGRA32 fourcc('B', 'G', 'R', 'A') /*!< 32 BGR-8-8-8-8 */
++#define PXP_PIX_FMT_RGB32 fourcc('R', 'G', 'B', '4') /*!< 32 RGB-8-8-8-8 */
++#define PXP_PIX_FMT_RGBA32 fourcc('R', 'G', 'B', 'A') /*!< 32 RGB-8-8-8-8 */
++#define PXP_PIX_FMT_ABGR32 fourcc('A', 'B', 'G', 'R') /*!< 32 ABGR-8-8-8-8 */
++/*! @} */
++/*! @name YUV Interleaved Formats */
++/*! @{ */
++#define PXP_PIX_FMT_YUYV fourcc('Y', 'U', 'Y', 'V') /*!< 16 YUV 4:2:2 */
++#define PXP_PIX_FMT_UYVY fourcc('U', 'Y', 'V', 'Y') /*!< 16 YUV 4:2:2 */
++#define PXP_PIX_FMT_VYUY fourcc('V', 'Y', 'U', 'Y') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_YVYU fourcc('Y', 'V', 'Y', 'U') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_Y41P fourcc('Y', '4', '1', 'P') /*!< 12 YUV 4:1:1 */
++#define PXP_PIX_FMT_YUV444 fourcc('Y', '4', '4', '4') /*!< 24 YUV 4:4:4 */
++/* two planes -- one Y, one Cb + Cr interleaved */
++#define PXP_PIX_FMT_NV12 fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
++#define PXP_PIX_FMT_NV21 fourcc('N', 'V', '2', '1') /* 12 Y/CbCr 4:2:0 */
++#define PXP_PIX_FMT_NV16 fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
++#define PXP_PIX_FMT_NV61 fourcc('N', 'V', '6', '1') /* 16 Y/CbCr 4:2:2 */
++/*! @} */
++/*! @name YUV Planar Formats */
++/*! @{ */
++#define PXP_PIX_FMT_GREY fourcc('G', 'R', 'E', 'Y') /*!< 8 Greyscale */
++#define PXP_PIX_FMT_GY04 fourcc('G', 'Y', '0', '4') /*!< 4 Greyscale */
++#define PXP_PIX_FMT_YVU410P fourcc('Y', 'V', 'U', '9') /*!< 9 YVU 4:1:0 */
++#define PXP_PIX_FMT_YUV410P fourcc('Y', 'U', 'V', '9') /*!< 9 YUV 4:1:0 */
++#define PXP_PIX_FMT_YVU420P fourcc('Y', 'V', '1', '2') /*!< 12 YVU 4:2:0 */
++#define PXP_PIX_FMT_YUV420P fourcc('I', '4', '2', '0') /*!< 12 YUV 4:2:0 */
++#define PXP_PIX_FMT_YUV420P2 fourcc('Y', 'U', '1', '2') /*!< 12 YUV 4:2:0 */
++#define PXP_PIX_FMT_YVU422P fourcc('Y', 'V', '1', '6') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_YUV422P fourcc('4', '2', '2', 'P') /*!< 16 YUV 4:2:2 */
++/*! @} */
++
++#define PXP_LUT_NONE 0x0
++#define PXP_LUT_INVERT 0x1
++#define PXP_LUT_BLACK_WHITE 0x2
++#define PXP_LUT_USE_CMAP 0x4
++
++#define NR_PXP_VIRT_CHANNEL 16
++
++/* Order significant! */
++enum pxp_channel_status {
++ PXP_CHANNEL_FREE,
++ PXP_CHANNEL_INITIALIZED,
++ PXP_CHANNEL_READY,
++};
++
++struct rect {
++ int top; /* Upper left coordinate of rectangle */
++ int left;
++ int width;
++ int height;
++};
++
++struct pxp_layer_param {
++ unsigned short width;
++ unsigned short height;
++ unsigned short stride; /* aka pitch */
++ unsigned int pixel_fmt;
++
++ /* layers combining parameters
++ * (these are ignored for S0 and output
++ * layers, and only apply for OL layer)
++ */
++ bool combine_enable;
++ unsigned int color_key_enable;
++ unsigned int color_key;
++ bool global_alpha_enable;
++ /* global alpha is either override or multiply */
++ bool global_override;
++ unsigned char global_alpha;
++ bool alpha_invert;
++ bool local_alpha_enable;
++
++ dma_addr_t paddr;
++};
++
++struct pxp_proc_data {
++ /* S0 Transformation Info */
++ int scaling;
++ int hflip;
++ int vflip;
++ int rotate;
++ int rot_pos;
++ int yuv;
++
++ /* Source rectangle (srect) defines the sub-rectangle
++ * within S0 to undergo processing.
++ */
++ struct rect srect;
++ /* Dest rect (drect) defines how to position the processed
++ * source rectangle (after resizing) within the output frame,
++ * whose dimensions are defined in pxp->pxp_conf_state.out_param
++ */
++ struct rect drect;
++
++ /* Current S0 configuration */
++ unsigned int bgcolor;
++
++ /* Output overlay support */
++ int overlay_state;
++
++ /* LUT transformation on Y data */
++ int lut_transform;
++ unsigned char *lut_map; /* 256 entries */
++ bool lut_map_updated; /* Map recently changed */
++ bool combine_enable;
++};
++
++struct pxp_config_data {
++ struct pxp_layer_param s0_param;
++ struct pxp_layer_param ol_param[8];
++ struct pxp_layer_param out_param;
++ struct pxp_proc_data proc_data;
++ int layer_nr;
++
++ /* Users don't touch */
++ int handle;
++};
++
++
++#endif
+diff -Nur linux-3.14.40.orig/include/video/mxc_edid.h linux-3.14.40/include/video/mxc_edid.h
+--- linux-3.14.40.orig/include/video/mxc_edid.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/video/mxc_edid.h 2015-05-01 14:58:06.271427001 -0500
+@@ -0,0 +1,105 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_edid.h
++ *
++ * @brief MXC EDID tools
++ *
++ * @ingroup Framebuffer
++ */
++
++#ifndef MXC_EDID_H
++#define MXC_EDID_H
++
++#include <linux/fb.h>
++
++#define FB_VMODE_ASPECT_4_3 0x10
++#define FB_VMODE_ASPECT_16_9 0x20
++#define FB_VMODE_ASPECT_MASK (FB_VMODE_ASPECT_4_3 | FB_VMODE_ASPECT_16_9)
++
++enum cea_audio_coding_types {
++ AUDIO_CODING_TYPE_REF_STREAM_HEADER = 0,
++ AUDIO_CODING_TYPE_LPCM = 1,
++ AUDIO_CODING_TYPE_AC3 = 2,
++ AUDIO_CODING_TYPE_MPEG1 = 3,
++ AUDIO_CODING_TYPE_MP3 = 4,
++ AUDIO_CODING_TYPE_MPEG2 = 5,
++ AUDIO_CODING_TYPE_AACLC = 6,
++ AUDIO_CODING_TYPE_DTS = 7,
++ AUDIO_CODING_TYPE_ATRAC = 8,
++ AUDIO_CODING_TYPE_SACD = 9,
++ AUDIO_CODING_TYPE_EAC3 = 10,
++ AUDIO_CODING_TYPE_DTS_HD = 11,
++ AUDIO_CODING_TYPE_MLP = 12,
++ AUDIO_CODING_TYPE_DST = 13,
++ AUDIO_CODING_TYPE_WMAPRO = 14,
++ AUDIO_CODING_TYPE_RESERVED = 15,
++};
++
++struct mxc_hdmi_3d_format {
++ unsigned char vic_order_2d;
++ unsigned char struct_3d;
++ unsigned char detail_3d;
++ unsigned char reserved;
++};
++
++struct mxc_edid_cfg {
++ bool cea_underscan;
++ bool cea_basicaudio;
++ bool cea_ycbcr444;
++ bool cea_ycbcr422;
++ bool hdmi_cap;
++
++ /*VSD*/
++ bool vsd_support_ai;
++ bool vsd_dc_48bit;
++ bool vsd_dc_36bit;
++ bool vsd_dc_30bit;
++ bool vsd_dc_y444;
++ bool vsd_dvi_dual;
++
++ bool vsd_cnc0;
++ bool vsd_cnc1;
++ bool vsd_cnc2;
++ bool vsd_cnc3;
++
++ u8 vsd_video_latency;
++ u8 vsd_audio_latency;
++ u8 vsd_I_video_latency;
++ u8 vsd_I_audio_latency;
++
++ u8 physical_address[4];
++ u8 hdmi_vic[64];
++ struct mxc_hdmi_3d_format hdmi_3d_format[64];
++ u16 hdmi_3d_mask_all;
++ u16 hdmi_3d_struct_all;
++ u32 vsd_max_tmdsclk_rate;
++
++ u8 max_channels;
++ u8 sample_sizes;
++ u8 sample_rates;
++ u8 speaker_alloc;
++};
++
++int mxc_edid_var_to_vic(struct fb_var_screeninfo *var);
++int mxc_edid_mode_to_vic(const struct fb_videomode *mode);
++int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi);
++int mxc_edid_parse_ext_blk(unsigned char *edid, struct mxc_edid_cfg *cfg,
++ struct fb_monspecs *specs);
++#endif
+diff -Nur linux-3.14.40.orig/include/video/mxc_hdmi.h linux-3.14.40/include/video/mxc_hdmi.h
+--- linux-3.14.40.orig/include/video/mxc_hdmi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/include/video/mxc_hdmi.h 2015-05-01 14:58:06.271427001 -0500
+@@ -0,0 +1,1027 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MXC_HDMI_H__
++#define __MXC_HDMI_H__
++
++/*
++ * Hdmi controller registers
++ */
++
++/* Identification Registers */
++#define HDMI_DESIGN_ID 0x0000
++#define HDMI_REVISION_ID 0x0001
++#define HDMI_PRODUCT_ID0 0x0002
++#define HDMI_PRODUCT_ID1 0x0003
++#define HDMI_CONFIG0_ID 0x0004
++#define HDMI_CONFIG1_ID 0x0005
++#define HDMI_CONFIG2_ID 0x0006
++#define HDMI_CONFIG3_ID 0x0007
++
++/* Interrupt Registers */
++#define HDMI_IH_FC_STAT0 0x0100
++#define HDMI_IH_FC_STAT1 0x0101
++#define HDMI_IH_FC_STAT2 0x0102
++#define HDMI_IH_AS_STAT0 0x0103
++#define HDMI_IH_PHY_STAT0 0x0104
++#define HDMI_IH_I2CM_STAT0 0x0105
++#define HDMI_IH_CEC_STAT0 0x0106
++#define HDMI_IH_VP_STAT0 0x0107
++#define HDMI_IH_I2CMPHY_STAT0 0x0108
++#define HDMI_IH_AHBDMAAUD_STAT0 0x0109
++
++#define HDMI_IH_MUTE_FC_STAT0 0x0180
++#define HDMI_IH_MUTE_FC_STAT1 0x0181
++#define HDMI_IH_MUTE_FC_STAT2 0x0182
++#define HDMI_IH_MUTE_AS_STAT0 0x0183
++#define HDMI_IH_MUTE_PHY_STAT0 0x0184
++#define HDMI_IH_MUTE_I2CM_STAT0 0x0185
++#define HDMI_IH_MUTE_CEC_STAT0 0x0186
++#define HDMI_IH_MUTE_VP_STAT0 0x0187
++#define HDMI_IH_MUTE_I2CMPHY_STAT0 0x0188
++#define HDMI_IH_MUTE_AHBDMAAUD_STAT0 0x0189
++#define HDMI_IH_MUTE 0x01FF
++
++/* Video Sample Registers */
++#define HDMI_TX_INVID0 0x0200
++#define HDMI_TX_INSTUFFING 0x0201
++#define HDMI_TX_GYDATA0 0x0202
++#define HDMI_TX_GYDATA1 0x0203
++#define HDMI_TX_RCRDATA0 0x0204
++#define HDMI_TX_RCRDATA1 0x0205
++#define HDMI_TX_BCBDATA0 0x0206
++#define HDMI_TX_BCBDATA1 0x0207
++
++/* Video Packetizer Registers */
++#define HDMI_VP_STATUS 0x0800
++#define HDMI_VP_PR_CD 0x0801
++#define HDMI_VP_STUFF 0x0802
++#define HDMI_VP_REMAP 0x0803
++#define HDMI_VP_CONF 0x0804
++#define HDMI_VP_STAT 0x0805
++#define HDMI_VP_INT 0x0806
++#define HDMI_VP_MASK 0x0807
++#define HDMI_VP_POL 0x0808
++
++/* Frame Composer Registers */
++#define HDMI_FC_INVIDCONF 0x1000
++#define HDMI_FC_INHACTV0 0x1001
++#define HDMI_FC_INHACTV1 0x1002
++#define HDMI_FC_INHBLANK0 0x1003
++#define HDMI_FC_INHBLANK1 0x1004
++#define HDMI_FC_INVACTV0 0x1005
++#define HDMI_FC_INVACTV1 0x1006
++#define HDMI_FC_INVBLANK 0x1007
++#define HDMI_FC_HSYNCINDELAY0 0x1008
++#define HDMI_FC_HSYNCINDELAY1 0x1009
++#define HDMI_FC_HSYNCINWIDTH0 0x100A
++#define HDMI_FC_HSYNCINWIDTH1 0x100B
++#define HDMI_FC_VSYNCINDELAY 0x100C
++#define HDMI_FC_VSYNCINWIDTH 0x100D
++#define HDMI_FC_INFREQ0 0x100E
++#define HDMI_FC_INFREQ1 0x100F
++#define HDMI_FC_INFREQ2 0x1010
++#define HDMI_FC_CTRLDUR 0x1011
++#define HDMI_FC_EXCTRLDUR 0x1012
++#define HDMI_FC_EXCTRLSPAC 0x1013
++#define HDMI_FC_CH0PREAM 0x1014
++#define HDMI_FC_CH1PREAM 0x1015
++#define HDMI_FC_CH2PREAM 0x1016
++#define HDMI_FC_AVICONF3 0x1017
++#define HDMI_FC_GCP 0x1018
++#define HDMI_FC_AVICONF0 0x1019
++#define HDMI_FC_AVICONF1 0x101A
++#define HDMI_FC_AVICONF2 0x101B
++#define HDMI_FC_AVIVID 0x101C
++#define HDMI_FC_AVIETB0 0x101D
++#define HDMI_FC_AVIETB1 0x101E
++#define HDMI_FC_AVISBB0 0x101F
++#define HDMI_FC_AVISBB1 0x1020
++#define HDMI_FC_AVIELB0 0x1021
++#define HDMI_FC_AVIELB1 0x1022
++#define HDMI_FC_AVISRB0 0x1023
++#define HDMI_FC_AVISRB1 0x1024
++#define HDMI_FC_AUDICONF0 0x1025
++#define HDMI_FC_AUDICONF1 0x1026
++#define HDMI_FC_AUDICONF2 0x1027
++#define HDMI_FC_AUDICONF3 0x1028
++#define HDMI_FC_VSDIEEEID0 0x1029
++#define HDMI_FC_VSDSIZE 0x102A
++#define HDMI_FC_VSDIEEEID1 0x1030
++#define HDMI_FC_VSDIEEEID2 0x1031
++#define HDMI_FC_VSDPAYLOAD0 0x1032
++#define HDMI_FC_VSDPAYLOAD1 0x1033
++#define HDMI_FC_VSDPAYLOAD2 0x1034
++#define HDMI_FC_VSDPAYLOAD3 0x1035
++#define HDMI_FC_VSDPAYLOAD4 0x1036
++#define HDMI_FC_VSDPAYLOAD5 0x1037
++#define HDMI_FC_VSDPAYLOAD6 0x1038
++#define HDMI_FC_VSDPAYLOAD7 0x1039
++#define HDMI_FC_VSDPAYLOAD8 0x103A
++#define HDMI_FC_VSDPAYLOAD9 0x103B
++#define HDMI_FC_VSDPAYLOAD10 0x103C
++#define HDMI_FC_VSDPAYLOAD11 0x103D
++#define HDMI_FC_VSDPAYLOAD12 0x103E
++#define HDMI_FC_VSDPAYLOAD13 0x103F
++#define HDMI_FC_VSDPAYLOAD14 0x1040
++#define HDMI_FC_VSDPAYLOAD15 0x1041
++#define HDMI_FC_VSDPAYLOAD16 0x1042
++#define HDMI_FC_VSDPAYLOAD17 0x1043
++#define HDMI_FC_VSDPAYLOAD18 0x1044
++#define HDMI_FC_VSDPAYLOAD19 0x1045
++#define HDMI_FC_VSDPAYLOAD20 0x1046
++#define HDMI_FC_VSDPAYLOAD21 0x1047
++#define HDMI_FC_VSDPAYLOAD22 0x1048
++#define HDMI_FC_VSDPAYLOAD23 0x1049
++#define HDMI_FC_SPDVENDORNAME0 0x104A
++#define HDMI_FC_SPDVENDORNAME1 0x104B
++#define HDMI_FC_SPDVENDORNAME2 0x104C
++#define HDMI_FC_SPDVENDORNAME3 0x104D
++#define HDMI_FC_SPDVENDORNAME4 0x104E
++#define HDMI_FC_SPDVENDORNAME5 0x104F
++#define HDMI_FC_SPDVENDORNAME6 0x1050
++#define HDMI_FC_SPDVENDORNAME7 0x1051
++#define HDMI_FC_SDPPRODUCTNAME0 0x1052
++#define HDMI_FC_SDPPRODUCTNAME1 0x1053
++#define HDMI_FC_SDPPRODUCTNAME2 0x1054
++#define HDMI_FC_SDPPRODUCTNAME3 0x1055
++#define HDMI_FC_SDPPRODUCTNAME4 0x1056
++#define HDMI_FC_SDPPRODUCTNAME5 0x1057
++#define HDMI_FC_SDPPRODUCTNAME6 0x1058
++#define HDMI_FC_SDPPRODUCTNAME7 0x1059
++#define HDMI_FC_SDPPRODUCTNAME8 0x105A
++#define HDMI_FC_SDPPRODUCTNAME9 0x105B
++#define HDMI_FC_SDPPRODUCTNAME10 0x105C
++#define HDMI_FC_SDPPRODUCTNAME11 0x105D
++#define HDMI_FC_SDPPRODUCTNAME12 0x105E
++#define HDMI_FC_SDPPRODUCTNAME13 0x105F
++#define HDMI_FC_SDPPRODUCTNAME14 0x1060
++#define HDMI_FC_SPDPRODUCTNAME15 0x1061
++#define HDMI_FC_SPDDEVICEINF 0x1062
++#define HDMI_FC_AUDSCONF 0x1063
++#define HDMI_FC_AUDSSTAT 0x1064
++#define HDMI_FC_DATACH0FILL 0x1070
++#define HDMI_FC_DATACH1FILL 0x1071
++#define HDMI_FC_DATACH2FILL 0x1072
++#define HDMI_FC_CTRLQHIGH 0x1073
++#define HDMI_FC_CTRLQLOW 0x1074
++#define HDMI_FC_ACP0 0x1075
++#define HDMI_FC_ACP28 0x1076
++#define HDMI_FC_ACP27 0x1077
++#define HDMI_FC_ACP26 0x1078
++#define HDMI_FC_ACP25 0x1079
++#define HDMI_FC_ACP24 0x107A
++#define HDMI_FC_ACP23 0x107B
++#define HDMI_FC_ACP22 0x107C
++#define HDMI_FC_ACP21 0x107D
++#define HDMI_FC_ACP20 0x107E
++#define HDMI_FC_ACP19 0x107F
++#define HDMI_FC_ACP18 0x1080
++#define HDMI_FC_ACP17 0x1081
++#define HDMI_FC_ACP16 0x1082
++#define HDMI_FC_ACP15 0x1083
++#define HDMI_FC_ACP14 0x1084
++#define HDMI_FC_ACP13 0x1085
++#define HDMI_FC_ACP12 0x1086
++#define HDMI_FC_ACP11 0x1087
++#define HDMI_FC_ACP10 0x1088
++#define HDMI_FC_ACP9 0x1089
++#define HDMI_FC_ACP8 0x108A
++#define HDMI_FC_ACP7 0x108B
++#define HDMI_FC_ACP6 0x108C
++#define HDMI_FC_ACP5 0x108D
++#define HDMI_FC_ACP4 0x108E
++#define HDMI_FC_ACP3 0x108F
++#define HDMI_FC_ACP2 0x1090
++#define HDMI_FC_ACP1 0x1091
++#define HDMI_FC_ISCR1_0 0x1092
++#define HDMI_FC_ISCR1_16 0x1093
++#define HDMI_FC_ISCR1_15 0x1094
++#define HDMI_FC_ISCR1_14 0x1095
++#define HDMI_FC_ISCR1_13 0x1096
++#define HDMI_FC_ISCR1_12 0x1097
++#define HDMI_FC_ISCR1_11 0x1098
++#define HDMI_FC_ISCR1_10 0x1099
++#define HDMI_FC_ISCR1_9 0x109A
++#define HDMI_FC_ISCR1_8 0x109B
++#define HDMI_FC_ISCR1_7 0x109C
++#define HDMI_FC_ISCR1_6 0x109D
++#define HDMI_FC_ISCR1_5 0x109E
++#define HDMI_FC_ISCR1_4 0x109F
++#define HDMI_FC_ISCR1_3 0x10A0
++#define HDMI_FC_ISCR1_2 0x10A1
++#define HDMI_FC_ISCR1_1 0x10A2
++#define HDMI_FC_ISCR2_15 0x10A3
++#define HDMI_FC_ISCR2_14 0x10A4
++#define HDMI_FC_ISCR2_13 0x10A5
++#define HDMI_FC_ISCR2_12 0x10A6
++#define HDMI_FC_ISCR2_11 0x10A7
++#define HDMI_FC_ISCR2_10 0x10A8
++#define HDMI_FC_ISCR2_9 0x10A9
++#define HDMI_FC_ISCR2_8 0x10AA
++#define HDMI_FC_ISCR2_7 0x10AB
++#define HDMI_FC_ISCR2_6 0x10AC
++#define HDMI_FC_ISCR2_5 0x10AD
++#define HDMI_FC_ISCR2_4 0x10AE
++#define HDMI_FC_ISCR2_3 0x10AF
++#define HDMI_FC_ISCR2_2 0x10B0
++#define HDMI_FC_ISCR2_1 0x10B1
++#define HDMI_FC_ISCR2_0 0x10B2
++#define HDMI_FC_DATAUTO0 0x10B3
++#define HDMI_FC_DATAUTO1 0x10B4
++#define HDMI_FC_DATAUTO2 0x10B5
++#define HDMI_FC_DATMAN 0x10B6
++#define HDMI_FC_DATAUTO3 0x10B7
++#define HDMI_FC_RDRB0 0x10B8
++#define HDMI_FC_RDRB1 0x10B9
++#define HDMI_FC_RDRB2 0x10BA
++#define HDMI_FC_RDRB3 0x10BB
++#define HDMI_FC_RDRB4 0x10BC
++#define HDMI_FC_RDRB5 0x10BD
++#define HDMI_FC_RDRB6 0x10BE
++#define HDMI_FC_RDRB7 0x10BF
++#define HDMI_FC_STAT0 0x10D0
++#define HDMI_FC_INT0 0x10D1
++#define HDMI_FC_MASK0 0x10D2
++#define HDMI_FC_POL0 0x10D3
++#define HDMI_FC_STAT1 0x10D4
++#define HDMI_FC_INT1 0x10D5
++#define HDMI_FC_MASK1 0x10D6
++#define HDMI_FC_POL1 0x10D7
++#define HDMI_FC_STAT2 0x10D8
++#define HDMI_FC_INT2 0x10D9
++#define HDMI_FC_MASK2 0x10DA
++#define HDMI_FC_POL2 0x10DB
++#define HDMI_FC_PRCONF 0x10E0
++
++#define HDMI_FC_GMD_STAT 0x1100
++#define HDMI_FC_GMD_EN 0x1101
++#define HDMI_FC_GMD_UP 0x1102
++#define HDMI_FC_GMD_CONF 0x1103
++#define HDMI_FC_GMD_HB 0x1104
++#define HDMI_FC_GMD_PB0 0x1105
++#define HDMI_FC_GMD_PB1 0x1106
++#define HDMI_FC_GMD_PB2 0x1107
++#define HDMI_FC_GMD_PB3 0x1108
++#define HDMI_FC_GMD_PB4 0x1109
++#define HDMI_FC_GMD_PB5 0x110A
++#define HDMI_FC_GMD_PB6 0x110B
++#define HDMI_FC_GMD_PB7 0x110C
++#define HDMI_FC_GMD_PB8 0x110D
++#define HDMI_FC_GMD_PB9 0x110E
++#define HDMI_FC_GMD_PB10 0x110F
++#define HDMI_FC_GMD_PB11 0x1110
++#define HDMI_FC_GMD_PB12 0x1111
++#define HDMI_FC_GMD_PB13 0x1112
++#define HDMI_FC_GMD_PB14 0x1113
++#define HDMI_FC_GMD_PB15 0x1114
++#define HDMI_FC_GMD_PB16 0x1115
++#define HDMI_FC_GMD_PB17 0x1116
++#define HDMI_FC_GMD_PB18 0x1117
++#define HDMI_FC_GMD_PB19 0x1118
++#define HDMI_FC_GMD_PB20 0x1119
++#define HDMI_FC_GMD_PB21 0x111A
++#define HDMI_FC_GMD_PB22 0x111B
++#define HDMI_FC_GMD_PB23 0x111C
++#define HDMI_FC_GMD_PB24 0x111D
++#define HDMI_FC_GMD_PB25 0x111E
++#define HDMI_FC_GMD_PB26 0x111F
++#define HDMI_FC_GMD_PB27 0x1120
++
++#define HDMI_FC_DBGFORCE 0x1200
++#define HDMI_FC_DBGAUD0CH0 0x1201
++#define HDMI_FC_DBGAUD1CH0 0x1202
++#define HDMI_FC_DBGAUD2CH0 0x1203
++#define HDMI_FC_DBGAUD0CH1 0x1204
++#define HDMI_FC_DBGAUD1CH1 0x1205
++#define HDMI_FC_DBGAUD2CH1 0x1206
++#define HDMI_FC_DBGAUD0CH2 0x1207
++#define HDMI_FC_DBGAUD1CH2 0x1208
++#define HDMI_FC_DBGAUD2CH2 0x1209
++#define HDMI_FC_DBGAUD0CH3 0x120A
++#define HDMI_FC_DBGAUD1CH3 0x120B
++#define HDMI_FC_DBGAUD2CH3 0x120C
++#define HDMI_FC_DBGAUD0CH4 0x120D
++#define HDMI_FC_DBGAUD1CH4 0x120E
++#define HDMI_FC_DBGAUD2CH4 0x120F
++#define HDMI_FC_DBGAUD0CH5 0x1210
++#define HDMI_FC_DBGAUD1CH5 0x1211
++#define HDMI_FC_DBGAUD2CH5 0x1212
++#define HDMI_FC_DBGAUD0CH6 0x1213
++#define HDMI_FC_DBGAUD1CH6 0x1214
++#define HDMI_FC_DBGAUD2CH6 0x1215
++#define HDMI_FC_DBGAUD0CH7 0x1216
++#define HDMI_FC_DBGAUD1CH7 0x1217
++#define HDMI_FC_DBGAUD2CH7 0x1218
++#define HDMI_FC_DBGTMDS0 0x1219
++#define HDMI_FC_DBGTMDS1 0x121A
++#define HDMI_FC_DBGTMDS2 0x121B
++
++/* HDMI Source PHY Registers */
++#define HDMI_PHY_CONF0 0x3000
++#define HDMI_PHY_TST0 0x3001
++#define HDMI_PHY_TST1 0x3002
++#define HDMI_PHY_TST2 0x3003
++#define HDMI_PHY_STAT0 0x3004
++#define HDMI_PHY_INT0 0x3005
++#define HDMI_PHY_MASK0 0x3006
++#define HDMI_PHY_POL0 0x3007
++
++/* HDMI Master PHY Registers */
++#define HDMI_PHY_I2CM_SLAVE_ADDR 0x3020
++#define HDMI_PHY_I2CM_ADDRESS_ADDR 0x3021
++#define HDMI_PHY_I2CM_DATAO_1_ADDR 0x3022
++#define HDMI_PHY_I2CM_DATAO_0_ADDR 0x3023
++#define HDMI_PHY_I2CM_DATAI_1_ADDR 0x3024
++#define HDMI_PHY_I2CM_DATAI_0_ADDR 0x3025
++#define HDMI_PHY_I2CM_OPERATION_ADDR 0x3026
++#define HDMI_PHY_I2CM_INT_ADDR 0x3027
++#define HDMI_PHY_I2CM_CTLINT_ADDR 0x3028
++#define HDMI_PHY_I2CM_DIV_ADDR 0x3029
++#define HDMI_PHY_I2CM_SOFTRSTZ_ADDR 0x302a
++#define HDMI_PHY_I2CM_SS_SCL_HCNT_1_ADDR 0x302b
++#define HDMI_PHY_I2CM_SS_SCL_HCNT_0_ADDR 0x302c
++#define HDMI_PHY_I2CM_SS_SCL_LCNT_1_ADDR 0x302d
++#define HDMI_PHY_I2CM_SS_SCL_LCNT_0_ADDR 0x302e
++#define HDMI_PHY_I2CM_FS_SCL_HCNT_1_ADDR 0x302f
++#define HDMI_PHY_I2CM_FS_SCL_HCNT_0_ADDR 0x3030
++#define HDMI_PHY_I2CM_FS_SCL_LCNT_1_ADDR 0x3031
++#define HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR 0x3032
++
++/* Audio Sampler Registers */
++#define HDMI_AUD_CONF0 0x3100
++#define HDMI_AUD_CONF1 0x3101
++#define HDMI_AUD_INT 0x3102
++#define HDMI_AUD_CONF2 0x3103
++#define HDMI_AUD_N1 0x3200
++#define HDMI_AUD_N2 0x3201
++#define HDMI_AUD_N3 0x3202
++#define HDMI_AUD_CTS1 0x3203
++#define HDMI_AUD_CTS2 0x3204
++#define HDMI_AUD_CTS3 0x3205
++#define HDMI_AUD_INPUTCLKFS 0x3206
++#define HDMI_AUD_SPDIFINT 0x3302
++#define HDMI_AUD_CONF0_HBR 0x3400
++#define HDMI_AUD_HBR_STATUS 0x3401
++#define HDMI_AUD_HBR_INT 0x3402
++#define HDMI_AUD_HBR_POL 0x3403
++#define HDMI_AUD_HBR_MASK 0x3404
++
++/* Generic Parallel Audio Interface Registers */
++/* Not used as GPAUD interface is not enabled in hw */
++#define HDMI_GP_CONF0 0x3500
++#define HDMI_GP_CONF1 0x3501
++#define HDMI_GP_CONF2 0x3502
++#define HDMI_GP_STAT 0x3503
++#define HDMI_GP_INT 0x3504
++#define HDMI_GP_MASK 0x3505
++#define HDMI_GP_POL 0x3506
++
++/* Audio DMA Registers */
++#define HDMI_AHB_DMA_CONF0 0x3600
++#define HDMI_AHB_DMA_START 0x3601
++#define HDMI_AHB_DMA_STOP 0x3602
++#define HDMI_AHB_DMA_THRSLD 0x3603
++#define HDMI_AHB_DMA_STRADDR0 0x3604
++#define HDMI_AHB_DMA_STRADDR1 0x3605
++#define HDMI_AHB_DMA_STRADDR2 0x3606
++#define HDMI_AHB_DMA_STRADDR3 0x3607
++#define HDMI_AHB_DMA_STPADDR0 0x3608
++#define HDMI_AHB_DMA_STPADDR1 0x3609
++#define HDMI_AHB_DMA_STPADDR2 0x360a
++#define HDMI_AHB_DMA_STPADDR3 0x360b
++#define HDMI_AHB_DMA_BSTADDR0 0x360c
++#define HDMI_AHB_DMA_BSTADDR1 0x360d
++#define HDMI_AHB_DMA_BSTADDR2 0x360e
++#define HDMI_AHB_DMA_BSTADDR3 0x360f
++#define HDMI_AHB_DMA_MBLENGTH0 0x3610
++#define HDMI_AHB_DMA_MBLENGTH1 0x3611
++#define HDMI_AHB_DMA_STAT 0x3612
++#define HDMI_AHB_DMA_INT 0x3613
++#define HDMI_AHB_DMA_MASK 0x3614
++#define HDMI_AHB_DMA_POL 0x3615
++#define HDMI_AHB_DMA_CONF1 0x3616
++#define HDMI_AHB_DMA_BUFFSTAT 0x3617
++#define HDMI_AHB_DMA_BUFFINT 0x3618
++#define HDMI_AHB_DMA_BUFFMASK 0x3619
++#define HDMI_AHB_DMA_BUFFPOL 0x361a
++
++/* Main Controller Registers */
++#define HDMI_MC_SFRDIV 0x4000
++#define HDMI_MC_CLKDIS 0x4001
++#define HDMI_MC_SWRSTZ 0x4002
++#define HDMI_MC_OPCTRL 0x4003
++#define HDMI_MC_FLOWCTRL 0x4004
++#define HDMI_MC_PHYRSTZ 0x4005
++#define HDMI_MC_LOCKONCLOCK 0x4006
++#define HDMI_MC_HEACPHY_RST 0x4007
++
++/* Color Space Converter Registers */
++#define HDMI_CSC_CFG 0x4100
++#define HDMI_CSC_SCALE 0x4101
++#define HDMI_CSC_COEF_A1_MSB 0x4102
++#define HDMI_CSC_COEF_A1_LSB 0x4103
++#define HDMI_CSC_COEF_A2_MSB 0x4104
++#define HDMI_CSC_COEF_A2_LSB 0x4105
++#define HDMI_CSC_COEF_A3_MSB 0x4106
++#define HDMI_CSC_COEF_A3_LSB 0x4107
++#define HDMI_CSC_COEF_A4_MSB 0x4108
++#define HDMI_CSC_COEF_A4_LSB 0x4109
++#define HDMI_CSC_COEF_B1_MSB 0x410A
++#define HDMI_CSC_COEF_B1_LSB 0x410B
++#define HDMI_CSC_COEF_B2_MSB 0x410C
++#define HDMI_CSC_COEF_B2_LSB 0x410D
++#define HDMI_CSC_COEF_B3_MSB 0x410E
++#define HDMI_CSC_COEF_B3_LSB 0x410F
++#define HDMI_CSC_COEF_B4_MSB 0x4110
++#define HDMI_CSC_COEF_B4_LSB 0x4111
++#define HDMI_CSC_COEF_C1_MSB 0x4112
++#define HDMI_CSC_COEF_C1_LSB 0x4113
++#define HDMI_CSC_COEF_C2_MSB 0x4114
++#define HDMI_CSC_COEF_C2_LSB 0x4115
++#define HDMI_CSC_COEF_C3_MSB 0x4116
++#define HDMI_CSC_COEF_C3_LSB 0x4117
++#define HDMI_CSC_COEF_C4_MSB 0x4118
++#define HDMI_CSC_COEF_C4_LSB 0x4119
++
++/* HDCP Interrupt Registers */
++#define HDMI_A_APIINTCLR 0x5006
++#define HDMI_A_APIINTSTAT 0x5007
++#define HDMI_A_APIINTMSK 0x5008
++
++/* CEC Engine Registers */
++#define HDMI_CEC_CTRL 0x7D00
++#define HDMI_CEC_STAT 0x7D01
++#define HDMI_CEC_MASK 0x7D02
++#define HDMI_CEC_POLARITY 0x7D03
++#define HDMI_CEC_INT 0x7D04
++#define HDMI_CEC_ADDR_L 0x7D05
++#define HDMI_CEC_ADDR_H 0x7D06
++#define HDMI_CEC_TX_CNT 0x7D07
++#define HDMI_CEC_RX_CNT 0x7D08
++#define HDMI_CEC_TX_DATA0 0x7D10
++#define HDMI_CEC_TX_DATA1 0x7D11
++#define HDMI_CEC_TX_DATA2 0x7D12
++#define HDMI_CEC_TX_DATA3 0x7D13
++#define HDMI_CEC_TX_DATA4 0x7D14
++#define HDMI_CEC_TX_DATA5 0x7D15
++#define HDMI_CEC_TX_DATA6 0x7D16
++#define HDMI_CEC_TX_DATA7 0x7D17
++#define HDMI_CEC_TX_DATA8 0x7D18
++#define HDMI_CEC_TX_DATA9 0x7D19
++#define HDMI_CEC_TX_DATA10 0x7D1a
++#define HDMI_CEC_TX_DATA11 0x7D1b
++#define HDMI_CEC_TX_DATA12 0x7D1c
++#define HDMI_CEC_TX_DATA13 0x7D1d
++#define HDMI_CEC_TX_DATA14 0x7D1e
++#define HDMI_CEC_TX_DATA15 0x7D1f
++#define HDMI_CEC_RX_DATA0 0x7D20
++#define HDMI_CEC_RX_DATA1 0x7D21
++#define HDMI_CEC_RX_DATA2 0x7D22
++#define HDMI_CEC_RX_DATA3 0x7D23
++#define HDMI_CEC_RX_DATA4 0x7D24
++#define HDMI_CEC_RX_DATA5 0x7D25
++#define HDMI_CEC_RX_DATA6 0x7D26
++#define HDMI_CEC_RX_DATA7 0x7D27
++#define HDMI_CEC_RX_DATA8 0x7D28
++#define HDMI_CEC_RX_DATA9 0x7D29
++#define HDMI_CEC_RX_DATA10 0x7D2a
++#define HDMI_CEC_RX_DATA11 0x7D2b
++#define HDMI_CEC_RX_DATA12 0x7D2c
++#define HDMI_CEC_RX_DATA13 0x7D2d
++#define HDMI_CEC_RX_DATA14 0x7D2e
++#define HDMI_CEC_RX_DATA15 0x7D2f
++#define HDMI_CEC_LOCK 0x7D30
++#define HDMI_CEC_WKUPCTRL 0x7D31
++
++/* I2C Master Registers (E-DDC) */
++#define HDMI_I2CM_SLAVE 0x7E00
++#define HDMI_I2CM_ADDRESS 0x7E01
++#define HDMI_I2CM_DATAO 0x7E02
++#define HDMI_I2CM_DATAI 0x7E03
++#define HDMI_I2CM_OPERATION 0x7E04
++#define HDMI_I2CM_INT 0x7E05
++#define HDMI_I2CM_CTLINT 0x7E06
++#define HDMI_I2CM_DIV 0x7E07
++#define HDMI_I2CM_SEGADDR 0x7E08
++#define HDMI_I2CM_SOFTRSTZ 0x7E09
++#define HDMI_I2CM_SEGPTR 0x7E0A
++#define HDMI_I2CM_SS_SCL_HCNT_1_ADDR 0x7E0B
++#define HDMI_I2CM_SS_SCL_HCNT_0_ADDR 0x7E0C
++#define HDMI_I2CM_SS_SCL_LCNT_1_ADDR 0x7E0D
++#define HDMI_I2CM_SS_SCL_LCNT_0_ADDR 0x7E0E
++#define HDMI_I2CM_FS_SCL_HCNT_1_ADDR 0x7E0F
++#define HDMI_I2CM_FS_SCL_HCNT_0_ADDR 0x7E10
++#define HDMI_I2CM_FS_SCL_LCNT_1_ADDR 0x7E11
++#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
++
++/* Random Number Generator Registers (RNG) */
++#define HDMI_RNG_BASE 0x8000
++
++
++/*
++ * Register field definitions
++ */
++enum {
++/* IH_FC_INT2 field values */
++ HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_FC_STAT2 field values */
++ HDMI_IH_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_PHY_STAT0 field values */
++ HDMI_IH_PHY_STAT0_RX_SENSE3 = 0x20,
++ HDMI_IH_PHY_STAT0_RX_SENSE2 = 0x10,
++ HDMI_IH_PHY_STAT0_RX_SENSE1 = 0x8,
++ HDMI_IH_PHY_STAT0_RX_SENSE0 = 0x4,
++ HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
++ HDMI_IH_PHY_STAT0_HPD = 0x1,
++
++/* IH_CEC_STAT0 field values */
++ HDMI_IH_CEC_STAT0_WAKEUP = 0x40,
++ HDMI_IH_CEC_STAT0_ERROR_FOLL = 0x20,
++ HDMI_IH_CEC_STAT0_ERROR_INIT = 0x10,
++ HDMI_IH_CEC_STAT0_ARB_LOST = 0x8,
++ HDMI_IH_CEC_STAT0_NACK = 0x4,
++ HDMI_IH_CEC_STAT0_EOM = 0x2,
++ HDMI_IH_CEC_STAT0_DONE = 0x1,
++
++
++/* IH_MUTE_I2CMPHY_STAT0 field values */
++ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
++ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
++
++/* IH_PHY_STAT0 field values */
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE3 = 0x20,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE2 = 0x10,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE1 = 0x8,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE0 = 0x4,
++ HDMI_IH_MUTE_PHY_STAT0_TX_PHY_LOCK = 0x2,
++ HDMI_IH_MUTE_PHY_STAT0_HPD = 0x1,
++
++/* IH and IH_MUTE convenience macro RX_SENSE | HPD*/
++ HDMI_DVI_IH_STAT = 0x3D,
++
++
++/* IH_AHBDMAAUD_STAT0 field values */
++ HDMI_IH_AHBDMAAUD_STAT0_ERROR = 0x20,
++ HDMI_IH_AHBDMAAUD_STAT0_LOST = 0x10,
++ HDMI_IH_AHBDMAAUD_STAT0_RETRY = 0x08,
++ HDMI_IH_AHBDMAAUD_STAT0_DONE = 0x04,
++ HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
++ HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
++
++/* IH_MUTE_FC_STAT2 field values */
++ HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_MUTE_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_MUTE_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_MUTE_AHBDMAAUD_STAT0 field values */
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = 0x20,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = 0x10,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = 0x08,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = 0x04,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
++
++/* IH_MUTE field values */
++ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT = 0x2,
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT = 0x1,
++
++/* TX_INVID0 field values */
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_MASK = 0x80,
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_ENABLE = 0x80,
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE = 0x00,
++ HDMI_TX_INVID0_VIDEO_MAPPING_MASK = 0x1F,
++ HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET = 0,
++
++/* TX_INSTUFFING field values */
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_MASK = 0x4,
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE = 0x4,
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_DISABLE = 0x0,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_MASK = 0x2,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE = 0x2,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_DISABLE = 0x0,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_MASK = 0x1,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE = 0x1,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_DISABLE = 0x0,
++
++/* VP_PR_CD field values */
++ HDMI_VP_PR_CD_COLOR_DEPTH_MASK = 0xF0,
++ HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET = 4,
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK = 0x0F,
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET = 0,
++
++/* VP_STUFF field values */
++ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK = 0x20,
++ HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET = 5,
++ HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK = 0x10,
++ HDMI_VP_STUFF_IFIX_PP_TO_LAST_OFFSET = 4,
++ HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK = 0x8,
++ HDMI_VP_STUFF_ICX_GOTO_P0_ST_OFFSET = 3,
++ HDMI_VP_STUFF_YCC422_STUFFING_MASK = 0x4,
++ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE = 0x4,
++ HDMI_VP_STUFF_YCC422_STUFFING_DIRECT_MODE = 0x0,
++ HDMI_VP_STUFF_PP_STUFFING_MASK = 0x2,
++ HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE = 0x2,
++ HDMI_VP_STUFF_PP_STUFFING_DIRECT_MODE = 0x0,
++ HDMI_VP_STUFF_PR_STUFFING_MASK = 0x1,
++ HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE = 0x1,
++ HDMI_VP_STUFF_PR_STUFFING_DIRECT_MODE = 0x0,
++
++/* VP_CONF field values */
++ HDMI_VP_CONF_BYPASS_EN_MASK = 0x40,
++ HDMI_VP_CONF_BYPASS_EN_ENABLE = 0x40,
++ HDMI_VP_CONF_BYPASS_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_PP_EN_ENMASK = 0x20,
++ HDMI_VP_CONF_PP_EN_ENABLE = 0x20,
++ HDMI_VP_CONF_PP_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_PR_EN_MASK = 0x10,
++ HDMI_VP_CONF_PR_EN_ENABLE = 0x10,
++ HDMI_VP_CONF_PR_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_YCC422_EN_MASK = 0x8,
++ HDMI_VP_CONF_YCC422_EN_ENABLE = 0x8,
++ HDMI_VP_CONF_YCC422_EN_DISABLE = 0x0,
++ HDMI_VP_CONF_BYPASS_SELECT_MASK = 0x4,
++ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER = 0x4,
++ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER = 0x0,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_MASK = 0x3,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS = 0x3,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422 = 0x1,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_PP = 0x0,
++
++/* VP_REMAP field values */
++ HDMI_VP_REMAP_MASK = 0x3,
++ HDMI_VP_REMAP_YCC422_24bit = 0x2,
++ HDMI_VP_REMAP_YCC422_20bit = 0x1,
++ HDMI_VP_REMAP_YCC422_16bit = 0x0,
++
++/* FC_INVIDCONF field values */
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK = 0x40,
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH = 0x40,
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK = 0x20,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH = 0x20,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK = 0x10,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH = 0x10,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_MASK = 0x8,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE = 0x8,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE = 0x0,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK = 0x2,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH = 0x2,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW = 0x0,
++ HDMI_FC_INVIDCONF_IN_I_P_MASK = 0x1,
++ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED = 0x1,
++ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE = 0x0,
++
++/* FC_AUDICONF0 field values */
++ HDMI_FC_AUDICONF0_CC_OFFSET = 4,
++ HDMI_FC_AUDICONF0_CC_MASK = 0x70,
++ HDMI_FC_AUDICONF0_CT_OFFSET = 0,
++ HDMI_FC_AUDICONF0_CT_MASK = 0xF,
++
++/* FC_AUDICONF1 field values */
++ HDMI_FC_AUDICONF1_SS_OFFSET = 3,
++ HDMI_FC_AUDICONF1_SS_MASK = 0x18,
++ HDMI_FC_AUDICONF1_SF_OFFSET = 0,
++ HDMI_FC_AUDICONF1_SF_MASK = 0x7,
++
++/* FC_AUDICONF3 field values */
++ HDMI_FC_AUDICONF3_LFEPBL_OFFSET = 5,
++ HDMI_FC_AUDICONF3_LFEPBL_MASK = 0x60,
++ HDMI_FC_AUDICONF3_DM_INH_OFFSET = 4,
++ HDMI_FC_AUDICONF3_DM_INH_MASK = 0x10,
++ HDMI_FC_AUDICONF3_LSV_OFFSET = 0,
++ HDMI_FC_AUDICONF3_LSV_MASK = 0xF,
++
++/* FC_AUDSCHNLS0 field values */
++ HDMI_FC_AUDSCHNLS0_CGMSA_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS0_CGMSA_MASK = 0x30,
++ HDMI_FC_AUDSCHNLS0_COPYRIGHT_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS0_COPYRIGHT_MASK = 0x01,
++
++/* FC_AUDSCHNLS3-6 field values */
++ HDMI_FC_AUDSCHNLS3_OIEC_CH0_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH0_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH1_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH1_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH2_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH2_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH3_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH3_MASK = 0xf0,
++
++ HDMI_FC_AUDSCHNLS5_OIEC_CH0_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH0_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH1_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH1_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH2_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH2_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH3_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH3_MASK = 0xf0,
++
++/* HDMI_FC_AUDSCHNLS7 field values */
++ HDMI_FC_AUDSCHNLS7_ACCURACY_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS7_ACCURACY_MASK = 0x30,
++
++/* HDMI_FC_AUDSCHNLS8 field values */
++ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_OFFSET = 0,
++
++/* FC_AUDSCONF field values */
++ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_MASK = 0xF0,
++ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_OFFSET = 4,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK = 0x1,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_OFFSET = 0,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1 = 0x1,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0 = 0x0,
++
++/* FC_STAT2 field values */
++ HDMI_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_INT2 field values */
++ HDMI_FC_INT2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_MASK2 field values */
++ HDMI_FC_MASK2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_MASK2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_MASK2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_PRCONF field values */
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK = 0xF0,
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET = 4,
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
++
++/* FC_AVICONF0-FC_AVICONF3 field values */
++ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
++ HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
++ HDMI_FC_AVICONF0_PIX_FMT_YCBCR422 = 0x01,
++ HDMI_FC_AVICONF0_PIX_FMT_YCBCR444 = 0x02,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_MASK = 0x40,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT = 0x40,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_NO_INFO = 0x00,
++ HDMI_FC_AVICONF0_BAR_DATA_MASK = 0x0C,
++ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA = 0x00,
++ HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR = 0x04,
++ HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR = 0x08,
++ HDMI_FC_AVICONF0_BAR_DATA_VERT_HORIZ_BAR = 0x0C,
++ HDMI_FC_AVICONF0_SCAN_INFO_MASK = 0x30,
++ HDMI_FC_AVICONF0_SCAN_INFO_OVERSCAN = 0x10,
++ HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN = 0x20,
++ HDMI_FC_AVICONF0_SCAN_INFO_NODATA = 0x00,
++
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK = 0x0F,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_USE_CODED = 0x08,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3 = 0x09,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9 = 0x0A,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_14_9 = 0x0B,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_MASK = 0x30,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_NO_DATA = 0x00,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3 = 0x10,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9 = 0x20,
++ HDMI_FC_AVICONF1_COLORIMETRY_MASK = 0xC0,
++ HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA = 0x00,
++ HDMI_FC_AVICONF1_COLORIMETRY_SMPTE = 0x40,
++ HDMI_FC_AVICONF1_COLORIMETRY_ITUR = 0x80,
++ HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO = 0xC0,
++
++ HDMI_FC_AVICONF2_SCALING_MASK = 0x03,
++ HDMI_FC_AVICONF2_SCALING_NONE = 0x00,
++ HDMI_FC_AVICONF2_SCALING_HORIZ = 0x01,
++ HDMI_FC_AVICONF2_SCALING_VERT = 0x02,
++ HDMI_FC_AVICONF2_SCALING_HORIZ_VERT = 0x03,
++ HDMI_FC_AVICONF2_RGB_QUANT_MASK = 0x0C,
++ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT = 0x00,
++ HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE = 0x04,
++ HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE = 0x08,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK = 0x70,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601 = 0x00,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709 = 0x10,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_SYCC601 = 0x20,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_YCC601 = 0x30,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_RGB = 0x40,
++ HDMI_FC_AVICONF2_IT_CONTENT_MASK = 0x80,
++ HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA = 0x00,
++ HDMI_FC_AVICONF2_IT_CONTENT_VALID = 0x80,
++
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_MASK = 0x03,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS = 0x00,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_PHOTO = 0x01,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_CINEMA = 0x02,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GAME = 0x03,
++ HDMI_FC_AVICONF3_QUANT_RANGE_MASK = 0x0C,
++ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
++ HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
++
++/* FC_DBGFORCE field values */
++ HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
++ HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
++
++/* PHY_CONF0 field values */
++ HDMI_PHY_CONF0_PDZ_MASK = 0x80,
++ HDMI_PHY_CONF0_PDZ_OFFSET = 7,
++ HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
++ HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
++ HDMI_PHY_CONF0_SPARECTRL = 0x20,
++ HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
++ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET = 3,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK = 0x4,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET = 2,
++ HDMI_PHY_CONF0_SELDATAENPOL_MASK = 0x2,
++ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET = 1,
++ HDMI_PHY_CONF0_SELDIPIF_MASK = 0x1,
++ HDMI_PHY_CONF0_SELDIPIF_OFFSET = 0,
++
++/* PHY_TST0 field values */
++ HDMI_PHY_TST0_TSTCLR_MASK = 0x20,
++ HDMI_PHY_TST0_TSTCLR_OFFSET = 5,
++ HDMI_PHY_TST0_TSTEN_MASK = 0x10,
++ HDMI_PHY_TST0_TSTEN_OFFSET = 4,
++ HDMI_PHY_TST0_TSTCLK_MASK = 0x1,
++ HDMI_PHY_TST0_TSTCLK_OFFSET = 0,
++
++/* PHY_STAT0 field values */
++ HDMI_PHY_RX_SENSE3 = 0x80,
++ HDMI_PHY_RX_SENSE2 = 0x40,
++ HDMI_PHY_RX_SENSE1 = 0x20,
++ HDMI_PHY_RX_SENSE0 = 0x10,
++ HDMI_PHY_HPD = 0x02,
++ HDMI_PHY_TX_PHY_LOCK = 0x01,
++
++/* HDMI STAT convenience RX_SENSE | HPD */
++ HDMI_DVI_STAT = 0xF2,
++
++/* PHY_I2CM_SLAVE_ADDR field values */
++ HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2 = 0x69,
++ HDMI_PHY_I2CM_SLAVE_ADDR_HEAC_PHY = 0x49,
++
++/* PHY_I2CM_OPERATION_ADDR field values */
++ HDMI_PHY_I2CM_OPERATION_ADDR_WRITE = 0x10,
++ HDMI_PHY_I2CM_OPERATION_ADDR_READ = 0x1,
++
++/* HDMI_PHY_I2CM_INT_ADDR */
++ HDMI_PHY_I2CM_INT_ADDR_DONE_POL = 0x08,
++ HDMI_PHY_I2CM_INT_ADDR_DONE_MASK = 0x04,
++
++/* HDMI_PHY_I2CM_CTLINT_ADDR */
++ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL = 0x80,
++ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_MASK = 0x40,
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
++
++/* AUD_CTS3 field values */
++ HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
++ HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
++ HDMI_AUD_CTS3_N_SHIFT_1 = 0,
++ HDMI_AUD_CTS3_N_SHIFT_16 = 0x20,
++ HDMI_AUD_CTS3_N_SHIFT_32 = 0x40,
++ HDMI_AUD_CTS3_N_SHIFT_64 = 0x60,
++ HDMI_AUD_CTS3_N_SHIFT_128 = 0x80,
++ HDMI_AUD_CTS3_N_SHIFT_256 = 0xa0,
++ /* note that the CTS3 MANUAL bit has been removed
++ from our part. Can't set it, will read as 0. */
++ HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
++ HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
++
++/* AHB_DMA_CONF0 field values */
++ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
++ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
++ HDMI_AHB_DMA_CONF0_HBR_OFFSET = 4,
++ HDMI_AHB_DMA_CONF0_HBR_MASK = 0x10,
++ HDMI_AHB_DMA_CONF0_EN_HLOCK_OFFSET = 3,
++ HDMI_AHB_DMA_CONF0_EN_HLOCK_MASK = 0x08,
++ HDMI_AHB_DMA_CONF0_INCR_TYPE_OFFSET = 1,
++ HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK = 0x06,
++ HDMI_AHB_DMA_CONF0_INCR4 = 0x0,
++ HDMI_AHB_DMA_CONF0_INCR8 = 0x2,
++ HDMI_AHB_DMA_CONF0_INCR16 = 0x4,
++ HDMI_AHB_DMA_CONF0_BURST_MODE = 0x1,
++
++/* HDMI_AHB_DMA_START field values */
++ HDMI_AHB_DMA_START_START_OFFSET = 0,
++ HDMI_AHB_DMA_START_START_MASK = 0x01,
++
++/* HDMI_AHB_DMA_STOP field values */
++ HDMI_AHB_DMA_STOP_STOP_OFFSET = 0,
++ HDMI_AHB_DMA_STOP_STOP_MASK = 0x01,
++
++/* AHB_DMA_STAT, AHB_DMA_INT, AHB_DMA_MASK, AHB_DMA_POL field values */
++ HDMI_AHB_DMA_DONE = 0x80,
++ HDMI_AHB_DMA_RETRY_SPLIT = 0x40,
++ HDMI_AHB_DMA_LOSTOWNERSHIP = 0x20,
++ HDMI_AHB_DMA_ERROR = 0x10,
++ HDMI_AHB_DMA_FIFO_THREMPTY = 0x04,
++ HDMI_AHB_DMA_FIFO_FULL = 0x02,
++ HDMI_AHB_DMA_FIFO_EMPTY = 0x01,
++
++/* AHB_DMA_BUFFSTAT, AHB_DMA_BUFFINT, AHB_DMA_BUFFMASK, AHB_DMA_BUFFPOL field values */
++ HDMI_AHB_DMA_BUFFSTAT_FULL = 0x02,
++ HDMI_AHB_DMA_BUFFSTAT_EMPTY = 0x01,
++
++/* MC_CLKDIS field values */
++ HDMI_MC_CLKDIS_HDCPCLK_DISABLE = 0x40,
++ HDMI_MC_CLKDIS_CECCLK_DISABLE = 0x20,
++ HDMI_MC_CLKDIS_CSCCLK_DISABLE = 0x10,
++ HDMI_MC_CLKDIS_AUDCLK_DISABLE = 0x8,
++ HDMI_MC_CLKDIS_PREPCLK_DISABLE = 0x4,
++ HDMI_MC_CLKDIS_TMDSCLK_DISABLE = 0x2,
++ HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
++
++/* MC_SWRSTZ field values */
++ HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
++
++/* MC_FLOWCTRL field values */
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_MASK = 0x1,
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH = 0x1,
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0,
++
++/* MC_PHYRSTZ field values */
++ HDMI_MC_PHYRSTZ_ASSERT = 0x0,
++ HDMI_MC_PHYRSTZ_DEASSERT = 0x1,
++
++/* MC_HEACPHY_RST field values */
++ HDMI_MC_HEACPHY_RST_ASSERT = 0x1,
++ HDMI_MC_HEACPHY_RST_DEASSERT = 0x0,
++
++/* CSC_CFG field values */
++ HDMI_CSC_CFG_INTMODE_MASK = 0x30,
++ HDMI_CSC_CFG_INTMODE_OFFSET = 4,
++ HDMI_CSC_CFG_INTMODE_DISABLE = 0x00,
++ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1 = 0x10,
++ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA2 = 0x20,
++ HDMI_CSC_CFG_DECMODE_MASK = 0x3,
++ HDMI_CSC_CFG_DECMODE_OFFSET = 0,
++ HDMI_CSC_CFG_DECMODE_DISABLE = 0x0,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1 = 0x1,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA2 = 0x2,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3 = 0x3,
++
++/* CSC_SCALE field values */
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK = 0xF0,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP = 0x00,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP = 0x50,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP = 0x60,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP = 0x70,
++ HDMI_CSC_SCALE_CSCSCALE_MASK = 0x03,
++
++/* I2CM_OPERATION field values */
++ HDMI_I2CM_OPERATION_WRITE = 0x10,
++ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
++ HDMI_I2CM_OPERATION_READ = 0x1,
++
++/* HDMI_I2CM_INT */
++ HDMI_I2CM_INT_DONE_POL = 0x08,
++ HDMI_I2CM_INT_DONE_MASK = 0x04,
++
++/* HDMI_I2CM_CTLINT */
++ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
++ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
++ HDMI_I2CM_CTLINT_ARBITRATION_POL = 0x08,
++ HDMI_I2CM_CTLINT_ARBITRATION_MASK = 0x04,
++
++};
++
++enum imx_hdmi_type {
++ IMX6DL_HDMI,
++ IMX6Q_HDMI,
++};
++
++/* IOCTL commands */
++#define HDMI_IOC_MAGIC 'H'
++
++#define HDMI_IOC_GET_RESOURCE _IO(HDMI_IOC_MAGIC, 0)
++#define HDMI_IOC_GET_CPU_TYPE _IO(HDMI_IOC_MAGIC, 1)
++
++
++#endif /* __MXC_HDMI_H__ */
+diff -Nur linux-3.14.40.orig/kernel/cpu.c linux-3.14.40/kernel/cpu.c
+--- linux-3.14.40.orig/kernel/cpu.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/kernel/cpu.c 2015-05-01 14:58:06.303427001 -0500
+@@ -722,3 +722,22 @@
+ {
+ cpumask_copy(to_cpumask(cpu_online_bits), src);
+ }
++
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++void idle_notifier_register(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
++
++void idle_notifier_unregister(struct notifier_block *n)
++{
++ atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_unregister);
++
++void idle_notifier_call_chain(unsigned long val)
++{
++ atomic_notifier_call_chain(&idle_notifier, val, NULL);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
+diff -Nur linux-3.14.40.orig/kernel/irq/manage.c linux-3.14.40/kernel/irq/manage.c
+--- linux-3.14.40.orig/kernel/irq/manage.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/kernel/irq/manage.c 2015-05-01 14:58:06.339427001 -0500
+@@ -32,24 +32,10 @@
+ early_param("threadirqs", setup_forced_irqthreads);
+ #endif
+
+-/**
+- * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+- * @irq: interrupt number to wait for
+- *
+- * This function waits for any pending IRQ handlers for this interrupt
+- * to complete before returning. If you use this function while
+- * holding a resource the IRQ handler may need you will deadlock.
+- *
+- * This function may be called - with care - from IRQ context.
+- */
+-void synchronize_irq(unsigned int irq)
++static void __synchronize_hardirq(struct irq_desc *desc)
+ {
+- struct irq_desc *desc = irq_to_desc(irq);
+ bool inprogress;
+
+- if (!desc)
+- return;
+-
+ do {
+ unsigned long flags;
+
+@@ -67,12 +53,56 @@
+
+ /* Oops, that failed? */
+ } while (inprogress);
++}
++
++/**
++ * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
++ * @irq: interrupt number to wait for
++ *
++ * This function waits for any pending hard IRQ handlers for this
++ * interrupt to complete before returning. If you use this
++ * function while holding a resource the IRQ handler may need you
++ * will deadlock. It does not take associated threaded handlers
++ * into account.
++ *
++ * Do not use this for shutdown scenarios where you must be sure
++ * that all parts (hardirq and threaded handler) have completed.
++ *
++ * This function may be called - with care - from IRQ context.
++ */
++void synchronize_hardirq(unsigned int irq)
++{
++ struct irq_desc *desc = irq_to_desc(irq);
+
+- /*
+- * We made sure that no hardirq handler is running. Now verify
+- * that no threaded handlers are active.
+- */
+- wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
++ if (desc)
++ __synchronize_hardirq(desc);
++}
++EXPORT_SYMBOL(synchronize_hardirq);
++
++/**
++ * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
++ * @irq: interrupt number to wait for
++ *
++ * This function waits for any pending IRQ handlers for this interrupt
++ * to complete before returning. If you use this function while
++ * holding a resource the IRQ handler may need you will deadlock.
++ *
++ * This function may be called - with care - from IRQ context.
++ */
++void synchronize_irq(unsigned int irq)
++{
++ struct irq_desc *desc = irq_to_desc(irq);
++
++ if (desc) {
++ __synchronize_hardirq(desc);
++ /*
++ * We made sure that no hardirq handler is
++ * running. Now verify that no threaded handlers are
++ * active.
++ */
++ wait_event(desc->wait_for_threads,
++ !atomic_read(&desc->threads_active));
++ }
+ }
+ EXPORT_SYMBOL(synchronize_irq);
+
+diff -Nur linux-3.14.40.orig/kernel/relay.c linux-3.14.40/kernel/relay.c
+--- linux-3.14.40.orig/kernel/relay.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/kernel/relay.c 2015-05-01 14:58:06.351427001 -0500
+@@ -227,7 +227,7 @@
+ * relay_remove_buf - remove a channel buffer
+ * @kref: target kernel reference that contains the relay buffer
+ *
+- * Removes the file from the fileystem, which also frees the
++ * Removes the file from the filesystem, which also frees the
+ * rchan_buf_struct and the channel buffer. Should only be called from
+ * kref_put().
+ */
+diff -Nur linux-3.14.40.orig/kernel/signal.c linux-3.14.40/kernel/signal.c
+--- linux-3.14.40.orig/kernel/signal.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/kernel/signal.c 2015-05-01 14:58:06.367427001 -0500
+@@ -2382,7 +2382,7 @@
+ * @regs: user register state
+ * @stepping: nonzero if debugger single-step or block-step in use
+ *
+- * This function should be called when a signal has succesfully been
++ * This function should be called when a signal has successfully been
+ * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
+ * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is set in @ka->sa.sa_flags. Tracing is notified.
+diff -Nur linux-3.14.40.orig/linaro/configs/android.conf linux-3.14.40/linaro/configs/android.conf
+--- linux-3.14.40.orig/linaro/configs/android.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/android.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,42 @@
++CONFIG_IPV6=y
++# CONFIG_IPV6_SIT is not set
++CONFIG_PANIC_TIMEOUT=0
++CONFIG_HAS_WAKELOCK=y
++CONFIG_WAKELOCK=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_DM_CRYPT=y
++CONFIG_POWER_SUPPLY=y
++CONFIG_ANDROID_PARANOID_NETWORK=y
++CONFIG_NET_ACTIVITY_STATS=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_UINPUT=y
++CONFIG_INPUT_GPIO=y
++CONFIG_USB_G_ANDROID=y
++CONFIG_SWITCH=y
++CONFIG_STAGING=y
++CONFIG_ANDROID=y
++CONFIG_ANDROID_BINDER_IPC=y
++CONFIG_ASHMEM=y
++CONFIG_ANDROID_LOGGER=y
++CONFIG_ANDROID_TIMED_OUTPUT=y
++CONFIG_ANDROID_TIMED_GPIO=y
++CONFIG_ANDROID_LOW_MEMORY_KILLER=y
++CONFIG_ANDROID_INTF_ALARM_DEV=y
++CONFIG_CRYPTO_TWOFISH=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_FUSE_FS=y
++CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
++CONFIG_ION=y
++CONFIG_SYNC=y
++CONFIG_SW_SYNC=y
++CONFIG_SW_SYNC_USER=y
++CONFIG_ION_TEST=y
++CONFIG_ION_DUMMY=y
++CONFIG_ADF=y
++CONFIG_ADF_FBDEV=y
++CONFIG_ADF_MEMBLOCK=y
++CONFIG_DMA_SHARED_BUFFER=y
++CONFIG_TUN=y
+diff -Nur linux-3.14.40.orig/linaro/configs/arndale.conf linux-3.14.40/linaro/configs/arndale.conf
+--- linux-3.14.40.orig/linaro/configs/arndale.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/arndale.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,66 @@
++CONFIG_KALLSYMS_ALL=y
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_ARCH_EXYNOS=y
++CONFIG_S3C_LOWLEVEL_UART_PORT=2
++CONFIG_ARCH_EXYNOS5=y
++# CONFIG_EXYNOS_ATAGS is not set
++CONFIG_MACH_EXYNOS4_DT=y
++CONFIG_VMSPLIT_2G=y
++CONFIG_NR_CPUS=2
++CONFIG_HIGHMEM=y
++# CONFIG_COMPACTION is not set
++CONFIG_ARM_APPENDED_DTB=y
++CONFIG_ARM_ATAG_DTB_COMPAT=y
++CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init= mem=256M"
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_PM_RUNTIME=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_SG=y
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_SATA_EXYNOS=y
++CONFIG_AX88796=y
++CONFIG_AX88796_93CX6=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_SAMSUNG=y
++CONFIG_SERIAL_SAMSUNG_CONSOLE=y
++CONFIG_HW_RANDOM=y
++CONFIG_I2C=y
++CONFIG_I2C_S3C2410=y
++CONFIG_THERMAL=y
++CONFIG_CPU_THERMAL=y
++CONFIG_EXYNOS_THERMAL=y
++CONFIG_MFD_SEC_CORE=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_S5M8767=y
++CONFIG_DRM=y
++CONFIG_DRM_LOAD_EDID_FIRMWARE=y
++CONFIG_DRM_EXYNOS=y
++CONFIG_DRM_EXYNOS_DMABUF=y
++CONFIG_DRM_EXYNOS_HDMI=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_LOGO=y
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_MMC_DW=y
++CONFIG_MMC_DW_IDMAC=y
++CONFIG_MMC_DW_EXYNOS=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_S3C=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEBUG_RT_MUTEXES=y
++CONFIG_DEBUG_SPINLOCK=y
++CONFIG_DEBUG_INFO=y
++CONFIG_RCU_CPU_STALL_TIMEOUT=60
++CONFIG_DEBUG_USER=y
++CONFIG_TUN=y
+diff -Nur linux-3.14.40.orig/linaro/configs/bigendian.conf linux-3.14.40/linaro/configs/bigendian.conf
+--- linux-3.14.40.orig/linaro/configs/bigendian.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/bigendian.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,4 @@
++CONFIG_CPU_BIG_ENDIAN=y
++CONFIG_CPU_ENDIAN_BE8=y
++# CONFIG_VIRTUALIZATION is not set
++# CONFIG_MMC_DW_IDMAC is not set
+diff -Nur linux-3.14.40.orig/linaro/configs/big-LITTLE-IKS.conf linux-3.14.40/linaro/configs/big-LITTLE-IKS.conf
+--- linux-3.14.40.orig/linaro/configs/big-LITTLE-IKS.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/big-LITTLE-IKS.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,5 @@
++CONFIG_BIG_LITTLE=y
++CONFIG_BL_SWITCHER=y
++CONFIG_ARM_DT_BL_CPUFREQ=y
++CONFIG_ARM_VEXPRESS_BL_CPUFREQ=y
++CONFIG_CPU_FREQ_GOV_USERSPACE=y
+diff -Nur linux-3.14.40.orig/linaro/configs/debug.conf linux-3.14.40/linaro/configs/debug.conf
+--- linux-3.14.40.orig/linaro/configs/debug.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/debug.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1 @@
++CONFIG_PROVE_LOCKING=y
+diff -Nur linux-3.14.40.orig/linaro/configs/distribution.conf linux-3.14.40/linaro/configs/distribution.conf
+--- linux-3.14.40.orig/linaro/configs/distribution.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/distribution.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,49 @@
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_CGROUPS=y
++# CONFIG_COMPAT_BRK is not set
++CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
++CONFIG_SECCOMP=y
++CONFIG_CC_STACKPROTECTOR=y
++CONFIG_SYN_COOKIES=y
++CONFIG_IPV6=y
++CONFIG_NETLABEL=y
++CONFIG_BRIDGE_NETFILTER=y
++CONFIG_NF_CONNTRACK=m
++CONFIG_NETFILTER_XT_CONNMARK=m
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
++CONFIG_NF_CONNTRACK_IPV4=m
++CONFIG_NF_NAT_IPV4=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_NF_CONNTRACK_IPV6=m
++CONFIG_NF_NAT_IPV6=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE=m
++CONFIG_TUN=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=65536
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_UINPUT=y
++# CONFIG_DEVKMEM is not set
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_AUTOFS4_FS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_STRICT_DEVMEM=y
++CONFIG_SECURITY=y
++CONFIG_LSM_MMAP_MIN_ADDR=0
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SMACK=y
++CONFIG_SECURITY_APPARMOR=y
++CONFIG_DEFAULT_SECURITY_APPARMOR=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE=y
++CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+diff -Nur linux-3.14.40.orig/linaro/configs/highbank.conf linux-3.14.40/linaro/configs/highbank.conf
+--- linux-3.14.40.orig/linaro/configs/highbank.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/highbank.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,40 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_ARCH_HIGHBANK=y
++CONFIG_ARM_ERRATA_754322=y
++CONFIG_SMP=y
++CONFIG_SCHED_MC=y
++CONFIG_AEABI=y
++CONFIG_CMDLINE="console=ttyAMA0"
++CONFIG_CPU_IDLE=y
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_NET=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_ATA=y
++CONFIG_SATA_AHCI_PLATFORM=y
++CONFIG_SATA_HIGHBANK=y
++CONFIG_NETDEVICES=y
++CONFIG_NET_CALXEDA_XGMAC=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_IPMI_HANDLER=y
++CONFIG_IPMI_SI=y
++CONFIG_I2C=y
++CONFIG_I2C_DESIGNWARE_PLATFORM=y
++CONFIG_SPI=y
++CONFIG_SPI_PL022=y
++CONFIG_GPIO_PL061=y
++CONFIG_MMC=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_EDAC=y
++CONFIG_EDAC_MM_EDAC=y
++CONFIG_EDAC_HIGHBANK_MC=y
++CONFIG_EDAC_HIGHBANK_L2=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_PL031=y
++CONFIG_DMADEVICES=y
++CONFIG_PL330_DMA=y
+diff -Nur linux-3.14.40.orig/linaro/configs/kvm-guest.conf linux-3.14.40/linaro/configs/kvm-guest.conf
+--- linux-3.14.40.orig/linaro/configs/kvm-guest.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/kvm-guest.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,11 @@
++CONFIG_BALLOON_COMPACTION=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_VIRTIO_NET=y
++CONFIG_HVC_DRIVER=y
++CONFIG_VIRTIO_CONSOLE=y
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_BALLOON=y
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
++CONFIG_VIRTUALIZATION=y
++# CONFIG_THUMB2_KERNEL is not set
+diff -Nur linux-3.14.40.orig/linaro/configs/kvm-host.conf linux-3.14.40/linaro/configs/kvm-host.conf
+--- linux-3.14.40.orig/linaro/configs/kvm-host.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/kvm-host.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,11 @@
++CONFIG_VIRTUALIZATION=y
++CONFIG_ARM_LPAE=y
++CONFIG_ARM_VIRT_EXT=y
++CONFIG_HAVE_KVM_IRQCHIP=y
++CONFIG_KVM_ARM_HOST=y
++CONFIG_KVM_ARM_MAX_VCPUS=4
++CONFIG_KVM_ARM_TIMER=y
++CONFIG_KVM_ARM_VGIC=y
++CONFIG_KVM_MMIO=y
++CONFIG_KVM=y
++CONFIG_BLK_DEV_NBD=m
+diff -Nur linux-3.14.40.orig/linaro/configs/linaro-base.conf linux-3.14.40/linaro/configs/linaro-base.conf
+--- linux-3.14.40.orig/linaro/configs/linaro-base.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/linaro-base.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,115 @@
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=16
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EMBEDDED=y
++CONFIG_HOTPLUG=y
++CONFIG_PERF_EVENTS=y
++CONFIG_SLAB=y
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_NO_HZ=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_SMP=y
++CONFIG_SCHED_MC=y
++CONFIG_SCHED_SMT=y
++CONFIG_THUMB2_KERNEL=y
++CONFIG_AEABI=y
++# CONFIG_OABI_COMPAT is not set
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
++CONFIG_CPU_IDLE=y
++CONFIG_BINFMT_MISC=y
++CONFIG_MD=y
++CONFIG_BLK_DEV_DM=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_XFRM_USER=y
++CONFIG_NET_KEY=y
++CONFIG_NET_KEY_MIGRATE=y
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++# CONFIG_INET_LRO is not set
++CONFIG_NETFILTER=y
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++CONFIG_CONNECTOR=y
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_OOPS=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_NAND=y
++CONFIG_NETDEVICES=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT4_FS=y
++CONFIG_BTRFS_FS=y
++CONFIG_QUOTA=y
++CONFIG_QFMT_V2=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_ECRYPT_FS=y
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_SUMMARY=y
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_LZO=y
++CONFIG_JFFS2_RUBIN=y
++CONFIG_CRAMFS=y
++CONFIG_NETWORK_FILESYSTEMS=y
++CONFIG_NFS_FS=y
++# CONFIG_NFS_V2 is not set
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_PRINTK_TIME=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_DEBUG_FS=y
++CONFIG_SCHEDSTATS=y
++CONFIG_TIMER_STATS=y
++CONFIG_KEYS=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++CONFIG_CRC_CCITT=y
++CONFIG_CRC_T10DIF=y
++CONFIG_CRC_ITU_T=y
++CONFIG_CRC7=y
++CONFIG_HW_PERF_EVENTS=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_ENABLE_DEFAULT_TRACERS=y
++CONFIG_PROC_DEVICETREE=y
++CONFIG_JUMP_LABEL=y
++CONFIG_STRICT_DEVMEM=y
++CONFIG_KGDB=y
++CONFIG_KGDB_TESTS=y
++CONFIG_OF_IDLE_STATES=y
++CONFIG_FTRACE=y
++CONFIG_FUNCTION_TRACER=y
++CONFIG_FTRACE_SYSCALLS=y
++CONFIG_STACK_TRACER=y
++CONFIG_FUNCTION_PROFILER=y
++CONFIG_MAILBOX=y
++CONFIG_AUDIT=y
++CONFIG_NF_CONNTRACK_SECMARK=y
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
++CONFIG_NETFILTER_XT_TARGET_SECMARK=y
++CONFIG_IP_NF_SECURITY=y
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_LSM_MMAP_MIN_ADDR=4096
++CONFIG_SECURITY_SELINUX=y
++CONFIG_EXT4_FS_SECURITY=y
+diff -Nur linux-3.14.40.orig/linaro/configs/omap4.conf linux-3.14.40/linaro/configs/omap4.conf
+--- linux-3.14.40.orig/linaro/configs/omap4.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/omap4.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,196 @@
++CONFIG_EXPERT=y
++CONFIG_KPROBES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_GPIO_PCA953X=y
++CONFIG_OMAP_RESET_CLOCKS=y
++CONFIG_OMAP_MUX_DEBUG=y
++CONFIG_ARCH_OMAP3=y
++CONFIG_ARCH_OMAP4=y
++CONFIG_ARCH_OMAP2PLUS=y
++CONFIG_SOC_OMAP5=y
++# CONFIG_ARCH_OMAP2 is not set
++CONFIG_ARCH_VEXPRESS_CA9X4=y
++CONFIG_ARM_THUMBEE=y
++CONFIG_ARM_ERRATA_411920=y
++CONFIG_NR_CPUS=2
++CONFIG_ZBOOT_ROM_TEXT=0x0
++CONFIG_ZBOOT_ROM_BSS=0x0
++CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
++CONFIG_KEXEC=y
++CONFIG_PM_DEBUG=y
++CONFIG_CAN=m
++CONFIG_CAN_C_CAN=m
++CONFIG_CAN_C_CAN_PLATFORM=m
++CONFIG_BT=m
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_LL=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_CFG80211=m
++CONFIG_MAC80211=m
++CONFIG_MAC80211_RC_PID=y
++CONFIG_MAC80211_RC_DEFAULT_PID=y
++CONFIG_CMA=y
++CONFIG_MTD_NAND_OMAP2=y
++CONFIG_MTD_ONENAND=y
++CONFIG_MTD_ONENAND_VERIFY_WRITE=y
++CONFIG_MTD_ONENAND_OMAP2=y
++CONFIG_MTD_UBI=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_SENSORS_TSL2550=m
++CONFIG_SENSORS_LIS3_I2C=m
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_SCAN_ASYNC=y
++CONFIG_KS8851=y
++CONFIG_KS8851_MLL=y
++CONFIG_SMC91X=y
++CONFIG_SMSC911X=y
++CONFIG_TI_CPSW=y
++CONFIG_SMSC_PHY=y
++CONFIG_USB_USBNET=y
++CONFIG_USB_NET_SMSC95XX=y
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_KC2190=y
++CONFIG_LIBERTAS=m
++CONFIG_LIBERTAS_USB=m
++CONFIG_LIBERTAS_SDIO=m
++CONFIG_LIBERTAS_DEBUG=y
++CONFIG_INPUT_JOYDEV=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_MATRIX=m
++CONFIG_KEYBOARD_TWL4030=y
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_ADS7846=y
++CONFIG_INPUT_TWL4030_PWRBUTTON=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=32
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_MANY_PORTS=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++CONFIG_SERIAL_8250_DETECT_IRQ=y
++CONFIG_SERIAL_8250_RSA=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_SERIAL_OMAP=y
++CONFIG_SERIAL_OMAP_CONSOLE=y
++CONFIG_HW_RANDOM=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_SPI=y
++CONFIG_SPI_OMAP24XX=y
++CONFIG_PINCTRL_SINGLE=y
++CONFIG_DEBUG_GPIO=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_TWL4030=y
++CONFIG_W1=y
++CONFIG_SENSORS_LM75=m
++CONFIG_WATCHDOG=y
++CONFIG_OMAP_WATCHDOG=y
++CONFIG_TWL4030_WATCHDOG=y
++CONFIG_MFD_TPS65217=y
++CONFIG_MFD_TPS65910=y
++CONFIG_TWL6040_CORE=y
++CONFIG_REGULATOR_TPS65023=y
++CONFIG_REGULATOR_TPS6507X=y
++CONFIG_REGULATOR_TPS65217=y
++CONFIG_REGULATOR_TPS65910=y
++CONFIG_REGULATOR_TWL4030=y
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_OMAP2_DSS=m
++CONFIG_OMAP2_DSS_RFBI=y
++CONFIG_OMAP2_DSS_SDI=y
++CONFIG_OMAP2_DSS_DSI=y
++CONFIG_FB_OMAP2=m
++CONFIG_PANEL_GENERIC_DPI=m
++CONFIG_PANEL_TFP410=m
++CONFIG_PANEL_SHARP_LS037V7DW01=m
++CONFIG_PANEL_NEC_NL8048HL11_01B=m
++CONFIG_PANEL_TAAL=m
++CONFIG_PANEL_TPO_TD043MTEA1=m
++CONFIG_PANEL_ACX565AKM=m
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_LCD_PLATFORM=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++CONFIG_FONTS=y
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++CONFIG_LOGO=y
++CONFIG_SOUND=m
++CONFIG_SND=m
++CONFIG_SND_VERBOSE_PRINTK=y
++CONFIG_SND_DEBUG=y
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_SOC=m
++CONFIG_SND_OMAP_SOC=m
++CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
++CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
++CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
++CONFIG_USB=y
++CONFIG_USB_DEBUG=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++CONFIG_USB_MON=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_WDM=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_TEST=y
++CONFIG_USB_PHY=y
++CONFIG_NOP_USB_XCEIV=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_GADGET_DEBUG=y
++CONFIG_USB_GADGET_DEBUG_FILES=y
++CONFIG_USB_GADGET_DEBUG_FS=y
++CONFIG_USB_ZERO=m
++CONFIG_MMC=y
++CONFIG_MMC_UNSAFE_RESUME=y
++CONFIG_SDIO_UART=y
++CONFIG_MMC_ARMMMCI=y
++CONFIG_MMC_OMAP=y
++CONFIG_MMC_OMAP_HS=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=y
++CONFIG_LEDS_TRIGGER_ONESHOT=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=y
++CONFIG_LEDS_TRIGGER_BACKLIGHT=y
++CONFIG_LEDS_TRIGGER_CPU=y
++CONFIG_LEDS_TRIGGER_GPIO=y
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_TWL92330=y
++CONFIG_RTC_DRV_TWL4030=y
++CONFIG_RTC_DRV_OMAP=y
++CONFIG_DMADEVICES=y
++CONFIG_DMA_OMAP=y
++# CONFIG_EXT3_FS_XATTR is not set
++CONFIG_UBIFS_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_LIBCRC32C=y
++# CONFIG_CPU_FREQ is not set
+diff -Nur linux-3.14.40.orig/linaro/configs/preempt-rt.conf linux-3.14.40/linaro/configs/preempt-rt.conf
+--- linux-3.14.40.orig/linaro/configs/preempt-rt.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/preempt-rt.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,4 @@
++CONFIG_PREEMPT=y
++CONFIG_PREEMPT_RT_FULL=y
++CONFIG_SLUB=y
++# CONFIG_CPU_FREQ is not set
+diff -Nur linux-3.14.40.orig/linaro/configs/vexpress64.conf linux-3.14.40/linaro/configs/vexpress64.conf
+--- linux-3.14.40.orig/linaro/configs/vexpress64.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/vexpress64.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,56 @@
++CONFIG_ARCH_VEXPRESS=y
++CONFIG_SMP=y
++CONFIG_NR_CPUS=8
++CONFIG_CMDLINE="console=ttyAMA0"
++CONFIG_COMPAT=y
++CONFIG_SMC91X=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_SERIO_AMBAKMI=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++# CONFIG_SERIO_I8042 is not set
++CONFIG_FB=y
++CONFIG_FB_ARMCLCD=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_VGA_CONSOLE is not set
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_MMC=y
++CONFIG_MMC_ARMMMCI=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_PL031=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_VIRTIO_MMIO=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_CMA=y
++CONFIG_DMA_CMA=y
++CONFIG_COMMON_CLK_SCPI=y
++CONFIG_SMSC911X=y
++CONFIG_I2C=y
++CONFIG_ARM_MHU_MBOX=y
++CONFIG_ARM_SCPI_PROTOCOL=y
++CONFIG_USB_HIDDEV=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB=y
++CONFIG_USB_ULPI=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_HCD_SYNOPSYS=y
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_PHY=y
++CONFIG_USB_ISP1301=y
++CONFIG_PM_OPP=y
++CONFIG_GENERIC_CPUFREQ_CPU0=y
++CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
++CONFIG_ARM_DT_BL_CPUFREQ=y
++CONFIG_ARM64_CPUIDLE=y
++CONFIG_ARM64_CRYPTO=y
+diff -Nur linux-3.14.40.orig/linaro/configs/vexpress.conf linux-3.14.40/linaro/configs/vexpress.conf
+--- linux-3.14.40.orig/linaro/configs/vexpress.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/vexpress.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,64 @@
++CONFIG_ARCH_VEXPRESS=y
++CONFIG_ARCH_VEXPRESS_CA9X4=y
++CONFIG_HAVE_ARM_ARCH_TIMER=y
++CONFIG_NR_CPUS=8
++CONFIG_HIGHMEM=y
++CONFIG_HIGHPTE=y
++CONFIG_ARM_PSCI=y
++CONFIG_MCPM=y
++CONFIG_ARCH_VEXPRESS_DCSCB=y
++CONFIG_ARCH_VEXPRESS_TC2_PM=y
++CONFIG_ARM_BIG_LITTLE_CPUIDLE=y
++CONFIG_BIG_LITTLE=y
++CONFIG_ARM_VEXPRESS_SPC_CPUFREQ=y
++CONFIG_PM_OPP=y
++CONFIG_CPU_FREQ=y
++CONFIG_CPU_FREQ_GOV_ONDEMAND=y
++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
++CONFIG_CMDLINE="console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait mmci.fmax=4000000"
++CONFIG_VFP=y
++CONFIG_NEON=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_SMSC911X=y
++CONFIG_SMC91X=y
++CONFIG_INPUT_EVDEV=y
++CONFIG_SERIO_AMBAKMI=y
++CONFIG_SERIAL_AMBA_PL011=y
++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
++CONFIG_FB=y
++CONFIG_FB_ARMCLCD=y
++CONFIG_FB_ARMHDLCD=y
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_ARMAACI=y
++CONFIG_USB=y
++CONFIG_USB_ISP1760_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_MMC=y
++CONFIG_MMC_ARMMMCI=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_PL031=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_ROOT_NFS=y
++CONFIG_VEXPRESS_CONFIG=y
++CONFIG_SENSORS_VEXPRESS=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_VEXPRESS=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=y
++CONFIG_LEDS_TRIGGER_CPU=y
++CONFIG_VIRTIO=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+diff -Nur linux-3.14.40.orig/linaro/configs/vexpress-tuning.conf linux-3.14.40/linaro/configs/vexpress-tuning.conf
+--- linux-3.14.40.orig/linaro/configs/vexpress-tuning.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/vexpress-tuning.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1 @@
++# CONFIG_PROVE_LOCKING is not set
+diff -Nur linux-3.14.40.orig/linaro/configs/xen.conf linux-3.14.40/linaro/configs/xen.conf
+--- linux-3.14.40.orig/linaro/configs/xen.conf 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/linaro/configs/xen.conf 2015-05-01 14:58:06.367427001 -0500
+@@ -0,0 +1,7 @@
++CONFIG_XEN=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XENFS=y
++CONFIG_XEN_COMPAT_XENFS=y
+diff -Nur linux-3.14.40.orig/MAINTAINERS linux-3.14.40/MAINTAINERS
+--- linux-3.14.40.orig/MAINTAINERS 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/MAINTAINERS 2015-05-01 14:58:06.391427001 -0500
+@@ -5511,6 +5511,14 @@
+ F: drivers/net/macvlan.c
+ F: include/linux/if_macvlan.h
+
++MAILBOX API
++M: Jassi Brar <jassisinghbrar@gmail.com>
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/mailbox/
++F: include/linux/mailbox_client.h
++F: include/linux/mailbox_controller.h
++
+ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
+ M: Michael Kerrisk <mtk.manpages@gmail.com>
+ W: http://www.kernel.org/doc/man-pages
+diff -Nur linux-3.14.40.orig/mm/cma.c linux-3.14.40/mm/cma.c
+--- linux-3.14.40.orig/mm/cma.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/mm/cma.c 2015-05-01 14:58:06.395427001 -0500
+@@ -0,0 +1,356 @@
++/*
++ * Contiguous Memory Allocator
++ *
++ * Copyright (c) 2010-2011 by Samsung Electronics.
++ * Copyright IBM Corporation, 2013
++ * Copyright LG Electronics Inc., 2014
++ * Written by:
++ * Marek Szyprowski <m.szyprowski@samsung.com>
++ * Michal Nazarewicz <mina86@mina86.com>
++ * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
++ * Joonsoo Kim <iamjoonsoo.kim@lge.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License or (at your option) any later version of the license.
++ */
++
++#define pr_fmt(fmt) "cma: " fmt
++
++#ifdef CONFIG_CMA_DEBUG
++#ifndef DEBUG
++# define DEBUG
++#endif
++#endif
++
++#include <linux/memblock.h>
++#include <linux/err.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/log2.h>
++#include <linux/cma.h>
++#include <linux/highmem.h>
++
++struct cma {
++ unsigned long base_pfn;
++ unsigned long count;
++ unsigned long *bitmap;
++ unsigned int order_per_bit; /* Order of pages represented by one bit */
++ struct mutex lock;
++};
++
++static struct cma cma_areas[MAX_CMA_AREAS];
++static unsigned cma_area_count;
++static DEFINE_MUTEX(cma_mutex);
++
++phys_addr_t cma_get_base(struct cma *cma)
++{
++ return PFN_PHYS(cma->base_pfn);
++}
++
++unsigned long cma_get_size(struct cma *cma)
++{
++ return cma->count << PAGE_SHIFT;
++}
++
++static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
++{
++ return (1UL << (align_order >> cma->order_per_bit)) - 1;
++}
++
++static unsigned long cma_bitmap_maxno(struct cma *cma)
++{
++ return cma->count >> cma->order_per_bit;
++}
++
++static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
++ unsigned long pages)
++{
++ return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
++}
++
++static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
++{
++ unsigned long bitmap_no, bitmap_count;
++
++ bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
++ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
++
++ mutex_lock(&cma->lock);
++ bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
++ mutex_unlock(&cma->lock);
++}
++
++static int __init cma_activate_area(struct cma *cma)
++{
++ int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
++ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
++ unsigned i = cma->count >> pageblock_order;
++ struct zone *zone;
++
++ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++
++ if (!cma->bitmap)
++ return -ENOMEM;
++
++ WARN_ON_ONCE(!pfn_valid(pfn));
++ zone = page_zone(pfn_to_page(pfn));
++
++ do {
++ unsigned j;
++
++ base_pfn = pfn;
++ for (j = pageblock_nr_pages; j; --j, pfn++) {
++ WARN_ON_ONCE(!pfn_valid(pfn));
++ /*
++ * alloc_contig_range requires the pfn range
++ * specified to be in the same zone. Make this
++ * simple by forcing the entire CMA resv range
++ * to be in the same zone.
++ */
++ if (page_zone(pfn_to_page(pfn)) != zone)
++ goto err;
++ }
++ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
++ } while (--i);
++
++ mutex_init(&cma->lock);
++ return 0;
++
++err:
++ kfree(cma->bitmap);
++ return -EINVAL;
++}
++
++static int __init cma_init_reserved_areas(void)
++{
++ int i;
++
++ for (i = 0; i < cma_area_count; i++) {
++ int ret = cma_activate_area(&cma_areas[i]);
++
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++core_initcall(cma_init_reserved_areas);
++
++/**
++ * cma_declare_contiguous() - reserve custom contiguous area
++ * @base: Base address of the reserved area (optional, use 0 for any)
++ * @size: Size of the reserved area (in bytes)
++ * @limit: End address of the reserved memory (optional, 0 for any).
++ * @alignment: Alignment for the CMA area, should be power of 2 or zero
++ * @order_per_bit: Order of pages represented by one bit on bitmap.
++ * @fixed: hint about where to place the reserved area
++ * @res_cma: Pointer to store the created cma region.
++ *
++ * This function reserves memory from the early allocator. It should be
++ * called by arch-specific code once the early allocator (memblock or bootmem)
++ * has been activated and all other subsystems have already allocated/reserved
++ * memory. This function allows the creation of custom reserved areas.
++ *
++ * If @fixed is true, reserve contiguous area at exactly @base. If false,
++ * reserve in range from @base to @limit.
++ */
++int __init cma_declare_contiguous(phys_addr_t base,
++ phys_addr_t size, phys_addr_t limit,
++ phys_addr_t alignment, unsigned int order_per_bit,
++ bool fixed, struct cma **res_cma)
++{
++ struct cma *cma;
++ phys_addr_t memblock_end = memblock_end_of_DRAM();
++ phys_addr_t highmem_start = __pa(high_memory);
++ int ret = 0;
++
++ pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
++ __func__, (unsigned long)size, (unsigned long)base,
++ (unsigned long)limit, (unsigned long)alignment);
++
++ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
++ pr_err("Not enough slots for CMA reserved regions!\n");
++ return -ENOSPC;
++ }
++
++ if (!size)
++ return -EINVAL;
++
++ if (alignment && !is_power_of_2(alignment))
++ return -EINVAL;
++
++ /*
++ * Sanitise input arguments.
++ * Pages at both ends of the CMA area could be merged into an adjacent
++ * unmovable migratetype page by the page allocator's buddy algorithm. In
++ * that case, you could not get a contiguous memory range, which is not
++ * what we want.
++ */
++ alignment = max(alignment,
++ (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
++ base = ALIGN(base, alignment);
++ size = ALIGN(size, alignment);
++ limit &= ~(alignment - 1);
++
++ /* size should be aligned with order_per_bit */
++ if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
++ return -EINVAL;
++
++ /*
++ * adjust limit to avoid crossing low/high memory boundary for
++ * automatically allocated regions
++ */
++ if (((limit == 0 || limit > memblock_end) &&
++ (memblock_end - size < highmem_start &&
++ memblock_end > highmem_start)) ||
++ (!fixed && limit > highmem_start && limit - size < highmem_start)) {
++ limit = highmem_start;
++ }
++
++ if (fixed && base < highmem_start && base+size > highmem_start) {
++ ret = -EINVAL;
++ pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
++ (unsigned long)base, (unsigned long)highmem_start);
++ goto err;
++ }
++
++ /* Reserve memory */
++ if (base && fixed) {
++ if (memblock_is_region_reserved(base, size) ||
++ memblock_reserve(base, size) < 0) {
++ ret = -EBUSY;
++ goto err;
++ }
++ } else {
++ phys_addr_t addr = memblock_alloc_range(size, alignment, base,
++ limit);
++ if (!addr) {
++ ret = -ENOMEM;
++ goto err;
++ } else {
++ base = addr;
++ }
++ }
++
++ /*
++ * Each reserved area must be initialised later, when more kernel
++ * subsystems (like slab allocator) are available.
++ */
++ cma = &cma_areas[cma_area_count];
++ cma->base_pfn = PFN_DOWN(base);
++ cma->count = size >> PAGE_SHIFT;
++ cma->order_per_bit = order_per_bit;
++ *res_cma = cma;
++ cma_area_count++;
++
++ pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
++ (unsigned long)base);
++ return 0;
++
++err:
++ pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
++ return ret;
++}
++
++/**
++ * cma_alloc() - allocate pages from contiguous area
++ * @cma: Contiguous memory region for which the allocation is performed.
++ * @count: Requested number of pages.
++ * @align: Requested alignment of pages (in PAGE_SIZE order).
++ *
++ * This function allocates part of the contiguous memory from the specified
++ * contiguous memory area.
++ */
++struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
++{
++ unsigned long mask, pfn, start = 0;
++ unsigned long bitmap_maxno, bitmap_no, bitmap_count;
++ struct page *page = NULL;
++ int ret;
++
++ if (!cma || !cma->count)
++ return NULL;
++
++ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
++ count, align);
++
++ if (!count)
++ return NULL;
++
++ mask = cma_bitmap_aligned_mask(cma, align);
++ bitmap_maxno = cma_bitmap_maxno(cma);
++ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
++
++ for (;;) {
++ mutex_lock(&cma->lock);
++ bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
++ bitmap_maxno, start, bitmap_count, mask);
++ if (bitmap_no >= bitmap_maxno) {
++ mutex_unlock(&cma->lock);
++ break;
++ }
++ bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
++ /*
++ * It's safe to drop the lock here. We've marked this region for
++ * our exclusive use. If the migration fails we will take the
++ * lock again and unmark it.
++ */
++ mutex_unlock(&cma->lock);
++
++ pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
++ mutex_lock(&cma_mutex);
++ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
++ mutex_unlock(&cma_mutex);
++ if (ret == 0) {
++ page = pfn_to_page(pfn);
++ break;
++ }
++
++ cma_clear_bitmap(cma, pfn, count);
++ if (ret != -EBUSY)
++ break;
++
++ pr_debug("%s(): memory range at %p is busy, retrying\n",
++ __func__, pfn_to_page(pfn));
++		/* try again with a slightly different memory target */
++ start = bitmap_no + mask + 1;
++ }
++
++ pr_debug("%s(): returned %p\n", __func__, page);
++ return page;
++}
++
++/**
++ * cma_release() - release allocated pages
++ * @cma: Contiguous memory region for which the allocation is performed.
++ * @pages: Allocated pages.
++ * @count: Number of allocated pages.
++ *
++ * This function releases memory allocated by cma_alloc().
++ * It returns false when the provided pages do not belong to the contiguous
++ * area, and true otherwise.
++ */
++bool cma_release(struct cma *cma, struct page *pages, int count)
++{
++ unsigned long pfn;
++
++ if (!cma || !pages)
++ return false;
++
++ pr_debug("%s(page %p)\n", __func__, (void *)pages);
++
++ pfn = page_to_pfn(pages);
++
++ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
++ return false;
++
++ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
++
++ free_contig_range(pfn, count);
++ cma_clear_bitmap(cma, pfn, count);
++
++ return true;
++}
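For orientation, the mm/cma.c interface added above is used in two stages: arch or platform code calls cma_declare_contiguous() while the early allocator is still active, and drivers later call cma_alloc()/cma_release() on the returned region once cma_init_reserved_areas() has run. A minimal sketch follows, assuming the matching include/linux/cma.h added elsewhere in this patch; the area name, sizes and call sites are illustrative only and not part of the patch.

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sizes.h>

static struct cma *example_cma;	/* hypothetical region, for illustration only */

/* Early boot, after memblock is up: reserve 16 MiB anywhere in RAM,
 * default alignment, one page per bitmap bit, placement not fixed. */
void __init example_reserve_cma(void)
{
	int ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &example_cma);

	if (ret)
		pr_warn("example: CMA reservation failed (%d)\n", ret);
}

/* Driver path, much later: allocate eight pages with no extra alignment
 * from the region and give them back. */
static int example_use_cma(void)
{
	struct page *pages = cma_alloc(example_cma, 8, 0);

	if (!pages)
		return -ENOMEM;

	/* ... use the physically contiguous range starting at pages ... */

	cma_release(example_cma, pages, 8);
	return 0;
}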
+diff -Nur linux-3.14.40.orig/mm/Kconfig linux-3.14.40/mm/Kconfig
+--- linux-3.14.40.orig/mm/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/mm/Kconfig 2015-05-01 14:58:06.403427001 -0500
+@@ -514,6 +514,17 @@
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
++config CMA_AREAS
++ int "Maximum count of the CMA areas"
++ depends on CMA
++ default 7
++ help
++	  CMA allows the creation of CMA areas for a particular purpose, mainly
++	  for use as device-private areas. This parameter sets the maximum
++	  number of CMA areas in the system.
++
++ If unsure, leave the default value "7".
++
+ config ZBUD
+ tristate
+ default n
+diff -Nur linux-3.14.40.orig/mm/Makefile linux-3.14.40/mm/Makefile
+--- linux-3.14.40.orig/mm/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/mm/Makefile 2015-05-01 14:58:06.411427001 -0500
+@@ -61,3 +61,4 @@
+ obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
+ obj-$(CONFIG_ZBUD) += zbud.o
+ obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
++obj-$(CONFIG_CMA) += cma.o
+diff -Nur linux-3.14.40.orig/mm/memblock.c linux-3.14.40/mm/memblock.c
+--- linux-3.14.40.orig/mm/memblock.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/mm/memblock.c 2015-05-01 14:58:06.439427001 -0500
+@@ -974,22 +974,35 @@
+ }
+ #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+-static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
+- phys_addr_t align, phys_addr_t max_addr,
+- int nid)
++static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
++ phys_addr_t align, phys_addr_t start,
++ phys_addr_t end, int nid)
+ {
+ phys_addr_t found;
+
+ if (!align)
+ align = SMP_CACHE_BYTES;
+
+- found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
++ found = memblock_find_in_range_node(size, align, start, end, nid);
+ if (found && !memblock_reserve(found, size))
+ return found;
+
+ return 0;
+ }
+
++phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
++ phys_addr_t start, phys_addr_t end)
++{
++ return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
++}
++
++static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
++ phys_addr_t align, phys_addr_t max_addr,
++ int nid)
++{
++ return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
++}
++
+ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
+ {
+ return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+diff -Nur linux-3.14.40.orig/net/atm/svc.c linux-3.14.40/net/atm/svc.c
+--- linux-3.14.40.orig/net/atm/svc.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/atm/svc.c 2015-05-01 14:58:06.451427001 -0500
+@@ -263,17 +263,11 @@
+ goto out;
+ }
+ }
+-/*
+- * Not supported yet
+- *
+- * #ifndef CONFIG_SINGLE_SIGITF
+- */
++
+ vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
+ vcc->qos.txtp.pcr = 0;
+ vcc->qos.txtp.min_pcr = 0;
+-/*
+- * #endif
+- */
++
+ error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
+ if (!error)
+ sock->state = SS_CONNECTED;
+diff -Nur linux-3.14.40.orig/net/core/dev.c linux-3.14.40/net/core/dev.c
+--- linux-3.14.40.orig/net/core/dev.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/core/dev.c 2015-05-01 14:58:06.459427001 -0500
+@@ -3459,7 +3459,7 @@
+ * @rx_handler: receive handler to register
+ * @rx_handler_data: data pointer that is used by rx handler
+ *
+- * Register a receive hander for a device. This handler will then be
++ * Register a receive handler for a device. This handler will then be
+ * called from __netif_receive_skb. A negative errno code is returned
+ * on a failure.
+ *
+diff -Nur linux-3.14.40.orig/net/core/Makefile linux-3.14.40/net/core/Makefile
+--- linux-3.14.40.orig/net/core/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/core/Makefile 2015-05-01 14:58:06.483427001 -0500
+@@ -9,7 +9,7 @@
+
+ obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
+ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+- sock_diag.o dev_ioctl.o
++ sock_diag.o dev_ioctl.o tso.o
+
+ obj-$(CONFIG_XFRM) += flow.o
+ obj-y += net-sysfs.o
+diff -Nur linux-3.14.40.orig/net/core/rtnetlink.c linux-3.14.40/net/core/rtnetlink.c
+--- linux-3.14.40.orig/net/core/rtnetlink.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/core/rtnetlink.c 2015-05-01 14:58:06.499427001 -0500
+@@ -1157,73 +1157,7 @@
+ return -EMSGSIZE;
+ }
+
+-static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+-{
+- struct net *net = sock_net(skb->sk);
+- int h, s_h;
+- int idx = 0, s_idx;
+- struct net_device *dev;
+- struct hlist_head *head;
+- struct nlattr *tb[IFLA_MAX+1];
+- u32 ext_filter_mask = 0;
+- int err;
+- int hdrlen;
+-
+- s_h = cb->args[0];
+- s_idx = cb->args[1];
+-
+- rcu_read_lock();
+- cb->seq = net->dev_base_seq;
+-
+- /* A hack to preserve kernel<->userspace interface.
+- * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
+- * However, before Linux v3.9 the code here assumed rtgenmsg and that's
+- * what iproute2 < v3.9.0 used.
+- * We can detect the old iproute2. Even including the IFLA_EXT_MASK
+- * attribute, its netlink message is shorter than struct ifinfomsg.
+- */
+- hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
+- sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+-
+- if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
+-
+- if (tb[IFLA_EXT_MASK])
+- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+- }
+-
+- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+- idx = 0;
+- head = &net->dev_index_head[h];
+- hlist_for_each_entry_rcu(dev, head, index_hlist) {
+- if (idx < s_idx)
+- goto cont;
+- err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+- NETLINK_CB(cb->skb).portid,
+- cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI,
+- ext_filter_mask);
+- /* If we ran out of room on the first message,
+- * we're in trouble
+- */
+- WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+-
+- if (err <= 0)
+- goto out;
+-
+- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+-cont:
+- idx++;
+- }
+- }
+-out:
+- rcu_read_unlock();
+- cb->args[1] = idx;
+- cb->args[0] = h;
+-
+- return skb->len;
+-}
+-
+-const struct nla_policy ifla_policy[IFLA_MAX+1] = {
++static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
+ [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+@@ -1250,7 +1184,6 @@
+ [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
+ };
+-EXPORT_SYMBOL(ifla_policy);
+
+ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ [IFLA_INFO_KIND] = { .type = NLA_STRING },
+@@ -1284,6 +1217,61 @@
+ [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
+ };
+
++static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
++{
++ struct net *net = sock_net(skb->sk);
++ int h, s_h;
++ int idx = 0, s_idx;
++ struct net_device *dev;
++ struct hlist_head *head;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
++
++ s_h = cb->args[0];
++ s_idx = cb->args[1];
++
++ rcu_read_lock();
++ cb->seq = net->dev_base_seq;
++
++ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
++ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
++ idx = 0;
++ head = &net->dev_index_head[h];
++ hlist_for_each_entry_rcu(dev, head, index_hlist) {
++ if (idx < s_idx)
++ goto cont;
++ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
++ NETLINK_CB(cb->skb).portid,
++ cb->nlh->nlmsg_seq, 0,
++ NLM_F_MULTI,
++ ext_filter_mask) <= 0)
++ goto out;
++
++ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
++cont:
++ idx++;
++ }
++ }
++out:
++ rcu_read_unlock();
++ cb->args[1] = idx;
++ cb->args[0] = h;
++
++ return skb->len;
++}
++
++int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
++{
++ return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
++}
++EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+ struct net *net;
+diff -Nur linux-3.14.40.orig/net/core/tso.c linux-3.14.40/net/core/tso.c
+--- linux-3.14.40.orig/net/core/tso.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/net/core/tso.c 2015-05-01 14:58:06.499427001 -0500
+@@ -0,0 +1,72 @@
++#include <net/ip.h>
++#include <net/tso.h>
++
++/* Calculate expected number of TX descriptors */
++int tso_count_descs(struct sk_buff *skb)
++{
++ /* The Marvell Way */
++ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
++}
++
++void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
++ int size, bool is_last)
++{
++ struct iphdr *iph;
++ struct tcphdr *tcph;
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++ int mac_hdr_len = skb_network_offset(skb);
++
++ memcpy(hdr, skb->data, hdr_len);
++ iph = (struct iphdr *)(hdr + mac_hdr_len);
++ iph->id = htons(tso->ip_id);
++ iph->tot_len = htons(size + hdr_len - mac_hdr_len);
++ tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
++ tcph->seq = htonl(tso->tcp_seq);
++ tso->ip_id++;
++
++ if (!is_last) {
++		/* Clear all special flags on all but the last packet */
++ tcph->psh = 0;
++ tcph->fin = 0;
++ tcph->rst = 0;
++ }
++}
++
++void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
++{
++ tso->tcp_seq += size;
++ tso->size -= size;
++ tso->data += size;
++
++ if ((tso->size == 0) &&
++ (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
++
++ /* Move to next segment */
++ tso->size = frag->size;
++ tso->data = page_address(frag->page.p) + frag->page_offset;
++ tso->next_frag_idx++;
++ }
++}
++
++void tso_start(struct sk_buff *skb, struct tso_t *tso)
++{
++ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++
++ tso->ip_id = ntohs(ip_hdr(skb)->id);
++ tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
++ tso->next_frag_idx = 0;
++
++ /* Build first data */
++ tso->size = skb_headlen(skb) - hdr_len;
++ tso->data = skb->data + hdr_len;
++ if ((tso->size == 0) &&
++ (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
++
++ /* Move to next segment */
++ tso->size = frag->size;
++ tso->data = page_address(frag->page.p) + frag->page_offset;
++ tso->next_frag_idx++;
++ }
++}
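The net/core/tso.c helpers added above are meant to be driven from a NIC driver's transmit path: tso_start() primes the state, then for each MSS-sized segment the driver emits one header built by tso_build_hdr() followed by payload chunks advanced with tso_build_data(). A rough sketch of that loop, assuming hypothetical example_put_hdr()/example_put_data() stand-ins for the driver's real descriptor setup and a driver-provided hdr_buf scratch buffer large enough for the packet headers:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

static void example_put_hdr(char *hdr, int hdr_len);	/* hypothetical */
static void example_put_data(void *data, int len);	/* hypothetical */

static void example_xmit_tso(struct sk_buff *skb, char *hdr_buf)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int mss = skb_shinfo(skb)->gso_size;
	int total = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);

	while (total > 0) {
		int seg = min_t(int, mss, total);
		int left = seg;

		/* One replicated header per segment; IP id, TCP seq and the
		 * PSH/FIN/RST flags are fixed up by tso_build_hdr(). */
		tso_build_hdr(skb, hdr_buf, &tso, seg, total == seg);
		example_put_hdr(hdr_buf, hdr_len);

		/* The segment payload may span several skb fragments. */
		while (left > 0) {
			int chunk = min_t(int, left, tso.size);

			example_put_data(tso.data, chunk);
			tso_build_data(skb, &tso, chunk);
			left -= chunk;
		}
		total -= seg;
	}
}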
+diff -Nur linux-3.14.40.orig/net/ieee802154/Kconfig linux-3.14.40/net/ieee802154/Kconfig
+--- linux-3.14.40.orig/net/ieee802154/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/ieee802154/Kconfig 2015-05-01 14:58:06.503427001 -0500
+@@ -15,7 +15,7 @@
+ depends on IEEE802154 && IPV6
+ select 6LOWPAN_IPHC
+ ---help---
+- IPv6 compression over IEEE 802.15.4.
++ IPv6 compression over IEEE 802.15.4.
+
+ config 6LOWPAN_IPHC
+ tristate
+diff -Nur linux-3.14.40.orig/net/mac80211/driver-ops.h linux-3.14.40/net/mac80211/driver-ops.h
+--- linux-3.14.40.orig/net/mac80211/driver-ops.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/mac80211/driver-ops.h 2015-05-01 14:58:06.543427001 -0500
+@@ -722,13 +722,19 @@
+ }
+
+ static inline void drv_flush(struct ieee80211_local *local,
++ struct ieee80211_sub_if_data *sdata,
+ u32 queues, bool drop)
+ {
++ struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
++
+ might_sleep();
+
++ if (sdata)
++ check_sdata_in_driver(sdata);
++
+ trace_drv_flush(local, queues, drop);
+ if (local->ops->flush)
+- local->ops->flush(&local->hw, queues, drop);
++ local->ops->flush(&local->hw, vif, queues, drop);
+ trace_drv_return_void(local);
+ }
+
+diff -Nur linux-3.14.40.orig/net/mac80211/ibss.c linux-3.14.40/net/mac80211/ibss.c
+--- linux-3.14.40.orig/net/mac80211/ibss.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/mac80211/ibss.c 2015-05-01 14:58:06.563427001 -0500
+@@ -386,7 +386,7 @@
+ presp->head_len, 0, GFP_KERNEL);
+ cfg80211_put_bss(local->hw.wiphy, bss);
+ netif_carrier_on(sdata->dev);
+- cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
++ cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
+ }
+
+ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+diff -Nur linux-3.14.40.orig/net/mac80211/util.c linux-3.14.40/net/mac80211/util.c
+--- linux-3.14.40.orig/net/mac80211/util.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/mac80211/util.c 2015-05-01 14:58:06.571427001 -0500
+@@ -554,7 +554,7 @@
+ ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_FLUSH);
+
+- drv_flush(local, queues, false);
++ drv_flush(local, sdata, queues, false);
+
+ ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_FLUSH);
+diff -Nur linux-3.14.40.orig/net/wireless/core.h linux-3.14.40/net/wireless/core.h
+--- linux-3.14.40.orig/net/wireless/core.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/wireless/core.h 2015-05-01 14:58:06.595427001 -0500
+@@ -211,6 +211,7 @@
+ } dc;
+ struct {
+ u8 bssid[ETH_ALEN];
++ struct ieee80211_channel *channel;
+ } ij;
+ };
+ };
+@@ -258,7 +259,8 @@
+ struct net_device *dev, bool nowext);
+ int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool nowext);
+-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
++void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel);
+ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
+diff -Nur linux-3.14.40.orig/net/wireless/ibss.c linux-3.14.40/net/wireless/ibss.c
+--- linux-3.14.40.orig/net/wireless/ibss.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/wireless/ibss.c 2015-05-01 14:58:06.611427001 -0500
+@@ -14,7 +14,8 @@
+ #include "rdev-ops.h"
+
+
+-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
++void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel)
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_bss *bss;
+@@ -28,8 +29,7 @@
+ if (!wdev->ssid_len)
+ return;
+
+- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+- wdev->ssid, wdev->ssid_len,
++ bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
+ WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+
+ if (WARN_ON(!bss))
+@@ -54,21 +54,26 @@
+ #endif
+ }
+
+-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
++void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
++ struct ieee80211_channel *channel, gfp_t gfp)
+ {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_event *ev;
+ unsigned long flags;
+
+- trace_cfg80211_ibss_joined(dev, bssid);
++ trace_cfg80211_ibss_joined(dev, bssid, channel);
++
++ if (WARN_ON(!channel))
++ return;
+
+ ev = kzalloc(sizeof(*ev), gfp);
+ if (!ev)
+ return;
+
+ ev->type = EVENT_IBSS_JOINED;
+- memcpy(ev->cr.bssid, bssid, ETH_ALEN);
++ memcpy(ev->ij.bssid, bssid, ETH_ALEN);
++ ev->ij.channel = channel;
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ list_add_tail(&ev->list, &wdev->event_list);
+diff -Nur linux-3.14.40.orig/net/wireless/trace.h linux-3.14.40/net/wireless/trace.h
+--- linux-3.14.40.orig/net/wireless/trace.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/wireless/trace.h 2015-05-01 14:58:06.611427001 -0500
+@@ -2279,11 +2279,6 @@
+ TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
+ );
+
+-DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
+- TP_PROTO(struct net_device *netdev, const u8 *addr),
+- TP_ARGS(netdev, addr)
+-);
+-
+ DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
+ TP_PROTO(struct net_device *netdev, const u8 *addr),
+ TP_ARGS(netdev, addr)
+@@ -2294,6 +2289,24 @@
+ TP_ARGS(netdev, addr)
+ );
+
++TRACE_EVENT(cfg80211_ibss_joined,
++ TP_PROTO(struct net_device *netdev, const u8 *bssid,
++ struct ieee80211_channel *channel),
++ TP_ARGS(netdev, bssid, channel),
++ TP_STRUCT__entry(
++ NETDEV_ENTRY
++ MAC_ENTRY(bssid)
++ CHAN_ENTRY
++ ),
++ TP_fast_assign(
++ NETDEV_ASSIGN;
++ MAC_ASSIGN(bssid, bssid);
++ CHAN_ASSIGN(channel);
++ ),
++ TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT,
++ NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
++);
++
+ TRACE_EVENT(cfg80211_probe_status,
+ TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
+ bool acked),
+diff -Nur linux-3.14.40.orig/net/wireless/util.c linux-3.14.40/net/wireless/util.c
+--- linux-3.14.40.orig/net/wireless/util.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/net/wireless/util.c 2015-05-01 14:58:06.615427001 -0500
+@@ -820,7 +820,8 @@
+ ev->dc.reason, true);
+ break;
+ case EVENT_IBSS_JOINED:
+- __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
++ __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
++ ev->ij.channel);
+ break;
+ }
+ wdev_unlock(wdev);
+diff -Nur linux-3.14.40.orig/scripts/Makefile.lib linux-3.14.40/scripts/Makefile.lib
+--- linux-3.14.40.orig/scripts/Makefile.lib 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/scripts/Makefile.lib 2015-05-01 14:58:06.623427001 -0500
+@@ -153,6 +153,7 @@
+ -I$(srctree)/arch/$(SRCARCH)/boot/dts \
+ -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \
+ -I$(srctree)/drivers/of/testcase-data \
++ -I$(srctree)/include \
+ -undef -D__DTS__
+
+ # Finds the multi-part object the current object will be linked into
+diff -Nur linux-3.14.40.orig/scripts/mod/devicetable-offsets.c linux-3.14.40/scripts/mod/devicetable-offsets.c
+--- linux-3.14.40.orig/scripts/mod/devicetable-offsets.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/scripts/mod/devicetable-offsets.c 2015-05-01 14:58:06.643427001 -0500
+@@ -174,6 +174,9 @@
+ DEVID_FIELD(x86_cpu_id, model);
+ DEVID_FIELD(x86_cpu_id, vendor);
+
++ DEVID(cpu_feature);
++ DEVID_FIELD(cpu_feature, feature);
++
+ DEVID(mei_cl_device_id);
+ DEVID_FIELD(mei_cl_device_id, name);
+
+diff -Nur linux-3.14.40.orig/scripts/mod/file2alias.c linux-3.14.40/scripts/mod/file2alias.c
+--- linux-3.14.40.orig/scripts/mod/file2alias.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/scripts/mod/file2alias.c 2015-05-01 14:58:06.655427001 -0500
+@@ -1135,6 +1135,16 @@
+ }
+ ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
+
++/* Looks like cpu:type:*:feature:*FEAT* */
++static int do_cpu_entry(const char *filename, void *symval, char *alias)
++{
++ DEF_FIELD(symval, cpu_feature, feature);
++
++ sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
++ return 1;
++}
++ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
++
+ /* Looks like: mei:S */
+ static int do_mei_entry(const char *filename, void *symval,
+ char *alias)
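The do_cpu_entry() handler added above makes file2alias emit a "cpu:type:*:feature:*XXXX*" alias for each entry of a cpu_feature ID table, so modprobe can autoload a module when the matching CPU capability is advertised. Assuming the struct cpu_feature definition in include/linux/mod_devicetable.h that accompanies this change elsewhere in the patch, a module would opt in roughly as below; the feature number is a placeholder normally supplied by the architecture's cpu_feature() macro.

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct cpu_feature example_cpu_feature[] = {
	{ .feature = 0x0001 },	/* hypothetical feature number */
	{ }
};
MODULE_DEVICE_TABLE(cpu, example_cpu_feature);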
+diff -Nur linux-3.14.40.orig/scripts/recordmcount.c linux-3.14.40/scripts/recordmcount.c
+--- linux-3.14.40.orig/scripts/recordmcount.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/scripts/recordmcount.c 2015-05-01 14:58:06.687427001 -0500
+@@ -40,6 +40,11 @@
+ #define R_METAG_NONE 3
+ #endif
+
++#ifndef EM_AARCH64
++#define EM_AARCH64 183
++#define R_AARCH64_ABS64 257
++#endif
++
+ static int fd_map; /* File descriptor for file being modified. */
+ static int mmap_failed; /* Boolean flag. */
+ static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
+@@ -347,6 +352,8 @@
+ case EM_ARM: reltype = R_ARM_ABS32;
+ altmcount = "__gnu_mcount_nc";
+ break;
++ case EM_AARCH64:
++ reltype = R_AARCH64_ABS64; gpfx = '_'; break;
+ case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
+ case EM_METAG: reltype = R_METAG_ADDR32;
+ altmcount = "_mcount_wrapper";
+diff -Nur linux-3.14.40.orig/scripts/recordmcount.pl linux-3.14.40/scripts/recordmcount.pl
+--- linux-3.14.40.orig/scripts/recordmcount.pl 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/scripts/recordmcount.pl 2015-05-01 14:58:06.691427001 -0500
+@@ -278,6 +278,11 @@
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24|THM_CALL)" .
+ "\\s+(__gnu_mcount_nc|mcount)\$";
+
++} elsif ($arch eq "arm64") {
++ $alignment = 3;
++ $section_type = '%progbits';
++ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$";
++ $type = ".quad";
+ } elsif ($arch eq "ia64") {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+ $type = "data8";
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/cs42888.c linux-3.14.40/sound/soc/codecs/cs42888.c
+--- linux-3.14.40.orig/sound/soc/codecs/cs42888.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/codecs/cs42888.c 2015-05-01 14:58:06.695427001 -0500
+@@ -0,0 +1,934 @@
++/*
++ * cs42888.c -- CS42888 ALSA SoC Audio Driver
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/pm.h>
++#include <linux/i2c.h>
++#include <linux/spi/spi.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <sound/soc-dapm.h>
++#include <sound/tlv.h>
++#include <sound/initval.h>
++#include <asm/div64.h>
++#include "cs42888.h"
++
++#define CS42888_NUM_SUPPLIES 4
++static const char *cs42888_supply_names[CS42888_NUM_SUPPLIES] = {
++ "VA",
++ "VD",
++ "VLS",
++ "VLC",
++};
++
++#define CS42888_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
++ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
++
++/* Private data for the CS42888 */
++struct cs42888_private {
++ struct clk *clk;
++ struct snd_soc_codec *codec;
++ u8 reg_cache[CS42888_NUMREGS + 1];
++ unsigned int mclk; /* Input frequency of the MCLK pin */
++ unsigned int slave_mode;
++ struct regulator_bulk_data supplies[CS42888_NUM_SUPPLIES];
++};
++
++/**
++ * cs42888_fill_cache - pre-fill the CS42888 register cache.
++ * @codec: the codec for this CS42888
++ *
++ * This function fills in the CS42888 register cache by reading the register
++ * values from the hardware.
++ *
++ * The CS42888 registers are cached to avoid excessive I2C I/O operations.
++ * After the initial read to pre-fill the cache, the CS42888 never updates
++ * the register values, so we won't have a cache coherency problem.
++ *
++ * We use the auto-increment feature of the CS42888 to read all registers in
++ * one shot.
++ */
++static int cs42888_fill_cache(struct snd_soc_codec *codec)
++{
++ u8 *cache = codec->reg_cache;
++ struct i2c_client *i2c_client = to_i2c_client(codec->dev);
++ s32 length;
++
++ length = i2c_smbus_read_i2c_block_data(i2c_client,
++ CS42888_FIRSTREG | CS42888_I2C_INCR, CS42888_NUMREGS, \
++ cache + 1);
++
++ if (length != CS42888_NUMREGS) {
++ dev_err(codec->dev, "i2c read failure, addr=0x%x\n",
++ i2c_client->addr);
++ return -EIO;
++ }
++ return 0;
++}
++
++#ifdef DEBUG
++static void dump_reg(struct snd_soc_codec *codec)
++{
++ int i, reg;
++ int ret;
++ u8 *cache = codec->reg_cache + 1;
++
++ dev_dbg(codec->dev, "dump begin\n");
++ dev_dbg(codec->dev, "reg value in cache\n");
++ for (i = 0; i < CS42888_NUMREGS; i++)
++ dev_dbg(codec->dev, "reg[%d] = 0x%x\n", i, cache[i]);
++
++ dev_dbg(codec->dev, "real reg value\n");
++ ret = cs42888_fill_cache(codec);
++ if (ret < 0) {
++ dev_err(codec->dev, "failed to fill register cache\n");
++		return;
++ }
++ for (i = 0; i < CS42888_NUMREGS; i++)
++ dev_dbg(codec->dev, "reg[%d] = 0x%x\n", i, cache[i]);
++
++ dev_dbg(codec->dev, "dump end\n");
++}
++#else
++static void dump_reg(struct snd_soc_codec *codec)
++{
++}
++#endif
++
++/* -127.5dB to 0dB with step of 0.5dB */
++static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
++/* -64dB to 24dB with step of 0.5dB */
++static const DECLARE_TLV_DB_SCALE(adc_tlv, -6400, 50, 1);
++
++static int cs42888_out_vu(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ return snd_soc_put_volsw_2r(kcontrol, ucontrol);
++}
++
++static int cs42888_info_volsw_s8(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = 2;
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = mc->max - mc->min;
++ return 0;
++}
++
++static int cs42888_get_volsw_s8(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++ s8 val = snd_soc_read(codec, mc->reg);
++ ucontrol->value.integer.value[0] = val - mc->min;
++
++ val = snd_soc_read(codec, mc->rreg);
++ ucontrol->value.integer.value[1] = val - mc->min;
++ return 0;
++}
++
++int cs42888_put_volsw_s8(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct soc_mixer_control *mc =
++ (struct soc_mixer_control *)kcontrol->private_value;
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
++ unsigned short val;
++ int ret;
++
++ val = ucontrol->value.integer.value[0] + mc->min;
++ ret = snd_soc_write(codec, mc->reg, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ val = ucontrol->value.integer.value[1] + mc->min;
++ ret = snd_soc_write(codec, mc->rreg, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++ return 0;
++}
++
++#define SOC_CS42888_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, \
++ xinvert, tlv_array) \
++{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
++ .name = (xname), \
++ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
++ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
++ .tlv.p = (tlv_array), \
++ .info = snd_soc_info_volsw, \
++ .get = snd_soc_get_volsw, \
++ .put = cs42888_out_vu, \
++ .private_value = (unsigned long)&(struct soc_mixer_control) \
++ {.reg = reg_left, \
++ .rreg = reg_right, \
++ .shift = xshift, \
++ .max = xmax, \
++ .invert = xinvert} \
++}
++
++#define SOC_CS42888_DOUBLE_R_S8_TLV(xname, reg_left, reg_right, xmin, xmax, \
++ tlv_array) \
++{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
++ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
++ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
++ .tlv.p = (tlv_array), \
++ .info = cs42888_info_volsw_s8, \
++ .get = cs42888_get_volsw_s8, \
++ .put = cs42888_put_volsw_s8, \
++ .private_value = (unsigned long)&(struct soc_mixer_control) \
++ {.reg = reg_left, \
++ .rreg = reg_right, \
++ .min = xmin, \
++ .max = xmax} \
++}
++
++static const char *cs42888_adcfilter[] = { "None", "High Pass" };
++static const char *cs42888_dacinvert[] = { "Disabled", "Enabled" };
++static const char *cs42888_adcinvert[] = { "Disabled", "Enabled" };
++static const char *cs42888_dacamute[] = { "Disabled", "AutoMute" };
++static const char *cs42888_dac_sngvol[] = { "Disabled", "Enabled" };
++static const char *cs42888_dac_szc[] = { "Immediate Change", "Zero Cross",
++ "Soft Ramp", "Soft Ramp on Zero Cross" };
++static const char *cs42888_mute_adc[] = { "UnMute", "Mute" };
++static const char *cs42888_adc_sngvol[] = { "Disabled", "Enabled" };
++static const char *cs42888_adc_szc[] = { "Immediate Change", "Zero Cross",
++ "Soft Ramp", "Soft Ramp on Zero Cross" };
++static const char *cs42888_dac_dem[] = { "No-De-Emphasis", "De-Emphasis" };
++static const char *cs42888_adc_single[] = { "Differential", "Single-Ended" };
++
++static const struct soc_enum cs42888_enum[] = {
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 7, 2, cs42888_adcfilter),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 0, 1, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 2, 3, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 4, 5, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_DACINV, 6, 7, 2, cs42888_dacinvert),
++ SOC_ENUM_DOUBLE(CS42888_ADCINV, 0, 1, 2, cs42888_adcinvert),
++ SOC_ENUM_DOUBLE(CS42888_ADCINV, 2, 3, 2, cs42888_adcinvert),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 4, 2, cs42888_dacamute),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 7, 2, cs42888_dac_sngvol),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 5, 4, cs42888_dac_szc),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 3, 2, cs42888_mute_adc),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 2, 2, cs42888_adc_sngvol),
++ SOC_ENUM_SINGLE(CS42888_TRANS, 0, 4, cs42888_adc_szc),
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 5, 2, cs42888_dac_dem),
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 4, 2, cs42888_adc_single),
++ SOC_ENUM_SINGLE(CS42888_ADCCTL, 3, 2, cs42888_adc_single),
++};
++
++static const struct snd_kcontrol_new cs42888_snd_controls[] = {
++ SOC_CS42888_DOUBLE_R_TLV("DAC1 Playback Volume", CS42888_VOLAOUT1,
++ CS42888_VOLAOUT2, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_TLV("DAC2 Playback Volume", CS42888_VOLAOUT3,
++ CS42888_VOLAOUT4, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_TLV("DAC3 Playback Volume", CS42888_VOLAOUT5,
++ CS42888_VOLAOUT6, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_TLV("DAC4 Playback Volume", CS42888_VOLAOUT7,
++ CS42888_VOLAOUT8, 0, 0xff, 1, dac_tlv),
++ SOC_CS42888_DOUBLE_R_S8_TLV("ADC1 Capture Volume", CS42888_VOLAIN1,
++ CS42888_VOLAIN2, -128, 48, adc_tlv),
++ SOC_CS42888_DOUBLE_R_S8_TLV("ADC2 Capture Volume", CS42888_VOLAIN3,
++ CS42888_VOLAIN4, -128, 48, adc_tlv),
++ SOC_ENUM("ADC High-Pass Filter Switch", cs42888_enum[0]),
++ SOC_ENUM("DAC1 Invert Switch", cs42888_enum[1]),
++ SOC_ENUM("DAC2 Invert Switch", cs42888_enum[2]),
++ SOC_ENUM("DAC3 Invert Switch", cs42888_enum[3]),
++ SOC_ENUM("DAC4 Invert Switch", cs42888_enum[4]),
++ SOC_ENUM("ADC1 Invert Switch", cs42888_enum[5]),
++ SOC_ENUM("ADC2 Invert Switch", cs42888_enum[6]),
++ SOC_ENUM("DAC Auto Mute Switch", cs42888_enum[7]),
++ SOC_ENUM("DAC Single Volume Control Switch", cs42888_enum[8]),
++ SOC_ENUM("DAC Soft Ramp and Zero Cross Control Switch", cs42888_enum[9]),
++ SOC_ENUM("Mute ADC Serial Port Switch", cs42888_enum[10]),
++ SOC_ENUM("ADC Single Volume Control Switch", cs42888_enum[11]),
++ SOC_ENUM("ADC Soft Ramp and Zero Cross Control Switch", cs42888_enum[12]),
++ SOC_ENUM("DAC Deemphasis Switch", cs42888_enum[13]),
++ SOC_ENUM("ADC1 Single Ended Mode Switch", cs42888_enum[14]),
++ SOC_ENUM("ADC2 Single Ended Mode Switch", cs42888_enum[15]),
++};
++
++
++static const struct snd_soc_dapm_widget cs42888_dapm_widgets[] = {
++ SND_SOC_DAPM_DAC("DAC1", "codec-Playback", CS42888_PWRCTL, 1, 1),
++ SND_SOC_DAPM_DAC("DAC2", "codec-Playback", CS42888_PWRCTL, 2, 1),
++ SND_SOC_DAPM_DAC("DAC3", "codec-Playback", CS42888_PWRCTL, 3, 1),
++ SND_SOC_DAPM_DAC("DAC4", "codec-Playback", CS42888_PWRCTL, 4, 1),
++
++ SND_SOC_DAPM_OUTPUT("AOUT1L"),
++ SND_SOC_DAPM_OUTPUT("AOUT1R"),
++ SND_SOC_DAPM_OUTPUT("AOUT2L"),
++ SND_SOC_DAPM_OUTPUT("AOUT2R"),
++ SND_SOC_DAPM_OUTPUT("AOUT3L"),
++ SND_SOC_DAPM_OUTPUT("AOUT3R"),
++ SND_SOC_DAPM_OUTPUT("AOUT4L"),
++ SND_SOC_DAPM_OUTPUT("AOUT4R"),
++
++ SND_SOC_DAPM_ADC("ADC1", "codec-Capture", CS42888_PWRCTL, 5, 1),
++ SND_SOC_DAPM_ADC("ADC2", "codec-Capture", CS42888_PWRCTL, 6, 1),
++
++ SND_SOC_DAPM_INPUT("AIN1L"),
++ SND_SOC_DAPM_INPUT("AIN1R"),
++ SND_SOC_DAPM_INPUT("AIN2L"),
++ SND_SOC_DAPM_INPUT("AIN2R"),
++
++ SND_SOC_DAPM_PGA_E("PWR", CS42888_PWRCTL, 0, 1, NULL, 0,
++ NULL, 0),
++};
++
++static const struct snd_soc_dapm_route audio_map[] = {
++ /* Playback */
++ { "PWR", NULL, "DAC1" },
++ { "PWR", NULL, "DAC1" },
++
++ { "PWR", NULL, "DAC2" },
++ { "PWR", NULL, "DAC2" },
++
++ { "PWR", NULL, "DAC3" },
++ { "PWR", NULL, "DAC3" },
++
++ { "PWR", NULL, "DAC4" },
++ { "PWR", NULL, "DAC4" },
++
++ { "AOUT1L", NULL, "PWR" },
++ { "AOUT1R", NULL, "PWR" },
++
++ { "AOUT2L", NULL, "PWR" },
++ { "AOUT2R", NULL, "PWR" },
++
++ { "AOUT3L", NULL, "PWR" },
++ { "AOUT3R", NULL, "PWR" },
++
++ { "AOUT4L", NULL, "PWR" },
++ { "AOUT4R", NULL, "PWR" },
++
++ /* Capture */
++ { "PWR", NULL, "AIN1L" },
++ { "PWR", NULL, "AIN1R" },
++
++ { "PWR", NULL, "AIN2L" },
++ { "PWR", NULL, "AIN2R" },
++
++ { "ADC1", NULL, "PWR" },
++ { "ADC1", NULL, "PWR" },
++
++ { "ADC2", NULL, "PWR" },
++ { "ADC2", NULL, "PWR" },
++};
++
++
++static int cs42888_add_widgets(struct snd_soc_codec *codec)
++{
++ snd_soc_dapm_new_controls(&codec->dapm, cs42888_dapm_widgets,
++ ARRAY_SIZE(cs42888_dapm_widgets));
++
++ snd_soc_dapm_add_routes(&codec->dapm, audio_map, ARRAY_SIZE(audio_map));
++
++ snd_soc_dapm_new_widgets(&codec->dapm);
++ return 0;
++}
++
++/**
++ * struct cs42888_mode_ratios - clock ratio tables
++ * @ratio: the ratio of MCLK to the sample rate
++ * @speed_mode: the Speed Mode bits to set in the Mode Control register for
++ * this ratio
++ * @mclk: the Ratio Select bits to set in the Mode Control register for this
++ * ratio
++ *
++ * The data for this chart is taken from Table 10 of the CS42888 reference
++ * manual.
++ *
++ * This table is used by cs42888_hw_params() to determine how to program the
++ * Functional Mode register for the current MCLK/LRCK ratio.
++ *
++ */
++struct cs42888_mode_ratios {
++ unsigned int ratio;
++ u8 speed_mode;
++ u8 mclk;
++};
++
++static struct cs42888_mode_ratios cs42888_mode_ratios[] = {
++ {64, CS42888_MODE_4X, CS42888_MODE_DIV1},
++ {96, CS42888_MODE_4X, CS42888_MODE_DIV2},
++ {128, CS42888_MODE_2X, CS42888_MODE_DIV1},
++ {192, CS42888_MODE_2X, CS42888_MODE_DIV2},
++ {256, CS42888_MODE_1X, CS42888_MODE_DIV1},
++ {384, CS42888_MODE_2X, CS42888_MODE_DIV4},
++ {512, CS42888_MODE_1X, CS42888_MODE_DIV3},
++ {768, CS42888_MODE_1X, CS42888_MODE_DIV4},
++ {1024, CS42888_MODE_1X, CS42888_MODE_DIV5}
++};
++
++/* The number of MCLK/LRCK ratios supported by the CS42888 */
++#define NUM_MCLK_RATIOS ARRAY_SIZE(cs42888_mode_ratios)
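++
++/*
++ * Worked example (for illustration only): with MCLK = 24.576 MHz and a
++ * 48 kHz LRCK, the MCLK/LRCK ratio is 512, so cs42888_hw_params() picks the
++ * {512, CS42888_MODE_1X, CS42888_MODE_DIV3} entry above; the speed-mode
++ * bits are only programmed when the codec is clock master.
++ */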
++
++/**
++ * cs42888_set_dai_sysclk - record the MCLK input frequency.
++ * @codec_dai: the codec DAI
++ * @clk_id: the clock ID (ignored)
++ * @freq: the MCLK input frequency
++ * @dir: the clock direction (ignored)
++ *
++ * This function is used to tell the codec driver what the input MCLK
++ * frequency is.
++ *
++ */
++static int cs42888_set_dai_sysclk(struct snd_soc_dai *codec_dai,
++ int clk_id, unsigned int freq, int dir)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++
++ cs42888->mclk = freq;
++ return 0;
++}
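++
++/*
++ * For illustration, a machine driver would typically pass the MCLK rate in
++ * from its init or hw_params callback, e.g. (rate shown is just an example):
++ *
++ *     snd_soc_dai_set_sysclk(codec_dai, 0, 24576000, SND_SOC_CLOCK_IN);
++ */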
++
++/**
++ * cs42888_set_dai_fmt - configure the codec for the selected audio format
++ * @codec_dai: the codec DAI
++ * @format: a SND_SOC_DAIFMT_x value indicating the data format
++ *
++ * This function takes a bitmask of SND_SOC_DAIFMT_x bits and programs the
++ * codec accordingly.
++ *
++ * Currently, this function supports SND_SOC_DAIFMT_I2S, SND_SOC_DAIFMT_LEFT_J
++ * and SND_SOC_DAIFMT_RIGHT_J; the same interface format is programmed for
++ * both the DAC (playback) and ADC (capture) serial ports.
++ */
++static int cs42888_set_dai_fmt(struct snd_soc_dai *codec_dai,
++ unsigned int format)
++{
++ struct snd_soc_codec *codec = codec_dai->codec;
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++ int ret = 0;
++ u8 val;
++
++ val = snd_soc_read(codec, CS42888_FORMAT);
++ val &= ~CS42888_FORMAT_DAC_DIF_MASK;
++ val &= ~CS42888_FORMAT_ADC_DIF_MASK;
++ /* set DAI format */
++ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
++ case SND_SOC_DAIFMT_LEFT_J:
++ val |= DIF_LEFT_J << CS42888_FORMAT_DAC_DIF_OFFSET;
++ val |= DIF_LEFT_J << CS42888_FORMAT_ADC_DIF_OFFSET;
++ break;
++ case SND_SOC_DAIFMT_I2S:
++ val |= DIF_I2S << CS42888_FORMAT_DAC_DIF_OFFSET;
++ val |= DIF_I2S << CS42888_FORMAT_ADC_DIF_OFFSET;
++ break;
++ case SND_SOC_DAIFMT_RIGHT_J:
++ val |= DIF_RIGHT_J << CS42888_FORMAT_DAC_DIF_OFFSET;
++ val |= DIF_RIGHT_J << CS42888_FORMAT_ADC_DIF_OFFSET;
++ break;
++ default:
++ dev_err(codec->dev, "invalid dai format\n");
++ return -EINVAL;
++ }
++
++ ret = snd_soc_write(codec, CS42888_FORMAT, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ val = snd_soc_read(codec, CS42888_MODE);
++ /* set master/slave audio interface */
++ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
++ case SND_SOC_DAIFMT_CBS_CFS:
++ cs42888->slave_mode = 1;
++ val &= ~CS42888_MODE_SPEED_MASK;
++ val |= CS42888_MODE_SLAVE;
++ break;
++ case SND_SOC_DAIFMT_CBM_CFM:
++ cs42888->slave_mode = 0;
++ break;
++ default:
++ /* all other modes are unsupported by the hardware */
++ return -EINVAL;
++ }
++
++ ret = snd_soc_write(codec, CS42888_MODE, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ dump_reg(codec);
++ return ret;
++}
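++
++/*
++ * For illustration, a machine driver using the codec as bit/frame clock
++ * slave would configure the DAI with something like:
++ *
++ *     snd_soc_dai_set_fmt(codec_dai,
++ *                         SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
++ */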
++
++/**
++ * cs42888_hw_params - program the CS42888 with the given hardware parameters.
++ * @substream: the audio stream
++ * @params: the hardware parameters to set
++ *
++ * @dai: the SOC DAI (ignored)
++ *
++ * This function programs the hardware with the values provided.
++ * Specifically, the sample rate and the data format.
++ *
++ * The .ops functions are used to provide board-specific data, like input
++ * frequencies, to this driver. This function takes that information,
++ * combines it with the hardware parameters provided, and programs the
++ * hardware accordingly.
++ */
++static int cs42888_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_codec *codec = rtd->codec;
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++ int ret;
++ u32 i, rate, ratio, val;
++
++ rate = params_rate(params); /* Sampling rate, in Hz */
++ ratio = cs42888->mclk / rate; /* MCLK/LRCK ratio */
++ for (i = 0; i < NUM_MCLK_RATIOS; i++) {
++ if (cs42888_mode_ratios[i].ratio == ratio)
++ break;
++ }
++
++ if (i == NUM_MCLK_RATIOS) {
++ /* We did not find a matching ratio */
++ dev_err(codec->dev, "could not find matching ratio\n");
++ return -EINVAL;
++ }
++
++ if (!cs42888->slave_mode) {
++ val = snd_soc_read(codec, CS42888_MODE);
++ val &= ~CS42888_MODE_SPEED_MASK;
++ val |= cs42888_mode_ratios[i].speed_mode;
++ val &= ~CS42888_MODE_DIV_MASK;
++ val |= cs42888_mode_ratios[i].mclk;
++ } else {
++ val = snd_soc_read(codec, CS42888_MODE);
++ val &= ~CS42888_MODE_SPEED_MASK;
++ val |= CS42888_MODE_SLAVE;
++ val &= ~CS42888_MODE_DIV_MASK;
++ val |= cs42888_mode_ratios[i].mclk;
++ }
++ ret = snd_soc_write(codec, CS42888_MODE, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ /* Unmute all the channels */
++ val = snd_soc_read(codec, CS42888_MUTE);
++ val &= ~CS42888_MUTE_ALL;
++ ret = snd_soc_write(codec, CS42888_MUTE, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return ret;
++ }
++
++ ret = cs42888_fill_cache(codec);
++ if (ret < 0) {
++ dev_err(codec->dev, "failed to fill register cache\n");
++ return ret;
++ }
++
++ dump_reg(codec);
++ return ret;
++}
++
++/**
++ * cs42888_shutdown - mute the outputs when a stream is shut down.
++ * @substream: the audio stream
++ * @dai: the SOC DAI (ignored)
++ *
++ * When a playback stream is closed, all eight analog outputs are muted so
++ * that the codec can drop back to a low-power state without audible
++ * artifacts.
++ */
++static void cs42888_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_codec *codec = rtd->codec;
++ int ret;
++ u8 val;
++
++ /* Mute all the channels */
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ val = snd_soc_read(codec, CS42888_MUTE);
++ val |= CS42888_MUTE_ALL;
++ ret = snd_soc_write(codec, CS42888_MUTE, val);
++ if (ret < 0)
++ dev_err(codec->dev, "i2c write failed\n");
++ }
++}
++
++static int cs42888_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_card *card = rtd->card;
++ struct snd_soc_dai *tmp_codec_dai;
++ struct snd_soc_pcm_runtime *tmp_rtd;
++ u32 i;
++
++ for (i = 0; i < card->num_rtd; i++) {
++ tmp_codec_dai = card->rtd[i].codec_dai;
++ tmp_rtd = (struct snd_soc_pcm_runtime *)(card->rtd + i);
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ cancel_delayed_work(&tmp_rtd->delayed_work);
++ }
++ return 0;
++}
++
++static struct snd_soc_dai_ops cs42888_dai_ops = {
++ .set_fmt = cs42888_set_dai_fmt,
++ .set_sysclk = cs42888_set_dai_sysclk,
++ .hw_params = cs42888_hw_params,
++ .shutdown = cs42888_shutdown,
++ .prepare = cs42888_prepare,
++};
++
++
++static struct snd_soc_dai_driver cs42888_dai = {
++ .name = "CS42888",
++ .playback = {
++ .stream_name = "codec-Playback",
++ .channels_min = 2,
++ .channels_max = 8,
++ .rates = SNDRV_PCM_RATE_8000_192000,
++ .formats = CS42888_FORMATS,
++ },
++ .capture = {
++ .stream_name = "codec-Capture",
++ .channels_min = 2,
++ .channels_max = 4,
++ .rates = SNDRV_PCM_RATE_8000_192000,
++ .formats = CS42888_FORMATS,
++ },
++ .ops = &cs42888_dai_ops,
++};
++
++/**
++ * cs42888_probe - ASoC probe function
++ * @codec: the SoC codec
++ *
++ * This function is called when ASoC has all the pieces it needs to
++ * instantiate a sound driver.
++ */
++static int cs42888_probe(struct snd_soc_codec *codec)
++{
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++ int ret, i, val;
++
++ cs42888->codec = codec;
++ /* setup i2c data ops */
++ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
++ if (ret < 0) {
++ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
++ return ret;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(cs42888->supplies); i++)
++ cs42888->supplies[i].supply = cs42888_supply_names[i];
++
++ ret = devm_regulator_bulk_get(codec->dev,
++ ARRAY_SIZE(cs42888->supplies), cs42888->supplies);
++ if (ret) {
++ dev_err(codec->dev, "Failed to request supplies: %d\n",
++ ret);
++ return ret;
++ }
++
++ ret = regulator_bulk_enable(ARRAY_SIZE(cs42888->supplies),
++ cs42888->supplies);
++ if (ret) {
++ dev_err(codec->dev, "Failed to enable supplies: %d\n",
++ ret);
++ goto err;
++ }
++ msleep(1);
++
++ /* The I2C interface is set up, so pre-fill our register cache */
++ ret = cs42888_fill_cache(codec);
++ if (ret < 0) {
++ dev_err(codec->dev, "failed to fill register cache\n");
++ goto err;
++ }
++
++ /* Enter low power state */
++ val = snd_soc_read(codec, CS42888_PWRCTL);
++ val |= CS42888_PWRCTL_PDN_MASK;
++ ret = snd_soc_write(codec, CS42888_PWRCTL, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ goto err;
++ }
++
++ /* Disable auto-mute */
++ val = snd_soc_read(codec, CS42888_TRANS);
++ val &= ~CS42888_TRANS_AMUTE_MASK;
++ val &= ~CS42888_TRANS_DAC_SZC_MASK;
++ val |= CS42888_TRANS_DAC_SZC_SR;
++ ret = snd_soc_write(codec, CS42888_TRANS, val);
++ if (ret < 0) {
++ dev_err(codec->dev, "i2c write failed\n");
++ goto err;
++ }
++ /* Add the non-DAPM controls */
++ snd_soc_add_codec_controls(codec, cs42888_snd_controls,
++ ARRAY_SIZE(cs42888_snd_controls));
++
++ /* Add DAPM controls */
++ cs42888_add_widgets(codec);
++ return 0;
++err:
++ regulator_bulk_disable(ARRAY_SIZE(cs42888->supplies),
++ cs42888->supplies);
++ return ret;
++}
++
++/**
++ * cs42888_remove - ASoC remove function
++ * @codec: the SoC codec
++ *
++ * This function is the counterpart to cs42888_probe().
++ */
++static int cs42888_remove(struct snd_soc_codec *codec)
++{
++ struct cs42888_private *cs42888 = snd_soc_codec_get_drvdata(codec);
++
++ regulator_bulk_disable(ARRAY_SIZE(cs42888->supplies),
++ cs42888->supplies);
++
++ return 0;
++}
++
++/*
++ * ASoC codec device structure
++ *
++ * Assign this variable to the codec_dev field of the machine driver's
++ * snd_soc_device structure.
++ */
++static struct snd_soc_codec_driver cs42888_driver = {
++ .probe = cs42888_probe,
++ .remove = cs42888_remove,
++ .reg_cache_size = CS42888_NUMREGS + 1,
++ .reg_word_size = sizeof(u8),
++ .reg_cache_step = 1,
++};
++
++/**
++ * cs42888_i2c_probe - initialize the I2C interface of the CS42888
++ * @i2c_client: the I2C client object
++ * @id: the I2C device ID (ignored)
++ *
++ * This function is called whenever the I2C subsystem finds a device that
++ * matches the device ID given via a prior call to i2c_add_driver().
++ */
++static int cs42888_i2c_probe(struct i2c_client *i2c_client,
++ const struct i2c_device_id *id)
++{
++ struct cs42888_private *cs42888;
++ int ret, val;
++
++ /* Verify that we have a CS42888 */
++ val = i2c_smbus_read_byte_data(i2c_client, CS42888_CHIPID);
++ if (val < 0) {
++ dev_err(&i2c_client->dev, "Device with ID register %x is not a CS42888", val);
++ return -ENODEV;
++ }
++ /* The top four bits of the chip ID should be 0000. */
++ if ((val & CS42888_CHIPID_ID_MASK) != 0x00) {
++ dev_err(&i2c_client->dev, "device is not a CS42888\n");
++ return -ENODEV;
++ }
++
++ dev_info(&i2c_client->dev, "found device at i2c address %X\n",
++ i2c_client->addr);
++ dev_info(&i2c_client->dev, "hardware revision %X\n", val & 0xF);
++
++ /* Allocate our private driver data */
++ cs42888 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42888_private), GFP_KERNEL);
++ if (!cs42888) {
++ dev_err(&i2c_client->dev, "could not allocate codec\n");
++ return -ENOMEM;
++ }
++
++ i2c_set_clientdata(i2c_client, cs42888);
++
++ cs42888->clk = devm_clk_get(&i2c_client->dev, NULL);
++ if (IS_ERR(cs42888->clk)) {
++ ret = PTR_ERR(cs42888->clk);
++ dev_err(&i2c_client->dev, "Cannot get the clock: %d\n", ret);
++ return ret;
++ }
++
++ cs42888->mclk = clk_get_rate(cs42888->clk);
++ switch (cs42888->mclk) {
++ case 24576000:
++ cs42888_dai.playback.rates = SNDRV_PCM_RATE_48000 |
++ SNDRV_PCM_RATE_96000 |
++ SNDRV_PCM_RATE_192000;
++ cs42888_dai.capture.rates = SNDRV_PCM_RATE_48000 |
++ SNDRV_PCM_RATE_96000 |
++ SNDRV_PCM_RATE_192000;
++ break;
++ case 16934400:
++ cs42888_dai.playback.rates = SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_88200 |
++ SNDRV_PCM_RATE_176400;
++ cs42888_dai.capture.rates = SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_88200 |
++ SNDRV_PCM_RATE_176400;
++ break;
++ default:
++ dev_err(&i2c_client->dev, "codec mclk is not supported %d\n", cs42888->mclk);
++ break;
++ }
++
++ ret = snd_soc_register_codec(&i2c_client->dev,
++ &cs42888_driver, &cs42888_dai, 1);
++ if (ret) {
++ dev_err(&i2c_client->dev, "Failed to register codec:%d\n", ret);
++ return ret;
++ }
++ return 0;
++}
++
++/**
++ * cs42888_i2c_remove - remove an I2C device
++ * @i2c_client: the I2C client object
++ *
++ * This function is the counterpart to cs42888_i2c_probe().
++ */
++static int cs42888_i2c_remove(struct i2c_client *i2c_client)
++{
++ snd_soc_unregister_codec(&i2c_client->dev);
++ return 0;
++}
++
++/*
++ * cs42888_i2c_id - I2C device IDs supported by this driver
++ */
++static struct i2c_device_id cs42888_i2c_id[] = {
++ {"cs42888", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, cs42888_i2c_id);
++
++#ifdef CONFIG_PM
++/* This suspend/resume implementation can handle both a simple standby,
++ * where the codec remains powered, and a full suspend, where the voltage
++ * domain the codec is connected to is torn down and/or any other hardware
++ * reset condition is asserted.
++ *
++ * The codec's own power saving features are enabled in the suspend callback,
++ * and all registers are written back to the hardware when resuming.
++ */
++
++static int cs42888_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ struct cs42888_private *cs42888 = i2c_get_clientdata(client);
++ struct snd_soc_codec *codec = cs42888->codec;
++ int reg = snd_soc_read(codec, CS42888_PWRCTL) | CS42888_PWRCTL_PDN_MASK;
++ return snd_soc_write(codec, CS42888_PWRCTL, reg);
++}
++
++static int cs42888_i2c_resume(struct i2c_client *client)
++{
++ struct cs42888_private *cs42888 = i2c_get_clientdata(client);
++ struct snd_soc_codec *codec = cs42888->codec;
++ int reg;
++
++ /* In case the device was put to hard reset during sleep, we need to
++ * wait 500ns here before any I2C communication. */
++ ndelay(500);
++
++ /* first restore the entire register cache ... */
++ for (reg = CS42888_FIRSTREG; reg <= CS42888_LASTREG; reg++) {
++ u8 val = snd_soc_read(codec, reg);
++
++ if (i2c_smbus_write_byte_data(client, reg, val)) {
++ dev_err(codec->dev, "i2c write failed\n");
++ return -EIO;
++ }
++ }
++
++ /* ... then disable the power-down bits */
++ reg = snd_soc_read(codec, CS42888_PWRCTL);
++ reg &= ~CS42888_PWRCTL_PDN_MASK;
++ return snd_soc_write(codec, CS42888_PWRCTL, reg);
++}
++#else
++#define cs42888_i2c_suspend NULL
++#define cs42888_i2c_resume NULL
++#endif /* CONFIG_PM */
++
++/*
++ * cs42888_i2c_driver - I2C device identification
++ *
++ * This structure tells the I2C subsystem how to identify and support a
++ * given I2C device type.
++ */
++
++static const struct of_device_id cs42888_dt_ids[] = {
++ { .compatible = "cirrus,cs42888", },
++ { /* sentinel */ }
++};
++
++static struct i2c_driver cs42888_i2c_driver = {
++ .driver = {
++ .name = "cs42888",
++ .owner = THIS_MODULE,
++ .of_match_table = cs42888_dt_ids,
++ },
++ .probe = cs42888_i2c_probe,
++ .remove = cs42888_i2c_remove,
++ .suspend = cs42888_i2c_suspend,
++ .resume = cs42888_i2c_resume,
++ .id_table = cs42888_i2c_id,
++};
++
++module_i2c_driver(cs42888_i2c_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Cirrus Logic CS42888 ALSA SoC Codec Driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/cs42888.h linux-3.14.40/sound/soc/codecs/cs42888.h
+--- linux-3.14.40.orig/sound/soc/codecs/cs42888.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/codecs/cs42888.h 2015-05-01 14:58:06.695427001 -0500
+@@ -0,0 +1,123 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _CS42888_H
++#define _CS42888_H
++
++/* CS42888 registers addresses */
++#define CS42888_CHIPID 0x01 /* Chip ID */
++#define CS42888_PWRCTL 0x02 /* Power Control */
++#define CS42888_MODE 0x03 /* Functional Mode */
++#define CS42888_FORMAT 0x04 /* Interface Formats */
++#define CS42888_ADCCTL 0x05 /* ADC Control */
++#define CS42888_TRANS 0x06 /* Transition Control */
++#define CS42888_MUTE 0x07 /* Mute Control */
++#define CS42888_VOLAOUT1 0x08 /* Volume Control AOUT1*/
++#define CS42888_VOLAOUT2 0x09 /* Volume Control AOUT2*/
++#define CS42888_VOLAOUT3 0x0A /* Volume Control AOUT3*/
++#define CS42888_VOLAOUT4 0x0B /* Volume Control AOUT4*/
++#define CS42888_VOLAOUT5 0x0C /* Volume Control AOUT5*/
++#define CS42888_VOLAOUT6 0x0D /* Volume Control AOUT6*/
++#define CS42888_VOLAOUT7 0x0E /* Volume Control AOUT7*/
++#define CS42888_VOLAOUT8 0x0F /* Volume Control AOUT8*/
++#define CS42888_DACINV 0x10 /* DAC Channel Invert */
++#define CS42888_VOLAIN1 0x11 /* Volume Control AIN1 */
++#define CS42888_VOLAIN2 0x12 /* Volume Control AIN2 */
++#define CS42888_VOLAIN3 0x13 /* Volume Control AIN3 */
++#define CS42888_VOLAIN4 0x14 /* Volume Control AIN4 */
++#define CS42888_ADCINV 0x17 /* ADC Channel Invert */
++#define CS42888_STATUSCTL 0x18 /* Status Control */
++#define CS42888_STATUS 0x19 /* Status */
++#define CS42888_STATUSMASK 0x1A /* Status Mask */
++
++#define CS42888_FIRSTREG 0x01
++#define CS42888_LASTREG 0x1A
++#define CS42888_NUMREGS (CS42888_LASTREG - CS42888_FIRSTREG + 1)
++#define CS42888_I2C_INCR 0x80
++
++/* Bit masks for the CS42888 registers */
++#define CS42888_CHIPID_ID_MASK 0xF0
++#define CS42888_CHIPID_REV 0x0F
++#define CS42888_PWRCTL_PDN_ADC2_OFFSET 6
++#define CS42888_PWRCTL_PDN_ADC1_OFFSET 5
++#define CS42888_PWRCTL_PDN_DAC4_OFFSET 4
++#define CS42888_PWRCTL_PDN_DAC3_OFFSET 3
++#define CS42888_PWRCTL_PDN_DAC2_OFFSET 2
++#define CS42888_PWRCTL_PDN_DAC1_OFFSET 1
++#define CS42888_PWRCTL_PDN_OFFSET 0
++#define CS42888_PWRCTL_PDN_ADC2_MASK (1 << CS42888_PWRCTL_PDN_ADC2_OFFSET)
++#define CS42888_PWRCTL_PDN_ADC1_MASK (1 << CS42888_PWRCTL_PDN_ADC1_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC4_MASK (1 << CS42888_PWRCTL_PDN_DAC4_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC3_MASK (1 << CS42888_PWRCTL_PDN_DAC3_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC2_MASK (1 << CS42888_PWRCTL_PDN_DAC2_OFFSET)
++#define CS42888_PWRCTL_PDN_DAC1_MASK (1 << CS42888_PWRCTL_PDN_DAC1_OFFSET)
++#define CS42888_PWRCTL_PDN_MASK (1 << CS42888_PWRCTL_PDN_OFFSET)
++
++#define CS42888_MODE_SPEED_MASK 0xF0
++#define CS42888_MODE_1X 0x00
++#define CS42888_MODE_2X 0x50
++#define CS42888_MODE_4X 0xA0
++#define CS42888_MODE_SLAVE 0xF0
++#define CS42888_MODE_DIV_MASK 0x0E
++#define CS42888_MODE_DIV1 0x00
++#define CS42888_MODE_DIV2 0x02
++#define CS42888_MODE_DIV3 0x04
++#define CS42888_MODE_DIV4 0x06
++#define CS42888_MODE_DIV5 0x08
++
++#define CS42888_FORMAT_FREEZE_OFFSET 7
++#define CS42888_FORMAT_AUX_DIF_OFFSET 6
++#define CS42888_FORMAT_DAC_DIF_OFFSET 3
++#define CS42888_FORMAT_ADC_DIF_OFFSET 0
++#define CS42888_FORMAT_FREEZE_MASK (1 << CS42888_FORMAT_FREEZE_OFFSET)
++#define CS42888_FORMAT_AUX_DIF_MASK (1 << CS42888_FORMAT_AUX_DIF_OFFSET)
++#define CS42888_FORMAT_DAC_DIF_MASK (7 << CS42888_FORMAT_DAC_DIF_OFFSET)
++#define CS42888_FORMAT_ADC_DIF_MASK (7 << CS42888_FORMAT_ADC_DIF_OFFSET)
++
++#define CS42888_TRANS_DAC_SNGVOL_OFFSET 7
++#define CS42888_TRANS_DAC_SZC_OFFSET 5
++#define CS42888_TRANS_AMUTE_OFFSET 4
++#define CS42888_TRANS_MUTE_ADC_SP_OFFSET 3
++#define CS42888_TRANS_ADC_SNGVOL_OFFSET 2
++#define CS42888_TRANS_ADC_SZC_OFFSET 0
++#define CS42888_TRANS_DAC_SNGVOL_MASK (1 << CS42888_TRANS_DAC_SNGVOL_OFFSET)
++#define CS42888_TRANS_DAC_SZC_MASK (3 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_AMUTE_MASK (1 << CS42888_TRANS_AMUTE_OFFSET)
++#define CS42888_TRANS_MUTE_ADC_SP_MASK (1 << CS42888_TRANS_MUTE_ADC_SP_OFFSET)
++#define CS42888_TRANS_ADC_SNGVOL_MASK (1 << CS42888_TRANS_ADC_SNGVOL_OFFSET)
++#define CS42888_TRANS_ADC_SZC_MASK (3 << CS42888_TRANS_ADC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_IC (0 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_ZC (1 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_SR (2 << CS42888_TRANS_DAC_SZC_OFFSET)
++#define CS42888_TRANS_DAC_SZC_SRZC (3 << CS42888_TRANS_DAC_SZC_OFFSET)
++
++#define CS42888_MUTE_AOUT8 (0x1 << 7)
++#define CS42888_MUTE_AOUT7 (0x1 << 6)
++#define CS42888_MUTE_AOUT6 (0x1 << 5)
++#define CS42888_MUTE_AOUT5 (0x1 << 4)
++#define CS42888_MUTE_AOUT4 (0x1 << 3)
++#define CS42888_MUTE_AOUT3 (0x1 << 2)
++#define CS42888_MUTE_AOUT2 (0x1 << 1)
++#define CS42888_MUTE_AOUT1 (0x1 << 0)
++#define CS42888_MUTE_ALL (CS42888_MUTE_AOUT1 | CS42888_MUTE_AOUT2 | \
++ CS42888_MUTE_AOUT3 | CS42888_MUTE_AOUT4 | \
++ CS42888_MUTE_AOUT5 | CS42888_MUTE_AOUT6 | \
++ CS42888_MUTE_AOUT7 | CS42888_MUTE_AOUT8)
++
++#define DIF_LEFT_J 0
++#define DIF_I2S 1
++#define DIF_RIGHT_J 2
++#define DIF_TDM 6
++
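++/*
++ * Illustration: cs42888_set_dai_fmt() writes one of the DIF_* codes above
++ * into both the DAC_DIF and ADC_DIF fields of the Interface Formats
++ * register, so selecting I2S on both ports contributes
++ * (DIF_I2S << CS42888_FORMAT_DAC_DIF_OFFSET) |
++ * (DIF_I2S << CS42888_FORMAT_ADC_DIF_OFFSET) = 0x09.
++ */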
++
++#endif
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/Kconfig linux-3.14.40/sound/soc/codecs/Kconfig
+--- linux-3.14.40.orig/sound/soc/codecs/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/codecs/Kconfig 2015-05-01 14:58:06.707427001 -0500
+@@ -37,6 +37,7 @@
+ select SND_SOC_CS42L73 if I2C
+ select SND_SOC_CS4270 if I2C
+ select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI
++ select SND_SOC_CS42888 if I2C
+ select SND_SOC_CX20442 if TTY
+ select SND_SOC_DA7210 if I2C
+ select SND_SOC_DA7213 if I2C
+@@ -254,6 +255,9 @@
+ config SND_SOC_CS4271
+ tristate
+
++config SND_SOC_CS42888
++ tristate
++
+ config SND_SOC_CX20442
+ tristate
+ depends on TTY
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/Makefile linux-3.14.40/sound/soc/codecs/Makefile
+--- linux-3.14.40.orig/sound/soc/codecs/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/codecs/Makefile 2015-05-01 14:58:06.719427001 -0500
+@@ -23,6 +23,7 @@
+ snd-soc-cs42l73-objs := cs42l73.o
+ snd-soc-cs4270-objs := cs4270.o
+ snd-soc-cs4271-objs := cs4271.o
++snd-soc-cs42888-objs := cs42888.o
+ snd-soc-cx20442-objs := cx20442.o
+ snd-soc-da7210-objs := da7210.o
+ snd-soc-da7213-objs := da7213.o
+@@ -156,6 +157,7 @@
+ obj-$(CONFIG_SND_SOC_CS42L73) += snd-soc-cs42l73.o
+ obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
+ obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o
++obj-$(CONFIG_SND_SOC_CS42888) += snd-soc-cs42888.o
+ obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
+ obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
+ obj-$(CONFIG_SND_SOC_DA7213) += snd-soc-da7213.o
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/sgtl5000.c linux-3.14.40/sound/soc/codecs/sgtl5000.c
+--- linux-3.14.40.orig/sound/soc/codecs/sgtl5000.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/codecs/sgtl5000.c 2015-05-01 14:58:06.723427001 -0500
+@@ -756,7 +756,7 @@
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
+ int reg;
+-
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
+ if (ldo_regulator_is_enabled(dev))
+ return 0;
+
+@@ -788,10 +788,16 @@
+ {
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP,
++ SGTL5000_LINREG_SIMPLE_POWERUP);
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINEREG_D_POWERUP,
+ 0);
++dev_info(codec->dev, "%s: ANA_POWER = 0x%04x\n", __func__, snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER));
+
+ /* clear voltage info */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
+@@ -849,6 +855,7 @@
+ config.dev = codec->dev;
+ config.driver_data = ldo;
+ config.init_data = init_data;
++ config.ena_gpio = -EINVAL;
+
+ ldo->dev = regulator_register(&ldo->desc, &config);
+ if (IS_ERR(ldo->dev)) {
+@@ -1314,7 +1321,7 @@
+ return ret;
+ }
+
+- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
++ ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+ goto err_ldo_remove;
+@@ -1322,16 +1329,13 @@
+ ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+- goto err_regulator_free;
++ goto err_ldo_remove;
+
+ /* wait for all power rails bring up */
+ udelay(10);
+
+ return 0;
+
+-err_regulator_free:
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
+ err_ldo_remove:
+ if (!external_vddd)
+ ldo_regulator_remove(codec);
+@@ -1352,6 +1356,9 @@
+ return ret;
+ }
+
++ if (!devres_open_group(codec->dev, NULL, GFP_KERNEL))
++ return -ENOMEM;
++
+ ret = sgtl5000_enable_regulators(codec);
+ if (ret)
+ return ret;
+@@ -1408,8 +1415,9 @@
+ err:
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
+ ldo_regulator_remove(codec);
+
+ return ret;
+@@ -1423,8 +1431,9 @@
+
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
+ ldo_regulator_remove(codec);
+
+ return 0;
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/spdif_transmitter.c linux-3.14.40/sound/soc/codecs/spdif_transmitter.c
+--- linux-3.14.40.orig/sound/soc/codecs/spdif_transmitter.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/codecs/spdif_transmitter.c 2015-05-01 14:58:06.747427001 -0500
+@@ -24,7 +24,7 @@
+
+ #define DRV_NAME "spdif-dit"
+
+-#define STUB_RATES SNDRV_PCM_RATE_8000_96000
++#define STUB_RATES SNDRV_PCM_RATE_8000_192000
+ #define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+diff -Nur linux-3.14.40.orig/sound/soc/codecs/wm8962.c linux-3.14.40/sound/soc/codecs/wm8962.c
+--- linux-3.14.40.orig/sound/soc/codecs/wm8962.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/codecs/wm8962.c 2015-05-01 14:58:06.759427001 -0500
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/pm.h>
++#include <linux/clk.h>
+ #include <linux/gcd.h>
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+@@ -2942,7 +2943,8 @@
+ WM8962_DAC_MUTE, val);
+ }
+
+-#define WM8962_RATES SNDRV_PCM_RATE_8000_96000
++#define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\
++ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+ #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+@@ -3536,6 +3538,15 @@
+ pdata->gpio_init[i] = 0x0;
+ }
+
++ pdata->codec_mclk = devm_clk_get(&i2c->dev, NULL);
++
++ /*
++ * If clk_get() failed, we assume that clock's enabled by default.
++ * Otherwise, we let driver prepare and control the clock source.
++ */
++ if (IS_ERR(pdata->codec_mclk))
++ pdata->codec_mclk = NULL;
++
+ return 0;
+ }
+
+@@ -3567,6 +3578,9 @@
+ return ret;
+ }
+
++ if (wm8962->pdata.codec_mclk)
++ clk_prepare(wm8962->pdata.codec_mclk);
++
+ for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++)
+ wm8962->supplies[i].supply = wm8962_supply_names[i];
+
+@@ -3669,6 +3683,27 @@
+ WM8962_MICBIAS_LVL,
+ wm8962->pdata.mic_cfg);
+
++ /* set the default volume for playback and record*/
++ snd_soc_update_bits(codec, WM8962_HPOUTL_VOLUME,
++ WM8962_HPOUTL_VOL_MASK, 0x5d);
++ snd_soc_update_bits(codec, WM8962_HPOUTR_VOLUME,
++ WM8962_HPOUTR_VOL_MASK, 0x5d);
++ snd_soc_update_bits(codec, WM8962_SPKOUTL_VOLUME,
++ WM8962_SPKOUTL_VOL_MASK, 0x72);
++ snd_soc_update_bits(codec, WM8962_SPKOUTR_VOLUME,
++ WM8962_SPKOUTR_VOL_MASK, 0x72);
++
++ snd_soc_update_bits(codec, WM8962_LEFT_INPUT_VOLUME,
++ WM8962_INL_VOL_MASK, 0x3f);
++ snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_VOLUME,
++ WM8962_INR_VOL_MASK, 0x3f);
++ snd_soc_update_bits(codec, WM8962_LEFT_ADC_VOLUME,
++ WM8962_ADCL_VOL_MASK, 0xd8);
++ snd_soc_update_bits(codec, WM8962_RIGHT_ADC_VOLUME,
++ WM8962_ADCR_VOL_MASK, 0xd8);
++ snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_MIXER_VOLUME,
++ WM8962_IN3R_MIXINR_VOL_MASK, 0x7);
++
+ /* Latch volume update bits */
+ regmap_update_bits(wm8962->regmap, WM8962_LEFT_INPUT_VOLUME,
+ WM8962_IN_VU, WM8962_IN_VU);
+@@ -3752,6 +3787,9 @@
+
+ regcache_cache_only(wm8962->regmap, true);
+
++ /* The cache-only should be turned on before we power down the codec */
++ regcache_cache_only(wm8962->regmap, true);
++
+ /* The drivers should power up as needed */
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
+
+@@ -3760,11 +3798,19 @@
+ err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
+ err:
++ if (wm8962->pdata.codec_mclk)
++ clk_unprepare(wm8962->pdata.codec_mclk);
++
+ return ret;
+ }
+
+ static int wm8962_i2c_remove(struct i2c_client *client)
+ {
++ struct wm8962_priv *wm8962 = dev_get_drvdata(&client->dev);
++
++ if (wm8962->pdata.codec_mclk)
++ clk_unprepare(wm8962->pdata.codec_mclk);
++
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+ }
+@@ -3775,6 +3821,9 @@
+ struct wm8962_priv *wm8962 = dev_get_drvdata(dev);
+ int ret;
+
++ if (wm8962->pdata.codec_mclk)
++ clk_enable(wm8962->pdata.codec_mclk);
++
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
+ wm8962->supplies);
+ if (ret != 0) {
+@@ -3834,6 +3883,10 @@
+ regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies),
+ wm8962->supplies);
+
++ if (wm8962->pdata.codec_mclk)
++ clk_disable(wm8962->pdata.codec_mclk);
++
++
+ return 0;
+ }
+ #endif
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_asrc.c linux-3.14.40/sound/soc/fsl/fsl_asrc.c
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_asrc.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/fsl_asrc.c 2015-05-01 14:58:06.759427001 -0500
+@@ -0,0 +1,498 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/mxc_asrc.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++#include <sound/initval.h>
++#include <sound/dmaengine_pcm.h>
++
++#include "fsl_asrc.h"
++#include "imx-pcm.h"
++
++static bool filter(struct dma_chan *chan, void *param)
++{
++ if (!imx_dma_is_general_purpose(chan))
++ return false;
++
++ chan->private = param;
++
++ return true;
++}
++
++static int asrc_p2p_request_channel(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++ enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
++ struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
++ struct imx_dma_data *fe_filter_data = NULL;
++ struct imx_dma_data *be_filter_data = NULL;
++
++ struct dma_slave_config slave_config;
++ dma_cap_mask_t mask;
++ struct dma_chan *chan;
++ int ret;
++ struct snd_soc_dpcm *dpcm;
++
++ /* find the be for this fe stream */
++ list_for_each_entry(dpcm, &rtd->dpcm[substream->stream].be_clients, list_be) {
++ if (dpcm->fe == rtd) {
++ struct snd_soc_pcm_runtime *be = dpcm->be;
++ struct snd_soc_dai *dai = be->cpu_dai;
++ struct snd_pcm_substream *be_substream;
++ be_substream = snd_soc_dpcm_get_substream(be, substream->stream);
++ dma_params_be = snd_soc_dai_get_dma_data(dai, be_substream);
++ break;
++ }
++ }
++
++ if (!dma_params_be) {
++ dev_err(rtd->card->dev, "can not get be substream\n");
++ return -EINVAL;
++ }
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ dma_params_fe = &asrc_p2p->dma_params_tx;
++ else
++ dma_params_fe = &asrc_p2p->dma_params_rx;
++
++ fe_filter_data = dma_params_fe->filter_data;
++ be_filter_data = dma_params_be->filter_data;
++
++ if (asrc_p2p->output_width == 16)
++ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ else
++ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
++
++ /* reconfig memory to FIFO dma request */
++ dma_params_fe->addr = asrc_p2p->asrc_ops.asrc_p2p_per_addr(
++ asrc_p2p->asrc_index, 1);
++ fe_filter_data->dma_request0 = asrc_p2p->dmarx[asrc_p2p->asrc_index];
++ dma_params_fe->maxburst = dma_params_be->maxburst;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_CYCLIC, mask);
++
++ /* config p2p dma channel */
++ asrc_p2p->asrc_p2p_dma_data.peripheral_type = IMX_DMATYPE_ASRC;
++ asrc_p2p->asrc_p2p_dma_data.priority = DMA_PRIO_HIGH;
++ asrc_p2p->asrc_p2p_dma_data.dma_request1 = asrc_p2p->dmatx[asrc_p2p->asrc_index];
++ /* need to get target device's dma dma_addr, burstsize */
++ asrc_p2p->asrc_p2p_dma_data.dma_request0 = be_filter_data->dma_request0;
++
++ /* Request channel */
++ asrc_p2p->asrc_p2p_dma_chan =
++ dma_request_channel(mask, filter, &asrc_p2p->asrc_p2p_dma_data);
++
++ if (!asrc_p2p->asrc_p2p_dma_chan) {
++ dev_err(rtd->card->dev, "can not request dma channel\n");
++ goto error;
++ }
++ chan = asrc_p2p->asrc_p2p_dma_chan;
++
++ /*
++ * The bus width is not used by the SDMA engine for p2p transfers, so the
++ * maxburst is simply fixed to twice the back-end DAI's burst size.
++ */
++ slave_config.direction = DMA_DEV_TO_DEV;
++ slave_config.src_addr = asrc_p2p->asrc_ops.asrc_p2p_per_addr(asrc_p2p->asrc_index, 0);
++ slave_config.src_addr_width = buswidth;
++ slave_config.src_maxburst = dma_params_be->maxburst * 2;
++ slave_config.dst_addr = dma_params_be->addr;
++ slave_config.dst_addr_width = buswidth;
++ slave_config.dst_maxburst = dma_params_be->maxburst * 2;
++ slave_config.dma_request0 = be_filter_data->dma_request0;
++ slave_config.dma_request1 = asrc_p2p->dmatx[asrc_p2p->asrc_index];
++
++ ret = dmaengine_slave_config(asrc_p2p->asrc_p2p_dma_chan,
++ &slave_config);
++ if (ret) {
++ dev_err(rtd->card->dev, "can not config dma channel\n");
++ goto error;
++ }
++
++ return 0;
++error:
++ if (asrc_p2p->asrc_p2p_dma_chan) {
++ dma_release_channel(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_p2p_dma_chan = NULL;
++ }
++
++ return -EINVAL;
++}
++
++static int config_asrc(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++ unsigned int rate = params_rate(params);
++ unsigned int channel = params_channels(params);
++ struct asrc_config config = {0};
++ int output_word_width = 0;
++ int input_word_width = 0;
++ int ret = 0;
++ if ((channel != 2) && (channel != 4) && (channel != 6)) {
++ dev_err(cpu_dai->dev, "param channel is not correct\n");
++ return -EINVAL;
++ }
++
++ ret = asrc_p2p->asrc_ops.asrc_p2p_req_pair(channel, &asrc_p2p->asrc_index);
++ if (ret < 0) {
++ dev_err(cpu_dai->dev, "Fail to request asrc pair\n");
++ return -EINVAL;
++ }
++
++ if (asrc_p2p->output_width == 16)
++ output_word_width = ASRC_WIDTH_16_BIT;
++ else
++ output_word_width = ASRC_WIDTH_24_BIT;
++
++ switch (params_format(params)) {
++ case SNDRV_PCM_FORMAT_U16:
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_S16_BE:
++ input_word_width = ASRC_WIDTH_16_BIT;
++ break;
++ case SNDRV_PCM_FORMAT_S20_3LE:
++ case SNDRV_PCM_FORMAT_S20_3BE:
++ case SNDRV_PCM_FORMAT_S24_3LE:
++ case SNDRV_PCM_FORMAT_S24_3BE:
++ case SNDRV_PCM_FORMAT_S24_BE:
++ case SNDRV_PCM_FORMAT_S24_LE:
++ case SNDRV_PCM_FORMAT_U24_BE:
++ case SNDRV_PCM_FORMAT_U24_LE:
++ case SNDRV_PCM_FORMAT_U24_3BE:
++ case SNDRV_PCM_FORMAT_U24_3LE:
++ input_word_width = ASRC_WIDTH_24_BIT;
++ break;
++ case SNDRV_PCM_FORMAT_S8:
++ case SNDRV_PCM_FORMAT_U8:
++ case SNDRV_PCM_FORMAT_S32:
++ case SNDRV_PCM_FORMAT_U32:
++ default:
++ dev_err(cpu_dai->dev, "Format is not support!\n");
++ return -EINVAL;
++ }
++
++ config.input_word_width = input_word_width;
++ config.output_word_width = output_word_width;
++ config.pair = asrc_p2p->asrc_index;
++ config.channel_num = channel;
++ config.input_sample_rate = rate;
++ config.output_sample_rate = asrc_p2p->output_rate;
++ config.inclk = INCLK_NONE;
++
++ switch (asrc_p2p->per_dev) {
++ case SSI1:
++ config.outclk = OUTCLK_SSI1_TX;
++ break;
++ case SSI2:
++ config.outclk = OUTCLK_SSI2_TX;
++ break;
++ case SSI3:
++ config.outclk = OUTCLK_SSI3_TX;
++ break;
++ case ESAI:
++ config.outclk = OUTCLK_ESAI_TX;
++ break;
++ default:
++ dev_err(cpu_dai->dev, "peripheral device is not correct\n");
++ return -EINVAL;
++ }
++
++ ret = asrc_p2p->asrc_ops.asrc_p2p_config_pair(&config);
++ if (ret < 0) {
++ dev_err(cpu_dai->dev, "Fail to config asrc\n");
++ return ret;
++ }
++
++ return 0;
++}
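++
++/*
++ * Example flow (values are illustrative): for a 44.1 kHz, 2-channel,
++ * S16_LE front-end stream routed to an ESAI back end with
++ * fsl,output-rate = <48000> in the device tree, config_asrc() requests an
++ * ASRC pair for 2 channels and configures it for 16-bit, 44100 Hz input and
++ * 48000 Hz output clocked from OUTCLK_ESAI_TX.
++ */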
++
++static int fsl_asrc_p2p_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params,
++ struct snd_soc_dai *cpu_dai)
++{
++ int ret = 0;
++
++ ret = config_asrc(substream, params);
++ if (ret < 0)
++ return ret;
++
++ return asrc_p2p_request_channel(substream);
++}
++
++static int fsl_asrc_p2p_hw_free(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *cpu_dai)
++{
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++
++ if (asrc_p2p->asrc_p2p_dma_chan) {
++ /* Release p2p dma resource */
++ dma_release_channel(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_p2p_dma_chan = NULL;
++ }
++
++ if (asrc_p2p->asrc_index != -1) {
++ asrc_p2p->asrc_ops.asrc_p2p_release_pair(asrc_p2p->asrc_index);
++ asrc_p2p->asrc_ops.asrc_p2p_finish_conv(asrc_p2p->asrc_index);
++ }
++ asrc_p2p->asrc_index = -1;
++
++ return 0;
++}
++
++static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream,
++ struct fsl_asrc_p2p *asrc_p2p)
++{
++ struct dma_async_tx_descriptor *desc = asrc_p2p->asrc_p2p_desc;
++ struct dma_chan *chan = asrc_p2p->asrc_p2p_dma_chan;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ desc = dmaengine_prep_dma_cyclic(chan, 0xffff, 64, 64, DMA_DEV_TO_DEV, 0);
++ if (!desc) {
++ dev_err(dev, "failed to prepare slave dma\n");
++ return -EINVAL;
++ }
++
++ dmaengine_submit(desc);
++
++ return 0;
++}
++
++static int fsl_asrc_p2p_trigger(struct snd_pcm_substream *substream, int cmd,
++ struct snd_soc_dai *cpu_dai)
++{
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(cpu_dai);
++ int ret;
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ ret = fsl_asrc_dma_prepare_and_submit(substream, asrc_p2p);
++ if (ret)
++ return ret;
++ dma_async_issue_pending(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_ops.asrc_p2p_start_conv(asrc_p2p->asrc_index);
++ break;
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ case SNDRV_PCM_TRIGGER_STOP:
++ dmaengine_terminate_all(asrc_p2p->asrc_p2p_dma_chan);
++ asrc_p2p->asrc_ops.asrc_p2p_stop_conv(asrc_p2p->asrc_index);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++#define IMX_ASRC_RATES SNDRV_PCM_RATE_8000_192000
++
++#define IMX_ASRC_FORMATS \
++ (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE | \
++ SNDRV_PCM_FMTBIT_S20_3LE)
++
++static struct snd_soc_dai_ops fsl_asrc_p2p_dai_ops = {
++ .trigger = fsl_asrc_p2p_trigger,
++ .hw_params = fsl_asrc_p2p_hw_params,
++ .hw_free = fsl_asrc_p2p_hw_free,
++};
++
++static int fsl_asrc_p2p_dai_probe(struct snd_soc_dai *dai)
++{
++ struct fsl_asrc_p2p *asrc_p2p = snd_soc_dai_get_drvdata(dai);
++
++ dai->playback_dma_data = &asrc_p2p->dma_params_tx;
++ dai->capture_dma_data = &asrc_p2p->dma_params_rx;
++
++ return 0;
++}
++
++static struct snd_soc_dai_driver fsl_asrc_p2p_dai = {
++ .probe = fsl_asrc_p2p_dai_probe,
++ .playback = {
++ .stream_name = "asrc-Playback",
++ .channels_min = 1,
++ .channels_max = 10,
++ .rates = IMX_ASRC_RATES,
++ .formats = IMX_ASRC_FORMATS,
++ },
++ .capture = {
++ .stream_name = "asrc-Capture",
++ .channels_min = 1,
++ .channels_max = 4,
++ .rates = IMX_ASRC_RATES,
++ .formats = IMX_ASRC_FORMATS,
++ },
++ .ops = &fsl_asrc_p2p_dai_ops,
++};
++
++static const struct snd_soc_component_driver fsl_asrc_p2p_component = {
++ .name = "fsl-asrc-p2p",
++};
++
++/*
++ * This function will register the snd_soc_pcm_link drivers.
++ */
++static int fsl_asrc_p2p_probe(struct platform_device *pdev)
++{
++ struct fsl_asrc_p2p *asrc_p2p;
++ struct device_node *np = pdev->dev.of_node;
++ const char *p;
++ const uint32_t *iprop_rate, *iprop_width;
++ int ret = 0;
++
++ if (!of_device_is_available(np)) {
++ dev_err(&pdev->dev, "There is no device node\n");
++ return -ENODEV;
++ }
++
++ asrc_p2p = devm_kzalloc(&pdev->dev, sizeof(struct fsl_asrc_p2p), GFP_KERNEL);
++ if (!asrc_p2p) {
++ dev_err(&pdev->dev, "can not alloc memory\n");
++ return -ENOMEM;
++ }
++ asrc_p2p->asrc_ops.asrc_p2p_start_conv = asrc_start_conv;
++ asrc_p2p->asrc_ops.asrc_p2p_stop_conv = asrc_stop_conv;
++ asrc_p2p->asrc_ops.asrc_p2p_per_addr = asrc_get_per_addr;
++ asrc_p2p->asrc_ops.asrc_p2p_req_pair = asrc_req_pair;
++ asrc_p2p->asrc_ops.asrc_p2p_config_pair = asrc_config_pair;
++ asrc_p2p->asrc_ops.asrc_p2p_release_pair = asrc_release_pair;
++ asrc_p2p->asrc_ops.asrc_p2p_finish_conv = asrc_finish_conv;
++
++ asrc_p2p->asrc_index = -1;
++
++ iprop_rate = of_get_property(np, "fsl,output-rate", NULL);
++ if (iprop_rate)
++ asrc_p2p->output_rate = be32_to_cpup(iprop_rate);
++ else {
++ dev_err(&pdev->dev, "There is no output-rate in dts\n");
++ return -EINVAL;
++ }
++ iprop_width = of_get_property(np, "fsl,output-width", NULL);
++ if (iprop_width)
++ asrc_p2p->output_width = be32_to_cpup(iprop_width);
++
++ if (asrc_p2p->output_width != 16 && asrc_p2p->output_width != 24) {
++ dev_err(&pdev->dev, "output_width is not acceptable\n");
++ return -EINVAL;
++ }
++
++ ret = of_property_read_u32_array(np,
++ "fsl,asrc-dma-tx-events", asrc_p2p->dmatx, 3);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to get fsl,asrc-dma-tx-events.\n");
++ return -EINVAL;
++ }
++
++ ret = of_property_read_u32_array(np,
++ "fsl,asrc-dma-rx-events", asrc_p2p->dmarx, 3);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to get fsl,asrc-dma-rx-events.\n");
++ return -EINVAL;
++ }
++
++ asrc_p2p->filter_data_tx.peripheral_type = IMX_DMATYPE_ASRC;
++ asrc_p2p->filter_data_rx.peripheral_type = IMX_DMATYPE_ASRC;
++
++ asrc_p2p->dma_params_tx.filter_data = &asrc_p2p->filter_data_tx;
++ asrc_p2p->dma_params_rx.filter_data = &asrc_p2p->filter_data_rx;
++
++ platform_set_drvdata(pdev, asrc_p2p);
++
++ p = strrchr(np->full_name, '/') + 1;
++ strcpy(asrc_p2p->name, p);
++ fsl_asrc_p2p_dai.name = asrc_p2p->name;
++
++ ret = snd_soc_register_component(&pdev->dev, &fsl_asrc_p2p_component,
++ &fsl_asrc_p2p_dai, 1);
++ if (ret) {
++ dev_err(&pdev->dev, "register DAI failed\n");
++ goto failed_register;
++ }
++
++ asrc_p2p->soc_platform_pdev = platform_device_register_simple(
++ "imx-pcm-asrc", -1, NULL, 0);
++ if (IS_ERR(asrc_p2p->soc_platform_pdev)) {
++ ret = PTR_ERR(asrc_p2p->soc_platform_pdev);
++ goto failed_pdev_alloc;
++ }
++
++ ret = imx_pcm_dma_init(asrc_p2p->soc_platform_pdev, SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
++ SND_DMAENGINE_PCM_FLAG_NO_DT |
++ SND_DMAENGINE_PCM_FLAG_COMPAT,
++ IMX_ASRC_DMABUF_SIZE);
++ if (ret) {
++ dev_err(&pdev->dev, "init pcm dma failed\n");
++ goto failed_pcm_init;
++ }
++
++ return 0;
++
++failed_pcm_init:
++ platform_device_unregister(asrc_p2p->soc_platform_pdev);
++failed_pdev_alloc:
++ snd_soc_unregister_component(&pdev->dev);
++failed_register:
++
++ return ret;
++}
++
++static int fsl_asrc_p2p_remove(struct platform_device *pdev)
++{
++ struct fsl_asrc_p2p *asrc_p2p = platform_get_drvdata(pdev);
++
++ platform_device_unregister(asrc_p2p->soc_platform_pdev);
++ snd_soc_unregister_component(&pdev->dev);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_asrc_p2p_dt_ids[] = {
++ { .compatible = "fsl,imx6q-asrc-p2p", },
++ { /* sentinel */ }
++};
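++
++/*
++ * Hypothetical device tree node consumed by this driver (property values are
++ * examples only; the DMA event numbers are board/SoC specific):
++ *
++ *	asrc_p2p: asrc_p2p {
++ *		compatible = "fsl,imx6q-asrc-p2p";
++ *		fsl,output-rate = <48000>;
++ *		fsl,output-width = <16>;
++ *		fsl,asrc-dma-rx-events = <20 21 22>;
++ *		fsl,asrc-dma-tx-events = <23 24 25>;
++ *	};
++ */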
++
++static struct platform_driver fsl_asrc_p2p_driver = {
++ .probe = fsl_asrc_p2p_probe,
++ .remove = fsl_asrc_p2p_remove,
++ .driver = {
++ .name = "fsl-asrc-p2p",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_asrc_p2p_dt_ids,
++ },
++};
++module_platform_driver(fsl_asrc_p2p_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ASoC ASRC P2P driver");
++MODULE_ALIAS("platform:fsl-asrc-p2p");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_asrc.h linux-3.14.40/sound/soc/fsl/fsl_asrc.h
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_asrc.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/fsl_asrc.h 2015-05-01 14:58:06.759427001 -0500
+@@ -0,0 +1,48 @@
++/*
++ * fsl_asrc.h - ALSA ASRC interface
++ *
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. This file is licensed
++ * under the terms of the GNU General Public License version 2. This
++ * program is licensed "as is" without any warranty of any kind, whether
++ * express or implied.
++ */
++
++#ifndef _FSL_ASRC_P2P_H
++#define _FSL_ASRC_P2P_H
++
++#include <linux/mxc_asrc.h>
++#include <sound/dmaengine_pcm.h>
++#include <linux/platform_data/dma-imx.h>
++
++enum peripheral_device_type {
++ UNKNOWN,
++ SSI1,
++ SSI2,
++ SSI3,
++ ESAI,
++};
++
++struct fsl_asrc_p2p {
++ int output_rate;
++ int output_width;
++ enum asrc_pair_index asrc_index;
++ enum peripheral_device_type per_dev;
++ struct asrc_p2p_ops asrc_ops;
++
++ struct snd_dmaengine_dai_dma_data dma_params_rx;
++ struct snd_dmaengine_dai_dma_data dma_params_tx;
++ struct imx_dma_data filter_data_tx;
++ struct imx_dma_data filter_data_rx;
++
++ struct dma_async_tx_descriptor *asrc_p2p_desc;
++ struct dma_chan *asrc_p2p_dma_chan;
++ struct imx_dma_data asrc_p2p_dma_data;
++ struct platform_device *soc_platform_pdev;
++
++ int dmarx[3];
++ int dmatx[3];
++
++ char name[32];
++};
++
++#endif
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_asrc_pcm.c linux-3.14.40/sound/soc/fsl/fsl_asrc_pcm.c
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_asrc_pcm.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/fsl_asrc_pcm.c 2015-05-01 14:58:06.759427001 -0500
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++
++/*
++ * Register a separate platform module, "imx-pcm-asrc", as the PCM platform.
++ * If the asrc_p2p node itself were used as the PCM platform, there would be
++ * a problem: snd_soc_dapm_new_dai_widgets() would be called twice, once when
++ * probing the link DAIs and once when probing the platform, so two DAI
++ * widgets would be added to the widget list while only the second one is
++ * recorded in dai->playback_widget. The machine driver adds the audio route
++ * by walking the widget list, finds the first cpu_dai widget, and links the
++ * route to it. With the FE/BE architecture used for ASRC p2p, the route has
++ * to be resolved through fe->cpu_dai->playback_widget, which is the second
++ * widget, so no usable audio route can be found for the p2p case. Using a
++ * separate PCM platform avoids this issue.
++ */
++static struct platform_driver imx_pcm_driver = {
++ .driver = {
++ .name = "imx-pcm-asrc",
++ .owner = THIS_MODULE,
++ },
++};
++
++module_platform_driver(imx_pcm_driver);
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ASoC PCM driver");
++MODULE_ALIAS("platform:imx-pcm-asrc");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_esai.c linux-3.14.40/sound/soc/fsl/fsl_esai.c
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_esai.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/fsl_esai.c 2015-05-01 14:58:06.775427001 -0500
+@@ -785,7 +785,7 @@
+ return ret;
+ }
+
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, NULL, IMX_ESAI_DMABUF_SIZE);
+ if (ret)
+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_hdmi.c linux-3.14.40/sound/soc/fsl/fsl_hdmi.c
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_hdmi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/fsl_hdmi.c 2015-05-01 14:58:06.775427001 -0500
+@@ -0,0 +1,614 @@
++/*
++ * ALSA SoC HDMI Audio Layer for Freescale i.MX
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * Some code from patch_hdmi.c
++ * Copyright (c) 2008-2010 Intel Corporation. All rights reserved.
++ * Copyright (c) 2006 ATI Technologies Inc.
++ * Copyright (c) 2008 NVIDIA Corp. All rights reserved.
++ * Copyright (c) 2008 Wei Ni <wni@nvidia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <sound/pcm.h>
++#include <sound/soc.h>
++#include <sound/asoundef.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "imx-hdmi.h"
++
++
++static struct mxc_edid_cfg edid_cfg;
++
++static u32 playback_rates[HDMI_MAX_RATES];
++static u32 playback_sample_size[HDMI_MAX_SAMPLE_SIZE];
++static u32 playback_channels[HDMI_MAX_CHANNEL_CONSTRAINTS];
++
++static struct snd_pcm_hw_constraint_list playback_constraint_rates;
++static struct snd_pcm_hw_constraint_list playback_constraint_bits;
++static struct snd_pcm_hw_constraint_list playback_constraint_channels;
++
++#ifdef DEBUG
++static void dumpregs(struct snd_soc_dai *dai)
++{
++ u32 n, cts;
++
++ cts = (hdmi_readb(HDMI_AUD_CTS3) << 16) |
++ (hdmi_readb(HDMI_AUD_CTS2) << 8) |
++ hdmi_readb(HDMI_AUD_CTS1);
++
++ n = (hdmi_readb(HDMI_AUD_N3) << 16) |
++ (hdmi_readb(HDMI_AUD_N2) << 8) |
++ hdmi_readb(HDMI_AUD_N1);
++
++ dev_dbg(dai->dev, "HDMI_PHY_CONF0 0x%02x\n",
++ hdmi_readb(HDMI_PHY_CONF0));
++ dev_dbg(dai->dev, "HDMI_MC_CLKDIS 0x%02x\n",
++ hdmi_readb(HDMI_MC_CLKDIS));
++ dev_dbg(dai->dev, "HDMI_AUD_N[1-3] 0x%06x (%d)\n",
++ n, n);
++ dev_dbg(dai->dev, "HDMI_AUD_CTS[1-3] 0x%06x (%d)\n",
++ cts, cts);
++ dev_dbg(dai->dev, "HDMI_FC_AUDSCONF 0x%02x\n",
++ hdmi_readb(HDMI_FC_AUDSCONF));
++}
++#else
++static void dumpregs(struct snd_soc_dai *dai) {}
++#endif
++
++enum cea_speaker_placement {
++ FL = (1 << 0), /* Front Left */
++ FC = (1 << 1), /* Front Center */
++ FR = (1 << 2), /* Front Right */
++ FLC = (1 << 3), /* Front Left Center */
++ FRC = (1 << 4), /* Front Right Center */
++ RL = (1 << 5), /* Rear Left */
++ RC = (1 << 6), /* Rear Center */
++ RR = (1 << 7), /* Rear Right */
++ RLC = (1 << 8), /* Rear Left Center */
++ RRC = (1 << 9), /* Rear Right Center */
++ LFE = (1 << 10), /* Low Frequency Effect */
++ FLW = (1 << 11), /* Front Left Wide */
++ FRW = (1 << 12), /* Front Right Wide */
++ FLH = (1 << 13), /* Front Left High */
++ FCH = (1 << 14), /* Front Center High */
++ FRH = (1 << 15), /* Front Right High */
++ TC = (1 << 16), /* Top Center */
++};
++
++/*
++ * EDID SA bits in the CEA Speaker Allocation data block
++ */
++static int edid_speaker_allocation_bits[] = {
++ [0] = FL | FR,
++ [1] = LFE,
++ [2] = FC,
++ [3] = RL | RR,
++ [4] = RC,
++ [5] = FLC | FRC,
++ [6] = RLC | RRC,
++ [7] = FLW | FRW,
++ [8] = FLH | FRH,
++ [9] = TC,
++ [10] = FCH,
++};
++
++struct cea_channel_speaker_allocation {
++ int ca_index;
++ int speakers[8];
++
++ /* Derived values, just for convenience */
++ int channels;
++ int spk_mask;
++};
++
++/*
++ * This is an ordered list!
++ *
++ * The preceding ones have better chances to be selected by
++ * hdmi_channel_allocation().
++ */
++static struct cea_channel_speaker_allocation channel_allocations[] = {
++ /* channel: 7 6 5 4 3 2 1 0 */
++ { .ca_index = 0x00, .speakers = { 0, 0, 0, 0, 0, 0, FR, FL },},
++ /* 2.1 */
++ { .ca_index = 0x01, .speakers = { 0, 0, 0, 0, 0, LFE, FR, FL },},
++ /* Dolby Surround */
++ { .ca_index = 0x08, .speakers = { 0, 0, RR, RL, 0, 0, FR, FL },}, /* Prefer FL/FR/RL/RR over FL/FR/LFE/FC */
++ { .ca_index = 0x02, .speakers = { 0, 0, 0, 0, FC, 0, FR, FL },},
++ { .ca_index = 0x03, .speakers = { 0, 0, 0, 0, FC, LFE, FR, FL },},
++ { .ca_index = 0x04, .speakers = { 0, 0, 0, RC, 0, 0, FR, FL },},
++ { .ca_index = 0x05, .speakers = { 0, 0, 0, RC, 0, LFE, FR, FL },},
++ { .ca_index = 0x06, .speakers = { 0, 0, 0, RC, FC, 0, FR, FL },},
++ { .ca_index = 0x07, .speakers = { 0, 0, 0, RC, FC, LFE, FR, FL },},
++ { .ca_index = 0x09, .speakers = { 0, 0, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x0a, .speakers = { 0, 0, RR, RL, FC, 0, FR, FL },},
++ /* surround51 */
++ { .ca_index = 0x0b, .speakers = { 0, 0, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x0c, .speakers = { 0, RC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x0d, .speakers = { 0, RC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x0e, .speakers = { 0, RC, RR, RL, FC, 0, FR, FL },},
++ /* 6.1 */
++ { .ca_index = 0x0f, .speakers = { 0, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x10, .speakers = { RRC, RLC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x11, .speakers = { RRC, RLC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x12, .speakers = { RRC, RLC, RR, RL, FC, 0, FR, FL },},
++ /* surround71 */
++ { .ca_index = 0x13, .speakers = { RRC, RLC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x14, .speakers = { FRC, FLC, 0, 0, 0, 0, FR, FL },},
++ { .ca_index = 0x15, .speakers = { FRC, FLC, 0, 0, 0, LFE, FR, FL },},
++ { .ca_index = 0x16, .speakers = { FRC, FLC, 0, 0, FC, 0, FR, FL },},
++ { .ca_index = 0x17, .speakers = { FRC, FLC, 0, 0, FC, LFE, FR, FL },},
++ { .ca_index = 0x18, .speakers = { FRC, FLC, 0, RC, 0, 0, FR, FL },},
++ { .ca_index = 0x19, .speakers = { FRC, FLC, 0, RC, 0, LFE, FR, FL },},
++ { .ca_index = 0x1a, .speakers = { FRC, FLC, 0, RC, FC, 0, FR, FL },},
++ { .ca_index = 0x1b, .speakers = { FRC, FLC, 0, RC, FC, LFE, FR, FL },},
++ { .ca_index = 0x1c, .speakers = { FRC, FLC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x1d, .speakers = { FRC, FLC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x1e, .speakers = { FRC, FLC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x1f, .speakers = { FRC, FLC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x20, .speakers = { 0, FCH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x21, .speakers = { 0, FCH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x22, .speakers = { TC, 0, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x23, .speakers = { TC, 0, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x24, .speakers = { FRH, FLH, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x25, .speakers = { FRH, FLH, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x26, .speakers = { FRW, FLW, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x27, .speakers = { FRW, FLW, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x28, .speakers = { TC, RC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x29, .speakers = { TC, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2a, .speakers = { FCH, RC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2b, .speakers = { FCH, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2c, .speakers = { TC, FCH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2d, .speakers = { TC, FCH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2e, .speakers = { FRH, FLH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2f, .speakers = { FRH, FLH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x30, .speakers = { FRW, FLW, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x31, .speakers = { FRW, FLW, RR, RL, FC, LFE, FR, FL },},
++};
++
++/* Compute derived values in channel_allocations[] */
++static void init_channel_allocations(void)
++{
++ struct cea_channel_speaker_allocation *p;
++ int i, j;
++
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ p = channel_allocations + i;
++ p->channels = 0;
++ p->spk_mask = 0;
++ for (j = 0; j < ARRAY_SIZE(p->speakers); j++)
++ if (p->speakers[j]) {
++ p->channels++;
++ p->spk_mask |= p->speakers[j];
++ }
++ }
++}
++
++/*
++ * The transformation takes two steps:
++ *
++ * speaker_alloc => (edid_speaker_allocation_bits[]) => spk_mask
++ * spk_mask => (channel_allocations[]) => CA
++ *
++ * TODO: it could select the wrong CA from multiple candidates.
++*/
++static int hdmi_channel_allocation(int channels)
++{
++ int spk_mask = 0, ca = 0, i, tmpchn, tmpspk;
++
++ /* CA defaults to 0 for basic stereo audio */
++ if (channels <= 2)
++ return 0;
++
++ /*
++ * Expand EDID's speaker allocation mask
++ *
++	 * EDID reports the speaker mask in a compact (paired) form, so
++	 * expand EDID's notion to match the one used by the Audio InfoFrame.
++ */
++ for (i = 0; i < ARRAY_SIZE(edid_speaker_allocation_bits); i++) {
++ if (edid_cfg.speaker_alloc & (1 << i))
++ spk_mask |= edid_speaker_allocation_bits[i];
++ }
++
++ /* Search for the first working match in the CA table */
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ tmpchn = channel_allocations[i].channels;
++ tmpspk = channel_allocations[i].spk_mask;
++
++ if (channels == tmpchn && (spk_mask & tmpspk) == tmpspk) {
++ ca = channel_allocations[i].ca_index;
++ break;
++ }
++ }
++
++ return ca;
++}
++
++static void hdmi_set_audio_infoframe(unsigned int channels)
++{
++ u8 audiconf0, audiconf2;
++
++ /*
++ * From CEA-861-D spec:
++ * HDMI requires the CT, SS and SF fields to be set to 0 ("Refer
++ * to Stream Header") as these items are carried in the audio stream.
++ *
++ * So we only set the CC and CA fields.
++ */
++ audiconf0 = ((channels - 1) << HDMI_FC_AUDICONF0_CC_OFFSET) &
++ HDMI_FC_AUDICONF0_CC_MASK;
++
++ audiconf2 = hdmi_channel_allocation(channels);
++
++ hdmi_writeb(audiconf0, HDMI_FC_AUDICONF0);
++ hdmi_writeb(0, HDMI_FC_AUDICONF1);
++ hdmi_writeb(audiconf2, HDMI_FC_AUDICONF2);
++ hdmi_writeb(0, HDMI_FC_AUDICONF3);
++}
++
++static int cea_audio_rates[HDMI_MAX_RATES] = {
++ 32000, 44100, 48000, 88200, 96000, 176400, 192000,
++};
++
++static void fsl_hdmi_get_playback_rates(void)
++{
++ int i, count = 0;
++ u8 rates;
++
++ /* Always assume basic audio support */
++ rates = edid_cfg.sample_rates | 0x7;
++
++ for (i = 0 ; i < HDMI_MAX_RATES ; i++)
++ if ((rates & (1 << i)) != 0)
++ playback_rates[count++] = cea_audio_rates[i];
++
++ playback_constraint_rates.list = playback_rates;
++ playback_constraint_rates.count = count;
++
++ for (i = 0 ; i < playback_constraint_rates.count ; i++)
++ pr_debug("%s: constraint = %d Hz\n", __func__, playback_rates[i]);
++}
++
++static void fsl_hdmi_get_playback_sample_size(void)
++{
++ int i = 0;
++
++ /* Always assume basic audio support */
++ playback_sample_size[i++] = 16;
++
++ if (edid_cfg.sample_sizes & 0x4)
++ playback_sample_size[i++] = 32;
++
++ playback_constraint_bits.list = playback_sample_size;
++ playback_constraint_bits.count = i;
++
++ for (i = 0 ; i < playback_constraint_bits.count ; i++)
++ pr_debug("%s: constraint = %d bits\n", __func__, playback_sample_size[i]);
++}
++
++static void fsl_hdmi_get_playback_channels(void)
++{
++ int channels = 2, i = 0;
++
++ /* Always assume basic audio support */
++ playback_channels[i++] = channels;
++ channels += 2;
++
++ while ((i < HDMI_MAX_CHANNEL_CONSTRAINTS) &&
++ (channels <= edid_cfg.max_channels)) {
++ playback_channels[i++] = channels;
++ channels += 2;
++ }
++
++ playback_constraint_channels.list = playback_channels;
++ playback_constraint_channels.count = i;
++
++ for (i = 0 ; i < playback_constraint_channels.count ; i++)
++ pr_debug("%s: constraint = %d channels\n", __func__, playback_channels[i]);
++}
++
++static int fsl_hdmi_update_constraints(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ int edid_status, ret;
++
++ edid_status = hdmi_get_edid_cfg(&edid_cfg);
++
++ if (edid_status && !edid_cfg.hdmi_cap)
++ return -1;
++
++ fsl_hdmi_get_playback_rates();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
++ &playback_constraint_rates);
++ if (ret)
++ return ret;
++
++ fsl_hdmi_get_playback_sample_size();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
++ &playback_constraint_bits);
++ if (ret)
++ return ret;
++
++ fsl_hdmi_get_playback_channels();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
++ &playback_constraint_channels);
++ if (ret)
++ return ret;
++
++ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int fsl_hdmi_soc_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct imx_hdmi *hdmi_data = snd_soc_dai_get_drvdata(dai);
++ int ret;
++
++ ret = fsl_hdmi_update_constraints(substream);
++ if (ret < 0)
++ return ret;
++
++ clk_prepare_enable(hdmi_data->isfr_clk);
++ clk_prepare_enable(hdmi_data->iahb_clk);
++
++ dev_dbg(dai->dev, "%s hdmi clks: isfr:%d iahb:%d\n", __func__,
++ (int)clk_get_rate(hdmi_data->isfr_clk),
++ (int)clk_get_rate(hdmi_data->iahb_clk));
++
++ /* Indicates the subpacket represents a flatline sample */
++ hdmi_audio_writeb(FC_AUDSCONF, AUD_PACKET_SAMPFIT, 0x0);
++
++ return 0;
++}
++
++static void fsl_hdmi_soc_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct imx_hdmi *hdmi_data = snd_soc_dai_get_drvdata(dai);
++
++ clk_disable_unprepare(hdmi_data->iahb_clk);
++ clk_disable_unprepare(hdmi_data->isfr_clk);
++}
++
++static int fsl_hdmi_soc_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++
++ hdmi_set_audio_infoframe(runtime->channels);
++ hdmi_audio_writeb(FC_AUDSCONF, AUD_PACKET_LAYOUT,
++ (runtime->channels > 2) ? 0x1 : 0x0);
++ hdmi_set_sample_rate(runtime->rate);
++ dumpregs(dai);
++
++ return 0;
++}
++
++static struct snd_soc_dai_ops fsl_hdmi_soc_dai_ops = {
++ .startup = fsl_hdmi_soc_startup,
++ .shutdown = fsl_hdmi_soc_shutdown,
++ .prepare = fsl_hdmi_soc_prepare,
++};
++
++/* IEC60958 status functions */
++static int fsl_hdmi_iec_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
++ uinfo->count = 1;
++
++ return 0;
++}
++
++
++static int fsl_hdmi_iec_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uvalue)
++{
++ int i;
++
++ for (i = 0 ; i < 6 ; i++)
++ uvalue->value.iec958.status[i] = iec_header.status[i];
++
++ return 0;
++}
++
++static int fsl_hdmi_iec_put(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uvalue)
++{
++ int i;
++
++ /* Do not allow professional mode */
++ if (uvalue->value.iec958.status[0] & IEC958_AES0_PROFESSIONAL)
++ return -EPERM;
++
++ for (i = 0 ; i < 6 ; i++) {
++ iec_header.status[i] = uvalue->value.iec958.status[i];
++ pr_debug("%s status[%d]=0x%02x\n", __func__, i, iec_header.status[i]);
++ }
++
++ return 0;
++}
++
++static struct snd_kcontrol_new fsl_hdmi_ctrls[] = {
++	/* IEC958 channel status control */
++ {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
++ .access = SNDRV_CTL_ELEM_ACCESS_READ |
++ SNDRV_CTL_ELEM_ACCESS_WRITE |
++ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
++ .info = fsl_hdmi_iec_info,
++ .get = fsl_hdmi_iec_get,
++ .put = fsl_hdmi_iec_put,
++ },
++};
++
++static int fsl_hdmi_soc_dai_probe(struct snd_soc_dai *dai)
++{
++ int ret;
++
++ init_channel_allocations();
++
++ ret = snd_soc_add_dai_controls(dai, fsl_hdmi_ctrls,
++ ARRAY_SIZE(fsl_hdmi_ctrls));
++ if (ret)
++ dev_warn(dai->dev, "failed to add dai controls\n");
++
++ return 0;
++}
++
++static struct snd_soc_dai_driver fsl_hdmi_dai = {
++ .probe = &fsl_hdmi_soc_dai_probe,
++ .playback = {
++ .channels_min = 2,
++ .channels_max = 8,
++ .rates = MXC_HDMI_RATES_PLAYBACK,
++ .formats = MXC_HDMI_FORMATS_PLAYBACK,
++ },
++ .ops = &fsl_hdmi_soc_dai_ops,
++};
++
++static const struct snd_soc_component_driver fsl_hdmi_component = {
++ .name = "fsl-hdmi",
++};
++
++static int fsl_hdmi_dai_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct imx_hdmi *hdmi_data;
++ int ret = 0;
++
++ if (!np)
++ return -ENODEV;
++
++ if (!hdmi_get_registered()) {
++ dev_err(&pdev->dev, "failed to probe. Load HDMI-video first.\n");
++		return -ENODEV;
++ }
++
++ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
++ if (!hdmi_data) {
++ dev_err(&pdev->dev, "failed to alloc hdmi_data\n");
++ return -ENOMEM;
++ }
++
++ hdmi_data->pdev = pdev;
++
++ memcpy(&hdmi_data->cpu_dai_drv, &fsl_hdmi_dai, sizeof(fsl_hdmi_dai));
++ hdmi_data->cpu_dai_drv.name = np->name;
++
++ hdmi_data->isfr_clk = devm_clk_get(&pdev->dev, "hdmi_isfr");
++ if (IS_ERR(hdmi_data->isfr_clk)) {
++ ret = PTR_ERR(hdmi_data->isfr_clk);
++ dev_err(&pdev->dev, "failed to get HDMI isfr clk: %d\n", ret);
++ return -EINVAL;
++ }
++
++ hdmi_data->iahb_clk = devm_clk_get(&pdev->dev, "hdmi_iahb");
++ if (IS_ERR(hdmi_data->iahb_clk)) {
++ ret = PTR_ERR(hdmi_data->iahb_clk);
++ dev_err(&pdev->dev, "failed to get HDMI ahb clk: %d\n", ret);
++ return -EINVAL;
++ }
++
++ dev_set_drvdata(&pdev->dev, hdmi_data);
++ ret = snd_soc_register_component(&pdev->dev, &fsl_hdmi_component,
++ &hdmi_data->cpu_dai_drv, 1);
++ if (ret) {
++ dev_err(&pdev->dev, "register DAI failed\n");
++ return ret;
++ }
++
++ hdmi_data->codec_dev = platform_device_register_simple(
++ "hdmi-audio-codec", -1, NULL, 0);
++ if (IS_ERR(hdmi_data->codec_dev)) {
++ dev_err(&pdev->dev, "failed to register HDMI audio codec\n");
++ ret = PTR_ERR(hdmi_data->codec_dev);
++ goto fail;
++ }
++
++ hdmi_data->dma_dev = platform_device_alloc("imx-hdmi-audio", -1);
++	if (!hdmi_data->dma_dev) {
++		ret = -ENOMEM;
++ goto fail_dma;
++ }
++
++ platform_set_drvdata(hdmi_data->dma_dev, hdmi_data);
++
++ ret = platform_device_add(hdmi_data->dma_dev);
++ if (ret) {
++ platform_device_put(hdmi_data->dma_dev);
++ goto fail_dma;
++ }
++
++ return 0;
++
++fail_dma:
++ platform_device_unregister(hdmi_data->codec_dev);
++fail:
++ snd_soc_unregister_component(&pdev->dev);
++
++ return ret;
++}
++
++static int fsl_hdmi_dai_remove(struct platform_device *pdev)
++{
++ struct imx_hdmi *hdmi_data = platform_get_drvdata(pdev);
++
++ platform_device_unregister(hdmi_data->dma_dev);
++ platform_device_unregister(hdmi_data->codec_dev);
++ snd_soc_unregister_component(&pdev->dev);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_hdmi_dai_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-hdmi-audio", },
++ { .compatible = "fsl,imx6q-hdmi-audio", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_hdmi_dai_dt_ids);
++
++static struct platform_driver fsl_hdmi_driver = {
++ .probe = fsl_hdmi_dai_probe,
++ .remove = fsl_hdmi_dai_remove,
++ .driver = {
++ .name = "fsl-hdmi-dai",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_hdmi_dai_dt_ids,
++ },
++};
++module_platform_driver(fsl_hdmi_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IMX HDMI TX DAI");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:fsl-hdmi-dai");
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_spdif.c linux-3.14.40/sound/soc/fsl/fsl_spdif.c
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_spdif.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/fsl_spdif.c 2015-05-01 14:58:06.791427001 -0500
+@@ -21,6 +21,8 @@
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
++#include <linux/pm_runtime.h>
++#include <linux/busfreq-imx6.h>
+
+ #include <sound/asoundef.h>
+ #include <sound/soc.h>
+@@ -53,7 +55,7 @@
+ spinlock_t ctl_lock;
+
+ /* IEC958 channel tx status bit */
+- unsigned char ch_status[4];
++ unsigned char ch_status[6];
+
+ /* User bits */
+ unsigned char subcode[2 * SPDIF_UBITS_SIZE];
+@@ -80,6 +82,7 @@
+ u8 rxclk_src;
+ struct clk *txclk[SPDIF_TXRATE_MAX];
+ struct clk *rxclk;
++ struct clk *sysclk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+
+@@ -295,11 +298,11 @@
+ return -EBUSY;
+ }
+
+-static void spdif_set_cstatus(struct spdif_mixer_control *ctrl,
+- u8 mask, u8 cstatus)
++static inline void spdif_set_cstatus(struct spdif_mixer_control *ctrl,
++ u8 byteno, u8 mask, u8 cstatus)
+ {
+- ctrl->ch_status[3] &= ~mask;
+- ctrl->ch_status[3] |= cstatus & mask;
++ ctrl->ch_status[byteno] &= ~mask;
++ ctrl->ch_status[byteno] |= cstatus & mask;
+ }
+
+ static void spdif_write_channel_status(struct fsl_spdif_priv *spdif_priv)
+@@ -316,10 +319,16 @@
+
+ dev_dbg(&pdev->dev, "STCSCH: 0x%06x\n", ch_status);
+
+- ch_status = bitrev8(ctrl->ch_status[3]) << 16;
++ ch_status = bitrev8(ctrl->ch_status[3]) << 16 |
++ (bitrev8(ctrl->ch_status[4]) << 8) |
++ bitrev8(ctrl->ch_status[5]);
+ regmap_write(regmap, REG_SPDIF_STCSCL, ch_status);
+
+ dev_dbg(&pdev->dev, "STCSCL: 0x%06x\n", ch_status);
++
++ /* Set outgoing validity (0: pcm, 1: non-audio) */
++ regmap_update_bits(regmap, REG_SPDIF_SCR, SCR_VAL_MASK,
++ (ctrl->ch_status[0] & IEC958_AES0_NONAUDIO) ? 0 : SCR_VAL_CLEAR);
+ }
+
+ /* Set SPDIF PhaseConfig register for rx clock */
+@@ -347,23 +356,45 @@
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+- unsigned long csfs = 0;
+ u32 stc, mask, rate;
+- u8 clk, div;
++ u8 clk, div, csfs, csofs;
+ int ret;
+
+ switch (sample_rate) {
+ case 32000:
+ rate = SPDIF_TXRATE_32000;
+ csfs = IEC958_AES3_CON_FS_32000;
++ csofs = IEC958_AES4_CON_ORIGFS_32000;
+ break;
+ case 44100:
+ rate = SPDIF_TXRATE_44100;
+ csfs = IEC958_AES3_CON_FS_44100;
++ csofs = IEC958_AES4_CON_ORIGFS_44100;
+ break;
+ case 48000:
+ rate = SPDIF_TXRATE_48000;
+ csfs = IEC958_AES3_CON_FS_48000;
++ csofs = IEC958_AES4_CON_ORIGFS_48000;
++ break;
++ case 88200:
++ rate = SPDIF_TXRATE_88200;
++ csfs = IEC958_AES3_CON_FS_88200;
++ csofs = IEC958_AES4_CON_ORIGFS_88200;
++ break;
++ case 96000:
++ rate = SPDIF_TXRATE_96000;
++ csfs = IEC958_AES3_CON_FS_96000;
++ csofs = IEC958_AES4_CON_ORIGFS_96000;
++ break;
++ case 176400:
++ rate = SPDIF_TXRATE_176400;
++ csfs = IEC958_AES3_CON_FS_176400;
++ csofs = IEC958_AES4_CON_ORIGFS_176400;
++ break;
++ case 192000:
++ rate = SPDIF_TXRATE_192000;
++ csfs = IEC958_AES3_CON_FS_192000;
++ csofs = IEC958_AES4_CON_ORIGFS_192000;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported sample rate %d\n", sample_rate);
+@@ -399,7 +430,8 @@
+ clk_get_rate(spdif_priv->txclk[rate]));
+
+ /* set fs field in consumer channel status */
+- spdif_set_cstatus(ctrl, IEC958_AES3_CON_FS, csfs);
++ spdif_set_cstatus(ctrl, 3, IEC958_AES3_CON_FS, csfs);
++ spdif_set_cstatus(ctrl, 4, IEC958_AES4_CON_ORIGFS, csofs);
+
+ /* select clock source and divisor */
+ stc = STC_TXCLK_ALL_EN | STC_TXCLK_SRC_SET(clk) | STC_TXCLK_DIV(div);
+@@ -421,6 +453,8 @@
+ u32 scr, mask, i;
+ int ret;
+
++ pm_runtime_get_sync(cpu_dai->dev);
++
+ /* Reset module and interrupts only for first initialization */
+ if (!cpu_dai->active) {
+ ret = spdif_softreset(spdif_priv);
+@@ -485,6 +519,8 @@
+ regmap_update_bits(regmap, REG_SPDIF_SCR,
+ SCR_LOW_POWER, SCR_LOW_POWER);
+ }
++
++ pm_runtime_put_sync(cpu_dai->dev);
+ }
+
+ static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
+@@ -505,8 +541,8 @@
+ __func__, sample_rate);
+ return ret;
+ }
+- spdif_set_cstatus(ctrl, IEC958_AES3_CON_CLOCK,
+- IEC958_AES3_CON_CLOCK_1000PPM);
++ spdif_set_cstatus(ctrl, 3, IEC958_AES3_CON_CLOCK,
++ IEC958_AES3_CON_CLOCK_1000PPM);
+ spdif_write_channel_status(spdif_priv);
+ } else {
+ /* Setup rx clock source */
+@@ -576,14 +612,13 @@
+ static int fsl_spdif_pb_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uvalue)
+ {
++ int i;
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+
+- uvalue->value.iec958.status[0] = ctrl->ch_status[0];
+- uvalue->value.iec958.status[1] = ctrl->ch_status[1];
+- uvalue->value.iec958.status[2] = ctrl->ch_status[2];
+- uvalue->value.iec958.status[3] = ctrl->ch_status[3];
++ for (i = 0; i < ARRAY_SIZE(ctrl->ch_status); i++)
++ uvalue->value.iec958.status[i] = ctrl->ch_status[i];
+
+ return 0;
+ }
+@@ -591,14 +626,13 @@
+ static int fsl_spdif_pb_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uvalue)
+ {
++ int i;
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+
+- ctrl->ch_status[0] = uvalue->value.iec958.status[0];
+- ctrl->ch_status[1] = uvalue->value.iec958.status[1];
+- ctrl->ch_status[2] = uvalue->value.iec958.status[2];
+- ctrl->ch_status[3] = uvalue->value.iec958.status[3];
++ for (i = 0; i < ARRAY_SIZE(ctrl->ch_status); i++)
++ ctrl->ch_status[i] = uvalue->value.iec958.status[i];
+
+ spdif_write_channel_status(spdif_priv);
+
+@@ -754,7 +788,7 @@
+ clksrc = (phaseconf >> SRPC_CLKSRC_SEL_OFFSET) & 0xf;
+ if (srpc_dpll_locked[clksrc] && (phaseconf & SRPC_DPLL_LOCKED)) {
+ /* Get bus clock from system */
+- busclk_freq = clk_get_rate(spdif_priv->rxclk);
++ busclk_freq = clk_get_rate(spdif_priv->sysclk);
+ }
+
+ /* FreqMeas_CLK = (BUS_CLK * FreqMeas) / 2 ^ 10 / GAINSEL / 128 */
+@@ -999,7 +1033,7 @@
+ struct clk *clk, u64 savesub,
+ enum spdif_txrate index)
+ {
+- const u32 rate[] = { 32000, 44100, 48000 };
++ const u32 rate[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 };
+ u64 rate_ideal, rate_actual, sub;
+ u32 div, arate;
+
+@@ -1017,7 +1051,7 @@
+ break;
+ } else if (arate / rate[index] == 1) {
+ /* A little bigger than expect */
+- sub = (arate - rate[index]) * 100000;
++ sub = (u64)(arate - rate[index]) * 100000;
+ do_div(sub, rate[index]);
+ if (sub < savesub) {
+ savesub = sub;
+@@ -1025,7 +1059,7 @@
+ }
+ } else if (rate[index] / arate == 1) {
+ /* A little smaller than expect */
+- sub = (rate[index] - arate) * 100000;
++ sub = (u64)(rate[index] - arate) * 100000;
+ do_div(sub, rate[index]);
+ if (sub < savesub) {
+ savesub = sub;
+@@ -1040,7 +1074,7 @@
+ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
+ enum spdif_txrate index)
+ {
+- const u32 rate[] = { 32000, 44100, 48000 };
++ const u32 rate[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 };
+ struct platform_device *pdev = spdif_priv->pdev;
+ struct device *dev = &pdev->dev;
+ u64 savesub = 100000, ret;
+@@ -1058,6 +1092,13 @@
+ if (!clk_get_rate(clk))
+ continue;
+
++		/* TODO: Ignore the sysclk source for now because of the imperfect
++		 * clock selection mechanism: sysclk is special in that its rate
++		 * cannot be changed directly; an extra internal divider would be
++		 * needed to derive a proper clock rate from it. */
++ if (i == SPDIF_CLK_SRC_SYSCLK)
++ continue;
++
+ ret = fsl_spdif_txclk_caldiv(spdif_priv, clk, savesub, index);
+ if (savesub == ret)
+ continue;
+@@ -1131,6 +1172,13 @@
+ return ret;
+ }
+
++ /* Get system clock for rx clock rate calculation */
++ spdif_priv->sysclk = devm_clk_get(&pdev->dev, "rxtx5");
++ if (IS_ERR(spdif_priv->sysclk)) {
++ dev_err(&pdev->dev, "no system clock(rxtx5) in devicetree\n");
++ return PTR_ERR(spdif_priv->sysclk);
++ }
++
+ /* Select clock source for rx/tx clock */
+ spdif_priv->rxclk = devm_clk_get(&pdev->dev, "rxtx1");
+ if (IS_ERR(spdif_priv->rxclk)) {
+@@ -1150,12 +1198,13 @@
+ spin_lock_init(&ctrl->ctl_lock);
+
+ /* Init tx channel status default value */
+- ctrl->ch_status[0] =
+- IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_5015;
++ ctrl->ch_status[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
+ ctrl->ch_status[1] = IEC958_AES1_CON_DIGDIGCONV_ID;
+ ctrl->ch_status[2] = 0x00;
+ ctrl->ch_status[3] =
+ IEC958_AES3_CON_FS_44100 | IEC958_AES3_CON_CLOCK_1000PPM;
++ ctrl->ch_status[4] = IEC958_AES4_CON_ORIGFS_44100;
++ ctrl->ch_status[5] = IEC958_AES5_CON_CGMSA_COPYFREELY;
+
+ spdif_priv->dpll_locked = false;
+
+@@ -1164,6 +1213,8 @@
+ spdif_priv->dma_params_tx.addr = res->start + REG_SPDIF_STL;
+ spdif_priv->dma_params_rx.addr = res->start + REG_SPDIF_SRL;
+
++ pm_runtime_enable(&pdev->dev);
++
+ /* Register with ASoC */
+ dev_set_drvdata(&pdev->dev, spdif_priv);
+
+@@ -1174,13 +1225,34 @@
+ return ret;
+ }
+
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, SND_DMAENGINE_PCM_FLAG_COMPAT,
++ IMX_SPDIF_DMABUF_SIZE);
+ if (ret)
+ dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
+
+ return ret;
+ }
+
++#ifdef CONFIG_PM_RUNTIME
++static int fsl_spdif_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int fsl_spdif_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++#endif
++
++static const struct dev_pm_ops fsl_spdif_pm = {
++ SET_RUNTIME_PM_OPS(fsl_spdif_runtime_suspend,
++ fsl_spdif_runtime_resume,
++ NULL)
++};
++
+ static const struct of_device_id fsl_spdif_dt_ids[] = {
+ { .compatible = "fsl,imx35-spdif", },
+ {}
+@@ -1192,6 +1264,7 @@
+ .name = "fsl-spdif-dai",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_spdif_dt_ids,
++ .pm = &fsl_spdif_pm,
+ },
+ .probe = fsl_spdif_probe,
+ };
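One arithmetic note on the (u64) casts added in fsl_spdif_txclk_caldiv() above (an illustrative bound, not taken from a changelog): the error term (arate - rate[index]) can be almost as large as the target rate, and with 192000 Hz now supported the product (arate - rate[index]) * 100000 can approach 1.9e10, which no longer fits in 32 bits; promoting one operand to u64 keeps the scaled error exact before it is handed to do_div().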
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_spdif.h linux-3.14.40/sound/soc/fsl/fsl_spdif.h
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_spdif.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/fsl_spdif.h 2015-05-01 14:58:06.791427001 -0500
+@@ -157,13 +157,19 @@
+ #define STC_TXCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_TXCLK_DIV_MASK)
+ #define STC_TXCLK_SRC_MAX 8
+
++#define SPDIF_CLK_SRC_SYSCLK 5
++
+ /* SPDIF tx rate */
+ enum spdif_txrate {
+ SPDIF_TXRATE_32000 = 0,
+ SPDIF_TXRATE_44100,
+ SPDIF_TXRATE_48000,
++ SPDIF_TXRATE_88200,
++ SPDIF_TXRATE_96000,
++ SPDIF_TXRATE_176400,
++ SPDIF_TXRATE_192000,
+ };
+-#define SPDIF_TXRATE_MAX (SPDIF_TXRATE_48000 + 1)
++#define SPDIF_TXRATE_MAX (SPDIF_TXRATE_192000 + 1)
+
+
+ #define SPDIF_CSTATUS_BYTE 6
+@@ -173,7 +179,11 @@
+
+ #define FSL_SPDIF_RATES_PLAYBACK (SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+- SNDRV_PCM_RATE_48000)
++ SNDRV_PCM_RATE_48000 | \
++ SNDRV_PCM_RATE_88200 | \
++ SNDRV_PCM_RATE_96000 | \
++ SNDRV_PCM_RATE_176400| \
++ SNDRV_PCM_RATE_192000)
+
+ #define FSL_SPDIF_RATES_CAPTURE (SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | \
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/fsl_ssi.c linux-3.14.40/sound/soc/fsl/fsl_ssi.c
+--- linux-3.14.40.orig/sound/soc/fsl/fsl_ssi.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/fsl_ssi.c 2015-05-01 14:58:06.795427001 -0500
+@@ -3,7 +3,7 @@
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+- * Copyright 2007-2010 Freescale Semiconductor, Inc.
++ * Copyright (C) 2007-2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+@@ -30,6 +30,7 @@
+ * around this by not polling these bits but only wait a fixed delay.
+ */
+
++#include <linux/busfreq-imx6.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
+@@ -43,6 +44,7 @@
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
++#include <linux/pm_runtime.h>
+
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+@@ -73,6 +75,24 @@
+ }
+ #endif
+
++#ifdef DEBUG
++#define NUM_OF_SSI_REG (sizeof(struct ccsr_ssi) / sizeof(__be32))
++
++void dump_reg(struct ccsr_ssi __iomem *ssi)
++{
++ u32 val, i;
++
++ for (i = 0; i < NUM_OF_SSI_REG; i++) {
++ if (&ssi->stx0 + i == NULL)
++ continue;
++ val = read_ssi(&ssi->stx0 + i);
++ pr_debug("REG %x = %x\n", (u32)(&ssi->stx0 + i) & 0xff, val);
++ }
++}
++#else
++void dump_reg(struct ccsr_ssi __iomem *ssi) {}
++#endif
++
+ /**
+ * FSLSSI_I2S_RATES: sample rates supported by the I2S
+ *
+@@ -171,8 +191,6 @@
+ struct clk *clk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+- struct imx_dma_data filter_data_tx;
+- struct imx_dma_data filter_data_rx;
+ struct imx_pcm_fiq_params fiq_params;
+ /* Register values for rx/tx configuration */
+ struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+@@ -206,6 +224,26 @@
+ char name[1];
+ };
+
++#ifdef CONFIG_PM_RUNTIME
++static int fsl_ssi_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_AUDIO);
++ return 0;
++}
++
++static int fsl_ssi_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_AUDIO);
++ return 0;
++}
++#endif
++
++static const struct dev_pm_ops fsl_ssi_pm = {
++ SET_RUNTIME_PM_OPS(fsl_ssi_runtime_suspend,
++ fsl_ssi_runtime_resume,
++ NULL)
++};
++
+ static const struct of_device_id fsl_ssi_ids[] = {
+ { .compatible = "fsl,mpc8610-ssi", .data = (void *) FSL_SSI_MCP8610},
+ { .compatible = "fsl,imx51-ssi", .data = (void *) FSL_SSI_MX51},
+@@ -489,6 +527,23 @@
+ }
+ }
+
++static void fsl_ssi_clk_ctrl(struct fsl_ssi_private *ssi_private, bool enable)
++{
++ if (enable) {
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_enable(ssi_private->baudclk);
++ clk_enable(ssi_private->clk);
++ }
++ } else {
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_disable(ssi_private->baudclk);
++ clk_disable(ssi_private->clk);
++ }
++ }
++}
++
+ /*
+ * Enable/Disable a ssi configuration. You have to pass either
+ * ssi_private->rxtx_reg_val.rx or tx as vals parameter.
+@@ -509,6 +564,8 @@
+ else
+ avals = &ssi_private->rxtx_reg_val.rx;
+
++ fsl_ssi_clk_ctrl(ssi_private, enable);
++
+ /* If vals should be disabled, start with disabling the unit */
+ if (!enable) {
+ u32 scr = vals->scr & (vals->scr ^ avals->scr);
+@@ -748,6 +805,8 @@
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ unsigned long flags;
+
++ pm_runtime_get_sync(dai->dev);
++
+ /* First, we only do fsl_ssi_setup() when SSI is going to be active.
+ * Second, fsl_ssi_setup was already called by ac97_init earlier if
+ * the driver is in ac97 mode.
+@@ -1083,14 +1142,17 @@
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fsl_ssi_tx_config(ssi_private, true);
+ else
+ fsl_ssi_rx_config(ssi_private, true);
++ dump_reg(ssi);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fsl_ssi_tx_config(ssi_private, false);
+@@ -1119,6 +1181,12 @@
+ return 0;
+ }
+
++static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ pm_runtime_put_sync(dai->dev);
++}
++
+ static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
+ {
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+@@ -1138,6 +1206,7 @@
+ .set_sysclk = fsl_ssi_set_dai_sysclk,
+ .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+ .trigger = fsl_ssi_trigger,
++ .shutdown = fsl_ssi_shutdown,
+ };
+
+ /* Template for the CPU dai driver structure */
+@@ -1257,13 +1326,13 @@
+ int ret = 0;
+ struct device_attribute *dev_attr = NULL;
+ struct device_node *np = pdev->dev.of_node;
++ u32 dmas[4];
+ const struct of_device_id *of_id;
+ enum fsl_ssi_type hw_type;
+ const char *p, *sprop;
+ const uint32_t *iprop;
+ struct resource res;
+ char name[64];
+- bool shared;
+ bool ac97 = false;
+
+ /* SSIs that are not connected on the board should have a
+@@ -1381,7 +1450,6 @@
+
+ if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
+ hw_type == FSL_SSI_MX35) {
+- u32 dma_events[2], dmas[4];
+ ssi_private->ssi_on_imx = true;
+
+ ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
+@@ -1390,9 +1458,9 @@
+ dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+ goto error_irqmap;
+ }
+- ret = clk_prepare_enable(ssi_private->clk);
++ ret = clk_prepare(ssi_private->clk);
+ if (ret) {
+- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n",
++ dev_err(&pdev->dev, "clk_prepare failed: %d\n",
+ ret);
+ goto error_irqmap;
+ }
+@@ -1405,41 +1473,21 @@
+ dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
+ PTR_ERR(ssi_private->baudclk));
+ else
+- clk_prepare_enable(ssi_private->baudclk);
++ clk_prepare(ssi_private->baudclk);
+
+ /*
+ * We have burstsize be "fifo_depth - 2" to match the SSI
+ * watermark setting in fsl_ssi_startup().
+ */
+- ssi_private->dma_params_tx.maxburst =
+- ssi_private->fifo_depth - 2;
+- ssi_private->dma_params_rx.maxburst =
+- ssi_private->fifo_depth - 2;
++ ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
++ ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+ ssi_private->dma_params_tx.addr =
+ ssi_private->ssi_phys + offsetof(struct ccsr_ssi, stx0);
+ ssi_private->dma_params_rx.addr =
+ ssi_private->ssi_phys + offsetof(struct ccsr_ssi, srx0);
+- ssi_private->dma_params_tx.filter_data =
+- &ssi_private->filter_data_tx;
+- ssi_private->dma_params_rx.filter_data =
+- &ssi_private->filter_data_rx;
+- if (!of_property_read_bool(pdev->dev.of_node, "dmas") &&
+- ssi_private->use_dma) {
+- /*
+- * FIXME: This is a temporary solution until all
+- * necessary dma drivers support the generic dma
+- * bindings.
+- */
+- ret = of_property_read_u32_array(pdev->dev.of_node,
+- "fsl,ssi-dma-events", dma_events, 2);
+- if (ret && ssi_private->use_dma) {
+- dev_err(&pdev->dev, "could not get dma events but fsl-ssi is configured to use DMA\n");
+- goto error_clk;
+- }
+- }
+- /* Should this be merge with the above? */
+- if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
+- && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
++
++ ret = !of_property_read_u32_array(np, "dmas", dmas, 4);
++ if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+ ssi_private->use_dual_fifo = true;
+ /* When using dual fifo mode, we need to keep watermark
+ * as even numbers due to dma script limitation.
+@@ -1447,14 +1495,6 @@
+ ssi_private->dma_params_tx.maxburst &= ~0x1;
+ ssi_private->dma_params_rx.maxburst &= ~0x1;
+ }
+-
+- shared = of_device_is_compatible(of_get_parent(np),
+- "fsl,spba-bus");
+-
+- imx_pcm_dma_params_init_data(&ssi_private->filter_data_tx,
+- dma_events[0], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
+- imx_pcm_dma_params_init_data(&ssi_private->filter_data_rx,
+- dma_events[1], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
+ }
+
+ /*
+@@ -1474,6 +1514,8 @@
+ }
+ }
+
++ pm_runtime_enable(&pdev->dev);
++
+ /* Register with ASoC */
+ dev_set_drvdata(&pdev->dev, ssi_private);
+
+@@ -1509,7 +1551,8 @@
+ if (ret)
+ goto error_pcm;
+ } else {
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, NULL,
++ IMX_SSI_DMABUF_SIZE);
+ if (ret)
+ goto error_pcm;
+ }
+@@ -1565,12 +1608,16 @@
+ error_dev:
+ device_remove_file(&pdev->dev, dev_attr);
+
+-error_clk:
+ if (ssi_private->ssi_on_imx) {
+ if (!IS_ERR(ssi_private->baudclk))
+- clk_disable_unprepare(ssi_private->baudclk);
+- clk_disable_unprepare(ssi_private->clk);
++ clk_unprepare(ssi_private->baudclk);
++ clk_unprepare(ssi_private->clk);
+ }
++error_clk:
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_unprepare(ssi_private->baudclk);
++ if (!IS_ERR(ssi_private->clk))
++ clk_unprepare(ssi_private->clk);
+
+ error_irqmap:
+ if (ssi_private->irq_stats)
+@@ -1590,8 +1637,8 @@
+ snd_soc_unregister_component(&pdev->dev);
+ if (ssi_private->ssi_on_imx) {
+ if (!IS_ERR(ssi_private->baudclk))
+- clk_disable_unprepare(ssi_private->baudclk);
+- clk_disable_unprepare(ssi_private->clk);
++ clk_unprepare(ssi_private->baudclk);
++ clk_unprepare(ssi_private->clk);
+ }
+ if (ssi_private->irq_stats)
+ irq_dispose_mapping(ssi_private->irq);
+@@ -1604,6 +1651,7 @@
+ .name = "fsl-ssi-dai",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_ssi_ids,
++ .pm = &fsl_ssi_pm,
+ },
+ .probe = fsl_ssi_probe,
+ .remove = fsl_ssi_remove,
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-cs42888.c linux-3.14.40/sound/soc/fsl/imx-cs42888.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-cs42888.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/imx-cs42888.c 2015-05-01 14:58:06.795427001 -0500
+@@ -0,0 +1,369 @@
++/*
++ * Copyright (C) 2010-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/i2c.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/soc.h>
++#include <sound/initval.h>
++#include <sound/pcm_params.h>
++
++#include "fsl_esai.h"
++#include "fsl_asrc.h"
++
++#define CODEC_CLK_EXTER_OSC 1
++#define CODEC_CLK_ESAI_HCKT 2
++
++struct imx_priv {
++ int hw;
++ int fe_output_rate;
++ int fe_output_width;
++ unsigned int mclk_freq;
++ unsigned int codec_mclk;
++ struct platform_device *pdev;
++};
++
++static struct imx_priv card_priv;
++
++static int imx_cs42888_startup(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct imx_priv *priv = &card_priv;
++
++ if (!cpu_dai->active)
++ priv->hw = 0;
++ return 0;
++}
++
++static void imx_cs42888_shutdown(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct imx_priv *priv = &card_priv;
++
++ if (!cpu_dai->active)
++ priv->hw = 0;
++}
++
++static const struct {
++ int rate;
++ int ratio1;
++ int ratio2;
++} sr_vals[] = {
++ { 32000, 5, 3 },
++ { 48000, 5, 3 },
++ { 64000, 2, 1 },
++ { 96000, 2, 1 },
++ { 128000, 2, 1 },
++ { 44100, 5, 3 },
++ { 88200, 2, 1 },
++ { 176400, 0, 0 },
++ { 192000, 0, 0 },
++};
++
++static int imx_cs42888_surround_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
++ struct snd_soc_dai *codec_dai = rtd->codec_dai;
++ struct imx_priv *priv = &card_priv;
++ unsigned int rate = params_rate(params);
++ unsigned int lrclk_ratio = 0, i;
++ u32 dai_format = 0;
++
++ if (priv->hw)
++ return 0;
++
++ priv->hw = 1;
++
++ if (priv->codec_mclk & CODEC_CLK_ESAI_HCKT) {
++ for (i = 0; i < ARRAY_SIZE(sr_vals); i++) {
++ if (sr_vals[i].rate == rate) {
++ lrclk_ratio = sr_vals[i].ratio1;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(sr_vals)) {
++ dev_err(&priv->pdev->dev, "Unsupported rate %dHz\n", rate);
++ return -EINVAL;
++ }
++
++ dai_format = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBS_CFS;
++
++ /* set the ESAI system clock as output */
++ snd_soc_dai_set_sysclk(cpu_dai, ESAI_CLK_EXTAL_DIV,
++ priv->mclk_freq, SND_SOC_CLOCK_OUT);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_PM, 2);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_PM, 2);
++ /* set codec Master clock */
++ snd_soc_dai_set_sysclk(codec_dai, 0, priv->mclk_freq,\
++ SND_SOC_CLOCK_IN);
++ } else if (priv->codec_mclk & CODEC_CLK_EXTER_OSC) {
++ for (i = 0; i < ARRAY_SIZE(sr_vals); i++) {
++ if (sr_vals[i].rate == rate) {
++ lrclk_ratio = sr_vals[i].ratio2;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(sr_vals)) {
++ dev_err(&priv->pdev->dev, "Unsupported rate %dHz\n", rate);
++ return -EINVAL;
++ }
++
++ dai_format = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBS_CFS;
++
++ snd_soc_dai_set_sysclk(cpu_dai, ESAI_CLK_EXTAL,
++ priv->mclk_freq, SND_SOC_CLOCK_OUT);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_PM, 0);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_PM, 0);
++ snd_soc_dai_set_sysclk(codec_dai, 0, priv->mclk_freq,\
++ SND_SOC_CLOCK_IN);
++ }
++
++ /* set cpu DAI configuration */
++ snd_soc_dai_set_fmt(cpu_dai, dai_format);
++ /* set i.MX active slot mask */
++ snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
++ /* set the ratio */
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_PSR, 1);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_TX_DIV_FP, lrclk_ratio);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_PSR, 1);
++ snd_soc_dai_set_clkdiv(cpu_dai, ESAI_RX_DIV_FP, lrclk_ratio);
++
++ /* set codec DAI configuration */
++ snd_soc_dai_set_fmt(codec_dai, dai_format);
++ return 0;
++}
++
++static struct snd_soc_ops imx_cs42888_surround_ops = {
++ .startup = imx_cs42888_startup,
++ .shutdown = imx_cs42888_shutdown,
++ .hw_params = imx_cs42888_surround_hw_params,
++};
++
++static const struct snd_soc_dapm_widget imx_cs42888_dapm_widgets[] = {
++ SND_SOC_DAPM_LINE("Line Out Jack", NULL),
++ SND_SOC_DAPM_LINE("Line In Jack", NULL),
++};
++
++static const struct snd_soc_dapm_route audio_map[] = {
++ /* Line out jack */
++ {"Line Out Jack", NULL, "AOUT1L"},
++ {"Line Out Jack", NULL, "AOUT1R"},
++ {"Line Out Jack", NULL, "AOUT2L"},
++ {"Line Out Jack", NULL, "AOUT2R"},
++ {"Line Out Jack", NULL, "AOUT3L"},
++ {"Line Out Jack", NULL, "AOUT3R"},
++ {"Line Out Jack", NULL, "AOUT4L"},
++ {"Line Out Jack", NULL, "AOUT4R"},
++ {"AIN1L", NULL, "Line In Jack"},
++ {"AIN1R", NULL, "Line In Jack"},
++ {"AIN2L", NULL, "Line In Jack"},
++ {"AIN2R", NULL, "Line In Jack"},
++ {"esai-Playback", NULL, "asrc-Playback"},
++	{"codec-Playback", NULL, "esai-Playback"},	/* Playback is the codec DAI */
++};
++
++static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
++ struct snd_pcm_hw_params *params) {
++
++ struct imx_priv *priv = &card_priv;
++
++ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min = priv->fe_output_rate;
++ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max = priv->fe_output_rate;
++ snd_mask_none(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT));
++ if (priv->fe_output_width == 16)
++ snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
++ SNDRV_PCM_FORMAT_S16_LE);
++ else
++ snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
++ SNDRV_PCM_FORMAT_S24_LE);
++ return 0;
++}
++
++static struct snd_soc_dai_link imx_cs42888_dai[] = {
++ {
++ .name = "HiFi",
++ .stream_name = "HiFi",
++ .codec_dai_name = "CS42888",
++ .ops = &imx_cs42888_surround_ops,
++ },
++ {
++ .name = "HiFi-ASRC-FE",
++ .stream_name = "HiFi-ASRC-FE",
++ .codec_name = "snd-soc-dummy",
++ .codec_dai_name = "snd-soc-dummy-dai",
++ .dynamic = 1,
++ },
++ {
++ .name = "HiFi-ASRC-BE",
++ .stream_name = "HiFi-ASRC-BE",
++ .codec_dai_name = "CS42888",
++ .platform_name = "snd-soc-dummy",
++ .no_pcm = 1,
++ .ops = &imx_cs42888_surround_ops,
++ .be_hw_params_fixup = be_hw_params_fixup,
++ },
++};
++
++static struct snd_soc_card snd_soc_card_imx_cs42888 = {
++ .name = "cs42888-audio",
++ .dai_link = imx_cs42888_dai,
++ .dapm_widgets = imx_cs42888_dapm_widgets,
++ .num_dapm_widgets = ARRAY_SIZE(imx_cs42888_dapm_widgets),
++ .dapm_routes = audio_map,
++ .num_dapm_routes = ARRAY_SIZE(audio_map),
++};
++
++/*
++ * This function will register the snd_soc_pcm_link drivers.
++ */
++static int imx_cs42888_probe(struct platform_device *pdev)
++{
++ struct device_node *esai_np, *codec_np;
++ struct device_node *asrc_np;
++ struct platform_device *esai_pdev;
++ struct platform_device *asrc_pdev = NULL;
++ struct i2c_client *codec_dev;
++ struct imx_priv *priv = &card_priv;
++ struct clk *codec_clk = NULL;
++ const char *mclk_name;
++ int ret;
++
++ priv->pdev = pdev;
++
++ esai_np = of_parse_phandle(pdev->dev.of_node, "esai-controller", 0);
++ codec_np = of_parse_phandle(pdev->dev.of_node, "audio-codec", 0);
++ if (!esai_np || !codec_np) {
++ dev_err(&pdev->dev, "phandle missing or invalid\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
++ asrc_np = of_parse_phandle(pdev->dev.of_node, "asrc-controller", 0);
++ if (asrc_np) {
++ asrc_pdev = of_find_device_by_node(asrc_np);
++ if (asrc_pdev) {
++ struct fsl_asrc_p2p *asrc_p2p;
++ asrc_p2p = platform_get_drvdata(asrc_pdev);
++ asrc_p2p->per_dev = ESAI;
++ priv->fe_output_rate = asrc_p2p->output_rate;
++ priv->fe_output_width = asrc_p2p->output_width;
++ }
++ }
++
++ esai_pdev = of_find_device_by_node(esai_np);
++ if (!esai_pdev) {
++ dev_err(&pdev->dev, "failed to find ESAI platform device\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++ codec_dev = of_find_i2c_device_by_node(codec_np);
++ if (!codec_dev) {
++ dev_err(&pdev->dev, "failed to find codec platform device\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
++	/* If there is no ASRC controller, register only the first DAI link */
++ if (!asrc_pdev) {
++ imx_cs42888_dai[0].codec_of_node = codec_np;
++ imx_cs42888_dai[0].cpu_dai_name = dev_name(&esai_pdev->dev);
++ imx_cs42888_dai[0].platform_of_node = esai_np;
++ snd_soc_card_imx_cs42888.num_links = 1;
++ } else {
++ imx_cs42888_dai[0].codec_of_node = codec_np;
++ imx_cs42888_dai[0].cpu_dai_name = dev_name(&esai_pdev->dev);
++ imx_cs42888_dai[0].platform_of_node = esai_np;
++ imx_cs42888_dai[1].cpu_dai_name = dev_name(&asrc_pdev->dev);
++ imx_cs42888_dai[1].platform_name = "imx-pcm-asrc";
++ imx_cs42888_dai[2].codec_of_node = codec_np;
++ imx_cs42888_dai[2].cpu_dai_name = dev_name(&esai_pdev->dev);
++ snd_soc_card_imx_cs42888.num_links = 3;
++ }
++
++ codec_clk = devm_clk_get(&codec_dev->dev, NULL);
++ if (IS_ERR(codec_clk)) {
++ ret = PTR_ERR(codec_clk);
++ dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret);
++ goto fail;
++ }
++ priv->mclk_freq = clk_get_rate(codec_clk);
++
++ ret = of_property_read_string(codec_np, "clock-names", &mclk_name);
++ if (ret) {
++ dev_err(&pdev->dev, "%s: failed to get mclk source\n", __func__);
++ goto fail;
++ }
++ if (!strcmp(mclk_name, "codec_osc"))
++ priv->codec_mclk = CODEC_CLK_EXTER_OSC;
++ else if (!strcmp(mclk_name, "esai"))
++ priv->codec_mclk = CODEC_CLK_ESAI_HCKT;
++ else {
++ dev_err(&pdev->dev, "mclk source is not correct %s\n", mclk_name);
++ goto fail;
++ }
++
++ snd_soc_card_imx_cs42888.dev = &pdev->dev;
++
++ platform_set_drvdata(pdev, &snd_soc_card_imx_cs42888);
++
++ ret = snd_soc_register_card(&snd_soc_card_imx_cs42888);
++ if (ret)
++ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
++fail:
++ if (esai_np)
++ of_node_put(esai_np);
++ if (codec_np)
++ of_node_put(codec_np);
++ return ret;
++}
++
++static int imx_cs42888_remove(struct platform_device *pdev)
++{
++ snd_soc_unregister_card(&snd_soc_card_imx_cs42888);
++ return 0;
++}
++
++static const struct of_device_id imx_cs42888_dt_ids[] = {
++ { .compatible = "fsl,imx-audio-cs42888", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver imx_cs42888_driver = {
++ .probe = imx_cs42888_probe,
++ .remove = imx_cs42888_remove,
++ .driver = {
++ .name = "imx-cs42888",
++ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
++ .of_match_table = imx_cs42888_dt_ids,
++ },
++};
++module_platform_driver(imx_cs42888_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("ALSA SoC cs42888 Machine Layer Driver");
++MODULE_ALIAS("platform:imx-cs42888");
++MODULE_LICENSE("GPL");
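A worked example of the front-end/back-end split above, with illustrative numbers: if the ASRC pair is configured with output_rate = 48000 and output_width = 24, an application can open the "HiFi-ASRC-FE" PCM at, say, 44100 Hz / S16_LE; the ASRC converts the stream, and be_hw_params_fixup() pins the back-end ESAI/CS42888 link to 48000 Hz and SNDRV_PCM_FORMAT_S24_LE regardless of what the front end negotiated.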
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-hdmi.c linux-3.14.40/sound/soc/fsl/imx-hdmi.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-hdmi.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/imx-hdmi.c 2015-05-01 14:58:06.795427001 -0500
+@@ -0,0 +1,113 @@
++/*
++ * ASoC HDMI Transmitter driver for IMX development boards
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * based on stmp3780_devb_hdmi.c
++ *
++ * Vladimir Barinov <vbarinov@embeddedalley.com>
++ *
++ * Copyright 2008 SigmaTel, Inc
++ * Copyright 2008 Embedded Alley Solutions, Inc
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <sound/soc.h>
++
++#include "imx-hdmi.h"
++
++/* imx digital audio interface glue - connects codec <--> CPU */
++static struct snd_soc_dai_link imx_hdmi_dai_link = {
++ .name = "i.MX HDMI Audio Tx",
++ .stream_name = "i.MX HDMI Audio Tx",
++ .codec_dai_name = "hdmi-hifi",
++ .codec_name = "hdmi-audio-codec",
++ .platform_name = "imx-hdmi-audio",
++};
++
++static struct snd_soc_card snd_soc_card_imx_hdmi = {
++ .name = "imx-hdmi-soc",
++ .dai_link = &imx_hdmi_dai_link,
++ .num_links = 1,
++};
++
++static int imx_hdmi_audio_probe(struct platform_device *pdev)
++{
++ struct device_node *hdmi_np, *np = pdev->dev.of_node;
++ struct snd_soc_card *card = &snd_soc_card_imx_hdmi;
++ struct platform_device *hdmi_pdev;
++ int ret = 0;
++
++ if (!hdmi_get_registered()) {
++ dev_err(&pdev->dev, "initialize HDMI-audio failed. load HDMI-video first!\n");
++ return -ENODEV;
++ }
++
++ hdmi_np = of_parse_phandle(np, "hdmi-controller", 0);
++ if (!hdmi_np) {
++ dev_err(&pdev->dev, "failed to find hdmi-audio cpudai\n");
++ ret = -EINVAL;
++ goto end;
++ }
++
++ hdmi_pdev = of_find_device_by_node(hdmi_np);
++ if (!hdmi_pdev) {
++		dev_err(&pdev->dev, "failed to find HDMI platform device\n");
++ ret = -EINVAL;
++ goto end;
++ }
++
++ card->dev = &pdev->dev;
++ card->dai_link->cpu_dai_name = dev_name(&hdmi_pdev->dev);
++
++ platform_set_drvdata(pdev, card);
++
++ ret = snd_soc_register_card(card);
++ if (ret)
++ dev_err(&pdev->dev, "failed to register card: %d\n", ret);
++
++end:
++ if (hdmi_np)
++ of_node_put(hdmi_np);
++
++ return ret;
++}
++
++static int imx_hdmi_audio_remove(struct platform_device *pdev)
++{
++ struct snd_soc_card *card = platform_get_drvdata(pdev);
++
++ snd_soc_unregister_card(card);
++
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx-audio-hdmi", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
++
++static struct platform_driver imx_hdmi_audio_driver = {
++ .probe = imx_hdmi_audio_probe,
++ .remove = imx_hdmi_audio_remove,
++ .driver = {
++ .of_match_table = imx_hdmi_dt_ids,
++ .name = "imx-audio-hdmi",
++ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
++ },
++};
++
++module_platform_driver(imx_hdmi_audio_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IMX HDMI TX ASoC driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:imx-audio-hdmi");
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-hdmi-dma.c linux-3.14.40/sound/soc/fsl/imx-hdmi-dma.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-hdmi-dma.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/imx-hdmi-dma.c 2015-05-01 14:58:06.795427001 -0500
+@@ -0,0 +1,1240 @@
++/*
++ * imx-hdmi-dma.c -- HDMI DMA driver for ALSA Soc Audio Layer
++ *
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * based on imx-pcm-dma-mx2.c
++ * Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * This code is based on code copyrighted by Freescale,
++ * Liam Girdwood, Javier Martin and probably others.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "imx-hdmi.h"
++
++#define HDMI_DMA_BURST_UNSPECIFIED_LEGNTH 0
++#define HDMI_DMA_BURST_INCR4 1
++#define HDMI_DMA_BURST_INCR8 2
++#define HDMI_DMA_BURST_INCR16 3
++
++#define HDMI_BASE_ADDR 0x00120000
++
++struct hdmi_sdma_script {
++ int control_reg_addr;
++ int status_reg_addr;
++ int dma_start_addr;
++ u32 buffer[20];
++};
++
++struct hdmi_dma_priv {
++ struct snd_pcm_substream *substream;
++ struct platform_device *pdev;
++
++ struct snd_dma_buffer hw_buffer;
++ unsigned long buffer_bytes;
++ unsigned long appl_bytes;
++
++ int periods;
++ int period_time;
++ int period_bytes;
++ int dma_period_bytes;
++ int buffer_ratio;
++
++ unsigned long offset;
++
++ snd_pcm_format_t format;
++ int sample_align;
++ int sample_bits;
++ int channels;
++ int rate;
++
++ int frame_idx;
++
++ bool tx_active;
++ spinlock_t irq_lock;
++
++ /* SDMA part */
++ dma_addr_t phy_hdmi_sdma_t;
++ struct hdmi_sdma_script *hdmi_sdma_t;
++ struct dma_chan *dma_channel;
++ struct imx_dma_data dma_data;
++ struct dma_async_tx_descriptor *desc;
++ struct imx_hdmi_sdma_params sdma_params;
++};
++
++/* bit 0:0:0:b:p(0):c:(u)0:(v)0 */
++/* max 8 channels supported; channels are interleaved */
++static u8 g_packet_head_table[48 * 8];
++
++/* channel remapping for hdmi_dma_copy_xxxx() */
++static u8 g_channel_remap_table[24];
++
++/* default mapping tables */
++static const u8 channel_maps_alsa_cea[5][8] = {
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 0CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 2CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 4CH: no remapping */
++ { 0, 1, 4, 5, 3, 2, 6, 7 }, /* 6CH: ALSA5.1 to CEA */
++ { 0, 1, 6, 7, 3, 2, 4, 5 } /* 8CH: ALSA7.1 to CEA */
++};
++
++static const u8 channel_maps_cea_alsa[5][8] = {
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 0CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 2CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 4CH: no remapping */
++ { 0, 1, 5, 4, 2, 3, 6, 7 }, /* 6CH: CEA to ALSA5.1 */
++ { 0, 1, 5, 4, 6, 7, 2, 3 } /* 8CH: CEA to ALSA7.1 */
++};
++
++union hdmi_audio_header_t iec_header;
++EXPORT_SYMBOL(iec_header);
++
++/*
++ * Note that the period size for DMA != period size for ALSA because the
++ * driver adds iec frame info to the audio samples (in hdmi_dma_copy).
++ *
++ * Each 4 byte subframe = 1 byte of iec data + 3 byte audio sample.
++ *
++ * A 16 bit audio sample becomes 32 bits including the frame info. Ratio=2
++ * A 24 bit audio sample packed in 3 bytes becomes 32 bits including the
++ * frame info. Ratio=4:3
++ * If the 24 bit raw audio is already carried in 32 bit words, the ratio is 1.
++ *
++ *  Original    Packed into   subframe   Ratio of size of      Format
++ *  sample      how many      size       DMA buffer to
++ *  (bits)      bits                     ALSA buffer
++ *  --------    -----------   --------   -----------------     ------------------------
++ *    16            16           32              2             SNDRV_PCM_FORMAT_S16_LE
++ *    24            24           32              1.33          SNDRV_PCM_FORMAT_S24_3LE*
++ *    24            32           32              1             SNDRV_PCM_FORMAT_S24_LE
++ *
++ * *so SNDRV_PCM_FORMAT_S24_3LE is not supported.
++ */
++
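++/*
++ * Worked example (S16_LE, stereo): each 4-byte ALSA frame expands to two
++ * 4-byte IEC subframes (8 bytes) in the DMA buffer, so buffer_ratio = 2;
++ * the 16-bit sample sits in bits 8..23 of the subframe and the B/P/C/U/V
++ * flags occupy the top byte (see hdmi_dma_add_frame_info below).
++ */
++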
++/*
++ * The minimum dma period is one IEC audio frame (192 * 4 * channels).
++ * The maximum dma period for the HDMI DMA is 8K.
++ *
++ * channels   minimum dma period    maximum dma period
++ * --------   -------------------   ------------------
++ *    2       192 * 4 * 2 = 1536    1536 * 4 = 6144
++ *    4       192 * 4 * 4 = 3072    3072 * 2 = 6144
++ *    6       192 * 4 * 6 = 4608    4608 * 1 = 4608
++ *    8       192 * 4 * 8 = 6144    6144 * 1 = 6144
++ *
++ * Bottom line:
++ * 1. Must keep the ratio of DMA buffer to ALSA buffer consistent.
++ * 2. frame_idx is saved in the private data, so even if a frame cannot be
++ * transmitted in a period, it can be continued in the next period. This
++ * is necessary for 6 ch.
++ */
++#define HDMI_DMA_PERIOD_BYTES (12288)
++#define HDMI_DMA_BUF_SIZE (1280 * 1024)
++#define HDMI_PCM_BUF_SIZE (1280 * 1024)
++
++#define hdmi_audio_debug(dev, reg) \
++ dev_dbg(dev, #reg ": 0x%02x\n", hdmi_readb(reg))
++
++#ifdef DEBUG
++static void dumpregs(struct device *dev)
++{
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_CONF0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_START);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STOP);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_THRSLD);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STRADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STPADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BSTADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MBLENGTH0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MBLENGTH1);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STAT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_INT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MASK);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_POL);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_CONF1);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFSTAT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFINT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFMASK);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFPOL);
++ hdmi_audio_debug(dev, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_audio_debug(dev, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_audio_debug(dev, HDMI_IH_MUTE);
++}
++
++static void dumppriv(struct device *dev, struct hdmi_dma_priv *priv)
++{
++ dev_dbg(dev, "channels = %d\n", priv->channels);
++ dev_dbg(dev, "periods = %d\n", priv->periods);
++ dev_dbg(dev, "period_bytes = %d\n", priv->period_bytes);
++ dev_dbg(dev, "dma period_bytes = %d\n", priv->dma_period_bytes);
++ dev_dbg(dev, "buffer_ratio = %d\n", priv->buffer_ratio);
++ dev_dbg(dev, "hw dma buffer = 0x%08x\n", (int)priv->hw_buffer.addr);
++ dev_dbg(dev, "dma buf size = %d\n", (int)priv->buffer_bytes);
++ dev_dbg(dev, "sample_rate = %d\n", (int)priv->rate);
++}
++#else
++static void dumpregs(struct device *dev) {}
++static void dumppriv(struct device *dev, struct hdmi_dma_priv *priv) {}
++#endif
++
++/*
++ * Conditions for DMA to work:
++ * (((final_addr - initial_addr) >> 2) + 1) < 2k, so the max period is 8k bytes.
++ * (initial_addr & 0x3) == 0
++ * (final_addr & 0x3) == 0x3
++ *
++ * The DMA period should be an integer multiple of one IEC 60958 block,
++ * i.e. 192 frames * 4 bytes * channels.
++ */
++static void hdmi_dma_set_addr(int start_addr, int dma_period_bytes)
++{
++ int final_addr = start_addr + dma_period_bytes - 1;
++
++ hdmi_write4(start_addr, HDMI_AHB_DMA_STRADDR0);
++ hdmi_write4(final_addr, HDMI_AHB_DMA_STPADDR0);
++}
++
++static void hdmi_dma_irq_set(bool set)
++{
++ u8 val = hdmi_readb(HDMI_AHB_DMA_MASK);
++
++ if (set)
++ val |= HDMI_AHB_DMA_DONE;
++ else
++ val &= (u8)~HDMI_AHB_DMA_DONE;
++
++ hdmi_writeb(val, HDMI_AHB_DMA_MASK);
++}
++
++static void hdmi_mask(int mask)
++{
++ u8 regval = hdmi_readb(HDMI_AHB_DMA_MASK);
++
++ if (mask)
++ regval |= HDMI_AHB_DMA_ERROR | HDMI_AHB_DMA_FIFO_EMPTY;
++ else
++ regval &= (u8)~(HDMI_AHB_DMA_ERROR | HDMI_AHB_DMA_FIFO_EMPTY);
++
++ hdmi_writeb(regval, HDMI_AHB_DMA_MASK);
++}
++
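++/*
++ * Parity fold: returns 1 when the argument has an odd number of set bits,
++ * e.g. odd_ones(0x7) == 1 and odd_ones(0x3) == 0. Used to compute the
++ * IEC 60958 parity bit of an assembled subframe.
++ */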
++static inline int odd_ones(unsigned a)
++{
++ a ^= a >> 16;
++ a ^= a >> 8;
++ a ^= a >> 4;
++ a ^= a >> 2;
++ a ^= a >> 1;
++
++ return a & 1;
++}
++
++/* Add frame information for one pcm subframe */
++static u32 hdmi_dma_add_frame_info(struct hdmi_dma_priv *priv,
++ u32 pcm_data, int subframe_idx)
++{
++ union hdmi_audio_dma_data_t subframe;
++ union hdmi_audio_header_t tmp_header;
++
++ subframe.U = 0;
++
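++	/*
++	 * Channel status bits 0..41 (consumer byte 0 up to CGMS-A) are taken
++	 * from iec_header; the remaining bits of the 192-bit block are sent
++	 * as zero.
++	 */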
++ if (priv->frame_idx < 42) {
++ tmp_header = iec_header;
++
++ /* fill v (validity) */
++ subframe.B.v = tmp_header.B.linear_pcm;
++
++ /* fill c (channel status) */
++ if (tmp_header.B.linear_pcm == 0)
++ tmp_header.B.channel = subframe_idx + 1;
++ subframe.B.c = tmp_header.U >> priv->frame_idx;
++ } else {
++ /* fill v (validity), c is always zero */
++ subframe.B.v = iec_header.B.linear_pcm;
++ }
++
++ /* fill data */
++ if (priv->sample_bits == 16)
++ pcm_data <<= 8;
++ subframe.B.data = pcm_data;
++
++ /* fill p (parity) Note: Do not include b ! */
++ subframe.B.p = odd_ones(subframe.U);
++
++ /* fill b (start-of-block) */
++ if (priv->frame_idx == 0)
++ subframe.B.b = 1;
++
++ return subframe.U;
++}
++
++static void init_table(int channels)
++{
++ int i, map_sel, ch;
++ unsigned char *p = g_packet_head_table;
++ union hdmi_audio_header_t tmp_header = iec_header;
++
++ for (i = 0; i < 48; i++) {
++ int b = 0;
++ if (i == 0)
++ b = 1;
++
++ for (ch = 0; ch < channels; ch++) {
++ int c = 0;
++ if (i < 42) {
++ tmp_header.B.channel = ch + 1;
++ c = (tmp_header.U >> i) & 0x1;
++ }
++ /* preset bit p as c */
++ *p++ = (b << 4) | (c << 2) | (c << 3);
++ }
++ }
++
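++	/*
++	 * Build the per-sample remap table: entry i holds the ALSA source
++	 * index feeding DMA/CEA slot i within each group of 'channels'
++	 * samples, e.g. for 6 channels slot 2 (CEA LFE) is fed from ALSA
++	 * sample 5 (5.1 LFE).
++	 */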
++ map_sel = channels / 2;
++ for (i = 0; i < 24; i++) {
++ g_channel_remap_table[i] = (i / channels) * channels +
++ channel_maps_cea_alsa[map_sel][i % channels];
++ }
++}
++
++/* Optimized copy routines that prepend the IEC subframe head */
++static void hdmi_dma_copy_16_c_lut(u16 *src, u32 *dst, int samples,
++ u8 *lookup_table)
++{
++ u32 sample, head;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]];
++
++ /* get packet header and p-bit */
++ head = *lookup_table++ ^ (odd_ones(sample) << 3);
++
++ /* store sample and header */
++ *dst++ = (head << 24) | (sample << 8);
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_16_c_fast(u16 *src, u32 *dst, int samples)
++{
++ u32 sample;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]];
++
++ /* store sample and p-bit */
++ *dst++ = (odd_ones(sample) << (3+24)) | (sample << 8);
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_24_c_lut(u32 *src, u32 *dst, int samples,
++ u8 *lookup_table)
++{
++ u32 sample, head;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]] & 0x00ffffff;
++
++ /* get packet header and p-bit */
++ head = *lookup_table++ ^ (odd_ones(sample) << 3);
++
++ /* store sample and header */
++ *dst++ = (head << 24) | sample;
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_24_c_fast(u32 *src, u32 *dst, int samples)
++{
++ u32 sample;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]] & 0x00ffffff;
++
++ /* store sample and p-bit */
++ *dst++ = (odd_ones(sample) << (3+24)) | sample;
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_mmap_copy(u8 *src, int samplesize, u32 *dst, int framecnt, int channelcnt)
++{
++	/* split the input into IEC blocks of 192 frames each */
++ int count_in_192 = (framecnt + 191) / 192;
++ int i;
++
++ typedef void (*fn_copy_lut)(u8 *src, u32 *dst, int samples, u8 *lookup_table);
++ typedef void (*fn_copy_fast)(u8 *src, u32 *dst, int samples);
++ fn_copy_lut copy_lut;
++ fn_copy_fast copy_fast;
++
++ if (samplesize == 4) {
++ copy_lut = (fn_copy_lut)hdmi_dma_copy_24_c_lut;
++ copy_fast = (fn_copy_fast)hdmi_dma_copy_24_c_fast;
++ } else {
++ copy_lut = (fn_copy_lut)hdmi_dma_copy_16_c_lut;
++ copy_fast = (fn_copy_fast)hdmi_dma_copy_16_c_fast;
++ }
++
++ for (i = 0; i < count_in_192; i++) {
++ int count, samples;
++
++ /* handles frame index [0, 48) */
++ count = (framecnt < 48) ? framecnt : 48;
++ samples = count * channelcnt;
++ copy_lut(src, dst, samples, g_packet_head_table);
++ framecnt -= count;
++ if (framecnt == 0)
++ break;
++
++ src += samples * samplesize;
++ dst += samples;
++
++ /* handles frame index [48, 192) */
++ count = (framecnt < 192 - 48) ? framecnt : 192 - 48;
++ samples = count * channelcnt;
++ copy_fast(src, dst, samples);
++ framecnt -= count;
++ src += samples * samplesize;
++ dst += samples;
++ }
++}
++
++static void hdmi_dma_mmap_copy(struct snd_pcm_substream *substream,
++ int offset, int count)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++ u32 framecount, *dst;
++
++ framecount = count / (priv->sample_align * priv->channels);
++
++ /* hw_buffer is the destination for pcm data plus frame info. */
++ dst = (u32 *)(priv->hw_buffer.area + (offset * priv->buffer_ratio));
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_S24_LE:
++ /* dma_buffer is the mmapped buffer we are copying pcm from. */
++ hdmi_mmap_copy(runtime->dma_area + offset,
++ priv->sample_align, dst, framecount, priv->channels);
++ break;
++ default:
++ dev_err(dev, "unsupported sample format %s\n",
++ snd_pcm_format_name(priv->format));
++ return;
++ }
++}
++
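++/*
++ * Refill the hw_buffer from the mmapped ALSA buffer: type 'p' copies the
++ * period two periods ahead of the current hw_ptr (called from the SDMA
++ * callback), type 'b' refills the whole ring buffer when the stream is
++ * (re)started.
++ */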
++static void hdmi_dma_data_copy(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv, char type)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned long offset, count, appl_bytes, space_to_end;
++
++ if (runtime->access != SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
++ return;
++
++ appl_bytes = frames_to_bytes(runtime, runtime->status->hw_ptr);
++
++ switch (type) {
++ case 'p':
++ offset = (appl_bytes + 2 * priv->period_bytes) % priv->buffer_bytes;
++ count = priv->period_bytes;
++ space_to_end = priv->period_bytes;
++ break;
++ case 'b':
++ offset = appl_bytes % priv->buffer_bytes;
++ count = priv->buffer_bytes;
++ space_to_end = priv->buffer_bytes - offset;
++ break;
++ default:
++ return;
++ }
++
++ if (count <= space_to_end) {
++ hdmi_dma_mmap_copy(substream, offset, count);
++ } else {
++ hdmi_dma_mmap_copy(substream, offset, space_to_end);
++ hdmi_dma_mmap_copy(substream, 0, count - space_to_end);
++ }
++}
++
++static void hdmi_sdma_callback(void *data)
++{
++ struct hdmi_dma_priv *priv = (struct hdmi_dma_priv *)data;
++ struct snd_pcm_substream *substream = priv->substream;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ if (runtime && runtime->dma_area && priv->tx_active) {
++ priv->offset += priv->period_bytes;
++ priv->offset %= priv->period_bytes * priv->periods;
++
++ /* Copy data by period_bytes */
++ hdmi_dma_data_copy(substream, priv, 'p');
++
++ snd_pcm_period_elapsed(substream);
++ }
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++
++ return;
++}
++
++static int hdmi_dma_set_thrsld_incrtype(struct device *dev, int channels)
++{
++ u8 mask = HDMI_AHB_DMA_CONF0_BURST_MODE | HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK;
++ u8 val = hdmi_readb(HDMI_AHB_DMA_CONF0) & ~mask;
++ int incr_type, threshold;
++
++ switch (hdmi_readb(HDMI_REVISION_ID)) {
++ case 0x0a:
++ incr_type = HDMI_DMA_BURST_INCR4;
++ if (channels == 2)
++ threshold = 126;
++ else
++ threshold = 124;
++ break;
++ case 0x1a:
++ incr_type = HDMI_DMA_BURST_INCR8;
++ threshold = 128;
++ break;
++ default:
++ dev_err(dev, "unknown hdmi controller!\n");
++ return -ENODEV;
++ }
++
++ hdmi_writeb(threshold, HDMI_AHB_DMA_THRSLD);
++
++ switch (incr_type) {
++	case HDMI_DMA_BURST_UNSPECIFIED_LENGTH:
++ break;
++ case HDMI_DMA_BURST_INCR4:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE;
++ break;
++ case HDMI_DMA_BURST_INCR8:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE |
++ HDMI_AHB_DMA_CONF0_INCR8;
++ break;
++ case HDMI_DMA_BURST_INCR16:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE |
++ HDMI_AHB_DMA_CONF0_INCR16;
++ break;
++ default:
++		dev_err(dev, "invalid increment type: %d!\n", incr_type);
++ return -EINVAL;
++ }
++
++ hdmi_writeb(val, HDMI_AHB_DMA_CONF0);
++
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_THRSLD);
++
++ return 0;
++}
++
++static int hdmi_dma_configure_dma(struct device *dev, int channels)
++{
++ int ret;
++ static u8 chan_enable[] = { 0x00, 0x03, 0x33, 0x3f, 0xff };
++
++ if (channels <= 0 || channels > 8 || channels % 2 != 0) {
++ dev_err(dev, "unsupported channel number: %d\n", channels);
++ return -EINVAL;
++ }
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, EN_HLOCK, 0x1);
++
++ ret = hdmi_dma_set_thrsld_incrtype(dev, channels);
++ if (ret)
++ return ret;
++
++ hdmi_writeb(chan_enable[channels / 2], HDMI_AHB_DMA_CONF1);
++
++ return 0;
++}
++
++static void hdmi_dma_init_iec_header(void)
++{
++ iec_header.U = 0;
++
++ iec_header.B.consumer = 0; /* Consumer use */
++ iec_header.B.linear_pcm = 0; /* linear pcm audio */
++ iec_header.B.copyright = 1; /* no copyright */
++ iec_header.B.pre_emphasis = 0; /* 2 channels without pre-emphasis */
++ iec_header.B.mode = 0; /* Mode 0 */
++
++ iec_header.B.category_code = 0;
++
++ iec_header.B.source = 2; /* stereo */
++ iec_header.B.channel = 0;
++
++ iec_header.B.sample_freq = 0x02; /* 48 KHz */
++ iec_header.B.clock_acc = 0; /* Level II */
++
++ iec_header.B.word_length = 0x02; /* 16 bits */
++ iec_header.B.org_sample_freq = 0x0D; /* 48 KHz */
++
++ iec_header.B.cgms_a = 0; /* Copying is permitted without restriction */
++}
++
++static int hdmi_dma_update_iec_header(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ iec_header.B.source = priv->channels;
++
++ switch (priv->rate) {
++ case 32000:
++ iec_header.B.sample_freq = 0x03;
++ iec_header.B.org_sample_freq = 0x0C;
++ break;
++ case 44100:
++ iec_header.B.sample_freq = 0x00;
++ iec_header.B.org_sample_freq = 0x0F;
++ break;
++ case 48000:
++ iec_header.B.sample_freq = 0x02;
++ iec_header.B.org_sample_freq = 0x0D;
++ break;
++ case 88200:
++ iec_header.B.sample_freq = 0x08;
++ iec_header.B.org_sample_freq = 0x07;
++ break;
++ case 96000:
++ iec_header.B.sample_freq = 0x0A;
++ iec_header.B.org_sample_freq = 0x05;
++ break;
++ case 176400:
++ iec_header.B.sample_freq = 0x0C;
++ iec_header.B.org_sample_freq = 0x03;
++ break;
++ case 192000:
++ iec_header.B.sample_freq = 0x0E;
++ iec_header.B.org_sample_freq = 0x01;
++ break;
++ default:
++ dev_err(dev, "unsupported sample rate\n");
++ return -EFAULT;
++ }
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ iec_header.B.word_length = 0x02;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ iec_header.B.word_length = 0x0b;
++ break;
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++/*
++ * The HDMI block transmits the audio data without adding any of the audio
++ * frame bits. So we have to copy the raw dma data from the ALSA buffer
++ * to the DMA buffer, adding the frame information.
++ */
++static int hdmi_dma_copy(struct snd_pcm_substream *substream, int channel,
++ snd_pcm_uframes_t pos, void __user *buf,
++ snd_pcm_uframes_t frames)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ unsigned int count = frames_to_bytes(runtime, frames);
++ unsigned int pos_bytes = frames_to_bytes(runtime, pos);
++ int channel_no, pcm_idx, subframe_idx, bits_left, sample_bits, map_sel;
++ u32 pcm_data[8], pcm_temp, *hw_buf, sample_block, inc_mask;
++
++ /* Adding frame info to pcm data from userspace and copy to hw_buffer */
++ hw_buf = (u32 *)(priv->hw_buffer.area + (pos_bytes * priv->buffer_ratio));
++
++ sample_bits = priv->sample_align * 8;
++ sample_block = priv->sample_align * priv->channels;
++
++ if (iec_header.B.linear_pcm == 0) {
++ map_sel = priv->channels / 2;
++ inc_mask = 1 << (priv->channels - 1);
++ } else {
++ map_sel = 0;
++ inc_mask = 0xaa;
++ }
++
++ while (count > 0) {
++ if (copy_from_user(pcm_data, buf, sample_block))
++ return -EFAULT;
++
++ buf += sample_block;
++ count -= sample_block;
++
++ channel_no = pcm_idx = 0;
++ do {
++ pcm_temp = pcm_data[pcm_idx++];
++ bits_left = 32;
++ for (;;) {
++ /* re-map channels */
++ subframe_idx = channel_maps_alsa_cea[map_sel][channel_no];
++
++ /* Save the header info to the audio dma buffer */
++ hw_buf[subframe_idx] = hdmi_dma_add_frame_info(
++ priv, pcm_temp, subframe_idx);
++
++ if (inc_mask & (1 << channel_no)) {
++ if (++priv->frame_idx == 192)
++ priv->frame_idx = 0;
++ }
++
++ channel_no++;
++
++ if (bits_left <= sample_bits)
++ break;
++
++ bits_left -= sample_bits;
++ pcm_temp >>= sample_bits;
++ }
++ } while (channel_no < priv->channels);
++
++ hw_buf += priv->channels;
++ }
++
++ return 0;
++}
++
++static int hdmi_sdma_initbuf(struct device *dev, struct hdmi_dma_priv *priv)
++{
++ struct hdmi_sdma_script *hdmi_sdma_t = priv->hdmi_sdma_t;
++ u32 *head, *tail, i;
++
++ if (!hdmi_sdma_t) {
++ dev_err(dev, "hdmi private addr invalid!!!\n");
++ return -EINVAL;
++ }
++
++ hdmi_sdma_t->control_reg_addr = HDMI_BASE_ADDR + HDMI_AHB_DMA_START;
++ hdmi_sdma_t->status_reg_addr = HDMI_BASE_ADDR + HDMI_IH_AHBDMAAUD_STAT0;
++ hdmi_sdma_t->dma_start_addr = HDMI_BASE_ADDR + HDMI_AHB_DMA_STRADDR0;
++
++ head = &hdmi_sdma_t->buffer[0];
++ tail = &hdmi_sdma_t->buffer[1];
++
++ for (i = 0; i < priv->sdma_params.buffer_num; i++) {
++ *head = priv->hw_buffer.addr + i * priv->period_bytes * priv->buffer_ratio;
++ *tail = *head + priv->dma_period_bytes - 1;
++ head += 2;
++ tail += 2;
++ }
++
++ return 0;
++}
++
++static int hdmi_sdma_config(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dai_dev = &priv->pdev->dev;
++ struct device *dev = rtd->platform->dev;
++ struct dma_slave_config slave_config;
++ int ret;
++
++ priv->dma_channel = dma_request_slave_channel(dai_dev, "tx");
++ if (priv->dma_channel == NULL) {
++ dev_err(dev, "failed to alloc dma channel\n");
++ return -EBUSY;
++ }
++
++ priv->dma_data.data_addr1 = &priv->sdma_params.buffer_num;
++ priv->dma_data.data_addr2 = &priv->sdma_params.phyaddr;
++ priv->dma_channel->private = &priv->dma_data;
++
++ slave_config.direction = DMA_TRANS_NONE;
++ slave_config.dma_request0 = 0;
++ slave_config.dma_request1 = 0;
++
++ ret = dmaengine_slave_config(priv->dma_channel, &slave_config);
++ if (ret) {
++ dev_err(dev, "failed to config slave dma\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int hdmi_dma_hw_free(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ if (priv->dma_channel) {
++ dma_release_channel(priv->dma_channel);
++ priv->dma_channel = NULL;
++ }
++
++ return 0;
++}
++
++static int hdmi_dma_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++ int ret;
++
++ priv->buffer_bytes = params_buffer_bytes(params);
++ priv->periods = params_periods(params);
++ priv->period_bytes = params_period_bytes(params);
++ priv->channels = params_channels(params);
++ priv->format = params_format(params);
++ priv->rate = params_rate(params);
++
++ priv->offset = 0;
++ priv->period_time = HZ / (priv->rate / params_period_size(params));
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ priv->buffer_ratio = 2;
++ priv->sample_align = 2;
++ priv->sample_bits = 16;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ /* 24 bit audio in 32 bit word */
++ priv->buffer_ratio = 1;
++ priv->sample_align = 4;
++ priv->sample_bits = 24;
++ break;
++ default:
++ dev_err(dev, "unsupported sample format: %d\n", priv->format);
++ return -EINVAL;
++ }
++
++ priv->dma_period_bytes = priv->period_bytes * priv->buffer_ratio;
++ priv->sdma_params.buffer_num = priv->periods;
++ priv->sdma_params.phyaddr = priv->phy_hdmi_sdma_t;
++
++ ret = hdmi_sdma_initbuf(dev, priv);
++ if (ret)
++ return ret;
++
++ ret = hdmi_sdma_config(substream, priv);
++ if (ret)
++ return ret;
++
++ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
++
++ ret = hdmi_dma_configure_dma(dev, priv->channels);
++ if (ret)
++ return ret;
++
++ hdmi_dma_set_addr(priv->hw_buffer.addr, priv->dma_period_bytes);
++
++ dumppriv(dev, priv);
++
++ hdmi_dma_update_iec_header(substream);
++
++	/* Init parameters for the mmap copy optimization */
++ init_table(priv->channels);
++
++ priv->appl_bytes = 0;
++ priv->frame_idx = 0;
++
++ return 0;
++}
++
++static void hdmi_dma_trigger_init(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ unsigned long status;
++ bool hbr;
++
++ /*
++ * Set HBR mode (>192kHz IEC-61937 HD audio bitstreaming).
++ * This is done this late because userspace may alter the AESx
++ * parameters until the stream is finally prepared.
++ */
++ hbr = (iec_header.B.linear_pcm != 0 && priv->channels == 8);
++ hdmi_audio_writeb(AHB_DMA_CONF0, HBR, !!hbr);
++
++ /*
++	 * Override the AES3 sampling-frequency parameter: this is a temporary
++	 * hack for callers that provide incorrect information when opening
++	 * the device. 0x09 (i.e. 768 kHz) is the only acceptable value.
++ */
++ if (hbr) {
++ iec_header.B.sample_freq = 0x09;
++ iec_header.B.org_sample_freq = 0x00;
++ }
++
++ priv->offset = 0;
++
++ /* Copy data by buffer_bytes */
++ hdmi_dma_data_copy(substream, priv, 'b');
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, SW_FIFO_RST, 0x1);
++
++ /* Delay after reset */
++ udelay(1);
++
++ status = hdmi_readb(HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_writeb(status, HDMI_IH_AHBDMAAUD_STAT0);
++}
++
++static int hdmi_dma_prepare_and_submit(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ priv->desc = dmaengine_prep_dma_cyclic(priv->dma_channel, 0, 0, 0,
++ DMA_TRANS_NONE, 0);
++ if (!priv->desc) {
++ dev_err(dev, "failed to prepare slave dma\n");
++ return -EINVAL;
++ }
++
++ priv->desc->callback = hdmi_sdma_callback;
++ priv->desc->callback_param = (void *)priv;
++ dmaengine_submit(priv->desc);
++
++ return 0;
++}
++
++static int hdmi_dma_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++ int ret;
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ if (!check_hdmi_state())
++ return 0;
++ hdmi_dma_trigger_init(substream, priv);
++
++ dumpregs(dev);
++
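++		/* fall through */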
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ priv->tx_active = true;
++ hdmi_audio_writeb(AHB_DMA_START, START, 0x1);
++ hdmi_dma_irq_set(false);
++ hdmi_set_dma_mode(1);
++ ret = hdmi_dma_prepare_and_submit(substream, priv);
++ if (ret)
++ return ret;
++ dma_async_issue_pending(priv->desc->chan);
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ dmaengine_terminate_all(priv->dma_channel);
++ hdmi_set_dma_mode(0);
++ hdmi_dma_irq_set(true);
++ hdmi_audio_writeb(AHB_DMA_STOP, STOP, 0x1);
++ priv->tx_active = false;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static snd_pcm_uframes_t hdmi_dma_pointer(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ return bytes_to_frames(runtime, priv->offset);
++}
++
++static struct snd_pcm_hardware snd_imx_hardware = {
++ .info = SNDRV_PCM_INFO_INTERLEAVED |
++ SNDRV_PCM_INFO_BLOCK_TRANSFER |
++ SNDRV_PCM_INFO_MMAP |
++ SNDRV_PCM_INFO_MMAP_VALID |
++ SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME,
++ .formats = MXC_HDMI_FORMATS_PLAYBACK,
++ .rate_min = 32000,
++ .channels_min = 2,
++ .channels_max = 8,
++ .buffer_bytes_max = HDMI_PCM_BUF_SIZE,
++ .period_bytes_min = HDMI_DMA_PERIOD_BYTES / 2,
++ .period_bytes_max = HDMI_DMA_PERIOD_BYTES / 2,
++ .periods_min = 8,
++ .periods_max = HDMI_DMA_BUF_SIZE / HDMI_DMA_PERIOD_BYTES,
++ .fifo_size = 0,
++};
++
++static void hdmi_dma_irq_enable(struct hdmi_dma_priv *priv)
++{
++ unsigned long flags;
++
++ hdmi_writeb(0xff, HDMI_AHB_DMA_POL);
++ hdmi_writeb(0xff, HDMI_AHB_DMA_BUFFPOL);
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ hdmi_writeb(0xff, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_dma_irq_set(false);
++ hdmi_mask(0);
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++}
++
++static void hdmi_dma_irq_disable(struct hdmi_dma_priv *priv)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ hdmi_dma_irq_set(true);
++ hdmi_writeb(0x0, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_mask(1);
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++}
++
++static int hdmi_dma_open(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++ struct hdmi_dma_priv *priv = dev_get_drvdata(dev);
++ int ret;
++
++ runtime->private_data = priv;
++
++ ret = mxc_hdmi_register_audio(substream);
++ if (ret < 0) {
++ dev_err(dev, "HDMI Video is not ready!\n");
++ return ret;
++ }
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, SW_FIFO_RST, 0x1);
++
++ ret = snd_pcm_hw_constraint_integer(substream->runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS);
++ if (ret < 0)
++ return ret;
++
++ snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
++
++ hdmi_dma_irq_enable(priv);
++
++ return 0;
++}
++
++static int hdmi_dma_close(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ hdmi_dma_irq_disable(priv);
++ mxc_hdmi_unregister_audio(substream);
++
++ return 0;
++}
++
++static struct snd_pcm_ops imx_hdmi_dma_pcm_ops = {
++ .open = hdmi_dma_open,
++ .close = hdmi_dma_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = hdmi_dma_hw_params,
++ .hw_free = hdmi_dma_hw_free,
++ .trigger = hdmi_dma_trigger,
++ .pointer = hdmi_dma_pointer,
++ .copy = hdmi_dma_copy,
++};
++
++static int imx_hdmi_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
++{
++ struct hdmi_dma_priv *priv = dev_get_drvdata(rtd->platform->dev);
++ struct snd_card *card = rtd->card->snd_card;
++ struct snd_pcm_substream *substream;
++ struct snd_pcm *pcm = rtd->pcm;
++ u64 dma_mask = DMA_BIT_MASK(32);
++ int ret = 0;
++
++ if (!card->dev->dma_mask)
++ card->dev->dma_mask = &dma_mask;
++ if (!card->dev->coherent_dma_mask)
++ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
++
++ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
++ HDMI_PCM_BUF_SIZE, &substream->dma_buffer);
++ if (ret) {
++ dev_err(card->dev, "failed to alloc playback dma buffer\n");
++ return ret;
++ }
++
++ priv->substream = substream;
++
++ /* Alloc the hw_buffer */
++ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
++ HDMI_DMA_BUF_SIZE, &priv->hw_buffer);
++ if (ret) {
++ dev_err(card->dev, "failed to alloc hw dma buffer\n");
++ return ret;
++ }
++
++ return ret;
++}
++
++static void imx_hdmi_dma_pcm_free(struct snd_pcm *pcm)
++{
++ int stream = SNDRV_PCM_STREAM_PLAYBACK;
++ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
++ struct snd_soc_pcm_runtime *rtd = pcm->private_data;
++ struct hdmi_dma_priv *priv = dev_get_drvdata(rtd->platform->dev);
++
++ if (substream) {
++ snd_dma_free_pages(&substream->dma_buffer);
++ substream->dma_buffer.area = NULL;
++ substream->dma_buffer.addr = 0;
++ }
++
++ /* Free the hw_buffer */
++ snd_dma_free_pages(&priv->hw_buffer);
++ priv->hw_buffer.area = NULL;
++ priv->hw_buffer.addr = 0;
++}
++
++static struct snd_soc_platform_driver imx_hdmi_platform = {
++ .ops = &imx_hdmi_dma_pcm_ops,
++ .pcm_new = imx_hdmi_dma_pcm_new,
++ .pcm_free = imx_hdmi_dma_pcm_free,
++};
++
++static int imx_soc_platform_probe(struct platform_device *pdev)
++{
++ struct imx_hdmi *hdmi_drvdata = platform_get_drvdata(pdev);
++ struct hdmi_dma_priv *priv;
++ int ret = 0;
++
++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ dev_err(&pdev->dev, "Failed to alloc hdmi_dma\n");
++ return -ENOMEM;
++ }
++
++ priv->hdmi_sdma_t = dma_alloc_coherent(NULL,
++ sizeof(struct hdmi_sdma_script),
++ &priv->phy_hdmi_sdma_t, GFP_KERNEL);
++ if (!priv->hdmi_sdma_t) {
++ dev_err(&pdev->dev, "Failed to alloc hdmi_sdma_t\n");
++ return -ENOMEM;
++ }
++
++ priv->tx_active = false;
++ spin_lock_init(&priv->irq_lock);
++
++ priv->pdev = hdmi_drvdata->pdev;
++
++ hdmi_dma_init_iec_header();
++
++ dev_set_drvdata(&pdev->dev, priv);
++
++ switch (hdmi_readb(HDMI_REVISION_ID)) {
++ case 0x0a:
++ snd_imx_hardware.period_bytes_max = HDMI_DMA_PERIOD_BYTES / 4;
++ snd_imx_hardware.period_bytes_min = HDMI_DMA_PERIOD_BYTES / 4;
++ snd_imx_hardware.periods_max = HDMI_DMA_BUF_SIZE / (HDMI_DMA_PERIOD_BYTES / 2);
++ break;
++ default:
++ break;
++ }
++
++ ret = snd_soc_register_platform(&pdev->dev, &imx_hdmi_platform);
++ if (ret)
++ goto err_plat;
++
++ return 0;
++
++err_plat:
++ dma_free_coherent(NULL, sizeof(struct hdmi_sdma_script),
++ priv->hdmi_sdma_t, priv->phy_hdmi_sdma_t);
++
++ return ret;
++}
++
++static int imx_soc_platform_remove(struct platform_device *pdev)
++{
++ struct hdmi_dma_priv *priv = dev_get_drvdata(&pdev->dev);
++
++ dma_free_coherent(NULL, sizeof(struct hdmi_sdma_script),
++ priv->hdmi_sdma_t, priv->phy_hdmi_sdma_t);
++
++ snd_soc_unregister_platform(&pdev->dev);
++
++ return 0;
++}
++
++static struct platform_driver imx_hdmi_dma_driver = {
++ .driver = {
++ .name = "imx-hdmi-audio",
++ .owner = THIS_MODULE,
++ },
++ .probe = imx_soc_platform_probe,
++ .remove = imx_soc_platform_remove,
++};
++
++module_platform_driver(imx_hdmi_dma_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX HDMI audio DMA");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-hdmi.h linux-3.14.40/sound/soc/fsl/imx-hdmi.h
+--- linux-3.14.40.orig/sound/soc/fsl/imx-hdmi.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.14.40/sound/soc/fsl/imx-hdmi.h 2015-05-01 14:58:06.795427001 -0500
+@@ -0,0 +1,105 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __IMX_HDMI_H
++#define __IMX_HDMI_H
++
++struct imx_hdmi_sdma_params {
++ dma_addr_t phyaddr;
++ u32 buffer_num;
++ int dma;
++};
++
++struct imx_hdmi {
++ struct snd_soc_dai_driver cpu_dai_drv;
++ struct platform_device *codec_dev;
++ struct platform_device *dma_dev;
++ struct platform_device *pdev;
++ struct clk *isfr_clk;
++ struct clk *iahb_clk;
++};
++
++#define HDMI_MAX_RATES 7
++#define HDMI_MAX_SAMPLE_SIZE 3
++#define HDMI_MAX_CHANNEL_CONSTRAINTS 4
++
++#define MXC_HDMI_RATES_PLAYBACK \
++ (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
++ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
++ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
++
++#define MXC_HDMI_FORMATS_PLAYBACK \
++ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
++
++union hdmi_audio_header_t {
++ uint64_t U;
++ struct {
++ unsigned consumer:1;
++ unsigned linear_pcm:1;
++ unsigned copyright:1;
++ unsigned pre_emphasis:3;
++ unsigned mode:2;
++
++ unsigned category_code:8;
++
++ unsigned source:4;
++ unsigned channel:4;
++
++ unsigned sample_freq:4;
++ unsigned clock_acc:2;
++ unsigned reserved0:2;
++
++ unsigned word_length:4;
++ unsigned org_sample_freq:4;
++
++ unsigned cgms_a:2;
++ unsigned reserved1:6;
++
++ unsigned reserved2:8;
++
++ unsigned reserved3:8;
++ } B;
++ unsigned char status[8];
++};
++
++union hdmi_audio_dma_data_t {
++ uint32_t U;
++ struct {
++ unsigned data:24;
++ unsigned v:1;
++ unsigned u:1;
++ unsigned c:1;
++ unsigned p:1;
++ unsigned b:1;
++ unsigned reserved:3;
++ } B;
++};
++
++extern union hdmi_audio_header_t iec_header;
++
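++/*
++ * Write 'val' into one named bit field of an HDMI register: the register
++ * address and the field's _OFFSET/_MASK constants are token-pasted from
++ * HDMI_<reg> and HDMI_<reg>_<bit>, e.g. hdmi_audio_writeb(AHB_DMA_START,
++ * START, 0x1) updates the START field of HDMI_AHB_DMA_START.
++ */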
++#define hdmi_audio_writeb(reg, bit, val) \
++ do { \
++ hdmi_mask_writeb(val, HDMI_ ## reg, \
++ HDMI_ ## reg ## _ ## bit ## _OFFSET, \
++ HDMI_ ## reg ## _ ## bit ## _MASK); \
++ pr_debug("Set reg: HDMI_" #reg " (0x%x) "\
++ "bit: HDMI_" #reg "_" #bit " (%d) to val: %x\n", \
++ HDMI_ ## reg, HDMI_ ## reg ## _ ## bit ## _OFFSET, val); \
++ } while (0)
++
++#endif /* __IMX_HDMI_H */
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-pcm-dma.c linux-3.14.40/sound/soc/fsl/imx-pcm-dma.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-pcm-dma.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/imx-pcm-dma.c 2015-05-01 14:58:06.815427001 -0500
+@@ -11,6 +11,10 @@
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/device.h>
+ #include <linux/platform_device.h>
+ #include <linux/dmaengine.h>
+ #include <linux/types.h>
+@@ -20,6 +24,7 @@
+ #include <sound/pcm.h>
+ #include <sound/soc.h>
+ #include <sound/dmaengine_pcm.h>
++#include <linux/platform_data/dma-imx.h>
+
+ #include "imx-pcm.h"
+
+@@ -40,28 +45,97 @@
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+- .buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
++ .formats = SNDRV_PCM_FMTBIT_S16_LE |
++ SNDRV_PCM_FMTBIT_S24_LE |
++ SNDRV_PCM_FMTBIT_S20_3LE,
++ .buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = 65535, /* Limited by SDMA engine */
+- .periods_min = 2,
++ .periods_min = 4,
+ .periods_max = 255,
+ .fifo_size = 0,
+ };
+
++static void imx_pcm_dma_set_config_from_dai_data(
++ const struct snd_pcm_substream *substream,
++ const struct snd_dmaengine_dai_dma_data *dma_data,
++ struct dma_slave_config *slave_config)
++{
++ struct imx_dma_data *filter_data = dma_data->filter_data;
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ slave_config->dst_addr = dma_data->addr;
++ slave_config->dst_maxburst = dma_data->maxburst;
++ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
++ slave_config->dst_addr_width = dma_data->addr_width;
++ } else {
++ slave_config->src_addr = dma_data->addr;
++ slave_config->src_maxburst = dma_data->maxburst;
++ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
++ slave_config->src_addr_width = dma_data->addr_width;
++ }
++
++ slave_config->slave_id = dma_data->slave_id;
++
++ /*
++	 * In DMA binding mode there is no filter_data, so the dma_request
++	 * fields need to be set to zero.
++ */
++ if (filter_data) {
++ slave_config->dma_request0 = filter_data->dma_request0;
++ slave_config->dma_request1 = filter_data->dma_request1;
++ } else {
++ slave_config->dma_request0 = 0;
++ slave_config->dma_request1 = 0;
++ }
++}
++
++static int imx_pcm_dma_prepare_slave_config(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_dmaengine_dai_dma_data *dma_data;
++ int ret;
++
++ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
++
++ ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
++ if (ret)
++ return ret;
++
++ imx_pcm_dma_set_config_from_dai_data(substream, dma_data,
++ slave_config);
++
++ return 0;
++}
++
+ static const struct snd_dmaengine_pcm_config imx_dmaengine_pcm_config = {
+ .pcm_hardware = &imx_pcm_hardware,
+- .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
++ .prepare_slave_config = imx_pcm_dma_prepare_slave_config,
+ .compat_filter_fn = filter,
+- .prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
++ .prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
+ };
+
+-int imx_pcm_dma_init(struct platform_device *pdev)
++int imx_pcm_dma_init(struct platform_device *pdev, unsigned int flags, size_t size)
+ {
+- return devm_snd_dmaengine_pcm_register(&pdev->dev,
+- &imx_dmaengine_pcm_config,
+- SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
+- SND_DMAENGINE_PCM_FLAG_COMPAT);
++ struct snd_dmaengine_pcm_config *config;
++ struct snd_pcm_hardware *pcm_hardware;
++
++ config = devm_kzalloc(&pdev->dev,
++ sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
++ *config = imx_dmaengine_pcm_config;
++ if (size)
++ config->prealloc_buffer_size = size;
++
++ pcm_hardware = devm_kzalloc(&pdev->dev,
++ sizeof(struct snd_pcm_hardware), GFP_KERNEL);
++ *pcm_hardware = imx_pcm_hardware;
++ if (size)
++ pcm_hardware->buffer_bytes_max = size;
++
++ config->pcm_hardware = pcm_hardware;
++
++ return devm_snd_dmaengine_pcm_register(&pdev->dev, config, flags);
+ }
+ EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
+
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-pcm.h linux-3.14.40/sound/soc/fsl/imx-pcm.h
+--- linux-3.14.40.orig/sound/soc/fsl/imx-pcm.h 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/imx-pcm.h 2015-05-01 14:58:06.815427001 -0500
+@@ -18,13 +18,17 @@
+ /*
+ * Do not change this as the FIQ handler depends on this size
+ */
++#define IMX_DEFAULT_DMABUF_SIZE (256 * 1024)
+ #define IMX_SSI_DMABUF_SIZE (64 * 1024)
++#define IMX_SPDIF_DMABUF_SIZE (64 * 1024)
++#define IMX_ESAI_DMABUF_SIZE (256 * 1024)
++#define IMX_ASRC_DMABUF_SIZE (256 * 1024)
+
+ static inline void
+ imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
+ int dma, enum sdma_peripheral_type peripheral_type)
+ {
+- dma_data->dma_request = dma;
++ dma_data->dma_request0 = dma;
+ dma_data->priority = DMA_PRIO_HIGH;
+ dma_data->peripheral_type = peripheral_type;
+ }
+@@ -39,9 +43,10 @@
+ };
+
+ #if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
+-int imx_pcm_dma_init(struct platform_device *pdev);
++int imx_pcm_dma_init(struct platform_device *pdev, unsigned int flags, size_t size);
+ #else
+-static inline int imx_pcm_dma_init(struct platform_device *pdev)
++static inline int imx_pcm_dma_init(struct platform_device *pdev,
++ unsigned int flags, size_t size)
+ {
+ return -ENODEV;
+ }
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-spdif.c linux-3.14.40/sound/soc/fsl/imx-spdif.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-spdif.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/imx-spdif.c 2015-05-01 14:58:06.815427001 -0500
+@@ -65,14 +65,15 @@
+ if (ret)
+ goto end;
+
++ platform_set_drvdata(pdev, &data->card);
++ snd_soc_card_set_drvdata(&data->card, data);
++
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
+ goto end;
+ }
+
+- platform_set_drvdata(pdev, data);
+-
+ end:
+ if (spdif_np)
+ of_node_put(spdif_np);
+@@ -90,6 +91,7 @@
+ .driver = {
+ .name = "imx-spdif",
+ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
+ .of_match_table = imx_spdif_dt_ids,
+ },
+ .probe = imx_spdif_audio_probe,
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-ssi.c linux-3.14.40/sound/soc/fsl/imx-ssi.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-ssi.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/imx-ssi.c 2015-05-01 14:58:06.823427001 -0500
+@@ -602,7 +602,8 @@
+ ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
+
+ ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+- ssi->dma_init = imx_pcm_dma_init(pdev);
++ ssi->dma_init = imx_pcm_dma_init(pdev, SND_DMAENGINE_PCM_FLAG_NO_RESIDUE,
++ IMX_SSI_DMABUF_SIZE);
+
+ if (ssi->fiq_init && ssi->dma_init) {
+ ret = ssi->fiq_init;
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/imx-wm8962.c linux-3.14.40/sound/soc/fsl/imx-wm8962.c
+--- linux-3.14.40.orig/sound/soc/fsl/imx-wm8962.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/imx-wm8962.c 2015-05-01 14:58:06.823427001 -0500
+@@ -1,9 +1,9 @@
+ /*
+- * Copyright 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Based on imx-sgtl5000.c
+- * Copyright 2012 Freescale Semiconductor, Inc.
+- * Copyright 2012 Linaro Ltd.
++ * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+@@ -16,9 +16,12 @@
+ #include <linux/module.h>
+ #include <linux/of_platform.h>
+ #include <linux/i2c.h>
++#include <linux/of_gpio.h>
+ #include <linux/slab.h>
++#include <linux/gpio.h>
+ #include <linux/clk.h>
+ #include <sound/soc.h>
++#include <sound/jack.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc-dapm.h>
+ #include <linux/pinctrl/consumer.h>
+@@ -33,15 +36,134 @@
+ struct snd_soc_card card;
+ char codec_dai_name[DAI_NAME_SIZE];
+ char platform_name[DAI_NAME_SIZE];
+- struct clk *codec_clk;
+ unsigned int clk_frequency;
+ };
+
+ struct imx_priv {
++ int hp_gpio;
++ int hp_active_low;
++ int mic_gpio;
++ int mic_active_low;
++ bool amic_mono;
++ bool dmic_mono;
++ struct snd_soc_codec *codec;
+ struct platform_device *pdev;
++ struct snd_pcm_substream *first_stream;
++ struct snd_pcm_substream *second_stream;
+ };
+ static struct imx_priv card_priv;
+
++static struct snd_soc_jack imx_hp_jack;
++static struct snd_soc_jack_pin imx_hp_jack_pins[] = {
++ {
++ .pin = "Headphone Jack",
++ .mask = SND_JACK_HEADPHONE,
++ },
++};
++static struct snd_soc_jack_gpio imx_hp_jack_gpio = {
++ .name = "headphone detect",
++ .report = SND_JACK_HEADPHONE,
++ .debounce_time = 250,
++ .invert = 0,
++};
++
++static struct snd_soc_jack imx_mic_jack;
++static struct snd_soc_jack_pin imx_mic_jack_pins[] = {
++ {
++ .pin = "AMIC",
++ .mask = SND_JACK_MICROPHONE,
++ },
++};
++static struct snd_soc_jack_gpio imx_mic_jack_gpio = {
++ .name = "microphone detect",
++ .report = SND_JACK_MICROPHONE,
++ .debounce_time = 250,
++ .invert = 0,
++};
++
++static int hpjack_status_check(void)
++{
++ struct imx_priv *priv = &card_priv;
++ struct platform_device *pdev = priv->pdev;
++ char *envp[3], *buf;
++ int hp_status, ret;
++
++ if (!gpio_is_valid(priv->hp_gpio))
++ return 0;
++
++ hp_status = gpio_get_value(priv->hp_gpio) ? 1 : 0;
++
++ buf = kmalloc(32, GFP_ATOMIC);
++ if (!buf) {
++ dev_err(&pdev->dev, "%s kmalloc failed\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (hp_status != priv->hp_active_low) {
++ snprintf(buf, 32, "STATE=%d", 2);
++ snd_soc_dapm_disable_pin(&priv->codec->dapm, "Ext Spk");
++ ret = imx_hp_jack_gpio.report;
++ } else {
++ snprintf(buf, 32, "STATE=%d", 0);
++ snd_soc_dapm_enable_pin(&priv->codec->dapm, "Ext Spk");
++ ret = 0;
++ }
++
++ envp[0] = "NAME=headphone";
++ envp[1] = buf;
++ envp[2] = NULL;
++ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
++ kfree(buf);
++
++ return ret;
++}
++
++static int micjack_status_check(void)
++{
++ struct imx_priv *priv = &card_priv;
++ struct platform_device *pdev = priv->pdev;
++ char *envp[3], *buf;
++ int mic_status, ret;
++
++ if (!gpio_is_valid(priv->mic_gpio))
++ return 0;
++
++ mic_status = gpio_get_value(priv->mic_gpio) ? 1 : 0;
++
++ if ((mic_status != priv->mic_active_low && priv->amic_mono)
++ || (mic_status == priv->mic_active_low && priv->dmic_mono))
++ snd_soc_update_bits(priv->codec, WM8962_THREED1,
++ WM8962_ADC_MONOMIX_MASK, WM8962_ADC_MONOMIX);
++ else
++ snd_soc_update_bits(priv->codec, WM8962_THREED1,
++ WM8962_ADC_MONOMIX_MASK, 0);
++
++ buf = kmalloc(32, GFP_ATOMIC);
++ if (!buf) {
++ dev_err(&pdev->dev, "%s kmalloc failed\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (mic_status != priv->mic_active_low) {
++ snprintf(buf, 32, "STATE=%d", 2);
++ snd_soc_dapm_disable_pin(&priv->codec->dapm, "DMIC");
++ ret = imx_mic_jack_gpio.report;
++ } else {
++ snprintf(buf, 32, "STATE=%d", 0);
++ snd_soc_dapm_enable_pin(&priv->codec->dapm, "DMIC");
++ ret = 0;
++ }
++
++ envp[0] = "NAME=microphone";
++ envp[1] = buf;
++ envp[2] = NULL;
++ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
++ kfree(buf);
++
++ return ret;
++}
++
++
+ static const struct snd_soc_dapm_widget imx_wm8962_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+@@ -49,14 +171,57 @@
+ SND_SOC_DAPM_MIC("DMIC", NULL),
+ };
+
+-static int sample_rate = 44100;
+-static snd_pcm_format_t sample_format = SNDRV_PCM_FORMAT_S16_LE;
+-
+ static int imx_hifi_hw_params(struct snd_pcm_substream *substream,
+- struct snd_pcm_hw_params *params)
++ struct snd_pcm_hw_params *params)
+ {
+- sample_rate = params_rate(params);
+- sample_format = params_format(params);
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_dai *codec_dai = rtd->codec_dai;
++ struct imx_priv *priv = &card_priv;
++ struct device *dev = &priv->pdev->dev;
++ struct snd_soc_card *card = codec_dai->codec->card;
++ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
++ unsigned int sample_rate = params_rate(params);
++ snd_pcm_format_t sample_format = params_format(params);
++ u32 dai_format, pll_out;
++ int ret = 0;
++
++ if (!priv->first_stream) {
++ priv->first_stream = substream;
++ } else {
++ priv->second_stream = substream;
++
++		/* We assume the two substreams use the same params */
++ return 0;
++ }
++
++ dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_CBM_CFM;
++
++ /* set codec DAI configuration */
++ ret = snd_soc_dai_set_fmt(codec_dai, dai_format);
++ if (ret) {
++ dev_err(dev, "failed to set codec dai fmt: %d\n", ret);
++ return ret;
++ }
++
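++	/*
++	 * Run the FLL at a fixed multiple of the sample rate, e.g. 48 kHz
++	 * S16_LE gives 48000 * 256 = 12.288 MHz.
++	 */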
++ if (sample_format == SNDRV_PCM_FORMAT_S24_LE)
++ pll_out = sample_rate * 384;
++ else
++ pll_out = sample_rate * 256;
++
++ ret = snd_soc_dai_set_pll(codec_dai, WM8962_FLL, WM8962_FLL_MCLK,
++ data->clk_frequency, pll_out);
++ if (ret) {
++ dev_err(dev, "failed to start FLL: %d\n", ret);
++ return ret;
++ }
++
++ ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_FLL,
++ pll_out, SND_SOC_CLOCK_IN);
++ if (ret) {
++ dev_err(dev, "failed to set SYSCLK: %d\n", ret);
++ return ret;
++ }
+
+ return 0;
+ }
+@@ -133,6 +298,89 @@
+ return 0;
+ }
+
++static int imx_wm8962_gpio_init(struct snd_soc_pcm_runtime *rtd)
++{
++ struct snd_soc_codec *codec = rtd->codec;
++ struct imx_priv *priv = &card_priv;
++
++ priv->codec = codec;
++
++ if (gpio_is_valid(priv->hp_gpio)) {
++ imx_hp_jack_gpio.gpio = priv->hp_gpio;
++ imx_hp_jack_gpio.jack_status_check = hpjack_status_check;
++
++ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE, &imx_hp_jack);
++ snd_soc_jack_add_pins(&imx_hp_jack,
++ ARRAY_SIZE(imx_hp_jack_pins), imx_hp_jack_pins);
++ snd_soc_jack_add_gpios(&imx_hp_jack, 1, &imx_hp_jack_gpio);
++ }
++
++ if (gpio_is_valid(priv->mic_gpio)) {
++ imx_mic_jack_gpio.gpio = priv->mic_gpio;
++ imx_mic_jack_gpio.jack_status_check = micjack_status_check;
++
++ snd_soc_jack_new(codec, "AMIC", SND_JACK_MICROPHONE, &imx_mic_jack);
++ snd_soc_jack_add_pins(&imx_mic_jack,
++ ARRAY_SIZE(imx_mic_jack_pins), imx_mic_jack_pins);
++ snd_soc_jack_add_gpios(&imx_mic_jack, 1, &imx_mic_jack_gpio);
++ } else if (priv->amic_mono || priv->dmic_mono) {
++ /*
++		 * Permanently set the monomix bit if only one microphone
++		 * is present on the board and it needs monomix.
++ */
++ snd_soc_update_bits(priv->codec, WM8962_THREED1,
++ WM8962_ADC_MONOMIX_MASK, WM8962_ADC_MONOMIX);
++ }
++
++ return 0;
++}
++
++static ssize_t show_headphone(struct device_driver *dev, char *buf)
++{
++ struct imx_priv *priv = &card_priv;
++ int hp_status;
++
++ if (!gpio_is_valid(priv->hp_gpio)) {
++ strcpy(buf, "no detect gpio connected\n");
++ return strlen(buf);
++ }
++
++ /* Check if headphone is plugged in */
++ hp_status = gpio_get_value(priv->hp_gpio) ? 1 : 0;
++
++ if (hp_status != priv->hp_active_low)
++ strcpy(buf, "headphone\n");
++ else
++ strcpy(buf, "speaker\n");
++
++ return strlen(buf);
++}
++
++static DRIVER_ATTR(headphone, S_IRUGO | S_IWUSR, show_headphone, NULL);
++
++static ssize_t show_mic(struct device_driver *dev, char *buf)
++{
++ struct imx_priv *priv = &card_priv;
++ int mic_status;
++
++ if (!gpio_is_valid(priv->mic_gpio)) {
++ strcpy(buf, "no detect gpio connected\n");
++ return strlen(buf);
++ }
++
++ /* Check if analog microphone is plugged in */
++ mic_status = gpio_get_value(priv->mic_gpio) ? 1 : 0;
++
++ if (mic_status != priv->mic_active_low)
++ strcpy(buf, "amic\n");
++ else
++ strcpy(buf, "dmic\n");
++
++ return strlen(buf);
++}
++
++static DRIVER_ATTR(microphone, S_IRUGO | S_IWUSR, show_mic, NULL);
++
+ static int imx_wm8962_late_probe(struct snd_soc_card *card)
+ {
+ struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+@@ -157,6 +405,7 @@
+ struct imx_priv *priv = &card_priv;
+ struct i2c_client *codec_dev;
+ struct imx_wm8962_data *data;
++ struct clk *codec_clk = NULL;
+ int int_port, ext_port;
+ int ret;
+
+@@ -219,25 +468,31 @@
+ goto fail;
+ }
+
++ priv->first_stream = NULL;
++ priv->second_stream = NULL;
++
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+- data->codec_clk = devm_clk_get(&codec_dev->dev, NULL);
+- if (IS_ERR(data->codec_clk)) {
+- ret = PTR_ERR(data->codec_clk);
++ codec_clk = devm_clk_get(&codec_dev->dev, NULL);
++ if (IS_ERR(codec_clk)) {
++ ret = PTR_ERR(codec_clk);
+ dev_err(&codec_dev->dev, "failed to get codec clk: %d\n", ret);
+ goto fail;
+ }
+
+- data->clk_frequency = clk_get_rate(data->codec_clk);
+- ret = clk_prepare_enable(data->codec_clk);
+- if (ret) {
+- dev_err(&codec_dev->dev, "failed to enable codec clk: %d\n", ret);
+- goto fail;
+- }
++ data->clk_frequency = clk_get_rate(codec_clk);
++
++ priv->amic_mono = of_property_read_bool(codec_np, "amic-mono");
++ priv->dmic_mono = of_property_read_bool(codec_np, "dmic-mono");
++
++ priv->hp_gpio = of_get_named_gpio_flags(np, "hp-det-gpios", 0,
++ (enum of_gpio_flags *)&priv->hp_active_low);
++ priv->mic_gpio = of_get_named_gpio_flags(np, "mic-det-gpios", 0,
++ (enum of_gpio_flags *)&priv->mic_active_low);
+
+ data->dai.name = "HiFi";
+ data->dai.stream_name = "HiFi";
+@@ -246,23 +501,23 @@
+ data->dai.cpu_dai_name = dev_name(&ssi_pdev->dev);
+ data->dai.platform_of_node = ssi_np;
+ data->dai.ops = &imx_hifi_ops;
++ data->dai.init = &imx_wm8962_gpio_init;
+ data->dai.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM;
+
+ data->card.dev = &pdev->dev;
+ ret = snd_soc_of_parse_card_name(&data->card, "model");
+ if (ret)
+- goto clk_fail;
++ goto fail;
+ ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
+ if (ret)
+- goto clk_fail;
++ goto fail;
+ data->card.num_links = 1;
+ data->card.dai_link = &data->dai;
+ data->card.dapm_widgets = imx_wm8962_dapm_widgets;
+ data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
+
+ data->card.late_probe = imx_wm8962_late_probe;
+- data->card.set_bias_level = imx_wm8962_set_bias_level;
+
+ platform_set_drvdata(pdev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+@@ -270,16 +525,31 @@
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+- goto clk_fail;
++ goto fail;
+ }
+
+- of_node_put(ssi_np);
+- of_node_put(codec_np);
++ if (gpio_is_valid(priv->hp_gpio)) {
++ ret = driver_create_file(pdev->dev.driver, &driver_attr_headphone);
++ if (ret) {
++ dev_err(&pdev->dev, "create hp attr failed (%d)\n", ret);
++ goto fail_hp;
++ }
++ }
+
+- return 0;
++ if (gpio_is_valid(priv->mic_gpio)) {
++ ret = driver_create_file(pdev->dev.driver, &driver_attr_microphone);
++ if (ret) {
++ dev_err(&pdev->dev, "create mic attr failed (%d)\n", ret);
++ goto fail_mic;
++ }
++ }
++
++ goto fail;
+
+-clk_fail:
+- clk_disable_unprepare(data->codec_clk);
++fail_mic:
++ driver_remove_file(pdev->dev.driver, &driver_attr_headphone);
++fail_hp:
++ snd_soc_unregister_card(&data->card);
+ fail:
+ if (ssi_np)
+ of_node_put(ssi_np);
+@@ -291,11 +561,8 @@
+
+ static int imx_wm8962_remove(struct platform_device *pdev)
+ {
+- struct snd_soc_card *card = platform_get_drvdata(pdev);
+- struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
+-
+- if (!IS_ERR(data->codec_clk))
+- clk_disable_unprepare(data->codec_clk);
++ driver_remove_file(pdev->dev.driver, &driver_attr_microphone);
++ driver_remove_file(pdev->dev.driver, &driver_attr_headphone);
+
+ return 0;
+ }
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/Kconfig linux-3.14.40/sound/soc/fsl/Kconfig
+--- linux-3.14.40.orig/sound/soc/fsl/Kconfig 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/Kconfig 2015-05-01 14:58:06.823427001 -0500
+@@ -11,6 +11,12 @@
+ config SND_SOC_FSL_ESAI
+ tristate
+
++config SND_SOC_FSL_ASRC
++ tristate
++
++config SND_SOC_FSL_HDMI
++ tristate
++
+ config SND_SOC_FSL_UTILS
+ tristate
+
+@@ -126,6 +132,11 @@
+ tristate
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
++config SND_SOC_IMX_HDMI_DMA
++ bool
++ select SND_SOC_GENERIC_DMAENGINE_PCM
++ select SND_SOC_IMX_PCM_DMA
++
+ config SND_SOC_IMX_AUDMUX
+ tristate
+
+@@ -178,6 +189,18 @@
+ Enable I2S based access to the TLV320AIC23B codec attached
+ to the SSI interface
+
++config SND_SOC_IMX_CS42888
++ tristate "SoC Audio support for i.MX boards with cs42888"
++ depends on OF && I2C
++ select SND_SOC_CS42888
++ select SND_SOC_IMX_PCM_DMA
++ select SND_SOC_FSL_ESAI
++ select SND_SOC_FSL_UTILS
++ help
++ SoC Audio support for i.MX boards with cs42888
++ Say Y if you want to add support for SoC audio on an i.MX board with
++ a cs42888 codec.
++
+ config SND_SOC_IMX_WM8962
+ tristate "SoC Audio support for i.MX boards with wm8962"
+ depends on OF && I2C
+@@ -210,6 +233,17 @@
+ Say Y if you want to add support for SoC audio on an i.MX board with
+ a S/DPDIF.
+
++config SND_SOC_IMX_HDMI
++ tristate "SoC Audio support for i.MX boards with HDMI port"
++ depends on MFD_MXC_HDMI
++ select SND_SOC_IMX_HDMI_DMA
++ select SND_SOC_FSL_HDMI
++ select SND_SOC_HDMI_CODEC
++ help
++	  SoC Audio support for i.MX boards with HDMI audio.
++	  Say Y if you want to add support for SoC audio on an i.MX board
++	  with the on-chip HDMI transmitter.
++
+ config SND_SOC_IMX_MC13783
+ tristate "SoC Audio support for I.MX boards with mc13783"
+ depends on MFD_MC13XXX && ARM
+diff -Nur linux-3.14.40.orig/sound/soc/fsl/Makefile linux-3.14.40/sound/soc/fsl/Makefile
+--- linux-3.14.40.orig/sound/soc/fsl/Makefile 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/fsl/Makefile 2015-05-01 14:58:06.823427001 -0500
+@@ -14,13 +14,19 @@
+ snd-soc-fsl-sai-objs := fsl_sai.o
+ snd-soc-fsl-ssi-objs := fsl_ssi.o
+ snd-soc-fsl-spdif-objs := fsl_spdif.o
++snd-soc-fsl-hdmi-objs := fsl_hdmi.o
+ snd-soc-fsl-esai-objs := fsl_esai.o
++snd-soc-fsl-asrc-pcm-objs := fsl_asrc_pcm.o
++snd-soc-fsl-asrc-objs := fsl_asrc.o
+ snd-soc-fsl-utils-objs := fsl_utils.o
+ snd-soc-fsl-dma-objs := fsl_dma.o
+ obj-$(CONFIG_SND_SOC_FSL_SAI) += snd-soc-fsl-sai.o
+ obj-$(CONFIG_SND_SOC_FSL_SSI) += snd-soc-fsl-ssi.o
+ obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
++obj-$(CONFIG_SND_SOC_FSL_HDMI) += snd-soc-fsl-hdmi.o
+ obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
++obj-$(CONFIG_SND_SOC_FSL_ASRC) += snd-soc-fsl-asrc-pcm.o
++obj-$(CONFIG_SND_SOC_FSL_ASRC) += snd-soc-fsl-asrc.o
+ obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
+ obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
+
+@@ -41,22 +47,27 @@
+
+ obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += imx-pcm-fiq.o
+ obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += imx-pcm-dma.o
++obj-$(CONFIG_SND_SOC_IMX_HDMI_DMA) += imx-hdmi-dma.o
+
+ # i.MX Machine Support
+ snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o
+ snd-soc-phycore-ac97-objs := phycore-ac97.o
+ snd-soc-mx27vis-aic32x4-objs := mx27vis-aic32x4.o
+ snd-soc-wm1133-ev1-objs := wm1133-ev1.o
++snd-soc-imx-cs42888-objs := imx-cs42888.o
+ snd-soc-imx-sgtl5000-objs := imx-sgtl5000.o
+ snd-soc-imx-wm8962-objs := imx-wm8962.o
+ snd-soc-imx-spdif-objs := imx-spdif.o
++snd-soc-imx-hdmi-objs := imx-hdmi.o
+ snd-soc-imx-mc13783-objs := imx-mc13783.o
+
+ obj-$(CONFIG_SND_SOC_EUKREA_TLV320) += snd-soc-eukrea-tlv320.o
+ obj-$(CONFIG_SND_SOC_PHYCORE_AC97) += snd-soc-phycore-ac97.o
+ obj-$(CONFIG_SND_SOC_MX27VIS_AIC32X4) += snd-soc-mx27vis-aic32x4.o
+ obj-$(CONFIG_SND_MXC_SOC_WM1133_EV1) += snd-soc-wm1133-ev1.o
++obj-$(CONFIG_SND_SOC_IMX_CS42888) += snd-soc-imx-cs42888.o
+ obj-$(CONFIG_SND_SOC_IMX_SGTL5000) += snd-soc-imx-sgtl5000.o
+ obj-$(CONFIG_SND_SOC_IMX_WM8962) += snd-soc-imx-wm8962.o
+ obj-$(CONFIG_SND_SOC_IMX_SPDIF) += snd-soc-imx-spdif.o
++obj-$(CONFIG_SND_SOC_IMX_HDMI) += snd-soc-imx-hdmi.o
+ obj-$(CONFIG_SND_SOC_IMX_MC13783) += snd-soc-imx-mc13783.o
+diff -Nur linux-3.14.40.orig/sound/soc/soc-pcm.c linux-3.14.40/sound/soc/soc-pcm.c
+--- linux-3.14.40.orig/sound/soc/soc-pcm.c 2015-04-29 03:32:58.000000000 -0500
++++ linux-3.14.40/sound/soc/soc-pcm.c 2015-05-01 14:58:06.831427001 -0500
+@@ -945,7 +945,7 @@
+ }
+ }
+
+- dev_err(card->dev, "ASoC: can't get %s BE for %s\n",
++ dev_dbg(card->dev, "ASoC: can't get %s BE for %s\n",
+ stream ? "capture" : "playback", widget->name);
+ return NULL;
+ }
+@@ -1062,7 +1062,7 @@
+ /* is there a valid BE rtd for this widget */
+ be = dpcm_get_be(card, list->widgets[i], stream);
+ if (!be) {
+- dev_err(fe->dev, "ASoC: no BE found for %s\n",
++ dev_dbg(fe->dev, "ASoC: no BE found for %s\n",
+ list->widgets[i]->name);
+ continue;
+ }